code (string, 31–1.05M chars) | apis (list) | extract_api (string, 97–1.91M chars) |
---|---|---|
import os
import tempfile
import numpy as np
import pytest
import calliope
def verify_solution_integrity(model_solution, solution_from_disk, tempdir):
# Check whether the two are the same
assert np.allclose(model_solution['e_cap'], solution_from_disk['e_cap'])
# Check that config AttrDict has been deserialized
assert(solution_from_disk.attrs['config_run'].output.path == tempdir)
class TestSave:
@pytest.fixture(scope='module')
def model(self):
model = calliope.Model()
model.run()
return model
def test_save_netcdf(self, model):
with tempfile.TemporaryDirectory() as tempdir:
model.config_run.set_key('output.path', tempdir)
model.save_solution('netcdf')
# Try reading solution back in
sol_file = os.path.join(tempdir, 'solution.nc')
solution_from_disk = calliope.read.read_netcdf(sol_file)
solution_from_disk.close() # so that temp dir can be deleted
verify_solution_integrity(model.solution, solution_from_disk, tempdir)
def test_save_csv(self, model):
with tempfile.TemporaryDirectory() as tempdir:
model.config_run.set_key('output.path', tempdir)
model.save_solution('csv')
# Try reading solution back in
solution_from_disk = calliope.read.read_csv(tempdir)
verify_solution_integrity(model.solution, solution_from_disk, tempdir)
|
[
"tempfile.TemporaryDirectory",
"calliope.read.read_netcdf",
"numpy.allclose",
"pytest.fixture",
"calliope.read.read_csv",
"calliope.Model",
"os.path.join"
] |
[((208, 273), 'numpy.allclose', 'np.allclose', (["model_solution['e_cap']", "solution_from_disk['e_cap']"], {}), "(model_solution['e_cap'], solution_from_disk['e_cap'])\n", (219, 273), True, 'import numpy as np\n'), ((435, 465), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (449, 465), False, 'import pytest\n'), ((503, 519), 'calliope.Model', 'calliope.Model', ([], {}), '()\n', (517, 519), False, 'import calliope\n'), ((614, 643), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (641, 643), False, 'import tempfile\n'), ((826, 862), 'os.path.join', 'os.path.join', (['tempdir', '"""solution.nc"""'], {}), "(tempdir, 'solution.nc')\n", (838, 862), False, 'import os\n'), ((896, 931), 'calliope.read.read_netcdf', 'calliope.read.read_netcdf', (['sol_file'], {}), '(sol_file)\n', (921, 931), False, 'import calliope\n'), ((1136, 1165), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1163, 1165), False, 'import tempfile\n'), ((1355, 1386), 'calliope.read.read_csv', 'calliope.read.read_csv', (['tempdir'], {}), '(tempdir)\n', (1377, 1386), False, 'import calliope\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gaussian noise policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from caql import policy
class GaussianNoisePolicy(policy.BasePolicy):
"""Implementation for gaussian noise policy."""
def __init__(self, greedy_policy, sigma, sigma_decay, sigma_min):
"""Creates an epsilon greedy policy.
Args:
greedy_policy: policy.BasePolicy. The policy that is used to compute a
greedy action.
sigma: float. Standard deviation for a gaussian distribution.
sigma_decay: float. Decay rate for the sigma.
sigma_min: float. The minimum value of the sigma.
"""
if not 0 <= sigma <= 1.0:
raise ValueError('sigma should be in [0.0, 1.0]')
self._greedy_policy = greedy_policy
self._sigma = sigma
self._sigma_decay = sigma_decay
self._sigma_min = sigma_min
@property
def sigma(self):
return self._sigma
def _action(self, state, use_action_function, batch_mode=False):
mean_action = self._greedy_policy.action(state, use_action_function,
batch_mode)
if mean_action is None:
return None
batch_action_dim = np.shape(mean_action)
# Match the scale of noise value to action value.
noise_exploration = (
self._sigma * self._greedy_policy.action_spec.maximum *
np.random.randn(*batch_action_dim))
return mean_action + noise_exploration
def _update_params(self):
self._sigma = max(self._sigma * self._sigma_decay, self._sigma_min)
def _params_debug_str(self):
return 'sigma: %.3f' % self._sigma
|
[
"numpy.shape",
"numpy.random.randn"
] |
[((1836, 1857), 'numpy.shape', 'np.shape', (['mean_action'], {}), '(mean_action)\n', (1844, 1857), True, 'import numpy as np\n'), ((2010, 2044), 'numpy.random.randn', 'np.random.randn', (['*batch_action_dim'], {}), '(*batch_action_dim)\n', (2025, 2044), True, 'import numpy as np\n')]
|
from matplotlib.finance import quotes_historical_yahoo
import sys
from datetime import date
import matplotlib.pyplot as plt
import numpy as np
today = date.today()
start = (today.year - 1, today.month, today.day)
symbol = 'DISH'
if len(sys.argv) == 2:
symbol = sys.argv[1]
quotes = quotes_historical_yahoo(symbol, start, today)
quotes = np.array(quotes)
close = quotes.T[4]
volume = quotes.T[5]
ret = np.diff(close)/close[:-1]
volchange = np.diff(volume)/volume[:-1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(ret, volchange, c=ret * 100, s=volchange * 100, alpha=0.5)
ax.set_title('Close and volume returns')
ax.grid(True)
plt.show()
|
[
"matplotlib.finance.quotes_historical_yahoo",
"matplotlib.pyplot.show",
"datetime.date.today",
"matplotlib.pyplot.figure",
"numpy.diff",
"numpy.array"
] |
[((152, 164), 'datetime.date.today', 'date.today', ([], {}), '()\n', (162, 164), False, 'from datetime import date\n'), ((289, 334), 'matplotlib.finance.quotes_historical_yahoo', 'quotes_historical_yahoo', (['symbol', 'start', 'today'], {}), '(symbol, start, today)\n', (312, 334), False, 'from matplotlib.finance import quotes_historical_yahoo\n'), ((344, 360), 'numpy.array', 'np.array', (['quotes'], {}), '(quotes)\n', (352, 360), True, 'import numpy as np\n'), ((481, 493), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (491, 493), True, 'import matplotlib.pyplot as plt\n'), ((646, 656), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (654, 656), True, 'import matplotlib.pyplot as plt\n'), ((408, 422), 'numpy.diff', 'np.diff', (['close'], {}), '(close)\n', (415, 422), True, 'import numpy as np\n'), ((446, 461), 'numpy.diff', 'np.diff', (['volume'], {}), '(volume)\n', (453, 461), True, 'import numpy as np\n')]
|
import numpy as np
def test_rotation():
board_size = 3
states = np.array([[[[1, 2, 0],
[2, 1, 0],
[0, 1, 2]]],
[[[0, 3, 4],
[0, 0, 0],
[2, 1, 0]]]])
visit_counts = np.array([[0, 0, 3,
0, 0, 2,
1, 0, 0],
[5, 0, 0,
6, 7, 8,
0, 0, 9]])
new_states = []
new_visit_counts = []
for state, visit_count in zip(states, visit_counts):
for i in range(4):
rotated_state = np.rot90(state, i, axes=(1, 2))
rotated_visit_count = np.rot90(visit_count.reshape(board_size, board_size), i, axes=(0, 1))
new_states.append(rotated_state)
new_visit_counts.append(rotated_visit_count)
new_states.append(np.flip(rotated_state, 2))
new_visit_counts.append(np.fliplr(rotated_visit_count))
for i, (state, visit_count) in enumerate(zip(new_states, new_visit_counts)):
print("case: ", i)
for i in range(3):
for j in range(3):
print(state[0][i][j], end=' ')
print('')
print('')
for i in range(3):
for j in range(3):
print(visit_count[i][j], end=' ')
print('')
print('')
if __name__ == "__main__":
test_rotation()
|
[
"numpy.fliplr",
"numpy.rot90",
"numpy.array",
"numpy.flip"
] |
[((74, 163), 'numpy.array', 'np.array', (['[[[[1, 2, 0], [2, 1, 0], [0, 1, 2]]], [[[0, 3, 4], [0, 0, 0], [2, 1, 0]]]]'], {}), '([[[[1, 2, 0], [2, 1, 0], [0, 1, 2]]], [[[0, 3, 4], [0, 0, 0], [2, \n 1, 0]]]])\n', (82, 163), True, 'import numpy as np\n'), ((301, 369), 'numpy.array', 'np.array', (['[[0, 0, 3, 0, 0, 2, 1, 0, 0], [5, 0, 0, 6, 7, 8, 0, 0, 9]]'], {}), '([[0, 0, 3, 0, 0, 2, 1, 0, 0], [5, 0, 0, 6, 7, 8, 0, 0, 9]])\n', (309, 369), True, 'import numpy as np\n'), ((679, 710), 'numpy.rot90', 'np.rot90', (['state', 'i'], {'axes': '(1, 2)'}), '(state, i, axes=(1, 2))\n', (687, 710), True, 'import numpy as np\n'), ((948, 973), 'numpy.flip', 'np.flip', (['rotated_state', '(2)'], {}), '(rotated_state, 2)\n', (955, 973), True, 'import numpy as np\n'), ((1011, 1041), 'numpy.fliplr', 'np.fliplr', (['rotated_visit_count'], {}), '(rotated_visit_count)\n', (1020, 1041), True, 'import numpy as np\n')]
|
import numpy as np
import jetyak
import jviz
import sensors
import shapefile
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import utm
from mpl_toolkits.basemap import Basemap
import mpl_toolkits.basemap as mb
from scipy import stats
def lat2str(deg):
min = 60 * (deg - np.floor(deg))
deg = np.floor(deg)
dir = 'N'
if deg < 0:
if min != 0.0:
deg += 1.0
min -= 60.0
dir = 'S'
return ("%d$\degree$ %g' N") % (np.abs(deg),np.abs(min))
def lon2str(deg):
min = 60 * (deg - np.floor(deg))
deg = np.floor(deg)
dir = 'E'
if deg < 0:
if min != 0.0:
deg += 1.0
min -= 60.0
dir = 'W'
return ("%d$\degree$ %g' W") % (np.abs(deg),np.abs(min))
if __name__ == '__main__':
#69.121595, -105.019215
base = Basemap(llcrnrlon=-170, llcrnrlat=0, urcrnrlon=-30, urcrnrlat=80,
resolution='l', projection='merc', suppress_ticks=True)
# base = Basemap(llcrnrlon=-120, llcrnrlat=68, urcrnrlon=-100, urcrnrlat=74,
# resolution='h', projection='merc', suppress_ticks=True)
# base.arcgisimage(service='World_Topo_Map', xpixels=1500, verbose=True)
base.drawcoastlines()
base.drawcountries()
# base.drawlakes()
# base.fillcontinents(color='coral',lake_color='aqua')
# base.drawlsmask(land_color='coral', ocean_color='aqua', lakes=True)
# base.drawparallels(np.arange(-80.,81.,2.),labels=[True,True,False,False],dashes=[2,2],color='white')
# base.drawmeridians(np.arange(-180.,181.,10.),labels=[True,True,True,False],dashes=[2,2],color='white')
# base.drawmapboundary(fill_color='aqua')
# base.drawrivers(linewidth=0.5, linestyle='solid', color='blue')
base.drawparallels(np.arange(-90.,91.,10.),labels=[True,True,False,False],dashes=[2,2],color='white')
base.drawmeridians(np.arange(-180.,181.,30.),labels=[False,False,False,True],dashes=[2,2],color='white')
base.drawparallels(np.arange(66.,67., 100.),labels=[False,False,False,True],dashes=[2,2],color='red')
base.drawstates(linewidth=2., color='grey')
base.bluemarble()
plt.show()
# base.scatter(dock_reference[1], dock_reference[0], s=500, marker='*', label='Freshwater Creek Mouth', zorder=10, edgecolor='k', facecolor='r')
# for radius in [500*i for i in range(10)]:
# lats, lons = getCircle(dock_reference[0], dock_reference[1], radius)
# base.plot(lons, lats, c='grey')
# if radius == 0:
# pass
# # plt.gca().annotate('Embayment', xy=(lons[270], lats[270]+0.001), xytext=(lons[270]+0.0005, lats[270]+0.002), fontsize=22, ha='center')
# # plt.gca().annotate('Freshwater Creek Mouth', xy=(lons[270], lats[270]+0.0005), fontsize=10, ha='right')
# else:
# plt.gca().annotate(str(radius)+'m', xy=(lons[270], lats[270]+0.0003), fontsize=22, ha='center')
# colors = np.flip(plt.cm.viridis(np.linspace(0,1,5)), axis=0)
# for i, m in enumerate(jy.mission[0:5]):
# base.scatter(m['Longitude'], m['Latitude'], label=date_labels[i], s=1, c=colors[i], zorder=10-i, lw=0)
# lgnd = plt.legend(loc='upper left')
# for handle in lgnd.legendHandles[1:]:
# handle.set_sizes([200])
# ax = plt.gca()
# def xformat(x, pos=None): return lon2str(x)
# def yformat(x, pos=None): return lat2str(x)
# ax.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(xformat))
# ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(yformat))
# plt.show()
# plt.close()
|
[
"numpy.abs",
"matplotlib.pyplot.show",
"numpy.floor",
"numpy.arange",
"mpl_toolkits.basemap.Basemap"
] |
[((323, 336), 'numpy.floor', 'np.floor', (['deg'], {}), '(deg)\n', (331, 336), True, 'import numpy as np\n'), ((582, 595), 'numpy.floor', 'np.floor', (['deg'], {}), '(deg)\n', (590, 595), True, 'import numpy as np\n'), ((844, 969), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlon': '(-170)', 'llcrnrlat': '(0)', 'urcrnrlon': '(-30)', 'urcrnrlat': '(80)', 'resolution': '"""l"""', 'projection': '"""merc"""', 'suppress_ticks': '(True)'}), "(llcrnrlon=-170, llcrnrlat=0, urcrnrlon=-30, urcrnrlat=80,\n resolution='l', projection='merc', suppress_ticks=True)\n", (851, 969), False, 'from mpl_toolkits.basemap import Basemap\n'), ((2157, 2167), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2165, 2167), True, 'import matplotlib.pyplot as plt\n'), ((1785, 1813), 'numpy.arange', 'np.arange', (['(-90.0)', '(91.0)', '(10.0)'], {}), '(-90.0, 91.0, 10.0)\n', (1794, 1813), True, 'import numpy as np\n'), ((1891, 1921), 'numpy.arange', 'np.arange', (['(-180.0)', '(181.0)', '(30.0)'], {}), '(-180.0, 181.0, 30.0)\n', (1900, 1921), True, 'import numpy as np\n'), ((2000, 2028), 'numpy.arange', 'np.arange', (['(66.0)', '(67.0)', '(100.0)'], {}), '(66.0, 67.0, 100.0)\n', (2009, 2028), True, 'import numpy as np\n'), ((298, 311), 'numpy.floor', 'np.floor', (['deg'], {}), '(deg)\n', (306, 311), True, 'import numpy as np\n'), ((491, 502), 'numpy.abs', 'np.abs', (['deg'], {}), '(deg)\n', (497, 502), True, 'import numpy as np\n'), ((503, 514), 'numpy.abs', 'np.abs', (['min'], {}), '(min)\n', (509, 514), True, 'import numpy as np\n'), ((557, 570), 'numpy.floor', 'np.floor', (['deg'], {}), '(deg)\n', (565, 570), True, 'import numpy as np\n'), ((750, 761), 'numpy.abs', 'np.abs', (['deg'], {}), '(deg)\n', (756, 761), True, 'import numpy as np\n'), ((762, 773), 'numpy.abs', 'np.abs', (['min'], {}), '(min)\n', (768, 773), True, 'import numpy as np\n')]
|
import torch
import torch.utils.data as data
from torchvision.datasets.folder import has_file_allowed_extension, is_image_file, IMG_EXTENSIONS, pil_loader, accimage_loader,default_loader
from PIL import Image
import sys
import os
import os.path
import numpy as np
from random import shuffle
REGIONS_DICT={'Alabama': 'South', 'Arizona': 'SW',
'California': 'Pacific',
'Florida': 'South',
'Indiana': 'MW',
'Iowa': 'MW',
'Kansas': 'MW',
'Massachusetts': 'NE',
'Michigan': 'MW',
'Missouri': 'South',
'Montana': 'RM',
'New-York': 'MA',
'North-Carolina': 'South',
'Ohio': 'MW',
'Oklahoma': 'SW',
'Oregon': 'Pacific',
'Pennsylvania': 'MA',
'South-Carolina': 'South',
'South-Dakota': 'MW',
'Texas': 'SW',
'Utah': 'RM',
'Vermont': 'NE',
'Virginia': 'South',
'Washington': 'Pacific',
'Wyoming': 'RM'}
REGIONS_TO_IDX={'RM': 6,'MA': 1,'NE': 2,'South': 3, 'Pacific': 4, 'MW': 0 , 'SW': 5}
IDX_TO_REGIONS={ 6:'RM',1:'MA',2:'NE',3:'South',4: 'Pacific', 0:'MW', 5:'SW'}
def make_dataset(dir, class_to_idx, extensions, domains,start=1934):
images = []
meta = []
dir = os.path.expanduser(dir)
for target in sorted(os.listdir(dir)):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if has_file_allowed_extension(fname, extensions):
path = os.path.join(root, fname)
year=int(path.split('/')[-1].split('_')[0])
city=(path.split('/')[-1].split('_')[1])
region=REGIONS_DICT[city]
pivot_year=start+(year-start)//10*10
if (pivot_year, region) in domains:
item = (path, class_to_idx[target])
images.append(item)
meta.append([year,region])
return images, meta
class MNIST(data.Dataset):
def __init__(self, root, transform=None, target_transform=None,domains=[]):
extensions = IMG_EXTENSIONS
loader = default_loader
# classes, class_to_idx = self._find_classes(root)
# samples, self.meta = make_dataset(root, class_to_idx, extensions, domains)
# if len(samples) == 0:
# raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"
# "Supported extensions are: " + ",".join(extensions)))
self.root = root
X = np.load("{}/X.npy".format(self.root))
Y = np.load("{}/Y.npy".format(self.root))
A = np.load("{}/A.npy".format(self.root))
U = np.load("{}/U.npy".format(self.root))
# print(domains)
U_ = (U*6).astype('d')
indices = []
for d in domains:
# print(d)
indices += [i for i, x in enumerate(U_) if x == d[0]]
# print(len(indices))
self.X = X[indices]
self.Y = Y[indices]
self.U = U[indices]
self.A = A[indices]
self.loader = loader
# self.extensions = extensions
# self.classes = classes
# self.class_to_idx = class_to_idx
# self.samples = samples
# self.transform = transform
# self.target_transform = target_transform
# self.imgs = self.samples
def _find_classes(self, dir):
if sys.version_info >= (3, 5):
# Faster and available in Python 3.5 and above
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
# path, target = self.samples[index]
sample = self.X[index]
target = self.Y[index]
# print(sample.shape)
# if self.transform is not None:
# sample = self.transform(sample)
# if self.target_transform is not None:
# target = self.target_transform(target)
y,p = self.U[index], self.A[index]
return np.repeat(sample,3,axis=0).astype('f'), int(y*6), target
def get_meta(self):
return np.array(self.meta)
def __len__(self):
return len(self.X)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class MNISTSampler(torch.utils.data.sampler.Sampler):
r"""Base class for all Samplers.
Every Sampler subclass has to provide an __iter__ method, providing a way
to iterate over indices of dataset elements, and a __len__ method that
returns the length of the returned iterators.
"""
def __init__(self, data_source, bs):
self.data_source=data_source
self.meta=self.data_source.U
self.dict_meta={}
self.indeces={}
self.keys=[]
self.bs=bs
for idx, u in enumerate(self.meta):
try:
self.dict_meta[u].append(idx)
except:
self.dict_meta[u]=[idx]
self.keys.append(u)
self.indeces[u]=0
for idx in self.keys:
shuffle(self.dict_meta[idx])
def _sampling(self,idx, n):
if self.indeces[idx]+n>=len(self.dict_meta[idx]):
self.dict_meta[idx]=self.dict_meta[idx]+self.dict_meta[idx]
self.indeces[idx]=self.indeces[idx]+n
return self.dict_meta[idx][self.indeces[idx]-n:self.indeces[idx]]
def _shuffle(self):
order=np.random.randint(len(self.keys),size=(len(self.data_source)//(self.bs)))
sIdx=[]
for i in order:
sIdx=sIdx+self._sampling(self.keys[i],self.bs)
return np.array(sIdx)
def __iter__(self):
return iter(self._shuffle())
def __len__(self):
return len(self.data_source) // self.bs * self.bs
|
[
"os.path.expanduser",
"torchvision.datasets.folder.has_file_allowed_extension",
"os.path.isdir",
"random.shuffle",
"os.walk",
"numpy.array",
"os.path.join",
"os.listdir",
"os.scandir",
"numpy.repeat"
] |
[((1095, 1118), 'os.path.expanduser', 'os.path.expanduser', (['dir'], {}), '(dir)\n', (1113, 1118), False, 'import os\n'), ((1144, 1159), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (1154, 1159), False, 'import os\n'), ((1174, 1199), 'os.path.join', 'os.path.join', (['dir', 'target'], {}), '(dir, target)\n', (1186, 1199), False, 'import os\n'), ((4443, 4462), 'numpy.array', 'np.array', (['self.meta'], {}), '(self.meta)\n', (4451, 4462), True, 'import numpy as np\n'), ((6402, 6416), 'numpy.array', 'np.array', (['sIdx'], {}), '(sIdx)\n', (6410, 6416), True, 'import numpy as np\n'), ((1215, 1231), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (1228, 1231), False, 'import os\n'), ((1293, 1303), 'os.walk', 'os.walk', (['d'], {}), '(d)\n', (1300, 1303), False, 'import os\n'), ((5861, 5889), 'random.shuffle', 'shuffle', (['self.dict_meta[idx]'], {}), '(self.dict_meta[idx])\n', (5868, 5889), False, 'from random import shuffle\n'), ((1366, 1411), 'torchvision.datasets.folder.has_file_allowed_extension', 'has_file_allowed_extension', (['fname', 'extensions'], {}), '(fname, extensions)\n', (1392, 1411), False, 'from torchvision.datasets.folder import has_file_allowed_extension, is_image_file, IMG_EXTENSIONS, pil_loader, accimage_loader, default_loader\n'), ((1440, 1465), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (1452, 1465), False, 'import os\n'), ((3496, 3511), 'os.scandir', 'os.scandir', (['dir'], {}), '(dir)\n', (3506, 3511), False, 'import os\n'), ((3575, 3590), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (3585, 3590), False, 'import os\n'), ((4346, 4374), 'numpy.repeat', 'np.repeat', (['sample', '(3)'], {'axis': '(0)'}), '(sample, 3, axis=0)\n', (4355, 4374), True, 'import numpy as np\n'), ((3608, 3628), 'os.path.join', 'os.path.join', (['dir', 'd'], {}), '(dir, d)\n', (3620, 3628), False, 'import os\n')]
|
import random
import numpy as np
from bayesnet.network import Network
def hmc(model, call_args, parameter=None, sample_size=100, step_size=1e-3, n_step=10):
"""
Hamiltonian Monte Carlo sampling aka Hybrid Monte Carlo sampling
Parameters
----------
model : Network
bayesian network
call_args : tuple or dict
observations of the model
parameter : dict
dict of parameter to be sampled
sample_size : int
number of samples to be generated
step_size : float
update size of parameters
n_step : int
number of parameter update steps
Returns
-------
sample : dict of list of np.ndarray
samples from the model given observations
"""
if not isinstance(model, Network):
raise TypeError("model must be Network object")
if not isinstance(sample_size, int):
raise TypeError(f"sample_size must be int, not {type(sample_size)}")
if not isinstance(step_size, (int, float)):
raise TypeError(f"step_size must be float, not {type(step_size)}")
if not isinstance(n_step, int):
raise TypeError(f"n_step must be int, not {type(n_step)}")
def run_model():
model.clear()
if isinstance(call_args, tuple):
model(*call_args)
elif isinstance(call_args, dict):
model(**call_args)
else:
raise TypeError("call_args must be tuple or dict")
sample = dict()
previous = dict()
velocity = dict()
if parameter is not None:
if not isinstance(parameter, dict):
raise TypeError("parameter must be dict")
for key, p in parameter.items():
if p is not model.parameter[key]:
raise ValueError("parameter must be defined in the model")
variable = parameter
else:
variable = model.parameter
for key in variable:
sample[key] = []
for _ in range(sample_size):
run_model()
log_posterior = model.log_pdf()
log_posterior.backward()
kinetic_energy = 0
for key, v in variable.items():
previous[key] = v.value
velocity[key] = np.random.normal(size=v.shape)
kinetic_energy += 0.5 * np.square(velocity[key]).sum()
velocity[key] += 0.5 * v.grad * step_size
v.value = v.value + step_size * velocity[key]
hamiltonian = kinetic_energy - log_posterior.value
for _ in range(n_step):
run_model()
model.log_pdf().backward()
for key, v in variable.items():
velocity[key] += step_size * v.grad
v.value += step_size * velocity[key]
run_model()
log_posterior_new = model.log_pdf()
log_posterior_new.backward()
kinetic_energy_new = 0
for key, v in velocity.items():
v += 0.5 * step_size * variable[key].grad
kinetic_energy_new += 0.5 * np.square(v).sum()
hamiltonian_new = kinetic_energy_new - log_posterior_new.value
accept_proba = np.exp(hamiltonian - hamiltonian_new)
if random.random() < accept_proba:
for key, v in variable.items():
sample[key].append(v.value)
else:
for key, v in variable.items():
v.value = previous[key]
sample[key].append(v.value)
return sample
|
[
"random.random",
"numpy.square",
"numpy.exp",
"numpy.random.normal"
] |
[((3079, 3116), 'numpy.exp', 'np.exp', (['(hamiltonian - hamiltonian_new)'], {}), '(hamiltonian - hamiltonian_new)\n', (3085, 3116), True, 'import numpy as np\n'), ((2184, 2214), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'v.shape'}), '(size=v.shape)\n', (2200, 2214), True, 'import numpy as np\n'), ((3129, 3144), 'random.random', 'random.random', ([], {}), '()\n', (3142, 3144), False, 'import random\n'), ((2251, 2275), 'numpy.square', 'np.square', (['velocity[key]'], {}), '(velocity[key])\n', (2260, 2275), True, 'import numpy as np\n'), ((2965, 2977), 'numpy.square', 'np.square', (['v'], {}), '(v)\n', (2974, 2977), True, 'import numpy as np\n')]
|
import collections
import math
import numbers
import numpy as np
from .. import base
from .. import optim
from .. import utils
__all__ = [
'LinearRegression',
'LogisticRegression'
]
class GLM:
"""Generalized Linear Model.
Parameters:
optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
that the intercept is handled separately.
loss (optim.Loss): The loss function to optimize for.
l2 (float): Amount of L2 regularization used to push weights towards 0.
intercept (float): Initial intercept value.
intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
updating the intercept. If a `float` is passed, then an instance of
`optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
will not be updated.
clip_gradient (float): Clips the absolute value of each gradient value.
initializer (optim.initializers.Initializer): Weights initialization scheme.
Attributes:
weights (collections.defaultdict): The current weights.
"""
def __init__(self, optimizer, loss, l2, intercept, intercept_lr, clip_gradient, initializer):
self.optimizer = optimizer
self.loss = loss
self.l2 = l2
self.intercept = intercept
self.intercept_lr = (
optim.schedulers.Constant(intercept_lr)
if isinstance(intercept_lr, numbers.Number) else
intercept_lr
)
self.clip_gradient = clip_gradient
self.weights = collections.defaultdict(initializer)
self.initializer = initializer
def _raw_dot(self, x):
return utils.math.dot(self.weights, x) + self.intercept
def _eval_gradient(self, x, y, sample_weight):
"""Returns the gradient for a given observation.
This logic is put into a separate function for testing purposes.
"""
loss_gradient = self.loss.gradient(y_true=y, y_pred=self._raw_dot(x))
# Apply the sample weight
loss_gradient *= sample_weight
# Clip the gradient to avoid numerical instability
loss_gradient = utils.math.clamp(
loss_gradient,
minimum=-self.clip_gradient,
maximum=self.clip_gradient
)
return (
{
i: (
xi * loss_gradient +
2. * self.l2 * self.weights.get(i, 0)
)
for i, xi in x.items()
},
loss_gradient
)
def fit_one(self, x, y, sample_weight=1.):
# Some optimizers need to do something before a prediction is made
self.weights = self.optimizer.update_before_pred(w=self.weights)
# Calculate the gradient
gradient, loss_gradient = self._eval_gradient(x=x, y=y, sample_weight=sample_weight)
# Update the intercept
self.intercept -= self.intercept_lr.get(self.optimizer.n_iterations) * loss_gradient
# Update the weights
self.weights = self.optimizer.update_after_pred(w=self.weights, g=gradient)
return self
class LinearRegression(GLM, base.Regressor):
"""Linear regression.
Parameters:
optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
that the intercept is handled separately. Defaults to ``optim.SGD(.01)``.
loss (optim.RegressionLoss): The loss function to optimize for. Defaults to
``optim.losses.SquaredLoss``.
l2 (float): Amount of L2 regularization used to push weights towards 0.
intercept (float): Initial intercept value.
intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
updating the intercept. If a `float` is passed, then an instance of
`optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
will not be updated.
clip_gradient (float): Clips the absolute value of each gradient value.
initializer (optim.initializers.Initializer): Weights initialization scheme.
Attributes:
weights (collections.defaultdict): The current weights.
Example:
::
>>> from creme import datasets
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import preprocessing
>>> X_y = datasets.TrumpApproval()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LinearRegression(intercept_lr=.1)
... )
>>> metric = metrics.MAE()
>>> model_selection.progressive_val_score(X_y, model, metric)
MAE: 0.616405
>>> model['LinearRegression'].intercept
38.000439
Note:
Using a feature scaler such as `preprocessing.StandardScaler` upstream helps the optimizer
to converge.
"""
def __init__(self, optimizer=None, loss=None, l2=.0, intercept=0., intercept_lr=.01,
clip_gradient=1e12, initializer=None):
super().__init__(
optimizer=(
optim.SGD(optim.schedulers.InverseScaling(.01, .25))
if optimizer is None else
optimizer
),
loss=optim.losses.Squared() if loss is None else loss,
intercept=intercept,
intercept_lr=intercept_lr,
l2=l2,
clip_gradient=clip_gradient,
initializer=initializer if initializer else optim.initializers.Zeros()
)
def predict_one(self, x):
return self.loss.mean_func(self._raw_dot(x))
def debug_one(self, x, decimals=5, **print_params):
"""
Example:
::
>>> from creme import datasets
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import preprocessing
>>> X_y = datasets.TrumpApproval()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LinearRegression(intercept_lr=.1)
... )
>>> for x, y in X_y:
... y_pred = model.predict_one(x)
... model = model.fit_one(x, y)
>>> model.debug_one(x)
0. Input
--------
gallup: 43.84321 (float)
ipsos: 40.57068 (float)
morning_consult: 37.81875 (float)
ordinal_date: 737389 (int)
rasmussen: 40.10469 (float)
you_gov: 41.63691 (float)
<BLANKLINE>
1. StandardScaler
-----------------
gallup: 1.18751 (float)
ipsos: -0.04683 (float)
morning_consult: -1.22583 (float)
ordinal_date: 1.72946 (float)
rasmussen: -0.23857 (float)
you_gov: 0.44131 (float)
<BLANKLINE>
2. LinearRegression
-------------------
Name Value Weight Contribution
Intercept 1.00000 38.00044 38.00044
ordinal_date 1.72946 2.23125 3.85885
gallup 1.18751 0.28647 0.34019
you_gov 0.44131 -0.01270 -0.00560
ipsos -0.04683 1.01815 -0.04768
rasmussen -0.23857 0.45099 -0.10759
morning_consult -1.22583 0.35181 -0.43126
<BLANKLINE>
Prediction: 41.60735
"""
def fmt_float(x):
return '{: ,.{prec}f}'.format(x, prec=decimals)
names = list(map(str, x.keys())) + ['Intercept']
values = list(map(fmt_float, list(x.values()) + [1]))
weights = list(map(fmt_float, [self.weights.get(i, 0) for i in x] + [self.intercept]))
contributions = [xi * self.weights.get(i, 0) for i, xi in x.items()] + [self.intercept]
order = reversed(np.argsort(contributions))
contributions = list(map(fmt_float, contributions))
table = utils.pretty.print_table(
headers=['Name', 'Value', 'Weight', 'Contribution'],
columns=[names, values, weights, contributions],
order=order
)
print(table, **print_params)
class LogisticRegression(GLM, base.BinaryClassifier):
"""Logistic regression.
Parameters:
optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
that the intercept is handled separately. Defaults to ``optim.SGD(.05)``.
loss (optim.BinaryLoss): The loss function to optimize for. Defaults to
``optim.losses.Log``.
l2 (float): Amount of L2 regularization used to push weights towards 0.
intercept (float): Initial intercept value.
intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
updating the intercept. If a `float` is passed, then an instance of
`optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
will not be updated.
clip_gradient (float): Clips the absolute value of each gradient value.
initializer (optim.initializers.Initializer): Weights initialization scheme.
Attributes:
weights (collections.defaultdict): The current weights.
Example:
::
>>> from creme import datasets
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import optim
>>> from creme import preprocessing
>>> X_y = datasets.Phishing()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LogisticRegression(optimizer=optim.SGD(.1))
... )
>>> metric = metrics.Accuracy()
>>> model_selection.progressive_val_score(X_y, model, metric)
Accuracy: 88.96%
Note:
Using a feature scaler such as `preprocessing.StandardScaler` upstream helps the optimizer
to converge.
"""
def __init__(self, optimizer=None, loss=None, l2=.0, intercept=0., intercept_lr=.01,
clip_gradient=1e12, initializer=None):
super().__init__(
optimizer=optim.SGD(.01) if optimizer is None else optimizer,
loss=optim.losses.Log() if loss is None else loss,
intercept=intercept,
intercept_lr=intercept_lr,
l2=l2,
clip_gradient=clip_gradient,
initializer=initializer if initializer else optim.initializers.Zeros()
)
def predict_proba_one(self, x):
p = self.loss.mean_func(self._raw_dot(x)) # Convert logit to probability
return {False: 1. - p, True: p}
|
[
"collections.defaultdict",
"numpy.argsort"
] |
[((1625, 1661), 'collections.defaultdict', 'collections.defaultdict', (['initializer'], {}), '(initializer)\n', (1648, 1661), False, 'import collections\n'), ((8519, 8544), 'numpy.argsort', 'np.argsort', (['contributions'], {}), '(contributions)\n', (8529, 8544), True, 'import numpy as np\n')]
|
from ROAR.agent_module.agent import Agent
from ROAR.utilities_module.data_structures_models import SensorsData
from ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl
from ROAR.configurations.configuration import Configuration as AgentConfig
import cv2
import numpy as np
import open3d as o3d
from ROAR.utilities_module.occupancy_map import OccupancyGridMap
from ROAR.perception_module.depth_to_pointcloud_detector import DepthToPointCloudDetector
from ROAR.perception_module.ground_plane_detector import GroundPlaneDetector
from ROAR.perception_module.lane_detector import LaneDetector
class iOSAgent(Agent):
def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):
super().__init__(vehicle, agent_settings, **kwargs)
# initialize occupancy grid map content
self.occu_map = OccupancyGridMap(agent=self)
self.depth_to_pcd = DepthToPointCloudDetector(agent=self)
self.ground_plane_detector = GroundPlaneDetector(agent=self)
self.lane_detector = LaneDetector(agent=self)
# initialize open3d related content
self.vis = o3d.visualization.Visualizer()
self.vis.create_window(width=500, height=500)
self.pcd = o3d.geometry.PointCloud()
self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
self.points_added = False
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
super(iOSAgent, self).run_step(sensors_data, vehicle)
if self.front_depth_camera.data is not None and self.front_rgb_camera.data is not None:
depth_img = self.front_depth_camera.data.copy()
lane_mask = self.lane_detector.run_in_series()
none_lane = np.where(lane_mask < 0.5)
depth_img[none_lane] = 0
pcd = self.depth_to_pcd.run_in_series(depth_image=depth_img)
points: np.ndarray = np.asarray(pcd.points)
self.occu_map.update(points)
self.occu_map.visualize()
self.non_blocking_pcd_visualization(pcd=pcd, should_center=True,
should_show_axis=True, axis_size=1)
return VehicleControl()
def non_blocking_pcd_visualization(self, pcd: o3d.geometry.PointCloud,
should_center=False,
should_show_axis=False,
axis_size: float = 0.1):
points = np.asarray(pcd.points)
colors = np.asarray(pcd.colors)
if should_center:
points = points - np.mean(points, axis=0)
if self.points_added is False:
self.pcd = o3d.geometry.PointCloud()
self.pcd.points = o3d.utility.Vector3dVector(points)
self.pcd.colors = o3d.utility.Vector3dVector(colors)
if should_show_axis:
self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=axis_size,
origin=np.mean(points,
axis=0))
self.vis.add_geometry(self.coordinate_frame)
self.vis.add_geometry(self.pcd)
self.points_added = True
else:
# print(np.shape(np.vstack((np.asarray(self.pcd.points), points))))
self.pcd.points = o3d.utility.Vector3dVector(points)
self.pcd.colors = o3d.utility.Vector3dVector(colors)
if should_show_axis:
self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=axis_size,
origin=np.mean(points,
axis=0))
self.vis.update_geometry(self.coordinate_frame)
self.vis.update_geometry(self.pcd)
self.vis.poll_events()
self.vis.update_renderer()
|
[
"ROAR.perception_module.ground_plane_detector.GroundPlaneDetector",
"open3d.visualization.Visualizer",
"ROAR.utilities_module.occupancy_map.OccupancyGridMap",
"numpy.asarray",
"open3d.geometry.PointCloud",
"open3d.geometry.TriangleMesh.create_coordinate_frame",
"ROAR.utilities_module.vehicle_models.VehicleControl",
"numpy.where",
"numpy.mean",
"ROAR.perception_module.depth_to_pointcloud_detector.DepthToPointCloudDetector",
"open3d.utility.Vector3dVector",
"ROAR.perception_module.lane_detector.LaneDetector"
] |
[((842, 870), 'ROAR.utilities_module.occupancy_map.OccupancyGridMap', 'OccupancyGridMap', ([], {'agent': 'self'}), '(agent=self)\n', (858, 870), False, 'from ROAR.utilities_module.occupancy_map import OccupancyGridMap\n'), ((899, 936), 'ROAR.perception_module.depth_to_pointcloud_detector.DepthToPointCloudDetector', 'DepthToPointCloudDetector', ([], {'agent': 'self'}), '(agent=self)\n', (924, 936), False, 'from ROAR.perception_module.depth_to_pointcloud_detector import DepthToPointCloudDetector\n'), ((974, 1005), 'ROAR.perception_module.ground_plane_detector.GroundPlaneDetector', 'GroundPlaneDetector', ([], {'agent': 'self'}), '(agent=self)\n', (993, 1005), False, 'from ROAR.perception_module.ground_plane_detector import GroundPlaneDetector\n'), ((1035, 1059), 'ROAR.perception_module.lane_detector.LaneDetector', 'LaneDetector', ([], {'agent': 'self'}), '(agent=self)\n', (1047, 1059), False, 'from ROAR.perception_module.lane_detector import LaneDetector\n'), ((1123, 1153), 'open3d.visualization.Visualizer', 'o3d.visualization.Visualizer', ([], {}), '()\n', (1151, 1153), True, 'import open3d as o3d\n'), ((1227, 1252), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (1250, 1252), True, 'import open3d as o3d\n'), ((1285, 1336), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {}), '()\n', (1334, 1336), True, 'import open3d as o3d\n'), ((2207, 2223), 'ROAR.utilities_module.vehicle_models.VehicleControl', 'VehicleControl', ([], {}), '()\n', (2221, 2223), False, 'from ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl\n'), ((2504, 2526), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (2514, 2526), True, 'import numpy as np\n'), ((2544, 2566), 'numpy.asarray', 'np.asarray', (['pcd.colors'], {}), '(pcd.colors)\n', (2554, 2566), True, 'import numpy as np\n'), ((1760, 1785), 'numpy.where', 'np.where', (['(lane_mask < 0.5)'], {}), '(lane_mask < 0.5)\n', (1768, 1785), True, 'import numpy as np\n'), ((1929, 1951), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (1939, 1951), True, 'import numpy as np\n'), ((2710, 2735), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (2733, 2735), True, 'import open3d as o3d\n'), ((2766, 2800), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (2792, 2800), True, 'import open3d as o3d\n'), ((2831, 2865), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (2857, 2865), True, 'import open3d as o3d\n'), ((3499, 3533), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (3525, 3533), True, 'import open3d as o3d\n'), ((3564, 3598), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (3590, 3598), True, 'import open3d as o3d\n'), ((2623, 2646), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (2630, 2646), True, 'import numpy as np\n'), ((3103, 3126), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (3110, 3126), True, 'import numpy as np\n'), ((3835, 3858), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (3842, 3858), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
"""Scan serial ports for ping devices
Symlinks to detected devices are created under /dev/serial/ping/
This script needs root permission to create the symlinks
"""
import subprocess
import numpy as np
import rospy
from brping import PingDevice, PingParser, PingMessage
from brping.definitions import *
import serial
import socket
from collections import deque
from sensor_msgs.msg import Range, MultiEchoLaserScan, LaserEcho
class PingEnumerator:
def legacy_detect_ping1d(self, ping):
"""
Detects Ping1D devices without DEVICE_INFORMATION implemented
"""
firmware_version = ping.request(PING1D_FIRMWARE_VERSION)
if firmware_version is None:
return None
description = "/dev/serial/ping/Ping1D-id-{}-t-{}-m-{}-v-{}.{}".format (
firmware_version.src_device_id,
firmware_version.device_type,
firmware_version.device_model,
firmware_version.firmware_version_major,
firmware_version.firmware_version_minor
)
return description
def detect_device(self, dev):
"""
Attempts to detect the Ping device attached to serial port 'dev'
Returns the new path with encoded name if detected, or None if the
device was not detected
"""
print("Checking if " + dev + " is a Ping device...")
try:
ping = PingDevice()
ping.connect_serial("/dev/serial/by-id/" + dev, 115200)
except Exception as exception:
print("An exception has occurred: ", exception)
return None
if not ping.initialize():
return None
device_info = ping.request(COMMON_DEVICE_INFORMATION)
if not device_info:
return self.legacy_detect_ping1d(ping)
if device_info.device_type == 1:
description = "/dev/serial/ping/Ping1D-id-{}-r-{}-v-{}.{}.{}"
elif device_info.device_type == 2:
description = "/dev/serial/ping/Ping360-id-{}-r-{}-v-{}.{}.{}"
# Open device with 2M baud to setup Ping360
print("Setting baud to 2M...")
ser = serial.Serial("/dev/serial/by-id/" + dev, 2000000)
ser.send_break()
ser.write("UUUUUUU".encode())
ser.close()
self.set_low_latency(dev)
else:
return None
return description.format (
device_info.src_device_id,
device_info.device_revision,
device_info.firmware_version_major,
device_info.firmware_version_minor,
device_info.firmware_version_patch
)
def set_low_latency(self, dev):
"""
Receives /dev/serial/by-id/...
maps it to ttyUSB and sets the latency_timer for the device
"""
raise NotImplementedError("This method is currently not supported; it requires root permissions.")
target_device = subprocess.check_output(' '.join(["readlink", "-f", "/dev/serial/by-id/%s" % dev]), shell=True)
device_name = target_device.decode().strip().split("/")[-1]
latency_file = "/sys/bus/usb-serial/devices/{0}/latency_timer".format(device_name)
with open(latency_file, 'w') as p:
p.write("1")
p.flush()
def make_symlink(self, origin, target):
"""
Follows target to the real device and links origin to it:
origin => target
Returns True if successful
"""
raise NotImplementedError("This method is currently not supported; it requires root permissions.")
try:
# Follow link to actual device
target_device = subprocess.check_output(' '.join(["readlink", "-f", "/dev/serial/by-id/%s" % origin]), shell=True)
# Strip newline from output
target_device = target_device.decode().split('\n')[0]
# Create another link to it
subprocess.check_output(' '.join(["mkdir", "-p", "/dev/serial/ping"]), shell=True)
subprocess.check_output("ln -fs %s %s" % (
target_device,
target), shell=True)
print(origin, " linked to ", target)
return True
except subprocess.CalledProcessError as exception:
print(exception)
return False
def erase_old_symlinks(self):
"""
Erases all symlinks at "/dev/serial/ping/"
"""
raise NotImplementedError("This method is currently not supported; it requires root permissions.")
try:
subprocess.check_output(["rm", "-rf", "/dev/serial/ping"])
except subprocess.CalledProcessError as exception:
print(exception)
def list_serial_devices(self):
"""
Lists serial devices at "/dev/serial/by-id/"
"""
# Look for connected serial devices
try:
output = subprocess.check_output("ls /dev/serial/by-id", shell=True)
return output.decode().strip().split("\n")
except subprocess.CalledProcessError as exception:
print(exception)
return []
class PingDriver:
def __init__(self):
rospy.init_node("ping1d_driver_node")
self.ping_sensors = []
self.enumerator = PingEnumerator()
hz = rospy.Rate(1.0)
while not len(self.ping_sensors) and not rospy.is_shutdown():
self.ping_sensors = [f"/dev/serial/by-id/{dev}" for dev in self.enumerator.list_serial_devices()]
rospy.logerr_throttle(10.0, f"{rospy.get_name()} | Waiting for valid ping1d sensor to appear.")
hz.sleep()
## Messages that have the current distance measurement in the payload
self.distance_messages = [
PING1D_DISTANCE,
PING1D_DISTANCE_SIMPLE,
PING1D_PROFILE
]
## Parser to verify client comms
self.parser = PingParser()
self.range_publisher = rospy.Publisher("range", Range, queue_size=10)
self.profile_publisher = rospy.Publisher("profile", MultiEchoLaserScan, queue_size=10)
self.hz = rospy.Rate(15.0)
if not rospy.is_shutdown():
rospy.loginfo("Setting up serial device.")
self.device = PingDevice()
self.device.connect_serial(self.ping_sensors[0], 115200)
data = PingMessage(PING1D_CONTINUOUS_STOP)
data.pack_msg_data()
self.device.write(data.msg_data)
data = PingMessage(PING1D_SET_MODE_AUTO)
data.pack_msg_data()
self.device.write(data.msg_data)
data = PingMessage(PING1D_SET_RANGE)
data.scan_start = 200
data.scan_length = 30000
data.pack_msg_data()
self.device.write(data.msg_data)
## Digest incoming ping data
def parse(self, data: PingMessage):
range_msg = None
profile_msg = None
if data.message_id in self.distance_messages:
range_msg = Range()
range_msg.header.frame_id = "altimeter"
range_msg.header.stamp = rospy.Time.now()
range_msg.radiation_type = range_msg.ULTRASOUND
range_msg.field_of_view = 0.52
range_msg.max_range = (data.scan_start + data.scan_length) / 1000
range_msg.min_range = data.scan_start / 1000.0
if range_msg.min_range <= data.distance / 1000 <= range_msg.max_range:
range_msg.range = data.distance / 1000
if data.message_id == PING1D_PROFILE:
profile_msg = MultiEchoLaserScan()
profile_msg.header = range_msg.header
profile_msg.ranges = [LaserEcho(np.linspace(data.scan_start / 1000, data.scan_start / 1000 + data.scan_length / 1000, data.profile_data_length).tolist())]
profile_msg.range_min = data.scan_start / 1000.0
profile_msg.range_max = (data.scan_start + data.scan_length) / 1000
profile_msg.angle_increment = 0
profile_msg.angle_max = 0
profile_msg.angle_min = 0
profile_msg.intensities = [LaserEcho(np.frombuffer(data.profile_data, dtype=np.uint8).tolist())]
return range_msg, profile_msg
def send_ping1d_request(self):
data = PingMessage()
data.request_id = PING1D_DISTANCE
data.src_device_id = 0
data.pack_msg_data()
self.device.write(data.msg_data)
def run(self):
# read ping device from serial
try:
while not rospy.is_shutdown():
self.send_ping1d_request()
device_data = self.device.read()
if device_data is not None:
range_msg, profile_msg = self.parse(device_data)
if range_msg is not None:
self.range_publisher.publish(range_msg)
if profile_msg is not None:
self.profile_publisher.publish(profile_msg)
self.hz.sleep()
except rospy.ROSInterruptException:
pass
finally:
self.device.iodev.close()
class PingClient(object):
def __init__(self):
## Queued messages received from client
self.rx_msgs = deque([])
## Parser to verify client comms
self.parser = PingParser()
## Digest incoming client data
# @return None
def parse(self, data):
for b in bytearray(data):
if self.parser.parse_byte(b) == PingParser.NEW_MESSAGE:
self.rx_msgs.append(self.parser.rx_msg)
## Dequeue a message received from client
# @return None: if there are no comms in the queue
# @return PingMessage: the next ping message in the queue
def dequeue(self):
if len(self.rx_msgs) == 0:
return None
return self.rx_msgs.popleft()
class PingProxy(object):
def __init__(self, device: str, port: int, topic: str):
## A serial object for ping device comms
self.device = device
## UDP port number for server
self.port = port
## Publisher to send ROS range information on
self.range_msg = Range()
self.range_publisher = rospy.Publisher(topic, Range, queue_size=10)
## Connected client dictionary
self.clients = {}
## Socket to serve on
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(False)
self.socket.bind(('0.0.0.0', self.port))
## Run proxy tasks
def run(self):
try:
data, address = self.socket.recvfrom(4096)
# new client
if address not in self.clients:
self.clients[address] = PingClient()
# digest data coming in from client
self.clients[address].parse(data)
except TimeoutError:
pass # waiting for data
except Exception as e:
print("Error reading data", e)
# read ping device from serial
device_data = self.device.read(self.device.in_waiting)
# send ping device data to all clients via UDP
if device_data: # don't write empty data
for client in self.clients:
# print("writing to client", client)
self.socket.sendto(device_data, client)
# send all client comms to ping device
for client in self.clients:
c = self.clients[client]
msg = c.dequeue()
while msg is not None:
self.device.write(msg.msg_data)
msg = c.dequeue()
|
[
"serial.Serial",
"brping.PingParser",
"rospy.Time.now",
"brping.PingDevice",
"numpy.frombuffer",
"subprocess.check_output",
"socket.socket",
"sensor_msgs.msg.MultiEchoLaserScan",
"rospy.Publisher",
"rospy.Rate",
"rospy.loginfo",
"sensor_msgs.msg.Range",
"rospy.is_shutdown",
"brping.PingMessage",
"rospy.init_node",
"numpy.linspace",
"rospy.get_name",
"collections.deque"
] |
[((5198, 5235), 'rospy.init_node', 'rospy.init_node', (['"""ping1d_driver_node"""'], {}), "('ping1d_driver_node')\n", (5213, 5235), False, 'import rospy\n'), ((5323, 5338), 'rospy.Rate', 'rospy.Rate', (['(1.0)'], {}), '(1.0)\n', (5333, 5338), False, 'import rospy\n'), ((5930, 5942), 'brping.PingParser', 'PingParser', ([], {}), '()\n', (5940, 5942), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((5975, 6021), 'rospy.Publisher', 'rospy.Publisher', (['"""range"""', 'Range'], {'queue_size': '(10)'}), "('range', Range, queue_size=10)\n", (5990, 6021), False, 'import rospy\n'), ((6055, 6116), 'rospy.Publisher', 'rospy.Publisher', (['"""profile"""', 'MultiEchoLaserScan'], {'queue_size': '(10)'}), "('profile', MultiEchoLaserScan, queue_size=10)\n", (6070, 6116), False, 'import rospy\n'), ((6135, 6151), 'rospy.Rate', 'rospy.Rate', (['(15.0)'], {}), '(15.0)\n', (6145, 6151), False, 'import rospy\n'), ((8278, 8291), 'brping.PingMessage', 'PingMessage', ([], {}), '()\n', (8289, 8291), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((9252, 9261), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (9257, 9261), False, 'from collections import deque\n'), ((9326, 9338), 'brping.PingParser', 'PingParser', ([], {}), '()\n', (9336, 9338), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((10172, 10179), 'sensor_msgs.msg.Range', 'Range', ([], {}), '()\n', (10177, 10179), False, 'from sensor_msgs.msg import Range, MultiEchoLaserScan, LaserEcho\n'), ((10211, 10255), 'rospy.Publisher', 'rospy.Publisher', (['topic', 'Range'], {'queue_size': '(10)'}), '(topic, Range, queue_size=10)\n', (10226, 10255), False, 'import rospy\n'), ((10375, 10423), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (10388, 10423), False, 'import socket\n'), ((1430, 1442), 'brping.PingDevice', 'PingDevice', ([], {}), '()\n', (1440, 1442), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((4048, 4125), 'subprocess.check_output', 'subprocess.check_output', (["('ln -fs %s %s' % (target_device, target))"], {'shell': '(True)'}), "('ln -fs %s %s' % (target_device, target), shell=True)\n", (4071, 4125), False, 'import subprocess\n'), ((4582, 4640), 'subprocess.check_output', 'subprocess.check_output', (["['rm', '-rf', '/dev/serial/ping']"], {}), "(['rm', '-rf', '/dev/serial/ping'])\n", (4605, 4640), False, 'import subprocess\n'), ((4921, 4980), 'subprocess.check_output', 'subprocess.check_output', (['"""ls /dev/serial/by-id"""'], {'shell': '(True)'}), "('ls /dev/serial/by-id', shell=True)\n", (4944, 4980), False, 'import subprocess\n'), ((6168, 6187), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (6185, 6187), False, 'import rospy\n'), ((6201, 6243), 'rospy.loginfo', 'rospy.loginfo', (['"""Setting up serial device."""'], {}), "('Setting up serial device.')\n", (6214, 6243), False, 'import rospy\n'), ((6270, 6282), 'brping.PingDevice', 'PingDevice', ([], {}), '()\n', (6280, 6282), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((6371, 6406), 'brping.PingMessage', 'PingMessage', (['PING1D_CONTINUOUS_STOP'], {}), '(PING1D_CONTINUOUS_STOP)\n', (6382, 6406), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((6504, 6537), 'brping.PingMessage', 'PingMessage', (['PING1D_SET_MODE_AUTO'], {}), '(PING1D_SET_MODE_AUTO)\n', (6515, 6537), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((6635, 6664), 'brping.PingMessage', 'PingMessage', (['PING1D_SET_RANGE'], {}), '(PING1D_SET_RANGE)\n', (6646, 6664), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((7017, 7024), 'sensor_msgs.msg.Range', 'Range', ([], {}), '()\n', (7022, 7024), False, 'from sensor_msgs.msg import Range, MultiEchoLaserScan, LaserEcho\n'), ((7114, 7130), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (7128, 7130), False, 'import rospy\n'), ((7581, 7601), 'sensor_msgs.msg.MultiEchoLaserScan', 'MultiEchoLaserScan', ([], {}), '()\n', (7599, 7601), False, 'from sensor_msgs.msg import Range, MultiEchoLaserScan, LaserEcho\n'), ((2186, 2236), 'serial.Serial', 'serial.Serial', (["('/dev/serial/by-id/' + dev)", '(2000000)'], {}), "('/dev/serial/by-id/' + dev, 2000000)\n", (2199, 2236), False, 'import serial\n'), ((5388, 5407), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5405, 5407), False, 'import rospy\n'), ((8529, 8548), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (8546, 8548), False, 'import rospy\n'), ((5562, 5578), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (5576, 5578), False, 'import rospy\n'), ((7696, 7812), 'numpy.linspace', 'np.linspace', (['(data.scan_start / 1000)', '(data.scan_start / 1000 + data.scan_length / 1000)', 'data.profile_data_length'], {}), '(data.scan_start / 1000, data.scan_start / 1000 + data.\n scan_length / 1000, data.profile_data_length)\n', (7707, 7812), True, 'import numpy as np\n'), ((8129, 8177), 'numpy.frombuffer', 'np.frombuffer', (['data.profile_data'], {'dtype': 'np.uint8'}), '(data.profile_data, dtype=np.uint8)\n', (8142, 8177), True, 'import numpy as np\n')]
|
# BSD 3-Clause License
# Copyright (c) 2020, Instit<NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import networkx as nx
import numpy as np
import scipy.sparse as sparse
class StateTransitionSubGraphs:
def __init__(self, A_sparse, x0):
self.subnetws = None
self.scc_submats = None
self.nonempty_subgraphs = None
self.sorted_vertices = None
self.cyclic_sorted_subgraphs = None
self.fcn_scc_subgraphs(A_sparse, x0)
def fcn_metagraph_scc(self, A_sparse_sub):
matr_size = A_sparse_sub.shape[0]
g_sub = nx.from_scipy_sparse_matrix(A_sparse_sub, create_using=nx.DiGraph())
g_sub.remove_edges_from(nx.selfloop_edges(g_sub))
        # Here we reverse the order only for debugging purposes
        # The order shouldn't matter, but it's nice to have the same order as matlab
scc_list = list(reversed(list(nx.strongly_connected_components(g_sub))))
# print("%d connected components" % len(scc_list))
num_verts_per_scc = []
scc_memb_per_vert = np.zeros((matr_size, 1))
for i, scc in enumerate(scc_list):
num_verts_per_scc.append(len(scc))
scc_memb_per_vert[list(scc),:] = i
# row, col = np.where((A_sparse_sub - np.diag(A_sparse_sub.diagonal())) > 0)
# Yet another trick to get the exact same results as matlab
        # The difference is whether the list is built by parsing columns or rows, hopefully nothing critical
t_matr = (A_sparse_sub - sparse.diags(A_sparse_sub.diagonal())).transpose()
col, row, _ = sparse.find(t_matr > 0)
diff = scc_memb_per_vert[row] != scc_memb_per_vert[col]
row_sel = row[np.where(diff[:, 0])]
col_sel = col[np.where(diff[:, 0])]
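        # build the SCC condensation (metagraph): one node per SCC, keeping only edges whose endpoints lie in different SCCs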
A_metagraph = sparse.csr_matrix(
(np.array(A_sparse_sub[row_sel, col_sel]).flatten(),
(scc_memb_per_vert[row_sel][:, 0], scc_memb_per_vert[col_sel][:, 0])),
shape=(len(num_verts_per_scc), len(num_verts_per_scc))
)
metagraph = nx.from_scipy_sparse_matrix(A_metagraph, create_using=nx.DiGraph())
metagraph_ordering=np.array(list(nx.topological_sort(metagraph)))
terminal_scc_ind, _ = np.where(A_metagraph.sum(axis=1) == 0)
terminal_scc_pos = np.isin(metagraph_ordering, terminal_scc_ind)
nonterm_scc_num = len(num_verts_per_scc) - len(terminal_scc_ind)
scc_sup1 = [i for i, scc in enumerate(scc_list) if len(scc) > 1]
term_cycles_ind = set(scc_sup1).intersection(set(terminal_scc_ind))
where_terminal_scc_pos, = np.where(terminal_scc_pos)
if np.sum(np.logical_not(where_terminal_scc_pos>(nonterm_scc_num-1))) > 0:
nonterm_scc_inds = np.logical_not(np.isin(metagraph_ordering, terminal_scc_ind))
metagraph_ordering_terminal_bottom = np.concatenate([
metagraph_ordering[nonterm_scc_inds],
metagraph_ordering[terminal_scc_pos]
])
else:
metagraph_ordering_terminal_bottom = metagraph_ordering
if len(term_cycles_ind) > 0:
scc_cell_reordered = [scc_list[i] for i in metagraph_ordering_terminal_bottom]
# index of cells containing term cycles after reordering
term_cycles_ind, = np.where(np.isin(metagraph_ordering_terminal_bottom, np.array(list(term_cycles_ind))))
            # we need a cell of the indices of vertices within these SCCs
scc_cell_reordered_lengths = np.array([len(scc) for scc in scc_cell_reordered])
scc_cell_reordered_cumsum = np.cumsum(scc_cell_reordered_lengths)
cycle_first_verts = scc_cell_reordered_cumsum[term_cycles_ind] - scc_cell_reordered_lengths[term_cycles_ind];
cycle_last_verts = scc_cell_reordered_cumsum[term_cycles_ind] - 1
term_cycles_bounds = [np.concatenate([cycle_first_verts, cycle_last_verts])]
else:
term_cycles_ind = []
term_cycles_bounds = []
# reordered original vertices
vert_topol_sort = np.concatenate([list(scc_list[i]) for i in metagraph_ordering_terminal_bottom])
return vert_topol_sort, term_cycles_ind, A_metagraph, scc_list, term_cycles_bounds
def fcn_scc_subgraphs(self, A_sparse, x0):
# print("Indentifying SCCs")
B_sparse = sparse.csc_matrix(A_sparse)
B_sparse.setdiag(0)
nb_scc, labels = sparse.csgraph.connected_components(B_sparse, directed=True,connection='weak')
scc = [[] for _ in range(nb_scc)]
for i, label in enumerate(labels):
scc[label].append(i)
self.subnetws = scc
cell_subgraphs = []
self.scc_submats = []
self.nonempty_subgraphs = []
# print("Identifying SCCs in subgraphs")
for i, subnet in enumerate(self.subnetws):
cell_subgraphs.append(subnet)
            # Slicing done in two steps: first the rows, which is the most efficient for a csr sparse matrix
# then columns. I should probably dig deeper
t_sparse = A_sparse[subnet, :][:, subnet]
t_sparse.setdiag(0)
nb_scc, labels = sparse.csgraph.connected_components(t_sparse, directed=True,connection='strong')
scc = [[] for _ in range(nb_scc)]
for j, label in enumerate(labels):
scc[label].append(j)
self.scc_submats.append(scc)
if sum(x0[subnet]) > 0:
self.nonempty_subgraphs.append(i)
self.sorted_vertices = []
self.cyclic_sorted_subgraphs = []
counter = 0
for nonempty_subgraph in self.nonempty_subgraphs:
A_sparse_sub = A_sparse[self.subnetws[nonempty_subgraph], :][:, self.subnetws[nonempty_subgraph]]
if A_sparse_sub.shape[0] == len(self.scc_submats[nonempty_subgraph]):
t_g = nx.from_scipy_sparse_matrix(A_sparse_sub, create_using=nx.DiGraph())
t_g.remove_edges_from(nx.selfloop_edges(t_g))
self.sorted_vertices.append(list(nx.topological_sort(t_g)))
else:
# print("Cycles in STG")
# If entire graph is only one connected component, no need for re-ordering
if len(self.scc_submats[nonempty_subgraph]) == 1:
self.sorted_vertices.append(self.scc_submats[nonempty_subgraph])
else:
vert_topol_sort,term_cycles_ind,_,scc_cell,term_cycle_bounds=self.fcn_metagraph_scc(A_sparse_sub)
cycle_lengths = [len(scc) for scc in scc_cell]
a = np.zeros((max(cycle_lengths)))
for i in range(max(cycle_lengths)):
for j in cycle_lengths:
if j == i+1:
a[j-1] += 1
# print('Cycles of lenth: %s (%s times)' % (set(cycle_lengths), a[np.where(a>0)]) )
self.cyclic_sorted_subgraphs.append((vert_topol_sort, term_cycles_ind, term_cycle_bounds))
counter += 1
|
[
"numpy.isin",
"scipy.sparse.find",
"numpy.logical_not",
"numpy.zeros",
"networkx.topological_sort",
"networkx.selfloop_edges",
"numpy.cumsum",
"scipy.sparse.csc_matrix",
"numpy.where",
"numpy.array",
"scipy.sparse.csgraph.connected_components",
"networkx.strongly_connected_components",
"networkx.DiGraph",
"numpy.concatenate"
] |
[((2565, 2589), 'numpy.zeros', 'np.zeros', (['(matr_size, 1)'], {}), '((matr_size, 1))\n', (2573, 2589), True, 'import numpy as np\n'), ((3138, 3161), 'scipy.sparse.find', 'sparse.find', (['(t_matr > 0)'], {}), '(t_matr > 0)\n', (3149, 3161), True, 'import scipy.sparse as sparse\n'), ((3860, 3905), 'numpy.isin', 'np.isin', (['metagraph_ordering', 'terminal_scc_ind'], {}), '(metagraph_ordering, terminal_scc_ind)\n', (3867, 3905), True, 'import numpy as np\n'), ((4181, 4207), 'numpy.where', 'np.where', (['terminal_scc_pos'], {}), '(terminal_scc_pos)\n', (4189, 4207), True, 'import numpy as np\n'), ((6019, 6046), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['A_sparse'], {}), '(A_sparse)\n', (6036, 6046), True, 'import scipy.sparse as sparse\n'), ((6100, 6179), 'scipy.sparse.csgraph.connected_components', 'sparse.csgraph.connected_components', (['B_sparse'], {'directed': '(True)', 'connection': '"""weak"""'}), "(B_sparse, directed=True, connection='weak')\n", (6135, 6179), True, 'import scipy.sparse as sparse\n'), ((2186, 2210), 'networkx.selfloop_edges', 'nx.selfloop_edges', (['g_sub'], {}), '(g_sub)\n', (2203, 2210), True, 'import networkx as nx\n'), ((3258, 3278), 'numpy.where', 'np.where', (['diff[:, 0]'], {}), '(diff[:, 0])\n', (3266, 3278), True, 'import numpy as np\n'), ((3302, 3322), 'numpy.where', 'np.where', (['diff[:, 0]'], {}), '(diff[:, 0])\n', (3310, 3322), True, 'import numpy as np\n'), ((4434, 4531), 'numpy.concatenate', 'np.concatenate', (['[metagraph_ordering[nonterm_scc_inds], metagraph_ordering[terminal_scc_pos]]'], {}), '([metagraph_ordering[nonterm_scc_inds], metagraph_ordering[\n terminal_scc_pos]])\n', (4448, 4531), True, 'import numpy as np\n'), ((5189, 5226), 'numpy.cumsum', 'np.cumsum', (['scc_cell_reordered_lengths'], {}), '(scc_cell_reordered_lengths)\n', (5198, 5226), True, 'import numpy as np\n'), ((6882, 6968), 'scipy.sparse.csgraph.connected_components', 'sparse.csgraph.connected_components', (['t_sparse'], {'directed': '(True)', 'connection': '"""strong"""'}), "(t_sparse, directed=True, connection=\n 'strong')\n", (6917, 6968), True, 'import scipy.sparse as sparse\n'), ((2140, 2152), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (2150, 2152), True, 'import networkx as nx\n'), ((3667, 3679), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (3677, 3679), True, 'import networkx as nx\n'), ((3722, 3752), 'networkx.topological_sort', 'nx.topological_sort', (['metagraph'], {}), '(metagraph)\n', (3741, 3752), True, 'import networkx as nx\n'), ((4227, 4287), 'numpy.logical_not', 'np.logical_not', (['(where_terminal_scc_pos > nonterm_scc_num - 1)'], {}), '(where_terminal_scc_pos > nonterm_scc_num - 1)\n', (4241, 4287), True, 'import numpy as np\n'), ((4338, 4383), 'numpy.isin', 'np.isin', (['metagraph_ordering', 'terminal_scc_ind'], {}), '(metagraph_ordering, terminal_scc_ind)\n', (4345, 4383), True, 'import numpy as np\n'), ((5487, 5540), 'numpy.concatenate', 'np.concatenate', (['[cycle_first_verts, cycle_last_verts]'], {}), '([cycle_first_verts, cycle_last_verts])\n', (5501, 5540), True, 'import numpy as np\n'), ((2394, 2433), 'networkx.strongly_connected_components', 'nx.strongly_connected_components', (['g_sub'], {}), '(g_sub)\n', (2426, 2433), True, 'import networkx as nx\n'), ((7758, 7780), 'networkx.selfloop_edges', 'nx.selfloop_edges', (['t_g'], {}), '(t_g)\n', (7775, 7780), True, 'import networkx as nx\n'), ((3379, 3419), 'numpy.array', 'np.array', (['A_sparse_sub[row_sel, col_sel]'], {}), '(A_sparse_sub[row_sel, col_sel])\n', (3387, 3419), True, 
'import numpy as np\n'), ((7706, 7718), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (7716, 7718), True, 'import networkx as nx\n'), ((7831, 7855), 'networkx.topological_sort', 'nx.topological_sort', (['t_g'], {}), '(t_g)\n', (7850, 7855), True, 'import networkx as nx\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 27 22:04:58 2020
@author: zhangjun
"""
import numpy as np
class perceptron:
def __init__(self):
self.alpha = None
self.b = None
self.w = None
def train(self, x, y, learning_rate=1):
self.alpha = np.zeros(x.shape[0])
self.b = np.zeros(1)
G = np.dot(x,x.T)
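        # Gram matrix of the training samples; the dual-form perceptron only needs these inner products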
while True:
index_ms = 0
for index,x_i in enumerate(x):
index_s = y[index]*(np.sum(np.dot(self.alpha*y,G[:,index]))+ self.b)
if index_s<=0:
self.alpha[index] = self.alpha[index] + learning_rate
self.b = self.b + learning_rate*y[index]
break
index_ms = index_ms + 1
self.w = np.dot(self.alpha.T*y,x)
print (self.alpha,self.w,self.b)
if index_ms==x.shape[0]:
break
def prediction(self,x_pred):
y_pred = np.zeros(x_pred.shape[0])
for index,x_i in enumerate(x_pred):
y_pred[index] = np.sum(self.w*x_i) + self.b
if y_pred[index]>0:
y_pred[index] = 1
else:
y_pred[index] = -1
return y_pred
if __name__ == '__main__':
x = np.array([[3,3],[4,3],[1,1]])
y = np.array([1,1,-1])
Model = perceptron()
Model.train(x,y,learning_rate=1)
y_pred = Model.prediction(x)
print ('w,b=',Model.w,Model.b)
|
[
"numpy.zeros",
"numpy.dot",
"numpy.array",
"numpy.sum"
] |
[((1480, 1514), 'numpy.array', 'np.array', (['[[3, 3], [4, 3], [1, 1]]'], {}), '([[3, 3], [4, 3], [1, 1]])\n', (1488, 1514), True, 'import numpy as np\n'), ((1519, 1539), 'numpy.array', 'np.array', (['[1, 1, -1]'], {}), '([1, 1, -1])\n', (1527, 1539), True, 'import numpy as np\n'), ((405, 425), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (413, 425), True, 'import numpy as np\n'), ((444, 455), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (452, 455), True, 'import numpy as np\n'), ((469, 483), 'numpy.dot', 'np.dot', (['x', 'x.T'], {}), '(x, x.T)\n', (475, 483), True, 'import numpy as np\n'), ((1167, 1192), 'numpy.zeros', 'np.zeros', (['x_pred.shape[0]'], {}), '(x_pred.shape[0])\n', (1175, 1192), True, 'import numpy as np\n'), ((957, 984), 'numpy.dot', 'np.dot', (['(self.alpha.T * y)', 'x'], {}), '(self.alpha.T * y, x)\n', (963, 984), True, 'import numpy as np\n'), ((1267, 1287), 'numpy.sum', 'np.sum', (['(self.w * x_i)'], {}), '(self.w * x_i)\n', (1273, 1287), True, 'import numpy as np\n'), ((622, 657), 'numpy.dot', 'np.dot', (['(self.alpha * y)', 'G[:, index]'], {}), '(self.alpha * y, G[:, index])\n', (628, 657), True, 'import numpy as np\n')]
|
import numpy as np
import pylab as pl
from sklearn import mixture
np.random.seed(0)
#C1 = np.array([[3, -2.7], [1.5, 2.7]])
#C2 = np.array([[1, 2.0], [-1.5, 1.7]])
#
#X_train = np.r_[
# np.random.multivariate_normal((-7, -7), C1, size=7),
# np.random.multivariate_normal((7, 7), C2, size=7),
#]
X_train = np.r_[
np.array([[0,0],[0,1],[2,0],[3,2],[3,3],[2,2],[2,0]]),
np.array([[7,7],[8,6],[9,7],[8,10],[7,10],[8,9],[7,11]]),
]
print(X_train)
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
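# note: the weights_ assigned on the next line are overwritten by fit(), which re-estimates the mixture weights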
clf.weights_ = [2,1]
clf.fit(X_train)
#define g1(x, y) and g2(x, y)
def g1(x, y):
print("x = {},y = {} for g1".format(x,y))
return clf.predict_proba(np.column_stack((x, y)))[:, 0]
def g2(x, y):
print("x = {},y = {} for g2".format(x,y))
return clf.predict_proba(np.column_stack((x, y)))[:, 1]
X, Y = np.mgrid[-15:13:500j, -15:13:500j]
x = X.ravel()
y = Y.ravel()
p = (g1(x, y) - g2(x, y)).reshape(X.shape)
pl.scatter(X_train[:, 0], X_train[:, 1])
pl.contour(X, Y, p, levels=[0])
pl.show()
|
[
"pylab.contour",
"pylab.show",
"numpy.random.seed",
"sklearn.mixture.GaussianMixture",
"pylab.scatter",
"numpy.array",
"numpy.column_stack"
] |
[((67, 84), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (81, 84), True, 'import numpy as np\n'), ((470, 533), 'sklearn.mixture.GaussianMixture', 'mixture.GaussianMixture', ([], {'n_components': '(2)', 'covariance_type': '"""full"""'}), "(n_components=2, covariance_type='full')\n", (493, 533), False, 'from sklearn import mixture\n'), ((961, 1001), 'pylab.scatter', 'pl.scatter', (['X_train[:, 0]', 'X_train[:, 1]'], {}), '(X_train[:, 0], X_train[:, 1])\n', (971, 1001), True, 'import pylab as pl\n'), ((1002, 1033), 'pylab.contour', 'pl.contour', (['X', 'Y', 'p'], {'levels': '[0]'}), '(X, Y, p, levels=[0])\n', (1012, 1033), True, 'import pylab as pl\n'), ((1034, 1043), 'pylab.show', 'pl.show', ([], {}), '()\n', (1041, 1043), True, 'import pylab as pl\n'), ((324, 390), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [2, 0], [3, 2], [3, 3], [2, 2], [2, 0]]'], {}), '([[0, 0], [0, 1], [2, 0], [3, 2], [3, 3], [2, 2], [2, 0]])\n', (332, 390), True, 'import numpy as np\n'), ((383, 452), 'numpy.array', 'np.array', (['[[7, 7], [8, 6], [9, 7], [8, 10], [7, 10], [8, 9], [7, 11]]'], {}), '([[7, 7], [8, 6], [9, 7], [8, 10], [7, 10], [8, 9], [7, 11]])\n', (391, 452), True, 'import numpy as np\n'), ((693, 716), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (708, 716), True, 'import numpy as np\n'), ((814, 837), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (829, 837), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from .helper import exp_fit_func, inverse_exp_func, exp_func
def exp_curve_fit_(x_range, ln_y_range):
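    # fits ln(y) as a function of x with exp_fit_func (from helper, presumably ln(a) + b*x) and returns (a, b) so that y ~ a*exp(b*x)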
popc, pcov = curve_fit(exp_fit_func, x_range, ln_y_range)
ln_a, b = popc
a = np.exp(ln_a)
return a, b
def get_interm_zip_features_(ynew, _s4, _p4, _e1):
start_times = []
peak_times = []
end_times = []
peak_intensities = []
for i in range(len(_s4)):
if (_p4[i] - _s4[i] > 0) and (_e1[i] - _p4[i] > 0):
start_times.append(_s4[i])
peak_times.append(_p4[i])
end_times.append(_e1[i])
peak_intensities.append(ynew[_p4[i]])
return start_times, peak_times, end_times, peak_intensities
def get_interm_zip_(h1, h2, h3, h4):
_zip = pd.DataFrame(zip(h1, h2, h3, h4))
_zip.columns = ["start_time", "peak_time", "end_time", "peak_intensity"]
return _zip
def get_final_zip_features(xnew, ynew, _zip):
st = _zip["start_time"]
pt = _zip["peak_time"]
et = _zip["end_time"]
pi = _zip["peak_intensity"]
y_min = np.min(ynew)
final_st = []
final_pt = []
final_et = []
est_et = []
final_si = []
final_pi = []
final_err = []
final_bc = []
_class = []
for i in range(len(st)):
x_range = [int(xnew[j] - xnew[pt[i]]) for j in range(pt[i], et[i])]
ln_y_range = [np.log(ynew[j]) for j in range(pt[i], et[i])]
try:
popc, pcov = curve_fit(exp_fit_func, x_range, ln_y_range)
ln_a, b = popc
a = np.exp(ln_a)
# the 7th filter, can't allow increasing exponential so-called-flares!
# _calc_et is estimated end time from the analytical function fitted
if b < 0:
continue
_calc_et = inverse_exp_func(ynew[st[i]], a, b)
final_st.append(st[i])
final_pt.append(pt[i])
final_et.append(et[i])
final_pi.append(pi[i])
final_si.append(ynew[st[i]])
est_et.append(_calc_et + pt[i])
final_bc.append((ynew[st[i]] + ynew[et[i]]) / 2)
y_dash = []
y_diff = []
y_proj = []
x_proj = []
for _i, j in enumerate(x_range):
__y = exp_func(xnew[j], a, b)
y_dash.append(__y)
y_diff.append(abs(np.exp(ln_y_range[_i]) - __y))
for j in range(et[i] - pt[i], _calc_et):
if (j + pt[i]) < len(xnew):
x_proj.append(xnew[j + pt[i]])
y_proj.append(exp_func(xnew[j], a, b))
# error is sum(difference between fitted and actual) / ((peak intensity - minimum intensity) * duration from peak to actual end)
            final_err.append(np.sum(y_diff) / ((pi[i] - y_min) * len(x_range)))
val = np.log10(pi[i] / 25)
_str = ""
_val = str(int(val * 100) / 10)[-3:]
if int(val) < 1:
_str = "A" + _val
elif int(val) == 1:
_str = "B" + _val
elif int(val) == 2:
_str = "C" + _val
elif int(val) == 3:
_str = "M" + _val
elif int(val) > 3:
_str = "X" + _val
_class.append(_str)
        except Exception as err:
            print("Error in curve fitting:", err)
return (
final_st,
final_pt,
final_et,
est_et,
final_si,
final_pi,
final_bc,
final_err,
_class,
)
def get_final_zip(g1, g2, g3, g4, g5, g6, g7, g8, g9):
final_zip = pd.DataFrame(zip(g1, g2, g3, g4, g5, g6, g7, g8, g9))
final_zip.columns = [
"start_time",
"peak_time",
"end_time",
"est_end_time",
"start_intensity",
"peak_intensity",
"background_counts",
"error",
"class",
]
return final_zip
|
[
"numpy.sum",
"numpy.log",
"scipy.optimize.curve_fit",
"numpy.min",
"numpy.exp",
"numpy.log10"
] |
[((197, 241), 'scipy.optimize.curve_fit', 'curve_fit', (['exp_fit_func', 'x_range', 'ln_y_range'], {}), '(exp_fit_func, x_range, ln_y_range)\n', (206, 241), False, 'from scipy.optimize import curve_fit\n'), ((269, 281), 'numpy.exp', 'np.exp', (['ln_a'], {}), '(ln_a)\n', (275, 281), True, 'import numpy as np\n'), ((1105, 1117), 'numpy.min', 'np.min', (['ynew'], {}), '(ynew)\n', (1111, 1117), True, 'import numpy as np\n'), ((1404, 1419), 'numpy.log', 'np.log', (['ynew[j]'], {}), '(ynew[j])\n', (1410, 1419), True, 'import numpy as np\n'), ((1488, 1532), 'scipy.optimize.curve_fit', 'curve_fit', (['exp_fit_func', 'x_range', 'ln_y_range'], {}), '(exp_fit_func, x_range, ln_y_range)\n', (1497, 1532), False, 'from scipy.optimize import curve_fit\n'), ((1576, 1588), 'numpy.exp', 'np.exp', (['ln_a'], {}), '(ln_a)\n', (1582, 1588), True, 'import numpy as np\n'), ((2882, 2902), 'numpy.log10', 'np.log10', (['(pi[i] / 25)'], {}), '(pi[i] / 25)\n', (2890, 2902), True, 'import numpy as np\n'), ((2810, 2824), 'numpy.sum', 'np.sum', (['y_dash'], {}), '(y_dash)\n', (2816, 2824), True, 'import numpy as np\n'), ((2401, 2423), 'numpy.exp', 'np.exp', (['ln_y_range[_i]'], {}), '(ln_y_range[_i])\n', (2407, 2423), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.spatial import cKDTree
from scipy.spatial.distance import pdist, squareform
from scipy.sparse import coo_matrix
import pylab as plt
def squared_exponential(x2,D=3):
#x = np.reshape(x,(-1,D))
return np.exp(-x2/2.)
def matern52(x2):
x = np.sqrt(x2)
res = x2
res *= 5./3.
res += np.sqrt(5) * x
res += 1
res *= np.exp((-np.sqrt(5))*x)
return res
def sparse_covariance(cfun, points, sigma, corr,tol=0.1,upper_tri=True):
N,D = points.shape
if not isinstance(corr,np.ndarray):
corr = np.ones(D)*corr
#Get support
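    # for tol > 0, step outward along each axis until the covariance drops below tol*c(0);
    # the largest scaled distance found is used as the isotropic cutoff radius for the neighbour search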
if tol == 0.:
isot = np.inf
else:
isot = 0.
for dim in range(D):
direction = (np.arange(D)==dim).astype(float)
t = 0
c0 = cfun(0)
c = c0
while c/c0 > tol:
t += 0.1
c = cfun(np.sum((t*direction/corr)**2))
isot = max(isot,t/corr[dim])
#print("isotropic support: {}".format(isot))
kd = cKDTree(points/corr)
if upper_tri:
pairs = kd.query_pairs(isot,p=2,output_type='ndarray')
pairs = np.concatenate([np.array([np.arange(N)]*2).T,pairs])
x1 = points[pairs[:,0],:]
x2 = points[pairs[:,1],:]
dx = x1-x2
dx /= corr
dx *= dx
dx = np.sum(dx,axis=1)
cval = cfun(dx)
csparse = coo_matrix((cval,(pairs[:,0],pairs[:,1])), shape=(N,N))
else:
X = kd.sparse_distance_matrix(kd,isot,output_type='coo_matrix')
cval = cfun(X.data**2)
csparse = coo_matrix((cval,(X.col,X.row)), shape=(N,N))
return (sigma**2)*csparse
def dense_covariance(cfun, points, sigma, corr):
N,D = points.shape
if not isinstance(corr,np.ndarray):
corr = np.ones(D)*corr
points = points / corr
X = squareform(pdist(points,metric='sqeuclidean'))
return (sigma**2)*cfun(X)
def test_sparse_covariance():
corr = np.array([0.2,0.5,0.1])
xvec = np.linspace(0,1,50)
yvec = np.linspace(0,1,10)
zvec = np.linspace(0,1,10)
X,Y,Z = np.meshgrid(xvec,yvec,zvec,indexing='ij')
points = np.array([X.flatten(),Y.flatten(), Z.flatten()]).T
#%timeit -n 2 cdense = dense_covariance(squared_exponential, points, None, corr)
cdense = dense_covariance(matern52, points, 1., corr)
# #print(cdense)
# plt.imshow(cdense)
# plt.colorbar()
# plt.show()
#%timeit -n 2 csparse = sparse_covariance(squared_exponential,points,None,corr,tol=0.1)
csparse = sparse_covariance(matern52,points,1.,corr,tol=0,upper_tri=False)
assert np.all(np.isclose(csparse.toarray(), cdense))
# #print(csparse.toarray())
# plt.imshow(csparse.toarray())
# plt.colorbar()
# plt.show()
# plt.imshow(csparse.toarray() - cdense)
# plt.colorbar()
# plt.show()
csparse = sparse_covariance(matern52,points,1.,corr,tol=0.1,upper_tri=True)
print("upper triangle tol=0.1 -> saving: {}%".format(1-csparse.nonzero()[0].size/cdense.size))
csparse = sparse_covariance(matern52,points,1.,corr,tol=0.01,upper_tri=True)
print("upper triangle tol=0.01 -> saving: {}%".format(1-csparse.nonzero()[0].size/cdense.size))
def test_sparse_covariance_performance():
corr = np.array([5.,5.,1.])
xvec = np.linspace(-80,80,150)
yvec = np.linspace(-80,80,150)
zvec = np.linspace(0,1000,20)
X,Y,Z = np.meshgrid(xvec,yvec,zvec,indexing='ij')
points = np.array([X.flatten(),Y.flatten(), Z.flatten()]).T
csparse = sparse_covariance(matern52,points,1.,corr,tol=0.1,upper_tri=True)
print("upper triangle tol=0.1 -> saving: {}%".format(1-csparse.nonzero()[0].size/points.size**2))
if __name__=='__main__':
test_sparse_covariance_performance()
|
[
"numpy.meshgrid",
"numpy.sum",
"numpy.ones",
"scipy.sparse.coo_matrix",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"scipy.spatial.cKDTree",
"scipy.spatial.distance.pdist",
"numpy.arange",
"numpy.sqrt"
] |
[((237, 254), 'numpy.exp', 'np.exp', (['(-x2 / 2.0)'], {}), '(-x2 / 2.0)\n', (243, 254), True, 'import numpy as np\n'), ((279, 290), 'numpy.sqrt', 'np.sqrt', (['x2'], {}), '(x2)\n', (286, 290), True, 'import numpy as np\n'), ((1032, 1054), 'scipy.spatial.cKDTree', 'cKDTree', (['(points / corr)'], {}), '(points / corr)\n', (1039, 1054), False, 'from scipy.spatial import cKDTree\n'), ((1972, 1997), 'numpy.array', 'np.array', (['[0.2, 0.5, 0.1]'], {}), '([0.2, 0.5, 0.1])\n', (1980, 1997), True, 'import numpy as np\n'), ((2008, 2029), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (2019, 2029), True, 'import numpy as np\n'), ((2039, 2060), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (2050, 2060), True, 'import numpy as np\n'), ((2070, 2091), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (2081, 2091), True, 'import numpy as np\n'), ((2103, 2147), 'numpy.meshgrid', 'np.meshgrid', (['xvec', 'yvec', 'zvec'], {'indexing': '"""ij"""'}), "(xvec, yvec, zvec, indexing='ij')\n", (2114, 2147), True, 'import numpy as np\n'), ((3261, 3286), 'numpy.array', 'np.array', (['[5.0, 5.0, 1.0]'], {}), '([5.0, 5.0, 1.0])\n', (3269, 3286), True, 'import numpy as np\n'), ((3294, 3319), 'numpy.linspace', 'np.linspace', (['(-80)', '(80)', '(150)'], {}), '(-80, 80, 150)\n', (3305, 3319), True, 'import numpy as np\n'), ((3329, 3354), 'numpy.linspace', 'np.linspace', (['(-80)', '(80)', '(150)'], {}), '(-80, 80, 150)\n', (3340, 3354), True, 'import numpy as np\n'), ((3364, 3388), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(20)'], {}), '(0, 1000, 20)\n', (3375, 3388), True, 'import numpy as np\n'), ((3400, 3444), 'numpy.meshgrid', 'np.meshgrid', (['xvec', 'yvec', 'zvec'], {'indexing': '"""ij"""'}), "(xvec, yvec, zvec, indexing='ij')\n", (3411, 3444), True, 'import numpy as np\n'), ((332, 342), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (339, 342), True, 'import numpy as np\n'), ((1341, 1359), 'numpy.sum', 'np.sum', (['dx'], {'axis': '(1)'}), '(dx, axis=1)\n', (1347, 1359), True, 'import numpy as np\n'), ((1402, 1462), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(cval, (pairs[:, 0], pairs[:, 1]))'], {'shape': '(N, N)'}), '((cval, (pairs[:, 0], pairs[:, 1])), shape=(N, N))\n', (1412, 1462), False, 'from scipy.sparse import coo_matrix\n'), ((1589, 1637), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(cval, (X.col, X.row))'], {'shape': '(N, N)'}), '((cval, (X.col, X.row)), shape=(N, N))\n', (1599, 1637), False, 'from scipy.sparse import coo_matrix\n'), ((1864, 1899), 'scipy.spatial.distance.pdist', 'pdist', (['points'], {'metric': '"""sqeuclidean"""'}), "(points, metric='sqeuclidean')\n", (1869, 1899), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((563, 573), 'numpy.ones', 'np.ones', (['D'], {}), '(D)\n', (570, 573), True, 'import numpy as np\n'), ((1802, 1812), 'numpy.ones', 'np.ones', (['D'], {}), '(D)\n', (1809, 1812), True, 'import numpy as np\n'), ((380, 390), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (387, 390), True, 'import numpy as np\n'), ((902, 937), 'numpy.sum', 'np.sum', (['((t * direction / corr) ** 2)'], {}), '((t * direction / corr) ** 2)\n', (908, 937), True, 'import numpy as np\n'), ((727, 739), 'numpy.arange', 'np.arange', (['D'], {}), '(D)\n', (736, 739), True, 'import numpy as np\n'), ((1177, 1189), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1186, 1189), True, 'import numpy as np\n')]
|
# Translation in python of the Matlab implementation of <NAME> and
# <NAME>, of the algorithm described in
# "Mixtures of Probabilistic Principal Component Analysers",
# <NAME> and <NAME>, Neural Computation 11(2),
# pp 443–482, MIT Press, 1999
import numpy as np
def initialization_kmeans(X, p, q, variance_level=None):
"""
X : dataset
p : number of clusters
q : dimension of the latent space
variance_level
pi : proportions of clusters
mu : centers of the clusters in the observation space
    W : latent to observation matrices
sigma2 : noise
"""
N, d = X.shape
# initialization
init_centers = np.random.randint(0, N, p)
while (len(np.unique(init_centers)) != p):
init_centers = np.random.randint(0, N, p)
mu = X[init_centers, :]
distance_square = np.zeros((N, p))
clusters = np.zeros(N, dtype=np.int32)
D_old = -2
D = -1
while(D_old != D):
D_old = D
# assign clusters
for c in range(p):
distance_square[:, c] = np.power(X - mu[c, :], 2).sum(1)
clusters = np.argmin(distance_square, axis=1)
# compute distortion
distmin = distance_square[range(N), clusters]
D = distmin.sum()
# compute new centers
for c in range(p):
mu[c, :] = X[clusters == c, :].mean(0)
#for c in range(p):
# plt.scatter(X[clusters == c, 0], X[clusters == c, 1], c=np.random.rand(3,1))
# parameter initialization
pi = np.zeros(p)
W = np.zeros((p, d, q))
sigma2 = np.zeros(p)
for c in range(p):
if variance_level:
W[c, :, :] = variance_level * np.random.randn(d, q)
else:
W[c, :, :] = np.random.randn(d, q)
pi[c] = (clusters == c).sum() / N
if variance_level:
sigma2[c] = np.abs((variance_level/10) * np.random.randn())
else:
sigma2[c] = (distmin[clusters == c]).mean() / d
return pi, mu, W, sigma2, clusters
def mppca_gem(X, pi, mu, W, sigma2, niter):
N, d = X.shape
p = len(sigma2)
_, q = W[0].shape
sigma2hist = np.zeros((p, niter))
M = np.zeros((p, q, q))
Minv = np.zeros((p, q, q))
Cinv = np.zeros((p, d, d))
logR = np.zeros((N, p))
R = np.zeros((N, p))
M[:] = 0.
Minv[:] = 0.
Cinv[:] = 0.
L = np.zeros(niter)
for i in range(niter):
print('.', end='')
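        # E-step: build the per-cluster matrices M, C^-1 and the log-responsibilities logR of every sample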
for c in range(p):
sigma2hist[c, i] = sigma2[c]
# M
M[c, :, :] = sigma2[c]*np.eye(q) + np.dot(W[c, :, :].T, W[c, :, :])
Minv[c, :, :] = np.linalg.inv(M[c, :, :])
# Cinv
Cinv[c, :, :] = (np.eye(d)
- np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
) / sigma2[c]
# R_ni
deviation_from_center = X - mu[c, :]
logR[:, c] = ( np.log(pi[c])
+ 0.5*np.log(
np.linalg.det(
np.eye(d) - np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
)
)
- 0.5*d*np.log(sigma2[c])
- 0.5*(deviation_from_center * np.dot(deviation_from_center, Cinv[c, :, :].T)).sum(1)
)
myMax = logR.max(axis=1).reshape((N, 1))
L[i] = (
(myMax.ravel() + np.log(np.exp(logR - myMax).sum(axis=1))).sum(axis=0)
- N*d*np.log(2*3.141593)/2.
)
logR = logR - myMax - np.reshape(np.log(np.exp(logR - myMax).sum(axis=1)), (N, 1))
myMax = logR.max(axis=0)
logpi = myMax + np.log(np.exp(logR - myMax).sum(axis=0)) - np.log(N)
logpi = logpi.T
pi = np.exp(logpi)
R = np.exp(logR)
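        # M-step: re-estimate cluster means, factor loadings W and noise variances sigma2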
for c in range(p):
mu[c, :] = (R[:, c].reshape((N, 1)) * X).sum(axis=0) / R[:, c].sum()
deviation_from_center = X - mu[c, :].reshape((1, d))
SW = ( (1/(pi[c]*N))
* np.dot((R[:, c].reshape((N, 1)) * deviation_from_center).T,
np.dot(deviation_from_center, W[c, :, :]))
)
Wnew = np.dot(SW, np.linalg.inv(sigma2[c]*np.eye(q) + np.dot(np.dot(Minv[c, :, :], W[c, :, :].T), SW)))
sigma2[c] = (1/d) * (
(R[:, c].reshape(N, 1) * np.power(deviation_from_center, 2)).sum()
/
(N*pi[c])
-
np.trace(np.dot(np.dot(SW, Minv[c, :, :]), Wnew.T))
)
W[c, :, :] = Wnew
return pi, mu, W, sigma2, R, L, sigma2hist
def mppca_predict(X, pi, mu, W, sigma2):
N, d = X.shape
p = len(sigma2)
_, q = W[0].shape
M = np.zeros((p, q, q))
Minv = np.zeros((p, q, q))
Cinv = np.zeros((p, d, d))
logR = np.zeros((N, p))
R = np.zeros((N, p))
for c in range(p):
# M
M[c, :, :] = sigma2[c] * np.eye(q) + np.dot(W[c, :, :].T, W[c, :, :])
Minv[c, :, :] = np.linalg.inv(M[c, :, :])
# Cinv
Cinv[c, :, :] = (np.eye(d)
- np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
) / sigma2[c]
# R_ni
deviation_from_center = X - mu[c, :]
logR[:, c] = ( np.log(pi[c])
+ 0.5*np.log(
np.linalg.det(
np.eye(d) - np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
)
)
- 0.5*d*np.log(sigma2[c])
- 0.5*(deviation_from_center * np.dot(deviation_from_center, Cinv[c, :, :].T)).sum(1)
)
myMax = logR.max(axis=1).reshape((N, 1))
logR = logR - myMax - np.reshape(np.log(np.exp(logR - myMax).sum(axis=1)), (N, 1))
R = np.exp(logR)
return R
|
[
"numpy.log",
"numpy.eye",
"numpy.random.randn",
"numpy.power",
"numpy.zeros",
"numpy.argmin",
"numpy.random.randint",
"numpy.linalg.inv",
"numpy.exp",
"numpy.dot",
"numpy.unique"
] |
[((653, 679), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N', 'p'], {}), '(0, N, p)\n', (670, 679), True, 'import numpy as np\n'), ((828, 844), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (836, 844), True, 'import numpy as np\n'), ((860, 887), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'np.int32'}), '(N, dtype=np.int32)\n', (868, 887), True, 'import numpy as np\n'), ((1505, 1516), 'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (1513, 1516), True, 'import numpy as np\n'), ((1525, 1544), 'numpy.zeros', 'np.zeros', (['(p, d, q)'], {}), '((p, d, q))\n', (1533, 1544), True, 'import numpy as np\n'), ((1558, 1569), 'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (1566, 1569), True, 'import numpy as np\n'), ((2126, 2146), 'numpy.zeros', 'np.zeros', (['(p, niter)'], {}), '((p, niter))\n', (2134, 2146), True, 'import numpy as np\n'), ((2155, 2174), 'numpy.zeros', 'np.zeros', (['(p, q, q)'], {}), '((p, q, q))\n', (2163, 2174), True, 'import numpy as np\n'), ((2186, 2205), 'numpy.zeros', 'np.zeros', (['(p, q, q)'], {}), '((p, q, q))\n', (2194, 2205), True, 'import numpy as np\n'), ((2217, 2236), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (2225, 2236), True, 'import numpy as np\n'), ((2248, 2264), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (2256, 2264), True, 'import numpy as np\n'), ((2273, 2289), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (2281, 2289), True, 'import numpy as np\n'), ((2347, 2362), 'numpy.zeros', 'np.zeros', (['niter'], {}), '(niter)\n', (2355, 2362), True, 'import numpy as np\n'), ((4692, 4711), 'numpy.zeros', 'np.zeros', (['(p, q, q)'], {}), '((p, q, q))\n', (4700, 4711), True, 'import numpy as np\n'), ((4723, 4742), 'numpy.zeros', 'np.zeros', (['(p, q, q)'], {}), '((p, q, q))\n', (4731, 4742), True, 'import numpy as np\n'), ((4754, 4773), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (4762, 4773), True, 'import numpy as np\n'), ((4785, 4801), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (4793, 4801), True, 'import numpy as np\n'), ((4810, 4826), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (4818, 4826), True, 'import numpy as np\n'), ((5704, 5716), 'numpy.exp', 'np.exp', (['logR'], {}), '(logR)\n', (5710, 5716), True, 'import numpy as np\n'), ((750, 776), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N', 'p'], {}), '(0, N, p)\n', (767, 776), True, 'import numpy as np\n'), ((1099, 1133), 'numpy.argmin', 'np.argmin', (['distance_square'], {'axis': '(1)'}), '(distance_square, axis=1)\n', (1108, 1133), True, 'import numpy as np\n'), ((3712, 3725), 'numpy.exp', 'np.exp', (['logpi'], {}), '(logpi)\n', (3718, 3725), True, 'import numpy as np\n'), ((3738, 3750), 'numpy.exp', 'np.exp', (['logR'], {}), '(logR)\n', (3744, 3750), True, 'import numpy as np\n'), ((4965, 4990), 'numpy.linalg.inv', 'np.linalg.inv', (['M[c, :, :]'], {}), '(M[c, :, :])\n', (4978, 4990), True, 'import numpy as np\n'), ((695, 718), 'numpy.unique', 'np.unique', (['init_centers'], {}), '(init_centers)\n', (704, 718), True, 'import numpy as np\n'), ((1723, 1744), 'numpy.random.randn', 'np.random.randn', (['d', 'q'], {}), '(d, q)\n', (1738, 1744), True, 'import numpy as np\n'), ((2610, 2635), 'numpy.linalg.inv', 'np.linalg.inv', (['M[c, :, :]'], {}), '(M[c, :, :])\n', (2623, 2635), True, 'import numpy as np\n'), ((3665, 3674), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (3671, 3674), True, 'import numpy as np\n'), ((4908, 4940), 'numpy.dot', 'np.dot', (['W[c, :, :].T', 'W[c, :, 
:]'], {}), '(W[c, :, :].T, W[c, :, :])\n', (4914, 4940), True, 'import numpy as np\n'), ((1662, 1683), 'numpy.random.randn', 'np.random.randn', (['d', 'q'], {}), '(d, q)\n', (1677, 1683), True, 'import numpy as np\n'), ((2549, 2581), 'numpy.dot', 'np.dot', (['W[c, :, :].T', 'W[c, :, :]'], {}), '(W[c, :, :].T, W[c, :, :])\n', (2555, 2581), True, 'import numpy as np\n'), ((4896, 4905), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (4902, 4905), True, 'import numpy as np\n'), ((5032, 5041), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (5038, 5041), True, 'import numpy as np\n'), ((1047, 1072), 'numpy.power', 'np.power', (['(X - mu[c, :])', '(2)'], {}), '(X - mu[c, :], 2)\n', (1055, 1072), True, 'import numpy as np\n'), ((1868, 1885), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1883, 1885), True, 'import numpy as np\n'), ((2537, 2546), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (2543, 2546), True, 'import numpy as np\n'), ((2685, 2694), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (2691, 2694), True, 'import numpy as np\n'), ((3436, 3456), 'numpy.log', 'np.log', (['(2 * 3.141593)'], {}), '(2 * 3.141593)\n', (3442, 3456), True, 'import numpy as np\n'), ((4056, 4097), 'numpy.dot', 'np.dot', (['deviation_from_center', 'W[c, :, :]'], {}), '(deviation_from_center, W[c, :, :])\n', (4062, 4097), True, 'import numpy as np\n'), ((5063, 5096), 'numpy.dot', 'np.dot', (['W[c, :, :]', 'Minv[c, :, :]'], {}), '(W[c, :, :], Minv[c, :, :])\n', (5069, 5096), True, 'import numpy as np\n'), ((5222, 5235), 'numpy.log', 'np.log', (['pi[c]'], {}), '(pi[c])\n', (5228, 5235), True, 'import numpy as np\n'), ((5433, 5450), 'numpy.log', 'np.log', (['sigma2[c]'], {}), '(sigma2[c])\n', (5439, 5450), True, 'import numpy as np\n'), ((2720, 2753), 'numpy.dot', 'np.dot', (['W[c, :, :]', 'Minv[c, :, :]'], {}), '(W[c, :, :], Minv[c, :, :])\n', (2726, 2753), True, 'import numpy as np\n'), ((2895, 2908), 'numpy.log', 'np.log', (['pi[c]'], {}), '(pi[c])\n', (2901, 2908), True, 'import numpy as np\n'), ((3130, 3147), 'numpy.log', 'np.log', (['sigma2[c]'], {}), '(sigma2[c])\n', (3136, 3147), True, 'import numpy as np\n'), ((5653, 5673), 'numpy.exp', 'np.exp', (['(logR - myMax)'], {}), '(logR - myMax)\n', (5659, 5673), True, 'import numpy as np\n'), ((3521, 3541), 'numpy.exp', 'np.exp', (['(logR - myMax)'], {}), '(logR - myMax)\n', (3527, 3541), True, 'import numpy as np\n'), ((3629, 3649), 'numpy.exp', 'np.exp', (['(logR - myMax)'], {}), '(logR - myMax)\n', (3635, 3649), True, 'import numpy as np\n'), ((4172, 4181), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (4178, 4181), True, 'import numpy as np\n'), ((4191, 4226), 'numpy.dot', 'np.dot', (['Minv[c, :, :]', 'W[c, :, :].T'], {}), '(Minv[c, :, :], W[c, :, :].T)\n', (4197, 4226), True, 'import numpy as np\n'), ((4446, 4471), 'numpy.dot', 'np.dot', (['SW', 'Minv[c, :, :]'], {}), '(SW, Minv[c, :, :])\n', (4452, 4471), True, 'import numpy as np\n'), ((5494, 5540), 'numpy.dot', 'np.dot', (['deviation_from_center', 'Cinv[c, :, :].T'], {}), '(deviation_from_center, Cinv[c, :, :].T)\n', (5500, 5540), True, 'import numpy as np\n'), ((3195, 3241), 'numpy.dot', 'np.dot', (['deviation_from_center', 'Cinv[c, :, :].T'], {}), '(deviation_from_center, Cinv[c, :, :].T)\n', (3201, 3241), True, 'import numpy as np\n'), ((3371, 3391), 'numpy.exp', 'np.exp', (['(logR - myMax)'], {}), '(logR - myMax)\n', (3377, 3391), True, 'import numpy as np\n'), ((4310, 4344), 'numpy.power', 'np.power', (['deviation_from_center', '(2)'], {}), '(deviation_from_center, 2)\n', (4318, 4344), True, 'import 
numpy as np\n'), ((5313, 5322), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (5319, 5322), True, 'import numpy as np\n'), ((2998, 3007), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (3004, 3007), True, 'import numpy as np\n'), ((5332, 5365), 'numpy.dot', 'np.dot', (['W[c, :, :]', 'Minv[c, :, :]'], {}), '(W[c, :, :], Minv[c, :, :])\n', (5338, 5365), True, 'import numpy as np\n'), ((3017, 3050), 'numpy.dot', 'np.dot', (['W[c, :, :]', 'Minv[c, :, :]'], {}), '(W[c, :, :], Minv[c, :, :])\n', (3023, 3050), True, 'import numpy as np\n')]
|
import os
import numpy as np
folder = ""
file_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(file_path)
map_files = {
"main": "main.csv",
"landmass": "landmass.csv"
}
save_file = "map_saves"
def save(maps):
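    # write each named map array to its CSV file inside the save_file directory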
for map_name in maps:
np.savetxt(save_file + '/' + map_files[map_name], maps[map_name], delimiter = ',')
def load(maps):
for map_name in map_files:
maps[map_name] = np.loadtxt(save_file + '/' + map_files[map_name], delimiter = ',')
return maps
|
[
"numpy.loadtxt",
"os.path.abspath",
"numpy.savetxt",
"os.chdir"
] |
[((121, 140), 'os.chdir', 'os.chdir', (['file_path'], {}), '(file_path)\n', (129, 140), False, 'import os\n'), ((92, 117), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (107, 117), False, 'import os\n'), ((300, 385), 'numpy.savetxt', 'np.savetxt', (["(save_file + '/' + map_files[map_name])", 'maps[map_name]'], {'delimiter': '""","""'}), "(save_file + '/' + map_files[map_name], maps[map_name], delimiter=','\n )\n", (310, 385), True, 'import numpy as np\n'), ((464, 528), 'numpy.loadtxt', 'np.loadtxt', (["(save_file + '/' + map_files[map_name])"], {'delimiter': '""","""'}), "(save_file + '/' + map_files[map_name], delimiter=',')\n", (474, 528), True, 'import numpy as np\n')]
|
from fromTxtToVec.corpus_build import Corpus
from fromTxtToVec.pad import Pad
from fromTxtToVec.BERT_feat import ExtractBertEmb
from fromTxtToVec.train_vector import Embedding
import numpy as np
class To_vec:
def __init__(self, mode, sent_maxlen):
self.mode = mode
self.sent_maxlen = sent_maxlen
def vector(self):
sents, labels = Corpus().read_txt()
pad_sents, pad_labels = Pad(self.sent_maxlen).pad_seq(sents, labels)
if self.mode == 'w2v':
sents_, labels_ = pad_sents, pad_labels
elif self.mode == 'bert':
            path = input('Please enter the absolute or relative path to the BERT model...')
extractor = ExtractBertEmb(bert_path=path)
            granu = input('Please enter the extraction granularity: token or cls')
if granu == 'token':
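                # token-level features: pad or truncate each sentence's token embeddings to (sent_maxlen, 768)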
bert_sents = extractor.extract(sentences=[''.join(i) for i in sents], granularity=granu)
sents_ = []
for s in bert_sents:
if len(s) >= int(self.sent_maxlen):
matrix = s[:int(self.sent_maxlen)]
else:
matrix = np.zeros((int(self.sent_maxlen), 768))
for idx, i in enumerate(s):
matrix[idx] = i
sents_.append(matrix)
            elif granu == 'cls':
sents_ = extractor.extract(sentences=[''.join(i) for i in sents], granularity=granu)
labels_ = pad_labels
return np.array(sents_), labels_
def w2v_matrix(self, emb_size):
sents, labels = Corpus().read_txt()
matrix = Embedding(emb_size=emb_size).w2v(corpus=sents)
return matrix
|
[
"fromTxtToVec.BERT_feat.ExtractBertEmb",
"numpy.array",
"fromTxtToVec.corpus_build.Corpus",
"fromTxtToVec.train_vector.Embedding",
"fromTxtToVec.pad.Pad"
] |
[((1521, 1537), 'numpy.array', 'np.array', (['sents_'], {}), '(sents_)\n', (1529, 1537), True, 'import numpy as np\n'), ((382, 390), 'fromTxtToVec.corpus_build.Corpus', 'Corpus', ([], {}), '()\n', (388, 390), False, 'from fromTxtToVec.corpus_build import Corpus\n'), ((435, 456), 'fromTxtToVec.pad.Pad', 'Pad', (['self.sent_maxlen'], {}), '(self.sent_maxlen)\n', (438, 456), False, 'from fromTxtToVec.pad import Pad\n'), ((682, 712), 'fromTxtToVec.BERT_feat.ExtractBertEmb', 'ExtractBertEmb', ([], {'bert_path': 'path'}), '(bert_path=path)\n', (696, 712), False, 'from fromTxtToVec.BERT_feat import ExtractBertEmb\n'), ((1613, 1621), 'fromTxtToVec.corpus_build.Corpus', 'Corpus', ([], {}), '()\n', (1619, 1621), False, 'from fromTxtToVec.corpus_build import Corpus\n'), ((1651, 1679), 'fromTxtToVec.train_vector.Embedding', 'Embedding', ([], {'emb_size': 'emb_size'}), '(emb_size=emb_size)\n', (1660, 1679), False, 'from fromTxtToVec.train_vector import Embedding\n')]
|
""" Import needed modules """
"-----------------------------------------------------------------------------"
from scipy.integrate import solve_ivp
from Shared_Funcs.pemfc_transport_funcs import *
import cantera as ct
import numpy as np
import sys
""" Control options for derivative functions """
"-----------------------------------------------------------------------------"
# Toggles to turn on/off in/outer rxns, gas transports, or surface tracking:---
pt_rxn = 1
o2_rxn = 1
gas_tog = 1
gdl_tog = 1
surf_tog = 1
""" Define CL dsvdt for core-shell model """
"-----------------------------------------------------------------------------"
def dsvdt_cl_cs(t, sv, dsvdt, objs, p, iSV, gdl_BC):
""" Set up conditions at GDL/CL BC """
    # Initialize indices for looping:------------------------------------------
cl_ymv = 0 # CL y direction mover (y: GDL -> Elyte)
# Load in BC state and flux from GDL:--------------------------------------
TDY1 = gdl_BC['TDY1']
flux_up = gdl_BC['flux_up']
i_io_up = 0 # no protons flow into the GDL
""" Begin loop - with if statements for CL/Elyte BC """
for i in range(cl['Ny']):
# Temperature at each Y node:------------------------------------------
dsvdt[iSV['T_cl'] +cl_ymv] = 0
# Gas phase species at each Y node:------------------------------------
if i == cl['Ny'] -1: # BC for CL and electrolyte interface
flux_dwn = np.zeros(gas_ca.n_species)
else:
rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv +cl['nxt_y']]
TDY2 = sv[iSV['T_cl'] +cl_ymv +cl['nxt_y']], sum(rho_gas_k), rho_gas_k
flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, cl, gas_tog)
# Set the phases for O2 absorption rxn:
rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv]
rho_naf_k = sv[iSV['rho_naf_k'] +cl_ymv]
gas_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_gas_k), rho_gas_k
naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_naf_k), rho_naf_k
rho_dot_g = naf_s_ca.get_net_production_rates(gas_ca) *cl['SApv_naf']\
*cl['1/eps_g'] *gas_ca.molecular_weights *gas_tog
rho_dot_n = naf_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_naf']\
*cl['1/eps_n'] *naf_b_ca.molecular_weights
# Include rxn and flux in ODE term:
dsvdt[iSV['rho_gas_k'] +cl_ymv] = (flux_up - flux_dwn)*cl['1/eps_g']*cl['1/dy']\
+ o2_rxn *rho_dot_g
flux_up = flux_dwn
TDY1 = TDY2
# Nafion densities at each R node:-------------------------------------
        # The Nafion densities change due to reactions at the outer- and inner-
        # most shells as well as fluxes between adjacent shells. The radial
        # terms are stored from the outermost shell to the innermost one.
" Start by evaluating the outermost shell "
# This node contains an O2 absorption rxn with the gas phase as well as
# a maxx flux with the adjacent inner node.
rho_k1 = sv[iSV['rho_naf_k'] +cl_ymv]
rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv +cl['nxt_r']]
rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, 0, ver, 'core_shell')
# Combine absorption and flux to get overall ODE for Nafion densities:
dsvdt[iSV['rho_naf_k'] +cl_ymv] = o2_rxn *rho_dot_n *cl['1/Vf_shl'][0]\
- rho_flx_inr *cl['1/r_j'][0]**2 *cl['1/t_shl'][0]
dsvdt[iSV['rho_naf_k'][cl['iH']] +cl_ymv] = 0 # Ensure constant proton density
rho_flx_otr = rho_flx_inr
rho_k1 = rho_k2
" Evaluate the inner shell nodes "
for j in range(1, cl['Nr'] -1):
rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv +(j+1)*cl['nxt_r']]
rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, j, ver, 'core_shell')
iMid = iSV['rho_naf_k'] +cl_ymv +j*cl['nxt_r']
dsvdt[iMid] = (rho_flx_otr - rho_flx_inr) *cl['1/r_j'][j]**2 *cl['1/t_shl'][j]
rho_flx_otr = rho_flx_inr
rho_k1 = rho_k2
" Apply the Pt reaction BC at the innermost shell "
# Set the phases for the ORR at the Pt surface:
carb_ca.electric_potential = 0
pt_s_ca.electric_potential = 0
naf_b_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
naf_s_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
pt_s_ca.coverages = sv[iSV['theta_pt_k'] +cl_ymv]
naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_k1), rho_k1
rho_dot_n = pt_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_pt']\
*cl['1/eps_n'] *naf_b_ca.molecular_weights
# Pt surface coverages:
dsvdt[iSV['theta_pt_k'] +cl_ymv] = pt_s_ca.get_net_production_rates(pt_s_ca)\
*cl['1/gamma'] *pt_rxn *surf_tog
# Innermost Nafion node densities:
iLast = iSV['rho_naf_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']
dsvdt[iLast] = pt_rxn *rho_dot_n *cl['1/Vf_shl'][-1] \
+ rho_flx_otr *cl['1/r_j'][-1]**2 *cl['1/t_shl'][-1]
# Double layer potential at each Y node:-------------------------------
# The double layer potential is only stored as a function of CL depth.
        # This means that no local potential gradients are stored in the radial
# direction throughout the Nafion shells.
# Find ionic currents and define ODE for phi_dl:
if i == cl['Ny'] -1: # BC for CL and electrolyte interface
i_io_dwn = cl['i_ext']
else:
i_io_dwn = (sv[iSV['phi_dl'] +cl_ymv] - sv[iSV['phi_dl'] +cl_ymv +cl['nxt_y']])\
*cl['sig_naf_io'] *cl['1/dy']
i_Far = pt_rxn *pt_s_ca.get_net_production_rates(carb_ca) *ct.faraday
i_dl = (i_io_up - i_io_dwn)*cl['1/dy'] - i_Far*cl['SApv_pt']
dsvdt[iSV['phi_dl'] +cl_ymv] = i_dl*cl['1/CA_dl']
i_io_up = i_io_dwn
# Update Y direction moving index:-------------------------------------
cl_ymv = cl_ymv +cl['nxt_y']
return dsvdt
""" Define CL dsvdt for flooded-agglomerate model """
"-----------------------------------------------------------------------------"
def dsvdt_cl_fa(t, sv, dsvdt, objs, p, iSV, gdl_BC):
""" Set up conditions at GDL/CL BC """
    # Initialize indices for looping:------------------------------------------
cl_ymv = 0 # CL y direction mover (y: GDL -> Elyte)
# Load in BC state and flux from GDL:--------------------------------------
TDY1 = gdl_BC['TDY1']
flux_up = gdl_BC['flux_up']
i_io_up = 0 # no protons flow into the GDL
""" Begin loop - with if statements for CL/Elyte BC """
for i in range(cl['Ny']):
# Temperature at each Y node:------------------------------------------
dsvdt[iSV['T_cl'] +cl_ymv] = 0
# Gas phase species at each Y node:------------------------------------
if i == cl['Ny'] -1:
flux_dwn = np.zeros(gas_ca.n_species)
else:
rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv +cl['nxt_y']]
TDY2 = sv[iSV['T_cl'] +cl_ymv +cl['nxt_y']], sum(rho_gas_k), rho_gas_k
flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, cl, gas_tog)
# Set the phases for O2 absorption rxn:
rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv]
rho_shl_k = sv[iSV['rho_shl_k'] +cl_ymv]
gas_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_gas_k), rho_gas_k
naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_shl_k), rho_shl_k
rho_dot_g = naf_s_ca.get_net_production_rates(gas_ca) *cl['SApv_naf']\
*cl['1/eps_g'] *gas_ca.molecular_weights *gas_tog
rho_dot_n = naf_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_naf']\
*cl['1/eps_n'] *naf_b_ca.molecular_weights
# Include rxn and flux in ODE term:
dsvdt[iSV['rho_gas_k'] +cl_ymv] = (flux_up - flux_dwn)*cl['1/eps_g']*cl['1/dy']\
+ o2_rxn *rho_dot_g
flux_up = flux_dwn
TDY1 = TDY2
# Nafion densities at each R node:-------------------------------------
# The Nafion densities change due to reactions throughout the inner
# agglomerate as well as fluxes between adjacent radial nodes. The
# direction of storage for the radial terms starts with a single node
# for the outer shell, and then continues from the outer agglomerate
# node into the center.
" Start by evaluating single-node nafion shell "
# This node contains an O2 absorption rxn with the gas phase as well as
# a mass flux with the inner agglomerate.
rho_k1 = sv[iSV['rho_shl_k'] +cl_ymv]
rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv]
rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, 0, ver, 'flooded_agg')
# Combine absorption and flux to get overall ODE:
dsvdt[iSV['rho_shl_k'] +cl_ymv] = o2_rxn *rho_dot_n - rho_flx_inr
dsvdt[iSV['rho_shl_k'][cl['iH']] +cl_ymv] = 0 # Ensure constant proton density
rho_flx_otr = rho_flx_inr
rho_k1 = rho_k2
" Evaluate the inner agglomerate nodes "
# Loop through radial nodes within agglomerate:
i_Far_r = np.zeros(cl['Nr'])
# Set the phases for ORR at the Pt surface:
carb_ca.electric_potential = 0
pt_s_ca.electric_potential = 0
naf_b_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
naf_s_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
for j in range(cl['Nr'] -1):
rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv +(j+1)*cl['nxt_r']]
rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, j+1, ver, 'flooded_agg')
pt_s_ca.coverages = sv[iSV['theta_pt_k'] +cl_ymv +j*cl['nxt_r']]
naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_k1), rho_k1
rho_dot_n = pt_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_pt']\
*cl['1/eps_n'] *naf_b_ca.molecular_weights *cl['Vf_ishl'][j]
i_Far_r[j] = pt_rxn *pt_s_ca.get_net_production_rates(carb_ca)\
*ct.faraday *cl['Vf_ishl'][j]
# Pt surface coverages:
iMid = iSV['theta_pt_k'] +cl_ymv +j*cl['nxt_r']
dsvdt[iMid] = pt_s_ca.get_net_production_rates(pt_s_ca) *cl['1/gamma']\
*pt_rxn *surf_tog
# Combine ORR and flux to get overall ODE for Nafion densities:
iMid = iSV['rho_naf_k'] +cl_ymv +j*cl['nxt_r']
dsvdt[iMid] = rho_flx_otr - rho_flx_inr + pt_rxn *rho_dot_n
dsvdt[iMid[cl['iH']]] = 0 # Ensure constant proton density
rho_flx_otr = rho_flx_inr
rho_k1 = rho_k2
" Apply symmetric flux BC at innermost agglomerate node "
rho_flx_inr = np.zeros(naf_b_ca.n_species)
# Set the phases for ORR at the Pt surface:
pt_s_ca.coverages = sv[iSV['theta_pt_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']]
naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_k1), rho_k1
rho_dot_n = pt_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_pt']\
*cl['1/eps_n'] *naf_b_ca.molecular_weights *cl['Vf_ishl'][-1]
i_Far_r[-1] = pt_rxn *pt_s_ca.get_net_production_rates(carb_ca)\
*ct.faraday *cl['Vf_ishl'][-1]
# Pt surface coverages:
iLast = iSV['theta_pt_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']
dsvdt[iLast] = pt_s_ca.get_net_production_rates(pt_s_ca) *cl['1/gamma']\
*pt_rxn *surf_tog
# Combine ORR and flux to get overall ODE:
iLast = iSV['rho_naf_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']
dsvdt[iLast] = rho_flx_otr - rho_flx_inr + pt_rxn *rho_dot_n
dsvdt[iLast[cl['iH']]] = 0 # Ensure constant proton density
# Double layer potential at each Y node:-------------------------------
# The double layer potential is only stored as a function of CL depth,
# but is based on the reactions that occur throughout the radial
# direction of each agglomerate. Looping through the radial nodes of
# each agglomerate and summing over all faradaic currents is used to
# evaluate an overall double layer current.
" Simplify all radial terms into a single y-dependent double layer "
# Combine the faradaic currents to get overall i_Far:
i_Far = np.sum(i_Far_r)
# Find ionic currents and define ODE for phi_dl:
if i == cl['Ny'] -1:
i_io_dwn = cl['i_ext']
else:
i_io_dwn = (sv[iSV['phi_dl'] +cl_ymv] - sv[iSV['phi_dl'] +cl_ymv +cl['nxt_y']])\
*cl['sig_naf_io'] *cl['1/dy']
i_dl = (i_io_up - i_io_dwn)*cl['1/dy'] - i_Far*cl['SApv_pt']
dsvdt[iSV['phi_dl'] +cl_ymv] = i_dl*cl['1/CA_dl']
i_io_up = i_io_dwn
# Update Y direction moving index:-------------------------------------
cl_ymv = cl_ymv +cl['nxt_y']
return dsvdt
""" Define dsvdt for pemfc models - common for GDL and then CLs above """
"-----------------------------------------------------------------------------"
def dsvdt_func(t, sv, objs, p, iSV):
    # Initialize indices for looping:------------------------------------------
gdl_ymv = 0 # GDL y direction mover (y: gas channel -> CL)
dsvdt = np.zeros_like(sv)
""" Bondary Condition - GDL and CL gas transport """
# Densities/Temp of GDL gas species and CL BC (top):-----------------------
gas_ca.TPY = gdl['TPY_BC']
TDY_BC = gas_ca.TDY
# If GDL diffusion is turned on, compare adjacent nodes with ADF flux to
# determine the BC composition between the GDL and CL.
rho_gdl_k = sv[iSV['rho_gdl_k']]
TDY1 = sv[iSV['T_gdl']], sum(rho_gdl_k), rho_gdl_k
flux_up = fickian_adf(TDY_BC, TDY1, gas_ca, gdl, gdl_tog)
for k in range(gdl['Ny'] -1):
rho_gdl_k = sv[iSV['rho_gdl_k'] +gdl_ymv +gdl['nxt_y']]
TDY2 = sv[iSV['T_gdl'] +gdl_ymv +gdl['nxt_y']], sum(rho_gdl_k), rho_gdl_k
flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, gdl, gdl_tog)
dsvdt[iSV['rho_gdl_k'] +gdl_ymv] = (flux_up - flux_dwn)*gdl['1/eps_g']*gdl['1/dy']
flux_up = flux_dwn
TDY1 = TDY2
gdl_ymv = gdl_ymv +gdl['nxt_y']
# Use the composition and state of the last GDL node to calculate the flux
# into the first CL node.
rho_gas_k = sv[iSV['rho_gas_k']]
TDY2 = sv[iSV['T_cl']], sum(rho_gas_k), rho_gas_k
flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, gdl_cl, gdl_tog)
dsvdt[iSV['rho_gdl_k'] +gdl_ymv] = (flux_up - flux_dwn)*gdl['1/eps_g']*gdl['1/dy']
flux_up = fickian_adf(TDY1, TDY2, gas_ca, gdl_cl, gas_tog)
TDY1 = TDY2
# Load BC values to pass into CL functions:
gdl_BC = {}
gdl_BC['TDY1'] = TDY1
gdl_BC['flux_up'] = flux_up
""" Generic loop for interal CL nodes in y-direction """
if model == 'core_shell':
dsvdt = dsvdt_cl_cs(t, sv, dsvdt, objs, p, iSV, gdl_BC)
elif model == 'flooded_agg':
dsvdt = dsvdt_cl_fa(t, sv, dsvdt, objs, p, iSV, gdl_BC)
# print(t)
# print(dsvdt)
#
# user_in = input('"Enter" to continue or "Ctrl+d" to cancel.')
# if user_in == KeyboardInterrupt:
# sys.exit(0)
return dsvdt
""" Use integrator to call dsvdt and solve to SS """
"-----------------------------------------------------------------------------"
# Create vectors to store outputs:
i_ext = np.hstack([i_OCV, i_ext0, i_ext1, i_ext2])
eta_ss, dphi_ss = np.zeros_like(i_ext), np.zeros_like(i_ext)
sv_save = np.zeros([len(SV_0) +1, len(i_ext)])
# Define common index for last CL node's phi_dl:
iPhi_f = int(iSV['phi_dl'] + (Ny_cl-1)*L_cl/Ny_cl)
# Update and convert i_ext: A/cm^2 -> A/m^2
cl['i_ext'] = i_ext[0] *100**2
sol = solve_ivp(lambda t, sv: dsvdt_func(t, sv, objs, p, iSV), [0, t_sim],
SV_0, method=method, atol=atol, rtol=rtol, max_step=max_t)
# Calculate extra PEM resistance terms to subtract off:
R_naf_vec = i_ext*(pem['R_naf'] + 0.5*cl['dy'] / cl['sig_naf_io'] *100**2)
# Store solution and update initial values:
SV_0, sv_save[:,0] = sol.y[:,-1], np.append(i_ext[0], sol.y[:,-1])
dphi_ss[0] = sol.y[iPhi_f, -1] - dphi_eq_an - R_naf_vec[0]
print('t_f:',sol.t[-1],'i_ext:',round(cl['i_ext']*1e-4,3), 'dPhi:',round(dphi_ss[0],3))
for i in range(len(i_ext) -1):
# Don't run the for loop if i_OCV was not set to 0...
if any([all([i == 0, i_OCV != 0]), polar == 'off']):
break
# Update and convert i_ext: A/cm^2 -> A/m^2
cl['i_ext'] = i_ext[i+1] *100**2
sol = solve_ivp(lambda t, sv: dsvdt_func(t, sv, objs, p, iSV), [0, t_sim],
SV_0, method=method, atol=atol, rtol=rtol, max_step=max_t)
# Store solution and update initial values:
SV_0, sv_save[:,i+1] = sol.y[:,-1], np.append(i_ext[i+1], sol.y[:,-1])
eta_ss[i+1] = dphi_ss[0] - sol.y[iPhi_f,-1]
dphi_ss[i+1] = sol.y[iPhi_f,-1] - dphi_eq_an - R_naf_vec[i+1]
print('t_f:',sol.t[-1], 'i_ext:',round(cl['i_ext']*1e-4,3), 'dPhi:',round(dphi_ss[i+1],3))
|
[
"numpy.zeros_like",
"numpy.sum",
"numpy.zeros",
"numpy.hstack",
"numpy.append"
] |
[((16058, 16100), 'numpy.hstack', 'np.hstack', (['[i_OCV, i_ext0, i_ext1, i_ext2]'], {}), '([i_OCV, i_ext0, i_ext1, i_ext2])\n', (16067, 16100), True, 'import numpy as np\n'), ((13776, 13793), 'numpy.zeros_like', 'np.zeros_like', (['sv'], {}), '(sv)\n', (13789, 13793), True, 'import numpy as np\n'), ((16119, 16139), 'numpy.zeros_like', 'np.zeros_like', (['i_ext'], {}), '(i_ext)\n', (16132, 16139), True, 'import numpy as np\n'), ((16141, 16161), 'numpy.zeros_like', 'np.zeros_like', (['i_ext'], {}), '(i_ext)\n', (16154, 16161), True, 'import numpy as np\n'), ((16749, 16782), 'numpy.append', 'np.append', (['i_ext[0]', 'sol.y[:, -1]'], {}), '(i_ext[0], sol.y[:, -1])\n', (16758, 16782), True, 'import numpy as np\n'), ((9532, 9550), 'numpy.zeros', 'np.zeros', (["cl['Nr']"], {}), "(cl['Nr'])\n", (9540, 9550), True, 'import numpy as np\n'), ((11169, 11197), 'numpy.zeros', 'np.zeros', (['naf_b_ca.n_species'], {}), '(naf_b_ca.n_species)\n', (11177, 11197), True, 'import numpy as np\n'), ((12814, 12829), 'numpy.sum', 'np.sum', (['i_Far_r'], {}), '(i_Far_r)\n', (12820, 12829), True, 'import numpy as np\n'), ((17451, 17488), 'numpy.append', 'np.append', (['i_ext[i + 1]', 'sol.y[:, -1]'], {}), '(i_ext[i + 1], sol.y[:, -1])\n', (17460, 17488), True, 'import numpy as np\n'), ((1460, 1486), 'numpy.zeros', 'np.zeros', (['gas_ca.n_species'], {}), '(gas_ca.n_species)\n', (1468, 1486), True, 'import numpy as np\n'), ((7242, 7268), 'numpy.zeros', 'np.zeros', (['gas_ca.n_species'], {}), '(gas_ca.n_species)\n', (7250, 7268), True, 'import numpy as np\n')]
|
from proteus import Domain, Context
from proteus.mprans import SpatialTools as st
from proteus import Gauges as ga
from proteus import WaveTools as wt
from math import *
import numpy as np
from proteus.mprans import BodyDynamics as bd
import sys
opts=Context.Options([
# predefined test cases
("water_level", 0.325, "Height of free surface above bottom"),
# Geometry
('Lgen', 1.0, 'Genaration zone in terms of wave lengths'),
('Labs', 1.0, 'Absorption zone in terms of wave lengths'),
('Ls', 1.0, 'Length of domain from genZone to the front toe of rubble mound in terms of wave lengths'),
('Lend', 1.0, 'Length of domain from absZone to the back toe of rubble mound in terms of wave lengths'),
# waves
('wave', True, 'Enable wave generation'),
('waveType', 'Fenton', 'Wavetype for regular waves, Linear or Fenton'),
("wave_period", 1.30, "Period of the waves"),
("wave_height", 0.167, "Height of the waves"),
('wavelength', 2.121, 'Wavelength only if Fenton is activated'),
('Ycoeff', [0.21107604, 0.07318902, 0.02782228, 0.01234846, 0.00618291, 0.00346483, 0.00227917, 0.00194241], 'Ycoeff only if Fenton is activated'),
('Bcoeff', [0.23112932, 0.03504843, 0.00431442, 0.00036993, 0.00004245, 0.00001877, 0.00000776, 0.00000196], 'Bcoeff only if Fenton is activated'),
('Nf', 8 ,'Number of frequency components for fenton waves'),
('meanVelocity', [ 0., 0., 0.],'Velocity used for currents'),
('phi0', 0.0 ,'Initial phase for waves'),
('Uwind', [0.0, 0.0, 0.0], 'Set air velocity'),
('fast', True ,'Switches ON fast cosh approximation'),
# rubble mound
('porousMedia', True, 'Enable porus media region'),
("hs", 0.175, "Height of the breakwater"),
("slope1", 1./3., "Slope1 of the breakwater"),
("slope2", 1./2., "Slope2 of the breakwater"),
('porosity', 0.4, "Porosity of the medium"),
('d50', 0.030, "Mean diameter of the medium"),
('d15', None, "15% grading curve diameter of the medium"),
('Resistance', 'Shih', 'Ergun or Engelund or Shih'),
# soil foundation
("springs", True, "Switch on/off soil module"),
("Kx", 541553.2, "Horizontal stiffness in Pa"),
("Ky", 582633.7, "Vertical stiffness in Pa"),
("Krot", 16246.6, "Rotational stiffness in N"),
("Cx", 1694.2, "Damping factor in Pa s "),
("Cy", 1757.32, "Damping factor in Pa s "),
("Crot", 69.61, "Rotational damping factor in N s "),
# caisson
("caisson2D", True, "Switch on/off caisson2D"),
('dimx', 0.300, 'X-dimension of the caisson2D'),
('dimy', 0.385, 'Y-dimension of the caisson2D'),
('width', 1.0, 'Z-dimension of the caisson2D'),
('mass', 64.8/0.4, 'Mass of the caisson2D [kg]'),
('caissonBC', 'FreeSlip', 'caisson2D boundaries: NoSlip or FreeSlip'),
("rotation", False, "Initial position for free oscillation"),
("friction", True, "Switch on/off friction module for sliding"),
("overturning", True, "Switch on/off overturning module"),
("m_static", 0.500, "Static friction factor between caisson2D and rubble mound"),
("m_dynamic", 0.500, "Dynamic friction factor between caisson2D and rubble mound"),
('scheme', 'Runge_Kutta', 'Numerical scheme applied to solve motion calculation (Runge_Kutta or Central_Difference)'),
# numerical options
("GenZone", True, 'Turn on generation zone at left side'),
("AbsZone", True, 'Turn on absorption zone at right side'),
("refinement_level", 0.0,"he=walength/refinement_level"),
("he", 0.05,"he=walength/refinement_level"),
("cfl", 0.450 ,"Target cfl"),
("duration", 20., "Durarion of the simulation"),
("freezeLevelSet", True, "No motion to the levelset"),
("useVF", 1.0, "For density and viscosity smoothing"),
('movingDomain', True, "Moving domain and mesh option"),
('conservativeFlux', True,'Fix post-processing velocity bug for porous interface'),
])
# ----- DOMAIN ----- #
domain = Domain.PlanarStraightLineGraphDomain()
# ----- WAVE CONDITIONS ----- #
period=opts.wave_period
omega=2*np.pi/opts.wave_period
waterLevel=opts.water_level
waveDir=np.array([1, 0., 0.])
mwl=waterLevel #coordinate of the initial mean level of water surface
waveHeight=opts.wave_height
inflowHeightMean=waterLevel
inflowVelocityMean =np.array([0.,0.,0.])
windVelocity = np.array([0.,0.,0.])
# ----- Physical constants ----- #
rho_0=998.2
nu_0 =1.004e-6
rho_1=1.205
nu_1 =1.500e-5
sigma_01=0.0
g =np.array([0.,-9.8,0.])
gAbs=sqrt(sum(g**2))
# ----- WAVE input ----- #
if opts.wave == True:
waveinput = wt.MonochromaticWaves(period=period,
waveHeight=waveHeight,
mwl=mwl,
depth=waterLevel,
g=g,
waveDir=waveDir,
wavelength=opts.wavelength, # used by fenton waves
waveType=opts.waveType,
Ycoeff=np.array(opts.Ycoeff), # used by fenton waves
Bcoeff=np.array(opts.Bcoeff), # used by fenton waves
Nf=opts.Nf, # used by fenton waves
meanVelocity = np.array(opts.meanVelocity),
phi0 = opts.phi0,
fast = opts.fast,
)
#---------Domain Dimension
nd = 2
wl = waveinput.wavelength
#---------MESH SIZE
if opts.he == 0.0:
he = wl/opts.refinement_level
else:
he = opts.he
####################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
# ----- SHAPES ----- #
####################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
if opts.caisson2D:
L_leftSpo = opts.Lgen*wl
L_rightSpo = opts.Labs*wl
hs=opts.hs
slope1=opts.slope1
slope2=opts.slope2
#-caisson2D
dimx=opts.dimx
dimy=opts.dimy
b=dimx
#-Tank
x1=L_leftSpo
x2=x1+opts.Ls*wl
x3=x2+(hs/slope1)
xc1=x3+0.20
xc2=xc1+b
yc1=yc2=hs
x4=xc2+0.20
x5=x4+(hs/slope2)
x6=x5+opts.Lend*wl
x7=x6+L_rightSpo
tank_dim = [x7, 1.0]
boundaryOrientations = {'y-': np.array([0., -1.,0.]),
'x+': np.array([+1., 0.,0.]),
'y+': np.array([0., +1.,0.]),
'x-': np.array([-1., 0.,0.]),
'sponge': None,
'porousLayer': None,
'moving_porousLayer': None,
}
boundaryTags = {'y-' : 1,
'x+' : 2,
'y+' : 3,
'x-' : 4,
'sponge' : 5,
'porousLayer' : 6,
'moving_porousLayer' : 7,
}
else:
L_leftSpo = opts.Lgen*wl
L_rightSpo = opts.Labs*wl
#-Tank
x1=L_leftSpo
x2=x1+opts.Ls*wl
x3=x2+L_rightSpo
tank_dim = [x3, 1.0]
boundaryOrientations = {'y-': np.array([0., -1.,0.]),
'x+': np.array([+1., 0.,0.]),
'y+': np.array([0., +1.,0.]),
'x-': np.array([-1., 0.,0.]),
'sponge': None,
}
boundaryTags = {'y-': 1,
'x+': 2,
'y+': 3,
'x-': 4,
'sponge': 5,
}
##############################################################################################################################################################################################################
# caisson2D
############################################################################################################################################################################################################
if opts.caisson2D:
dimx=dimx
dimy=dimy
dim=(dimx,dimy)
coords=[xc1+b/2., hs+dimy/2.] # For bodyDimensions and barycenter
VCG=dim[1]/2. # For barycenter
width=opts.width # The 3rd dimension
mass=opts.mass #kg
volume=float(dimx*dimy*width)
density=float(mass/volume) #kg/m3
I=mass*(dimx**2.+dimy**2.)/12.
# It=(dimx**2.+dimy**2.)/12.
# --- Shape properties setup
caisson = st.Rectangle(domain, dim=dim, coords=coords)
caisson.vertices[0][0]=xc1
caisson.vertices[0][1]=yc1
caisson.vertices[1][0]=xc2
caisson.vertices[1][1]=yc2
# --- Body properties setup
caisson2D = bd.CaissonBody(shape=caisson, substeps=20)
free_x=(0.0, 0.0, 0.0) # Translational DOFs
free_r=(0.0, 0.0, 0.0) # Rotational DOFs
m_static=opts.m_static # Static friction
m_dynamic=opts.m_dynamic # Dynamic friction
if opts.movingDomain==True:
free_x=(1.0, 1.0, 0.0) # Translational DOFs
if opts.overturning==True:
free_r=(0.0, 0.0, 1.0) # Rotational DOFs
caisson2D.setMass(mass)
caisson2D.setConstraints(free_x=free_x, free_r=free_r)
caisson2D.setFriction(friction=opts.friction, m_static=m_static, m_dynamic=m_dynamic,
tolerance=he/(float(10**6)), grainSize=opts.d50)
overturning=opts.overturning
caisson2D.setOverturning(overturning)
if opts.rotation==True: # Initial position for free oscillation
caisson2D.rotate(rotation)
caisson2D.It= I/caisson2D.mass/width
caisson2D.setNumericalScheme(scheme=opts.scheme)
caisson2D.setRecordValues(filename='caisson2D', all_values=True)
##############################################################################################################################################################################################################
# Tank
#########################################################################################################################################################################################################
if opts.caisson2D==False:
vertices=[[0.0, 0.0],#0
[x1, 0.0],#1
[x2, 0.0], #2
[x3, 0.0 ],#3
[x3, tank_dim[1] ],#4
[x2, tank_dim[1] ],#5
[x1, tank_dim[1] ],#6
[0.0, tank_dim[1] ],#7
]
vertexFlags=np.array([1, 1, 1, 1,
3, 3, 3, 3,
])
segments=[[0,1],
[1,2],
[2,3],
[3,4],
[4,5],
[5,6],
[6,7],
[7,0],
[1,6],
[2,5],
]
segmentFlags=np.array([1, 1, 1,
2, 3, 3, 3, 4,
5, 5,
])
regions = [ [ 0.90*x1 , 0.10*tank_dim[1] ],
[ 0.90*x2 , 0.90*tank_dim[1] ],
[ 0.95*x3 , 0.95*tank_dim[1] ] ]
regionFlags=np.array([1, 2, 3])
else:
vertices=[[0.0, 0.0],#0
[x1, 0.0],#1
[x2, 0.0], #2
[x3, hs ],#3
[x4, hs ],#4
[x5, 0.0],#5
[x6, 0.0],#6
[x7, 0.0],#7
[x7, tank_dim[1]],#8
[x6, tank_dim[1]],#9
[x1, tank_dim[1]],#10
[0.0, tank_dim[1]],#11
[xc1, yc1],#12
[xc2, yc2],#13
]
vertexFlags=np.array([1, 1, 1,
6, 6,
1, 1, 1,
3, 3, 3, 3,
7, 7,
])
segments=[[0,1],
[1,2],
[2,3],
[4,5],
[5,6],
[6,7],
[7,8],
[8,9],
[9,10],
[10,11],
[11,0],
[2,5],
[1,10],
[6,9],
[3,12],
[13,4],
]
segmentFlags=np.array([1, 1,
6, 6,
1, 1,
2, 3, 3, 3, 4,
1,
5, 5,
7, 7,
])
regions = [ [ 0.90*x1 , 0.10*tank_dim[1] ],
[ 0.90*x2 , 0.90*tank_dim[1] ],
[ xc1 , 0.50*hs ],
[ 0.95*x7 , 0.95*tank_dim[1] ] ]
regionFlags=np.array([1, 2, 3, 4])
tank = st.CustomShape(domain, vertices=vertices, vertexFlags=vertexFlags,
segments=segments, segmentFlags=segmentFlags,
regions=regions, regionFlags=regionFlags,
boundaryTags=boundaryTags, boundaryOrientations=boundaryOrientations)
##################################################################################################################################################################################################################
# POROUS MEDIA
##################################################################################################################################################################################################################
porosity=opts.porosity
voidFrac=1.0-porosity
d50=opts.d50
if d50==None:
d15=opts.d15
else:
d15=d50/1.2
#----- SHIH
if opts.Resistance=='Shih':
term1=3.12*(10**-3.)
term2=(gAbs/(nu_0**2.))**(2./3.)
term3=(d15**2.)
Alpha1=1684+term1*term2*term3 #Shih
Alpha=Alpha1*nu_0*(voidFrac**2)/((porosity**3)*(d15**2))
term1=-5.10*(10**-3.)
term2=(gAbs/(nu_0**2.))**(1./3.)
term3=(d15)
Beta1=1.72+1.57*exp(term1*term2*term3) #Shih
Beta=Beta1*voidFrac/((porosity**3)*d15)
#----- ERGUN
if opts.Resistance=='Ergun':
Alpha1=150 #Ergun
Beta1=1.75 #Ergun
Alpha=Alpha1*nu_0*(voidFrac**2)/((porosity**3)*(d15**2))
Beta=Beta1*voidFrac/((porosity**3)*d15)
#----- ENGELUND
if opts.Resistance=='Engelund':
Alpha1=360 #Ergun
Beta1=3.6 #Ergun
Alpha=Alpha1*nu_0*(voidFrac**3)/((porosity**2)*(d15**2))
Beta=Beta1*voidFrac/((porosity**3)*d15)
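# Alpha acts as the linear (viscous) drag coefficient and Beta as the quadratic
# (inertial) drag coefficient of the porous-media closure; both are handed to
# setPorousZones further below after the viscosity rescaling.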
# Proteus scales in viscosity, so I need to divide alpha and beta by nu_0
dragAlpha=(porosity**2)*Alpha/nu_0
dragBeta=(porosity**3)*Beta/nu_0
#----- Spring setup
springs=opts.springs
Kx = opts.Kx
Ky = opts.Ky
Krot = opts.Krot
Cx = opts.Cx
Cy = opts.Cy
Crot = opts.Crot
if opts.caisson2D:
caisson2D.setSprings(springs, Kx, Ky, Krot, Cx, Cy, Crot)
#############################################################################################################################################################################################################################################################################################################################################################################################
# ----- BOUNDARY CONDITIONS ----- #
#############################################################################################################################################################################################################################################################################################################################################################################################
if opts.caisson2D:
# Caisson boundaries
for bc in caisson.BC_list:
if opts.caissonBC == 'FreeSlip':
bc.setFreeSlip()
if opts.caissonBC == 'NoSlip':
bc.setNoSlip()
# Tank Boundaries
tank.BC['y+'].setAtmosphere()
tank.BC['x-'].setUnsteadyTwoPhaseVelocityInlet(wave=waveinput, vert_axis=1, smoothing=3.0*he)
tank.BC['y-'].setFreeSlip()
tank.BC['x+'].setFreeSlip()
tank.BC['sponge'].setNonMaterial()
if opts.caisson2D:
    # Porous media boundaries
tank.BC['porousLayer'].reset()
tank.BC['moving_porousLayer'].reset()
# Moving Mesh Options
if opts.movingDomain==True:
for tb in [tank.BC['x+'], tank.BC['x-'], tank.BC['y+'], tank.BC['y-'], tank.BC['sponge'], tank.BC['porousLayer']]:
tb.hx_dirichlet.uOfXT= lambda x, t: 0.0
tb.hy_dirichlet.uOfXT= lambda x, t: 0.0
tb.hz_dirichlet.uOfXT= lambda x, t: 0.0
tb.u_stress.uOfXT=None
tb.v_stress.uOfXT=None
tb.w_stress.uOfXT=None
ms=tank.BC['moving_porousLayer']
ms.hx_dirichlet.uOfXT= None
ms.hy_dirichlet.uOfXT= None
ms.hz_dirichlet.uOfXT= lambda x, t: 0.0
ms.u_stress.uOfXT=None
ms.v_stress.uOfXT=None
ms.w_stress.uOfXT=None
########################################################################################################################################################################################################################################################################################################################################################
# ----- GENERATION ZONE & ABSORPTION ZONE ----- #
########################################################################################################################################################################################################################################################################################################################################################
# Waves and Generation zone
if opts.GenZone and opts.wave:
tank.setGenerationZones(flags=1, epsFact_solid=float(L_leftSpo/2.),
orientation=[1., 0.], center=(float(L_leftSpo/2.), 0., 0.),
waves=waveinput, smoothing=3.0*he, dragAlpha=10.*omega/nu_0)
# Only Generation zone
elif opts.GenZone:
tank.setAbsorptionZones(flags=1, epsFact_solid=float(L_leftSpo/2.),
orientation=[1., 0.], center=(float(L_leftSpo/2.), 0., 0.),
dragAlpha=10.*omega/nu_0)
# Porous zone
if opts.porousMedia:
tank.setPorousZones(flags=3,
dragAlpha=dragAlpha, dragBeta=dragBeta,
porosity=porosity,)
# Absorption zone
if opts.AbsZone:
if opts.caisson2D:
tank.setAbsorptionZones(flags=4, epsFact_solid=float(L_rightSpo/2.),
orientation=[-1., 0.], center=(float(tank_dim[0]-L_rightSpo/2.), 0., 0.),
dragAlpha=10.*omega/nu_0)
else:
tank.setAbsorptionZones(flags=3, epsFact_solid=float(L_rightSpo/2.),
orientation=[-1., 0.], center=(float(tank_dim[0]-L_rightSpo/2.), 0., 0.),
dragAlpha=10.*omega/nu_0)
############################################################################################################################################################################
# ----- Output Gauges ----- #
############################################################################################################################################################################
T = opts.duration
gauge_dx=0.25
tank_dim_x=int(tank_dim[0])
nprobes=int(tank_dim_x/gauge_dx)+1
probes=np.linspace(0., tank_dim_x, nprobes)
PG=[]
if opts.caisson2D:
zProbes=hs*0.5
else:
zProbes=opts.water_level*0.5
for i in probes:
PG.append((i, zProbes, 0.),)
if opts.caisson2D:
gauge_dy=0.01
tol=np.array([1*(10**-5),1*(10**-5),0.])
i_point_f=np.array([caisson.vertices[0][0],caisson.vertices[0][1],0.])
i_point_f += -tol #to avoid floating point error
i_point_b=np.array([caisson.vertices[1][0],caisson.vertices[1][1],0.])
i_point_b += tol #to avoid floating point error
yProbes = np.linspace(i_point_f[1],i_point_f[1]+dimy, int(dimy/gauge_dy)+1)
LG1=[]
LG2=[]
for j in yProbes:
LG1.append((i_point_f[0],j,0.),)
LG2.append((i_point_b[0],j,0.),)
#point_output=ga.PointGauges(gauges=((('p'),PG),
# ),
# activeTime = (0., T),
# sampleRate=0.,
# fileName='point_gauges.csv')
#loadingsGauges=ga.PointGauges(gauges=((('p'),LG1),
# (('p'),LG2),
# ),
# activeTime = (0., T),
# sampleRate=0.,
# fileName='loadingsGauges.csv')
levelset_output=ga.PointGauges(gauges=((('phi',),PG),
),
activeTime = (0., T),
sampleRate=0.,
fileName='levelset_gauges.csv')
######################################################################################################################################################################################################################
# Numerical Options and other parameters #
######################################################################################################################################################################################################################
he = he
domain.MeshOptions.he = he
from math import *
from proteus import MeshTools, AuxiliaryVariables
import numpy
import proteus.MeshTools
from proteus import Domain
from proteus.Profiling import logEvent
from proteus.default_n import *
from proteus.ctransportCoefficients import smoothedHeaviside
from proteus.ctransportCoefficients import smoothedHeaviside_integral
st.assembleDomain(domain)
#----------------------------------------------------
# Time stepping and velocity
#----------------------------------------------------
weak_bc_penalty_constant = 10.0/nu_0 #100
dt_fixed = 1
dt_init = min(0.1*dt_fixed,0.001)
T = T
nDTout= int(round(T/dt_fixed))
runCFL = opts.cfl
#----------------------------------------------------
# Discretization -- input options
#----------------------------------------------------
checkMass=False
applyCorrection=True
applyRedistancing=True
freezeLevelSet=opts.freezeLevelSet
useOnlyVF = False # if TRUE proteus uses only these modules --> twp_navier_stokes_p + twp_navier_stokes_n
# vof_p + vof_n
movingDomain=opts.movingDomain
useRANS = 0 # 0 -- None
# 1 -- K-Epsilon
# 2 -- K-Omega, 1998
# 3 -- K-Omega, 1988
genMesh=True
# By DEFAULT on the other files.py --> fullNewtonFlag = True
# multilevelNonlinearSolver & levelNonlinearSolver == NonlinearSolvers.Newton
useOldPETSc=False # if TRUE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.PETSc
# if FALSE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.KSP_petsc4py
useSuperlu = False #if TRUE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.LU
spaceOrder = 1
useHex = False # used for discretization, if 1.0 --> CubeGaussQuadrature
# ELSE --> SimplexGaussQuadrature
useRBLES = 0.0 # multiplied with subGridError
useMetrics = 1.0 # if 1.0 --> use of user's parameters as (ns_shockCapturingFactor, ns_lag_shockCapturing, ecc ...)
useVF = opts.useVF # used in the smoothing functions as (1.0-useVF)*smoothedHeaviside(eps_rho,phi) + useVF*fmin(1.0,fmax(0.0,vf))
# Input checks
if spaceOrder not in [1,2]:
print("INVALID: spaceOrder" + spaceOrder)
sys.exit()
if useRBLES not in [0.0, 1.0]:
print("INVALID: useRBLES" + useRBLES)
sys.exit()
if useMetrics not in [0.0, 1.0]:
print("INVALID: useMetrics")
sys.exit()
# Discretization
nd = 2
if spaceOrder == 1:
hFactor=1.0
if useHex:
basis=C0_AffineLinearOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd,3)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,3)
else:
basis=C0_AffineLinearOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd,3)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,3)
#elementBoundaryQuadrature = SimplexLobattoQuadrature(nd-1,1)
elif spaceOrder == 2:
hFactor=0.5
if useHex:
basis=C0_AffineLagrangeOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd,4)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,4)
else:
basis=C0_AffineQuadraticOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd,4)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
# Numerical parameters
ns_forceStrongDirichlet = False
backgroundDiffusionFactor=0.01
if useMetrics:
ns_shockCapturingFactor = 0.5 # magnifies numerical viscosity in NS (smoothening velocity fields)
ns_lag_shockCapturing = True # lagging numerical viscosity speedsup Newton but destabilzes the solution
ns_lag_subgridError = True # less nonlinear but less stable
ls_shockCapturingFactor = 0.5 # numerical diffusion of level set (smoothening phi)
ls_lag_shockCapturing = True # less nonlinear but less stable
ls_sc_uref = 1.0 # reference gradient in numerical solution (higher=more diffusion)
ls_sc_beta = 1.5 # 1 is fully nonlinear, 2 is linear
vof_shockCapturingFactor = 0.5 # numerical diffusion of level set (smoothening volume of fraction)
vof_lag_shockCapturing = True # less nonlinear but less stable
vof_sc_uref = 1.0
vof_sc_beta = 1.5
rd_shockCapturingFactor = 0.5
rd_lag_shockCapturing = False
epsFact_density = 3.0 # control width of water/air transition zone
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = ecH = epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 1.0 # affects smoothing diffusion in mass conservation
redist_Newton = True
kappa_shockCapturingFactor = 0.5
kappa_lag_shockCapturing = True # False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.5
dissipation_shockCapturingFactor = 0.5
dissipation_lag_shockCapturing = True # False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.5
else:
ns_shockCapturingFactor = 0.9
ns_lag_shockCapturing = True
ns_lag_subgridError = True
ls_shockCapturingFactor = 0.9
ls_lag_shockCapturing = True
ls_sc_uref = 1.0
ls_sc_beta = 1.0
vof_shockCapturingFactor = 0.9
vof_lag_shockCapturing = True
vof_sc_uref = 1.0
vof_sc_beta = 1.0
rd_shockCapturingFactor = 0.9
rd_lag_shockCapturing = False
epsFact_density = 1.5
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 10.0
redist_Newton = False
kappa_shockCapturingFactor = 0.9
kappa_lag_shockCapturing = True#False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.0
dissipation_shockCapturingFactor = 0.9
dissipation_lag_shockCapturing = True#False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.0
ns_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
vof_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
ls_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
mcorr_nl_atol_res = max(1.0e-12,0.0001*domain.MeshOptions.he**2)
rd_nl_atol_res = max(1.0e-12,0.01*domain.MeshOptions.he)
kappa_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
dissipation_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
mesh_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
#turbulence
ns_closure=0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
if useRANS == 1:
ns_closure = 3
elif useRANS >= 2:
    ns_closure = 4
# Initial condition
waterLine_x = 2*tank_dim[0]
waterLine_z = waterLevel
def waveHeight(x,t):
waterDepth = waveinput.eta(x, t) + waveinput.mwl
return waterDepth
def wavePhi(x,t):
    return x[nd-1] - waveHeight(x,t)
def waveVF(x,t):
return smoothedHeaviside(epsFact_consrv_heaviside*he,wavePhi(x,t))
def signedDistance(x):
phi_x = x[0]-waterLine_x
phi_z = x[nd-1]-waterLine_z
if phi_x < 0.0:
if phi_z < 0.0:
return max(phi_x,phi_z)
else:
return phi_z
else:
if phi_z < 0.0:
return phi_x
else:
return sqrt(phi_x**2 + phi_z**2)
|
[
"proteus.mprans.BodyDynamics.CaissonBody",
"proteus.Domain.PlanarStraightLineGraphDomain",
"proteus.Gauges.PointGauges",
"proteus.mprans.SpatialTools.assembleDomain",
"proteus.mprans.SpatialTools.CustomShape",
"numpy.array",
"numpy.linspace",
"proteus.mprans.SpatialTools.Rectangle",
"proteus.Context.Options"
] |
[((242, 3785), 'proteus.Context.Options', 'Context.Options', (["[('water_level', 0.325, 'Height of free surface above bottom'), ('Lgen', \n 1.0, 'Genaration zone in terms of wave lengths'), ('Labs', 1.0,\n 'Absorption zone in terms of wave lengths'), ('Ls', 1.0,\n 'Length of domain from genZone to the front toe of rubble mound in terms of wave lengths'\n ), ('Lend', 1.0,\n 'Length of domain from absZone to the back toe of rubble mound in terms of wave lengths'\n ), ('wave', True, 'Enable wave generation'), ('waveType', 'Fenton',\n 'Wavetype for regular waves, Linear or Fenton'), ('wave_period', 1.3,\n 'Period of the waves'), ('wave_height', 0.167, 'Height of the waves'),\n ('wavelength', 2.121, 'Wavelength only if Fenton is activated'), (\n 'Ycoeff', [0.21107604, 0.07318902, 0.02782228, 0.01234846, 0.00618291, \n 0.00346483, 0.00227917, 0.00194241],\n 'Ycoeff only if Fenton is activated'), ('Bcoeff', [0.23112932, \n 0.03504843, 0.00431442, 0.00036993, 4.245e-05, 1.877e-05, 7.76e-06, \n 1.96e-06], 'Bcoeff only if Fenton is activated'), ('Nf', 8,\n 'Number of frequency components for fenton waves'), ('meanVelocity', [\n 0.0, 0.0, 0.0], 'Velocity used for currents'), ('phi0', 0.0,\n 'Initial phase for waves'), ('Uwind', [0.0, 0.0, 0.0],\n 'Set air velocity'), ('fast', True,\n 'Switches ON fast cosh approximation'), ('porousMedia', True,\n 'Enable porus media region'), ('hs', 0.175, 'Height of the breakwater'),\n ('slope1', 1.0 / 3.0, 'Slope1 of the breakwater'), ('slope2', 1.0 / 2.0,\n 'Slope2 of the breakwater'), ('porosity', 0.4, 'Porosity of the medium'\n ), ('d50', 0.03, 'Mean diameter of the medium'), ('d15', None,\n '15% grading curve diameter of the medium'), ('Resistance', 'Shih',\n 'Ergun or Engelund or Shih'), ('springs', True,\n 'Switch on/off soil module'), ('Kx', 541553.2,\n 'Horizontal stiffness in Pa'), ('Ky', 582633.7,\n 'Vertical stiffness in Pa'), ('Krot', 16246.6,\n 'Rotational stiffness in N'), ('Cx', 1694.2, 'Damping factor in Pa s '),\n ('Cy', 1757.32, 'Damping factor in Pa s '), ('Crot', 69.61,\n 'Rotational damping factor in N s '), ('caisson2D', True,\n 'Switch on/off caisson2D'), ('dimx', 0.3,\n 'X-dimension of the caisson2D'), ('dimy', 0.385,\n 'Y-dimension of the caisson2D'), ('width', 1.0,\n 'Z-dimension of the caisson2D'), ('mass', 64.8 / 0.4,\n 'Mass of the caisson2D [kg]'), ('caissonBC', 'FreeSlip',\n 'caisson2D boundaries: NoSlip or FreeSlip'), ('rotation', False,\n 'Initial position for free oscillation'), ('friction', True,\n 'Switch on/off friction module for sliding'), ('overturning', True,\n 'Switch on/off overturning module'), ('m_static', 0.5,\n 'Static friction factor between caisson2D and rubble mound'), (\n 'm_dynamic', 0.5,\n 'Dynamic friction factor between caisson2D and rubble mound'), (\n 'scheme', 'Runge_Kutta',\n 'Numerical scheme applied to solve motion calculation (Runge_Kutta or Central_Difference)'\n ), ('GenZone', True, 'Turn on generation zone at left side'), (\n 'AbsZone', True, 'Turn on absorption zone at right side'), (\n 'refinement_level', 0.0, 'he=walength/refinement_level'), ('he', 0.05,\n 'he=walength/refinement_level'), ('cfl', 0.45, 'Target cfl'), (\n 'duration', 20.0, 'Durarion of the simulation'), ('freezeLevelSet', \n True, 'No motion to the levelset'), ('useVF', 1.0,\n 'For density and viscosity smoothing'), ('movingDomain', True,\n 'Moving domain and mesh option'), ('conservativeFlux', True,\n 'Fix post-processing velocity bug for porous interface')]"], {}), "([('water_level', 0.325,\n 'Height of free surface above 
bottom'), ('Lgen', 1.0,\n 'Genaration zone in terms of wave lengths'), ('Labs', 1.0,\n 'Absorption zone in terms of wave lengths'), ('Ls', 1.0,\n 'Length of domain from genZone to the front toe of rubble mound in terms of wave lengths'\n ), ('Lend', 1.0,\n 'Length of domain from absZone to the back toe of rubble mound in terms of wave lengths'\n ), ('wave', True, 'Enable wave generation'), ('waveType', 'Fenton',\n 'Wavetype for regular waves, Linear or Fenton'), ('wave_period', 1.3,\n 'Period of the waves'), ('wave_height', 0.167, 'Height of the waves'),\n ('wavelength', 2.121, 'Wavelength only if Fenton is activated'), (\n 'Ycoeff', [0.21107604, 0.07318902, 0.02782228, 0.01234846, 0.00618291, \n 0.00346483, 0.00227917, 0.00194241],\n 'Ycoeff only if Fenton is activated'), ('Bcoeff', [0.23112932, \n 0.03504843, 0.00431442, 0.00036993, 4.245e-05, 1.877e-05, 7.76e-06, \n 1.96e-06], 'Bcoeff only if Fenton is activated'), ('Nf', 8,\n 'Number of frequency components for fenton waves'), ('meanVelocity', [\n 0.0, 0.0, 0.0], 'Velocity used for currents'), ('phi0', 0.0,\n 'Initial phase for waves'), ('Uwind', [0.0, 0.0, 0.0],\n 'Set air velocity'), ('fast', True,\n 'Switches ON fast cosh approximation'), ('porousMedia', True,\n 'Enable porus media region'), ('hs', 0.175, 'Height of the breakwater'),\n ('slope1', 1.0 / 3.0, 'Slope1 of the breakwater'), ('slope2', 1.0 / 2.0,\n 'Slope2 of the breakwater'), ('porosity', 0.4, 'Porosity of the medium'\n ), ('d50', 0.03, 'Mean diameter of the medium'), ('d15', None,\n '15% grading curve diameter of the medium'), ('Resistance', 'Shih',\n 'Ergun or Engelund or Shih'), ('springs', True,\n 'Switch on/off soil module'), ('Kx', 541553.2,\n 'Horizontal stiffness in Pa'), ('Ky', 582633.7,\n 'Vertical stiffness in Pa'), ('Krot', 16246.6,\n 'Rotational stiffness in N'), ('Cx', 1694.2, 'Damping factor in Pa s '),\n ('Cy', 1757.32, 'Damping factor in Pa s '), ('Crot', 69.61,\n 'Rotational damping factor in N s '), ('caisson2D', True,\n 'Switch on/off caisson2D'), ('dimx', 0.3,\n 'X-dimension of the caisson2D'), ('dimy', 0.385,\n 'Y-dimension of the caisson2D'), ('width', 1.0,\n 'Z-dimension of the caisson2D'), ('mass', 64.8 / 0.4,\n 'Mass of the caisson2D [kg]'), ('caissonBC', 'FreeSlip',\n 'caisson2D boundaries: NoSlip or FreeSlip'), ('rotation', False,\n 'Initial position for free oscillation'), ('friction', True,\n 'Switch on/off friction module for sliding'), ('overturning', True,\n 'Switch on/off overturning module'), ('m_static', 0.5,\n 'Static friction factor between caisson2D and rubble mound'), (\n 'm_dynamic', 0.5,\n 'Dynamic friction factor between caisson2D and rubble mound'), (\n 'scheme', 'Runge_Kutta',\n 'Numerical scheme applied to solve motion calculation (Runge_Kutta or Central_Difference)'\n ), ('GenZone', True, 'Turn on generation zone at left side'), (\n 'AbsZone', True, 'Turn on absorption zone at right side'), (\n 'refinement_level', 0.0, 'he=walength/refinement_level'), ('he', 0.05,\n 'he=walength/refinement_level'), ('cfl', 0.45, 'Target cfl'), (\n 'duration', 20.0, 'Durarion of the simulation'), ('freezeLevelSet', \n True, 'No motion to the levelset'), ('useVF', 1.0,\n 'For density and viscosity smoothing'), ('movingDomain', True,\n 'Moving domain and mesh option'), ('conservativeFlux', True,\n 'Fix post-processing velocity bug for porous interface')])\n", (257, 3785), False, 'from proteus import Domain, Context\n'), ((3941, 3979), 'proteus.Domain.PlanarStraightLineGraphDomain', 'Domain.PlanarStraightLineGraphDomain', ([], {}), '()\n', 
(3977, 3979), False, 'from proteus import Domain\n'), ((4109, 4132), 'numpy.array', 'np.array', (['[1, 0.0, 0.0]'], {}), '([1, 0.0, 0.0])\n', (4117, 4132), True, 'import numpy as np\n'), ((4279, 4304), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4287, 4304), True, 'import numpy as np\n'), ((4315, 4340), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4323, 4340), True, 'import numpy as np\n'), ((4444, 4470), 'numpy.array', 'np.array', (['[0.0, -9.8, 0.0]'], {}), '([0.0, -9.8, 0.0])\n', (4452, 4470), True, 'import numpy as np\n'), ((13442, 13681), 'proteus.mprans.SpatialTools.CustomShape', 'st.CustomShape', (['domain'], {'vertices': 'vertices', 'vertexFlags': 'vertexFlags', 'segments': 'segments', 'segmentFlags': 'segmentFlags', 'regions': 'regions', 'regionFlags': 'regionFlags', 'boundaryTags': 'boundaryTags', 'boundaryOrientations': 'boundaryOrientations'}), '(domain, vertices=vertices, vertexFlags=vertexFlags, segments\n =segments, segmentFlags=segmentFlags, regions=regions, regionFlags=\n regionFlags, boundaryTags=boundaryTags, boundaryOrientations=\n boundaryOrientations)\n', (13456, 13681), True, 'from proteus.mprans import SpatialTools as st\n'), ((19993, 20030), 'numpy.linspace', 'np.linspace', (['(0.0)', 'tank_dim_x', 'nprobes'], {}), '(0.0, tank_dim_x, nprobes)\n', (20004, 20030), True, 'import numpy as np\n'), ((21248, 21362), 'proteus.Gauges.PointGauges', 'ga.PointGauges', ([], {'gauges': "((('phi',), PG),)", 'activeTime': '(0.0, T)', 'sampleRate': '(0.0)', 'fileName': '"""levelset_gauges.csv"""'}), "(gauges=((('phi',), PG),), activeTime=(0.0, T), sampleRate=\n 0.0, fileName='levelset_gauges.csv')\n", (21262, 21362), True, 'from proteus import Gauges as ga\n'), ((22319, 22344), 'proteus.mprans.SpatialTools.assembleDomain', 'st.assembleDomain', (['domain'], {}), '(domain)\n', (22336, 22344), True, 'from proteus.mprans import SpatialTools as st\n'), ((9337, 9381), 'proteus.mprans.SpatialTools.Rectangle', 'st.Rectangle', (['domain'], {'dim': 'dim', 'coords': 'coords'}), '(domain, dim=dim, coords=coords)\n', (9349, 9381), True, 'from proteus.mprans import SpatialTools as st\n'), ((9552, 9594), 'proteus.mprans.BodyDynamics.CaissonBody', 'bd.CaissonBody', ([], {'shape': 'caisson', 'substeps': '(20)'}), '(shape=caisson, substeps=20)\n', (9566, 9594), True, 'from proteus.mprans import BodyDynamics as bd\n'), ((11288, 11322), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 3, 3, 3, 3]'], {}), '([1, 1, 1, 1, 3, 3, 3, 3])\n', (11296, 11322), True, 'import numpy as np\n'), ((11636, 11676), 'numpy.array', 'np.array', (['[1, 1, 1, 2, 3, 3, 3, 4, 5, 5]'], {}), '([1, 1, 1, 2, 3, 3, 3, 4, 5, 5])\n', (11644, 11676), True, 'import numpy as np\n'), ((11914, 11933), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (11922, 11933), True, 'import numpy as np\n'), ((12411, 12463), 'numpy.array', 'np.array', (['[1, 1, 1, 6, 6, 1, 1, 1, 3, 3, 3, 3, 7, 7]'], {}), '([1, 1, 1, 6, 6, 1, 1, 1, 3, 3, 3, 3, 7, 7])\n', (12419, 12463), True, 'import numpy as np\n'), ((12974, 13032), 'numpy.array', 'np.array', (['[1, 1, 6, 6, 1, 1, 2, 3, 3, 3, 4, 1, 5, 5, 7, 7]'], {}), '([1, 1, 6, 6, 1, 1, 2, 3, 3, 3, 4, 1, 5, 5, 7, 7])\n', (12982, 13032), True, 'import numpy as np\n'), ((13409, 13431), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (13417, 13431), True, 'import numpy as np\n'), ((20209, 20252), 'numpy.array', 'np.array', (['[1 * 10 ** -5, 1 * 10 ** -5, 0.0]'], {}), '([1 * 10 ** -5, 1 * 10 ** -5, 0.0])\n', (20217, 
20252), True, 'import numpy as np\n'), ((20260, 20323), 'numpy.array', 'np.array', (['[caisson.vertices[0][0], caisson.vertices[0][1], 0.0]'], {}), '([caisson.vertices[0][0], caisson.vertices[0][1], 0.0])\n', (20268, 20323), True, 'import numpy as np\n'), ((20388, 20451), 'numpy.array', 'np.array', (['[caisson.vertices[1][0], caisson.vertices[1][1], 0.0]'], {}), '([caisson.vertices[1][0], caisson.vertices[1][1], 0.0])\n', (20396, 20451), True, 'import numpy as np\n'), ((7161, 7187), 'numpy.array', 'np.array', (['[0.0, -1.0, 0.0]'], {}), '([0.0, -1.0, 0.0])\n', (7169, 7187), True, 'import numpy as np\n'), ((7219, 7245), 'numpy.array', 'np.array', (['[+1.0, 0.0, 0.0]'], {}), '([+1.0, 0.0, 0.0])\n', (7227, 7245), True, 'import numpy as np\n'), ((7277, 7303), 'numpy.array', 'np.array', (['[0.0, +1.0, 0.0]'], {}), '([0.0, +1.0, 0.0])\n', (7285, 7303), True, 'import numpy as np\n'), ((7335, 7361), 'numpy.array', 'np.array', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (7343, 7361), True, 'import numpy as np\n'), ((8009, 8035), 'numpy.array', 'np.array', (['[0.0, -1.0, 0.0]'], {}), '([0.0, -1.0, 0.0])\n', (8017, 8035), True, 'import numpy as np\n'), ((8067, 8093), 'numpy.array', 'np.array', (['[+1.0, 0.0, 0.0]'], {}), '([+1.0, 0.0, 0.0])\n', (8075, 8093), True, 'import numpy as np\n'), ((8125, 8151), 'numpy.array', 'np.array', (['[0.0, +1.0, 0.0]'], {}), '([0.0, +1.0, 0.0])\n', (8133, 8151), True, 'import numpy as np\n'), ((8183, 8209), 'numpy.array', 'np.array', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (8191, 8209), True, 'import numpy as np\n'), ((5026, 5047), 'numpy.array', 'np.array', (['opts.Ycoeff'], {}), '(opts.Ycoeff)\n', (5034, 5047), True, 'import numpy as np\n'), ((5117, 5138), 'numpy.array', 'np.array', (['opts.Bcoeff'], {}), '(opts.Bcoeff)\n', (5125, 5138), True, 'import numpy as np\n'), ((5307, 5334), 'numpy.array', 'np.array', (['opts.meanVelocity'], {}), '(opts.meanVelocity)\n', (5315, 5334), True, 'import numpy as np\n')]
|
"""
Helper functions for calculating MMD and performing MMD test
This module contains original code from: https://github.com/fengliu90/DK-for-TST
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import torch
def get_item(x):
"""get the numpy value from a torch tensor."""
x = x.cpu().detach().numpy()
return x
def Pdist2(x, y):
"""compute the paired distance between x and y."""
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y = x
y_norm = x_norm.view(1, -1)
Pdist = x_norm + y_norm - 2.0 * torch.mm(x, torch.transpose(y, 0, 1))
Pdist[Pdist<0]=0
return Pdist
def h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed, use_1sample_U=True):
"""compute value of MMD and std of MMD using kernel matrix."""
Kxxy = torch.cat((Kx,Kxy),1)
Kyxy = torch.cat((Kxy.transpose(0,1),Ky),1)
Kxyxy = torch.cat((Kxxy,Kyxy),0)
nx = Kx.shape[0]
ny = Ky.shape[0]
is_unbiased = True
if is_unbiased:
xx = torch.div((torch.sum(Kx) - torch.sum(torch.diag(Kx))), (nx * (nx - 1)))
yy = torch.div((torch.sum(Ky) - torch.sum(torch.diag(Ky))), (ny * (ny - 1)))
# one-sample U-statistic.
if use_1sample_U:
xy = torch.div((torch.sum(Kxy) - torch.sum(torch.diag(Kxy))), (nx * (ny - 1)))
else:
xy = torch.div(torch.sum(Kxy), (nx * ny))
mmd2 = xx - 2 * xy + yy
else:
xx = torch.div((torch.sum(Kx)), (nx * nx))
yy = torch.div((torch.sum(Ky)), (ny * ny))
# one-sample U-statistic.
if use_1sample_U:
xy = torch.div((torch.sum(Kxy)), (nx * ny))
else:
xy = torch.div(torch.sum(Kxy), (nx * ny))
mmd2 = xx - 2 * xy + yy
if not is_var_computed:
return mmd2, None, Kxyxy
hh = Kx+Ky-Kxy-Kxy.transpose(0,1)
V1 = torch.dot(hh.sum(1)/ny,hh.sum(1)/ny) / ny
V2 = (hh).sum() / (nx) / nx
varEst = 4*(V1 - V2**2)
return mmd2, varEst, Kxyxy
def MMDu(Fea, len_s, Fea_org, sigma, sigma0=0.1, epsilon = 10**(-10), is_smooth=True, is_var_computed=True, use_1sample_U=True):
"""compute value of deep-kernel MMD and std of deep-kernel MMD using merged data."""
X = Fea[0:len_s, :] # fetch the sample 1 (features of deep networks)
Y = Fea[len_s:, :] # fetch the sample 2 (features of deep networks)
X_org = Fea_org[0:len_s, :] # fetch the original sample 1
Y_org = Fea_org[len_s:, :] # fetch the original sample 2
L = 1 # generalized Gaussian (if L>1)
Dxx = Pdist2(X, X)
Dyy = Pdist2(Y, Y)
Dxy = Pdist2(X, Y)
Dxx_org = Pdist2(X_org, X_org)
Dyy_org = Pdist2(Y_org, Y_org)
Dxy_org = Pdist2(X_org, Y_org)
if is_smooth:
Kx = (1-epsilon) * torch.exp(-(Dxx / sigma0)**L -Dxx_org / sigma) + epsilon * torch.exp(-Dxx_org / sigma)
Ky = (1-epsilon) * torch.exp(-(Dyy / sigma0)**L -Dyy_org / sigma) + epsilon * torch.exp(-Dyy_org / sigma)
Kxy = (1-epsilon) * torch.exp(-(Dxy / sigma0)**L -Dxy_org / sigma) + epsilon * torch.exp(-Dxy_org / sigma)
else:
Kx = torch.exp(-Dxx / sigma0)
Ky = torch.exp(-Dyy / sigma0)
Kxy = torch.exp(-Dxy / sigma0)
return h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed, use_1sample_U)
def TST_MMD_u(Fea, N_per, N1, Fea_org, sigma, sigma0, ep, alpha, device, dtype, is_smooth=True):
"""run two-sample test (TST) using deep kernel kernel."""
mmd_vector = np.zeros(N_per)
TEMP = MMDu(Fea, N1, Fea_org, sigma, sigma0, ep, is_smooth)
mmd_value = get_item(TEMP[0])
Kxyxy = TEMP[2]
count = 0
nxy = Fea.shape[0]
nx = N1
for r in range(N_per):
# print r
ind = np.random.choice(nxy, nxy, replace=False)
# divide into new X, Y
indx = ind[:nx]
# print(indx)
indy = ind[nx:]
Kx = Kxyxy[np.ix_(indx, indx)]
# print(Kx)
Ky = Kxyxy[np.ix_(indy, indy)]
Kxy = Kxyxy[np.ix_(indx, indy)]
TEMP = h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed=False)
mmd_vector[r] = TEMP[0]
if mmd_vector[r] > mmd_value:
count = count + 1
if count > np.ceil(N_per * alpha):
h = 0
threshold = "NaN"
break
else:
h = 1
if h == 1:
S_mmd_vector = np.sort(mmd_vector)
# print(np.int(np.ceil(N_per*alpha)))
        threshold = S_mmd_vector[int(np.ceil(N_per * (1 - alpha)))]
return h, threshold, mmd_value.item()
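# Minimal usage sketch (illustrative only: the sample sizes, feature choice and kernel
# widths below are arbitrary assumptions, not settings from the original experiments).
if __name__ == "__main__":
    n = 50
    X = torch.randn(n, 2)
    Y = torch.randn(n, 2) + 0.5
    Fea = torch.cat([X, Y], 0)
    # For this sketch the raw data doubles as its own "deep feature".
    h, threshold, mmd_value = TST_MMD_u(Fea, N_per=100, N1=n, Fea_org=Fea, sigma=1.0,
                                         sigma0=0.1, ep=1e-10, alpha=0.05,
                                         device="cpu", dtype=torch.float)
    print("reject H0:", h, "threshold:", threshold, "MMD^2:", mmd_value)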
|
[
"numpy.ceil",
"numpy.ix_",
"numpy.zeros",
"torch.cat",
"torch.diag",
"torch.exp",
"numpy.sort",
"numpy.random.choice",
"torch.sum",
"torch.transpose"
] |
[((1922, 1945), 'torch.cat', 'torch.cat', (['(Kx, Kxy)', '(1)'], {}), '((Kx, Kxy), 1)\n', (1931, 1945), False, 'import torch\n'), ((2006, 2032), 'torch.cat', 'torch.cat', (['(Kxxy, Kyxy)', '(0)'], {}), '((Kxxy, Kyxy), 0)\n', (2015, 2032), False, 'import torch\n'), ((4606, 4621), 'numpy.zeros', 'np.zeros', (['N_per'], {}), '(N_per)\n', (4614, 4621), True, 'import numpy as np\n'), ((4245, 4269), 'torch.exp', 'torch.exp', (['(-Dxx / sigma0)'], {}), '(-Dxx / sigma0)\n', (4254, 4269), False, 'import torch\n'), ((4284, 4308), 'torch.exp', 'torch.exp', (['(-Dyy / sigma0)'], {}), '(-Dyy / sigma0)\n', (4293, 4308), False, 'import torch\n'), ((4324, 4348), 'torch.exp', 'torch.exp', (['(-Dxy / sigma0)'], {}), '(-Dxy / sigma0)\n', (4333, 4348), False, 'import torch\n'), ((4857, 4898), 'numpy.random.choice', 'np.random.choice', (['nxy', 'nxy'], {'replace': '(False)'}), '(nxy, nxy, replace=False)\n', (4873, 4898), True, 'import numpy as np\n'), ((5507, 5526), 'numpy.sort', 'np.sort', (['mmd_vector'], {}), '(mmd_vector)\n', (5514, 5526), True, 'import numpy as np\n'), ((2585, 2598), 'torch.sum', 'torch.sum', (['Kx'], {}), '(Kx)\n', (2594, 2598), False, 'import torch\n'), ((2637, 2650), 'torch.sum', 'torch.sum', (['Ky'], {}), '(Ky)\n', (2646, 2650), False, 'import torch\n'), ((5024, 5042), 'numpy.ix_', 'np.ix_', (['indx', 'indx'], {}), '(indx, indx)\n', (5030, 5042), True, 'import numpy as np\n'), ((5085, 5103), 'numpy.ix_', 'np.ix_', (['indy', 'indy'], {}), '(indy, indy)\n', (5091, 5103), True, 'import numpy as np\n'), ((5126, 5144), 'numpy.ix_', 'np.ix_', (['indx', 'indy'], {}), '(indx, indy)\n', (5132, 5144), True, 'import numpy as np\n'), ((5340, 5362), 'numpy.ceil', 'np.ceil', (['(N_per * alpha)'], {}), '(N_per * alpha)\n', (5347, 5362), True, 'import numpy as np\n'), ((1701, 1725), 'torch.transpose', 'torch.transpose', (['y', '(0)', '(1)'], {}), '(y, 0, 1)\n', (1716, 1725), False, 'import torch\n'), ((2145, 2158), 'torch.sum', 'torch.sum', (['Kx'], {}), '(Kx)\n', (2154, 2158), False, 'import torch\n'), ((2231, 2244), 'torch.sum', 'torch.sum', (['Ky'], {}), '(Ky)\n', (2240, 2244), False, 'import torch\n'), ((2489, 2503), 'torch.sum', 'torch.sum', (['Kxy'], {}), '(Kxy)\n', (2498, 2503), False, 'import torch\n'), ((2755, 2769), 'torch.sum', 'torch.sum', (['Kxy'], {}), '(Kxy)\n', (2764, 2769), False, 'import torch\n'), ((2826, 2840), 'torch.sum', 'torch.sum', (['Kxy'], {}), '(Kxy)\n', (2835, 2840), False, 'import torch\n'), ((3902, 3951), 'torch.exp', 'torch.exp', (['(-(Dxx / sigma0) ** L - Dxx_org / sigma)'], {}), '(-(Dxx / sigma0) ** L - Dxx_org / sigma)\n', (3911, 3951), False, 'import torch\n'), ((3961, 3988), 'torch.exp', 'torch.exp', (['(-Dxx_org / sigma)'], {}), '(-Dxx_org / sigma)\n', (3970, 3988), False, 'import torch\n'), ((4017, 4066), 'torch.exp', 'torch.exp', (['(-(Dyy / sigma0) ** L - Dyy_org / sigma)'], {}), '(-(Dyy / sigma0) ** L - Dyy_org / sigma)\n', (4026, 4066), False, 'import torch\n'), ((4076, 4103), 'torch.exp', 'torch.exp', (['(-Dyy_org / sigma)'], {}), '(-Dyy_org / sigma)\n', (4085, 4103), False, 'import torch\n'), ((4133, 4182), 'torch.exp', 'torch.exp', (['(-(Dxy / sigma0) ** L - Dxy_org / sigma)'], {}), '(-(Dxy / sigma0) ** L - Dxy_org / sigma)\n', (4142, 4182), False, 'import torch\n'), ((4192, 4219), 'torch.exp', 'torch.exp', (['(-Dxy_org / sigma)'], {}), '(-Dxy_org / sigma)\n', (4201, 4219), False, 'import torch\n'), ((5622, 5650), 'numpy.ceil', 'np.ceil', (['(N_per * (1 - alpha))'], {}), '(N_per * (1 - alpha))\n', (5629, 5650), True, 'import numpy as np\n'), ((2171, 
2185), 'torch.diag', 'torch.diag', (['Kx'], {}), '(Kx)\n', (2181, 2185), False, 'import torch\n'), ((2257, 2271), 'torch.diag', 'torch.diag', (['Ky'], {}), '(Ky)\n', (2267, 2271), False, 'import torch\n'), ((2383, 2397), 'torch.sum', 'torch.sum', (['Kxy'], {}), '(Kxy)\n', (2392, 2397), False, 'import torch\n'), ((2410, 2425), 'torch.diag', 'torch.diag', (['Kxy'], {}), '(Kxy)\n', (2420, 2425), False, 'import torch\n')]
|
import numpy as np
from blind_walking.envs.env_modifiers.env_modifier import EnvModifier
from blind_walking.envs.env_modifiers.heightfield import HeightField
from blind_walking.envs.env_modifiers.stairs import Stairs, boxHalfLength, boxHalfWidth
""" Train robot to walk up stairs curriculum.
Equal chances for the robot to encounter going up and going down the stairs.
"""
class TrainStairs(EnvModifier):
def __init__(self):
super().__init__()
self.step_rise_levels = [0.02, 0.05, 0.075, 0.10]
self.num_levels = len(self.step_rise_levels)
self.num_steps = 10
self.stair_gap = 1.5
self.step_run = 0.3
self.stair_length = (self.num_steps - 1) * self.step_run * 2 + boxHalfLength * 2 * 2
self._level = 0
self.stairs = []
for _ in range(self.num_levels):
self.stairs.append(Stairs())
def _generate(self, env):
start_x = self.stair_gap
for i in range(self.num_levels):
self.stairs[i]._generate(
env, start_x=start_x, num_steps=self.num_steps, step_rise=self.step_rise_levels[i], step_run=self.step_run
)
start_x += self.stair_length + self.stair_gap
def _reset(self, env):
if self._level > 0 and self.down_level(env):
# robot down-levels
self._level -= 1
print(f"DOWNGRADE TO LEVEL {self._level}")
elif self._level < self.num_levels and self.up_level(env):
# robot up-levels
self._level += 1
print(f"LEVEL UP TO LEVEL {self._level}!")
level = self._level
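        # Once every level has been passed, keep training by sampling a level at random,
        # with weights 1..num_levels so that later (harder) levels are drawn more often.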
if level >= self.num_levels:
# Loop back to randomly selected level
level_list = np.arange(self.num_levels) + 1
level_probs = level_list / sum(level_list)
level = np.random.choice(self.num_levels, p=level_probs)
print(f"LOOP TO LEVEL {level}")
x_pos = level * (self.stair_length + self.stair_gap)
z_pos = 0
        # Equal chances to encounter going up and down the stair level
if np.random.uniform() < 0.4:
x_pos += self.stair_gap + self.stair_length / 2 - 1
z_pos = self.step_rise_levels[level] * self.num_steps
self.adjust_position = (x_pos, 0, z_pos)
def up_level(self, env):
"""To succeed the current level, robot needs to climb over the current stair level
and reach the start of next stair level"""
base_pos = env._robot.GetBasePosition()
target_x = (self._level + 1) * (self.stair_length + self.stair_gap) + 0.5
return (
self.adjust_position[2] == 0
and base_pos[0] > target_x
and base_pos[1] > -boxHalfWidth
and base_pos[1] < boxHalfWidth
)
def down_level(self, env):
"""Downgrade to the previous level if robot was unable to travel a quarter of the stair length"""
start_pos = self.adjust_position
base_pos = env._robot.GetBasePosition()
x_dist_travelled = base_pos[0] - start_pos[0]
return x_dist_travelled < self.stair_length / 5
class TrainUneven(EnvModifier):
def __init__(self):
super().__init__()
self.hf = HeightField()
def _generate(self, env):
self.hf._generate(env, start_x=10, heightPerturbationRange=0.08)
class TrainMultiple(EnvModifier):
def __init__(self):
super().__init__()
self.hf_length = 20
self.hf_perturb = 0.08
self.hf = HeightField()
self.step_rise_levels = [0.02, 0.05]
self.num_levels = len(self.step_rise_levels)
self.num_steps = 10
self.stair_gap = 1.5
self.step_run = 0.3
self.stair_length = (self.num_steps - 1) * self.step_run * 2 + boxHalfLength * 2 * 2
self._stair_level = 0
self.stairs = []
for _ in range(self.num_levels):
self.stairs.append(Stairs())
self._reset_manual_override = None
def _generate(self, env):
self.hf._generate(env, start_x=10, heightPerturbationRange=self.hf_perturb)
start_x = self.stair_gap + self.hf_length
for i in range(self.num_levels):
self.stairs[i]._generate(
env, start_x=start_x, num_steps=self.num_steps, step_rise=self.step_rise_levels[i], step_run=self.step_run
)
start_x += self.stair_length + self.stair_gap
def _reset_to_heightfield(self):
"""Reset position to before the heightfield"""
self.adjust_position = (0, 0, 0)
def _select_stairs_level(self, env):
# Check if robot has succeeded current level
if self._stair_level < self.num_levels and self.succeed_level(env):
print(f"LEVEL {self._stair_level} PASSED!")
self._stair_level += 1
level = self._stair_level
if level >= self.num_levels:
# Loop back to randomly selected level
level_list = np.arange(self.num_levels) + 1
level_probs = level_list / sum(level_list)
level = np.random.choice(self.num_levels, p=level_probs)
print(f"LOOP TO LEVEL {level}")
elif level > 0 and np.random.uniform() < 0.2:
# Redo previous level
level -= 1
return level
def _reset_to_stairs(self, level):
"""Reset position to just before the stairs of a given level"""
x_pos = self.hf_length + level * (self.stair_length + self.stair_gap)
z_pos = 0
        # Equal chances to encounter going up and down the stair level
if np.random.uniform() < 0.4:
x_pos += self.stair_gap + self.stair_length / 2 - 1
z_pos = self.step_rise_levels[level] * self.num_steps
self.adjust_position = (x_pos, 0, z_pos)
def _reset_randomly(self, env):
if np.random.uniform() < 0.5:
# See heightfield
self._reset_to_heightfield()
else:
# See stairs
level = self._select_stairs_level(env)
self._reset_to_stairs(level)
def _reset(self, env):
if self._reset_manual_override is not None:
self._reset_manually()
# Remove override for subsequent resets
# self._reset_manual_override = None
else:
self._reset_randomly(env)
def _reset_manually(self):
if self._reset_manual_override == "heightfield":
self._reset_to_heightfield()
elif self._reset_manual_override == "stairs_0":
self._reset_to_stairs(level=0)
elif self._reset_manual_override == "stairs_1":
self._reset_to_stairs(level=1)
else:
raise ValueError(f"Invalid override {self._reset_manual_override}")
def _override_reset(self, override: str):
"""Manually set what the next reset should be"""
assert override in ("heightfield", "stairs_0", "stairs_1")
self._reset_manual_override = override
def succeed_level(self, env):
"""To succeed the current level, robot needs to climb over the current stair level
and reach the start of next stair level"""
base_pos = env._robot.GetBasePosition()
target_x = self.hf_length + (self._stair_level + 1) * (self.stair_length + self.stair_gap) + 0.5
return (
self.adjust_position[2] == 0
and base_pos[0] > target_x
and base_pos[1] > -boxHalfWidth
and base_pos[1] < boxHalfWidth
)
|
[
"numpy.random.uniform",
"numpy.arange",
"numpy.random.choice",
"blind_walking.envs.env_modifiers.stairs.Stairs",
"blind_walking.envs.env_modifiers.heightfield.HeightField"
] |
[((3324, 3337), 'blind_walking.envs.env_modifiers.heightfield.HeightField', 'HeightField', ([], {}), '()\n', (3335, 3337), False, 'from blind_walking.envs.env_modifiers.heightfield import HeightField\n'), ((3619, 3632), 'blind_walking.envs.env_modifiers.heightfield.HeightField', 'HeightField', ([], {}), '()\n', (3630, 3632), False, 'from blind_walking.envs.env_modifiers.heightfield import HeightField\n'), ((1894, 1942), 'numpy.random.choice', 'np.random.choice', (['self.num_levels'], {'p': 'level_probs'}), '(self.num_levels, p=level_probs)\n', (1910, 1942), True, 'import numpy as np\n'), ((2154, 2173), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2171, 2173), True, 'import numpy as np\n'), ((5215, 5263), 'numpy.random.choice', 'np.random.choice', (['self.num_levels'], {'p': 'level_probs'}), '(self.num_levels, p=level_probs)\n', (5231, 5263), True, 'import numpy as np\n'), ((5741, 5760), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5758, 5760), True, 'import numpy as np\n'), ((6001, 6020), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6018, 6020), True, 'import numpy as np\n'), ((897, 905), 'blind_walking.envs.env_modifiers.stairs.Stairs', 'Stairs', ([], {}), '()\n', (903, 905), False, 'from blind_walking.envs.env_modifiers.stairs import Stairs, boxHalfLength, boxHalfWidth\n'), ((1786, 1812), 'numpy.arange', 'np.arange', (['self.num_levels'], {}), '(self.num_levels)\n', (1795, 1812), True, 'import numpy as np\n'), ((4048, 4056), 'blind_walking.envs.env_modifiers.stairs.Stairs', 'Stairs', ([], {}), '()\n', (4054, 4056), False, 'from blind_walking.envs.env_modifiers.stairs import Stairs, boxHalfLength, boxHalfWidth\n'), ((5107, 5133), 'numpy.arange', 'np.arange', (['self.num_levels'], {}), '(self.num_levels)\n', (5116, 5133), True, 'import numpy as np\n'), ((5337, 5356), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5354, 5356), True, 'import numpy as np\n')]
|
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added energy minimization
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.optimize import minimize
import sys
# Trial wave function for the 2-electron quantum dot in two dims
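# The ansatz is psi_T(r1, r2) = exp(-alpha*(r1^2 + r2^2)/2) * exp(r12/(1 + beta*r12)),
# a harmonic-oscillator part with variational parameter alpha times a Pade-Jastrow
# correlation factor with variational parameter beta (r12 is the electron separation).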
def WaveFunction(r,alpha,beta):
r1 = r[0,0]**2 + r[0,1]**2
r2 = r[1,0]**2 + r[1,1]**2
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = r12/(1+beta*r12)
return exp(-0.5*alpha*(r1+r2)+deno)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha,beta):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha + 1.0/r12+deno2*(alpha*r12-deno2+2*beta*deno-1.0/r12)
# Derivative of wave function ansatz as function of variational parameters
def DerivativeWFansatz(r,alpha,beta):
WfDer = np.zeros((2), np.double)
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
WfDer[0] = -0.5*(r1+r2)
WfDer[1] = -r12*r12*deno2
return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha,beta):
qforce = np.zeros((NumberParticles,Dimension), np.double)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
qforce[0,:] = -2*r[0,:]*alpha*(r[0,:]-r[1,:])*deno*deno/r12
qforce[1,:] = -2*r[1,:]*alpha*(r[1,:]-r[0,:])*deno*deno/r12
return qforce
# Computing the derivative of the energy and the energy
def EnergyDerivative(x0):
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
energy = 0.0
DeltaE = 0.0
alpha = x0[0]
beta = x0[1]
EnergyDer = 0.0
DeltaPsi = 0.0
DerivativePsiE = 0.0
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
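            # log of the ratio of backward/forward Green's functions for the importance-sampled move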
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
DerPsi = DerivativeWFansatz(PositionOld,alpha,beta)
DeltaPsi += DerPsi
energy += DeltaE
DerivativePsiE += DerPsi*DeltaE
# We calculate mean values
energy /= NumberMCcycles
DerivativePsiE /= NumberMCcycles
DeltaPsi /= NumberMCcycles
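    # gradient of <E> with respect to (alpha, beta): 2*(<E_L dlnPsi> - <E_L><dlnPsi>)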
EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
return EnergyDer
# Computing the expectation value of the local energy
def Energy(x0):
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
energy = 0.0
DeltaE = 0.0
alpha = x0[0]
beta = x0[1]
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
energy += DeltaE
if Printout:
outfile.write('%f\n' %(energy/(MCcycle+1.0)))
# We calculate mean values
energy /= NumberMCcycles
return energy
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# seed for rng generator
seed()
# Monte Carlo cycles for parameter optimization
Printout = False
NumberMCcycles= 10000
# guess for variational parameters
x0 = np.array([0.9,0.2])
# Using Broydens method to find optimal parameters
res = minimize(Energy, x0, method='BFGS', jac=EnergyDerivative, options={'gtol': 1e-4,'disp': True})
x0 = res.x
print(x0)
# Compute the energy again with the optimal parameters and increased number of Monte Cycles
NumberMCcycles= 100000
Printout = True
outfile = open("Energies.dat",'w')
print(Energy(x0))
outfile.close()
|
[
"scipy.optimize.minimize",
"math.exp",
"math.sqrt",
"random.normalvariate",
"numpy.zeros",
"random.random",
"numpy.array",
"random.seed"
] |
[((6999, 7005), 'random.seed', 'seed', ([], {}), '()\n', (7003, 7005), False, 'from random import random, seed, normalvariate\n'), ((7133, 7153), 'numpy.array', 'np.array', (['[0.9, 0.2]'], {}), '([0.9, 0.2])\n', (7141, 7153), True, 'import numpy as np\n'), ((7210, 7312), 'scipy.optimize.minimize', 'minimize', (['Energy', 'x0'], {'method': '"""BFGS"""', 'jac': 'EnergyDerivative', 'options': "{'gtol': 0.0001, 'disp': True}"}), "(Energy, x0, method='BFGS', jac=EnergyDerivative, options={'gtol': \n 0.0001, 'disp': True})\n", (7218, 7312), False, 'from scipy.optimize import minimize\n'), ((635, 692), 'math.sqrt', 'sqrt', (['((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)'], {}), '((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)\n', (639, 692), False, 'from math import exp, sqrt\n'), ((720, 756), 'math.exp', 'exp', (['(-0.5 * alpha * (r1 + r2) + deno)'], {}), '(-0.5 * alpha * (r1 + r2) + deno)\n', (723, 756), False, 'from math import exp, sqrt\n'), ((952, 1009), 'math.sqrt', 'sqrt', (['((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)'], {}), '((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)\n', (956, 1009), False, 'from math import exp, sqrt\n'), ((1284, 1306), 'numpy.zeros', 'np.zeros', (['(2)', 'np.double'], {}), '(2, np.double)\n', (1292, 1306), True, 'import numpy as np\n'), ((1385, 1442), 'math.sqrt', 'sqrt', (['((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)'], {}), '((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)\n', (1389, 1442), False, 'from math import exp, sqrt\n'), ((1696, 1745), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (1704, 1745), True, 'import numpy as np\n'), ((1755, 1812), 'math.sqrt', 'sqrt', (['((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)'], {}), '((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)\n', (1759, 1812), False, 'from math import exp, sqrt\n'), ((2206, 2255), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (2214, 2255), True, 'import numpy as np\n'), ((2273, 2322), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (2281, 2322), True, 'import numpy as np\n'), ((2364, 2413), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (2372, 2413), True, 'import numpy as np\n'), ((2435, 2484), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (2443, 2484), True, 'import numpy as np\n'), ((4765, 4814), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (4773, 4814), True, 'import numpy as np\n'), ((4832, 4881), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (4840, 4881), True, 'import numpy as np\n'), ((4923, 4972), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (4931, 4972), True, 'import numpy as np\n'), ((4994, 5043), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (5002, 5043), True, 'import numpy as np\n'), ((3743, 3762), 'math.exp', 'exp', (['GreensFunction'], {}), '(GreensFunction)\n', (3746, 3762), False, 'from math import exp, sqrt\n'), ((6237, 6256), 'math.exp', 'exp', (['GreensFunction'], {}), '(GreensFunction)\n', (6240, 6256), False, 'from math import exp, sqrt\n'), ((2744, 2767), 'random.normalvariate', 'normalvariate', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2757, 2767), False, 'from random import random, seed, normalvariate\n'), ((2767, 2781), 'math.sqrt', 'sqrt', (['TimeStep'], {}), '(TimeStep)\n', (2771, 2781), False, 'from math import exp, sqrt\n'), ((3914, 3922), 'random.random', 'random', ([], {}), '()\n', (3920, 3922), False, 'from random import random, seed, normalvariate\n'), ((5238, 5261), 'random.normalvariate', 'normalvariate', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (5251, 5261), False, 'from random import random, seed, normalvariate\n'), ((5261, 5275), 'math.sqrt', 'sqrt', (['TimeStep'], {}), '(TimeStep)\n', (5265, 5275), False, 'from math import exp, sqrt\n'), ((6408, 6416), 'random.random', 'random', ([], {}), '()\n', (6414, 6416), False, 'from random import random, seed, normalvariate\n'), ((3149, 3172), 'random.normalvariate', 'normalvariate', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3162, 3172), False, 'from random import random, seed, normalvariate\n'), ((3172, 3186), 'math.sqrt', 'sqrt', (['TimeStep'], {}), '(TimeStep)\n', (3176, 3186), False, 'from math import exp, sqrt\n'), ((5643, 5666), 'random.normalvariate', 'normalvariate', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (5656, 5666), False, 'from random import random, seed, normalvariate\n'), ((5666, 5680), 'math.sqrt', 'sqrt', (['TimeStep'], {}), '(TimeStep)\n', (5670, 5680), False, 'from math import exp, sqrt\n')]
|
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import average_precision_score as auprc
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, concatenate, Input, LSTM
from tensorflow.keras.layers import Conv1D, Reshape, Lambda
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow.keras.backend as K
from iterutils import train_generator
def data_generator(path, batchsize, seqlen, bin_size):
dat_seq = train_generator(path['seq'], batchsize, seqlen, 'seq', 'repeat')
dat_chromatin = []
for chromatin_track in path['chromatin_tracks']:
dat_chromatin.append(
train_generator(chromatin_track, batchsize, seqlen, 'chrom', 'repeat'))
y = train_generator(path['labels'], batchsize, seqlen, 'labels', 'repeat')
while True:
combined_chrom_data = []
for chromatin_track_generators in dat_chromatin:
curr_chromatin_mark = next(chromatin_track_generators)
mark_resolution = curr_chromatin_mark.shape
assert (mark_resolution == (batchsize, seqlen/bin_size)),\
"Please check binning, specified bin size=50"
combined_chrom_data.append(pd.DataFrame(curr_chromatin_mark))
chromatin_features = pd.concat(combined_chrom_data, axis=1).values
print(chromatin_features.shape)
sequence_features = next(dat_seq)
labels = next(y)
yield [sequence_features, chromatin_features], labels
def add_new_layers(base_model, seq_len, no_of_chromatin_tracks, bin_size):
"""
Takes a pre-existing M-SEQ (Definition in README) & adds structure to \
use it as part of a bimodal DNA sequence + prior chromatin network
Parameters:
        base_model (keras Model): A pre-trained sequence-only (M-SEQ) model
        seq_len (int): Length of the input DNA sequence
        no_of_chromatin_tracks (int): The expected number of chromatin tracks
        bin_size (int): Size of the bins used for the chromatin tracks
Returns:
model: a Keras Model
"""
def permute(x):
return K.permute_dimensions(x, (0, 2, 1))
# Transfer from a pre-trained M-SEQ
curr_layer = base_model.get_layer(name='dense_2')
curr_tensor = curr_layer.output
xs = Dense(1, name='MSEQ-dense-new', activation='tanh')(curr_tensor)
# Defining a M-C sub-network
chrom_input = Input(shape=(no_of_chromatin_tracks * int(seq_len/bin_size),), name='chrom_input')
ci = Reshape((no_of_chromatin_tracks, int(seq_len/bin_size)),
input_shape=(no_of_chromatin_tracks * int(seq_len/bin_size),))(chrom_input)
# Permuting the input dimensions to match Keras input requirements:
permute_func = Lambda(permute)
ci = permute_func(ci)
xc = Conv1D(15, 1, padding='valid', activation='relu', name='MC-conv1d')(ci)
xc = LSTM(5, activation='relu', name='MC-lstm')(xc)
xc = Dense(1, activation='tanh', name='MC-dense')(xc)
# Concatenating sequence (MSEQ) and chromatin (MC) networks:
merged_layer = concatenate([xs, xc])
result = Dense(1, activation='sigmoid', name='MSC-dense')(merged_layer)
model = Model(inputs=[base_model.input, chrom_input], outputs=result)
return model
class PrecisionRecall(Callback):
def __init__(self, val_data):
super().__init__()
self.validation_data = val_data
def on_train_begin(self, logs=None):
self.val_auprc = []
self.train_auprc = []
def on_epoch_end(self, epoch, logs=None):
(x_val, c_val), y_val = self.validation_data
predictions = self.model.predict([x_val, c_val])
aupr = auprc(y_val, predictions)
self.val_auprc.append(aupr)
def save_metrics(hist_object, pr_history, records_path):
loss = hist_object.history['loss']
val_loss = hist_object.history['val_loss']
val_pr = pr_history.val_auprc
# Saving the training metrics
np.savetxt(records_path + 'trainingLoss.txt', loss, fmt='%1.2f')
np.savetxt(records_path + 'valLoss.txt', val_loss, fmt='%1.2f')
np.savetxt(records_path + 'valPRC.txt', val_pr, fmt='%1.2f')
return loss, val_pr
def transfer(train_path, val_path, basemodel, model, steps_per_epoch,
batchsize, records_path, bin_size, seq_len):
"""
Trains the M-SC, transferring weights from the pre-trained M-SEQ.
The M-SEQ weights are kept fixed except for the final layer.
Parameters:
train_path (str): Path + prefix to training data
val_path (str): Path + prefix to the validation data
basemodel (Model): Pre-trained keras M-SEQ model
model (Model): Defined bimodal network
        steps_per_epoch (int): len(training_data) / batchsize
batchsize (int): Batch size used in SGD
records_path (str): Path + prefix to output directory
Returns:
loss (ndarray): An array with the validation loss at each epoch
"""
# Making the base model layers non-trainable:
for layer in basemodel.layers:
layer.trainable = False
# Training rest of the model.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=sgd)
# Get train and validation data
train_data_generator = data_generator(train_path, batchsize, seqlen=seq_len, bin_size=bin_size)
val_data_generator = data_generator(val_path, 200000, seqlen=seq_len, bin_size=bin_size)
validation_data = next(val_data_generator)
precision_recall_history = PrecisionRecall(validation_data)
checkpointer = ModelCheckpoint(records_path + 'model_epoch{epoch}.hdf5',
verbose=1, save_best_only=False)
hist = model.fit_generator(epochs=15, steps_per_epoch=steps_per_epoch,
generator=train_data_generator,
validation_data=validation_data,
callbacks=[precision_recall_history,
checkpointer])
loss, val_pr = save_metrics(hist_object=hist, pr_history=precision_recall_history,
records_path=records_path)
return loss, val_pr
def transfer_and_train_msc(train_path, val_path, basemodel,
batch_size, records_path, bin_size, seq_len):
# Calculate size of the training set:
training_set_size = len(np.loadtxt(train_path['labels']))
# Calculate the steps per epoch
steps_per_epoch = training_set_size / batch_size
# Calculate number of chromatin tracks
no_of_chrom_tracks = len(train_path['chromatin_tracks'])
model = add_new_layers(basemodel, seq_len, no_of_chrom_tracks, bin_size)
loss, val_pr = transfer(train_path, val_path, basemodel, model, steps_per_epoch,
batch_size, records_path, bin_size, seq_len)
return loss, val_pr
|
[
"pandas.DataFrame",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"numpy.savetxt",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.models.Model",
"iterutils.train_generator",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.backend.permute_dimensions",
"tensorflow.keras.layers.LSTM",
"numpy.loadtxt",
"sklearn.metrics.average_precision_score",
"pandas.concat",
"tensorflow.keras.layers.Lambda"
] |
[((589, 653), 'iterutils.train_generator', 'train_generator', (["path['seq']", 'batchsize', 'seqlen', '"""seq"""', '"""repeat"""'], {}), "(path['seq'], batchsize, seqlen, 'seq', 'repeat')\n", (604, 653), False, 'from iterutils import train_generator\n'), ((852, 922), 'iterutils.train_generator', 'train_generator', (["path['labels']", 'batchsize', 'seqlen', '"""labels"""', '"""repeat"""'], {}), "(path['labels'], batchsize, seqlen, 'labels', 'repeat')\n", (867, 922), False, 'from iterutils import train_generator\n'), ((2704, 2719), 'tensorflow.keras.layers.Lambda', 'Lambda', (['permute'], {}), '(permute)\n', (2710, 2719), False, 'from tensorflow.keras.layers import Conv1D, Reshape, Lambda\n'), ((3026, 3047), 'tensorflow.keras.layers.concatenate', 'concatenate', (['[xs, xc]'], {}), '([xs, xc])\n', (3037, 3047), False, 'from tensorflow.keras.layers import Dense, concatenate, Input, LSTM\n'), ((3136, 3197), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[base_model.input, chrom_input]', 'outputs': 'result'}), '(inputs=[base_model.input, chrom_input], outputs=result)\n', (3141, 3197), False, 'from tensorflow.keras.models import Model\n'), ((3903, 3967), 'numpy.savetxt', 'np.savetxt', (["(records_path + 'trainingLoss.txt')", 'loss'], {'fmt': '"""%1.2f"""'}), "(records_path + 'trainingLoss.txt', loss, fmt='%1.2f')\n", (3913, 3967), True, 'import numpy as np\n'), ((3972, 4035), 'numpy.savetxt', 'np.savetxt', (["(records_path + 'valLoss.txt')", 'val_loss'], {'fmt': '"""%1.2f"""'}), "(records_path + 'valLoss.txt', val_loss, fmt='%1.2f')\n", (3982, 4035), True, 'import numpy as np\n'), ((4040, 4100), 'numpy.savetxt', 'np.savetxt', (["(records_path + 'valPRC.txt')", 'val_pr'], {'fmt': '"""%1.2f"""'}), "(records_path + 'valPRC.txt', val_pr, fmt='%1.2f')\n", (4050, 4100), True, 'import numpy as np\n'), ((5063, 5117), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (5066, 5117), False, 'from tensorflow.keras.optimizers import SGD\n'), ((5538, 5632), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(records_path + 'model_epoch{epoch}.hdf5')"], {'verbose': '(1)', 'save_best_only': '(False)'}), "(records_path + 'model_epoch{epoch}.hdf5', verbose=1,\n save_best_only=False)\n", (5553, 5632), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((2080, 2114), 'tensorflow.keras.backend.permute_dimensions', 'K.permute_dimensions', (['x', '(0, 2, 1)'], {}), '(x, (0, 2, 1))\n', (2100, 2114), True, 'import tensorflow.keras.backend as K\n'), ((2255, 2305), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'name': '"""MSEQ-dense-new"""', 'activation': '"""tanh"""'}), "(1, name='MSEQ-dense-new', activation='tanh')\n", (2260, 2305), False, 'from tensorflow.keras.layers import Dense, concatenate, Input, LSTM\n'), ((2755, 2822), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(15)', '(1)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""MC-conv1d"""'}), "(15, 1, padding='valid', activation='relu', name='MC-conv1d')\n", (2761, 2822), False, 'from tensorflow.keras.layers import Conv1D, Reshape, Lambda\n'), ((2836, 2878), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(5)'], {'activation': '"""relu"""', 'name': '"""MC-lstm"""'}), "(5, activation='relu', name='MC-lstm')\n", (2840, 2878), False, 'from tensorflow.keras.layers import Dense, concatenate, Input, LSTM\n'), ((2892, 2936), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""tanh"""', 'name': '"""MC-dense"""'}), "(1, activation='tanh', name='MC-dense')\n", (2897, 2936), False, 'from tensorflow.keras.layers import Dense, concatenate, Input, LSTM\n'), ((3061, 3109), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""MSC-dense"""'}), "(1, activation='sigmoid', name='MSC-dense')\n", (3066, 3109), False, 'from tensorflow.keras.layers import Dense, concatenate, Input, LSTM\n'), ((3624, 3649), 'sklearn.metrics.average_precision_score', 'auprc', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (3629, 3649), True, 'from sklearn.metrics import average_precision_score as auprc\n'), ((6369, 6401), 'numpy.loadtxt', 'np.loadtxt', (["train_path['labels']"], {}), "(train_path['labels'])\n", (6379, 6401), True, 'import numpy as np\n'), ((772, 842), 'iterutils.train_generator', 'train_generator', (['chromatin_track', 'batchsize', 'seqlen', '"""chrom"""', '"""repeat"""'], {}), "(chromatin_track, batchsize, seqlen, 'chrom', 'repeat')\n", (787, 842), False, 'from iterutils import train_generator\n'), ((1388, 1426), 'pandas.concat', 'pd.concat', (['combined_chrom_data'], {'axis': '(1)'}), '(combined_chrom_data, axis=1)\n', (1397, 1426), True, 'import pandas as pd\n'), ((1324, 1357), 'pandas.DataFrame', 'pd.DataFrame', (['curr_chromatin_mark'], {}), '(curr_chromatin_mark)\n', (1336, 1357), True, 'import pandas as pd\n')]
|
import os
import numpy as np
from netCDF4 import Dataset
from compliance_checker.ioos import (
IOOS0_1Check,
IOOS1_1Check,
IOOS1_2_PlatformIDValidator,
IOOS1_2Check,
NamingAuthorityValidator,
)
from compliance_checker.tests import BaseTestCase
from compliance_checker.tests.helpers import MockTimeSeries, MockVariable
from compliance_checker.tests.resources import STATIC_FILES
from compliance_checker.tests.test_cf import get_results
class TestIOOS0_1(BaseTestCase):
"""
Tests for the IOOS Inventory Metadata v0.1
"""
def setUp(self):
# Use the NCEI Gold Standard Point dataset for IOOS checks
self.ds = self.load_dataset(STATIC_FILES["ncei_gold_point_1"])
self.ioos = IOOS0_1Check()
def test_cc_meta(self):
assert self.ioos._cc_spec == "ioos"
assert self.ioos._cc_spec_version == "0.1"
def test_global_attributes(self):
"""
Tests that all global attributes checks are working
"""
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
results = self.ioos.check_global_attributes(nc_obj)
for result in results:
self.assert_result_is_bad(result)
attrs = [
"acknowledgement",
"publisher_email",
"institution",
"publisher_name",
"Conventions",
]
for attr in attrs:
setattr(nc_obj, attr, "test")
results = self.ioos.check_global_attributes(nc_obj)
for result in results:
self.assert_result_is_good(result)
def test_variable_attributes(self):
"""
Tests that the platform variable attributes check is working
"""
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("platform", "S1", ())
platform = nc_obj.variables["platform"]
results = self.ioos.check_variable_attributes(nc_obj)
for result in results:
self.assert_result_is_bad(result)
platform.long_name = "platform"
platform.short_name = "platform"
platform.source = "glider"
platform.ioos_name = "urn:ioos:station:glos:leorgn"
platform.wmo_id = "1234"
platform.comment = "test"
results = self.ioos.check_variable_attributes(nc_obj)
for result in results:
self.assert_result_is_good(result)
def test_variable_units(self):
"""
Tests that the variable units test is working
"""
# this check tests that units attribute is present on EVERY variable
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("sample_var", "d", ("time",))
sample_var = nc_obj.variables["sample_var"]
results = self.ioos.check_variable_units(nc_obj)
self.assert_result_is_bad(results)
sample_var.units = "m"
sample_var.short_name = "sample_var"
results = self.ioos.check_variable_units(nc_obj)
self.assert_result_is_good(results)
def test_altitude_units(self):
"""
Tests that the altitude variable units test is working
"""
results = self.ioos.check_altitude_units(self.ds)
self.assert_result_is_good(results)
# Now test an nc file with a 'z' variable without units
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("z", "d", ("time",))
z = nc_obj.variables["z"]
z.short_name = "sample_var"
results = self.ioos.check_variable_units(nc_obj)
self.assert_result_is_bad(results)
class TestIOOS1_1(BaseTestCase):
"""
Tests for the compliance checker implementation of IOOS Metadata Profile
for NetCDF, Version 1.1
"""
def setUp(self):
# Use the IOOS 1_1 dataset for testing
self.ds = self.load_dataset(STATIC_FILES["ioos_gold_1_1"])
self.ioos = IOOS1_1Check()
def test_cc_meta(self):
assert self.ioos._cc_spec == "ioos"
assert self.ioos._cc_spec_version == "1.1"
def test_required_attributes(self):
"""
Tests that required attributes test is working properly
"""
results = self.ioos.check_high(self.ds)
for result in results:
self.assert_result_is_good(result)
def test_recomended_attributes(self):
"""
Tests that recommended attributes test is working properly
"""
results = self.ioos.check_recommended(self.ds)
for result in results:
self.assert_result_is_good(result)
def test_bad_platform_variables(self):
"""
Tests that the platform variable attributes check is working
"""
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.platform = "platform"
# global attribute 'platform' points to variable that does not exist in dataset
results = self.ioos.check_platform_variables(nc_obj)
for result in results:
self.assert_result_is_bad(result)
def test_good_platform_variables(self):
"""
Tests that the platform variable attributes check is working
"""
results = self.ioos.check_platform_variables(self.ds)
for result in results:
self.assert_result_is_good(result)
def test_bad_geophysical_vars_fill_value(self):
"""
Tests that the geophysical variable _FillValue check is working
"""
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("sample_var", "d", ("time",))
# Define some variable attributes but don't specify _FillValue
sample_var = nc_obj.variables["sample_var"]
sample_var.units = "m"
sample_var.short_name = "temp"
        # the geophysical variable is missing a _FillValue attribute, so the check should fail
results = self.ioos.check_geophysical_vars_fill_value(nc_obj)
for result in results:
self.assert_result_is_bad(result)
def test_good_geophysical_vars_fill_value(self):
"""
Tests that the geophysical variable _FillValue check is working
"""
results = self.ioos.check_geophysical_vars_fill_value(self.ds)
for result in results:
self.assert_result_is_good(result)
def test_bad_geophysical_vars_standard_name(self):
"""
        Tests that the geophysical variable standard_name check is working
"""
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("sample_var", "d", ("time",))
# Define some variable attributes but don't specify _FillValue
sample_var = nc_obj.variables["sample_var"]
sample_var.units = "m"
sample_var.short_name = "temp"
        # the geophysical variable is missing a standard_name attribute, so the check should fail
results = self.ioos.check_geophysical_vars_standard_name(nc_obj)
for result in results:
self.assert_result_is_bad(result)
def test_good_geophysical_vars_standard_name(self):
"""
        Tests that the geophysical variable standard_name check is working
"""
results = self.ioos.check_geophysical_vars_standard_name(self.ds)
for result in results:
self.assert_result_is_good(result)
def test_bad_units(self):
"""
Tests that the valid units check is working
"""
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("temperature", "d", ("time",))
# Define some variable attributes but don't specify _FillValue
sample_var = nc_obj.variables["temperature"]
sample_var.units = "degC" # Not valid units
sample_var.short_name = "temp"
        # the variable has no standard_name attribute, so the check should fail
results = self.ioos.check_geophysical_vars_standard_name(nc_obj)
for result in results:
self.assert_result_is_bad(result)
def test_good_units(self):
"""
Tests that the valid units check is working
"""
results = self.ioos.check_units(self.ds)
for result in results:
self.assert_result_is_good(result)
class TestIOOS1_2(BaseTestCase):
"""
Tests for the compliance checker implementation of IOOS Metadata Profile
    for NetCDF, Version 1.2
"""
def setUp(self):
self.ioos = IOOS1_2Check()
def test_check_geophysical_vars_have_attrs(self):
# create geophysical variable
ds = MockTimeSeries() # time, lat, lon, depth
temp = ds.createVariable("temp", np.float64, dimensions=("time",))
# should fail here
results = self.ioos.check_geophysical_vars_have_attrs(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
# set the necessary attributes
ds = MockTimeSeries(default_fill_value=9999999999.0) # time, lat, lon, depth
temp = ds.createVariable(
"temp", np.float64, fill_value=9999999999.0
) # _FillValue
temp.setncattr("missing_value", 9999999999.0)
temp.setncattr("standard_name", "sea_surface_temperature")
temp.setncattr(
"standard_name_url",
"http://cfconventions.org/Data/cf-standard-names/64/build/cf-standard-name-table.html",
)
temp.setncattr("units", "degree_C")
temp.setncattr("platform", "myPlatform")
results = self.ioos.check_geophysical_vars_have_attrs(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
def test_check_geospatial_vars_have_attrs(self):
# create geophysical variable
ds = MockTimeSeries() # time, lat, lon, depth
temp = ds.createVariable("temp", np.float64, dimensions=("time",))
# should fail here
results = self.ioos.check_geospatial_vars_have_attrs(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
# should pass - default_fill_value sets _FillValue attr
ds = MockTimeSeries(default_fill_value=9999999999.0) # time, lat, lon, depth
ds.variables["time"].setncattr("standard_name", "time")
ds.variables["time"].setncattr(
"standard_name_url",
"http://cfconventions.org/Data/cf-standard-names/64/build/cf-standard-name-table.html",
)
ds.variables["time"].setncattr("units", "hours since 1970-01-01T00:00:00")
ds.variables["time"].setncattr("missing_value", 9999999999.0)
results = self.ioos.check_geospatial_vars_have_attrs(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
def test_check_contributor_role_and_vocabulary(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no contributor_role or vocab, fail both
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertFalse(all(r.value for r in results))
# bad contributor_role and vocab
ds.setncattr("contributor_role", "bad")
ds.setncattr("contributor_role_vocabulary", "bad")
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertFalse(all(r.value for r in results))
# good role, bad vocab
ds.setncattr("contributor_role", "contributor")
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertTrue(results[0].value)
self.assertEqual(results[0].msgs, [])
self.assertFalse(results[1].value)
# bad role, good vocab
ds.setncattr("contributor_role", "bad")
ds.setncattr(
"contributor_role_vocabulary",
"http://vocab.nerc.ac.uk/collection/G04/current/",
)
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertFalse(results[0].value)
self.assertTrue(results[1].value)
self.assertEqual(results[1].msgs, [])
# good role, good vocab
ds.setncattr("contributor_role", "contributor")
ds.setncattr(
"contributor_role_vocabulary",
"http://vocab.nerc.ac.uk/collection/G04/current/",
)
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertTrue(results[0].value)
self.assertEqual(results[0].msgs, [])
self.assertTrue(results[1].value)
self.assertEqual(results[1].msgs, [])
ds.setncattr("contributor_role", "resourceProvider")
ds.setncattr(
"contributor_role_vocabulary",
"https://www.ngdc.noaa.gov/wiki/index.php?title=ISO_19115_and_19115-2_CodeList_Dictionaries#CI_RoleCode",
)
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertTrue(results[0].value)
self.assertEqual(results[0].msgs, [])
self.assertTrue(results[1].value)
self.assertEqual(results[1].msgs, [])
def test_check_creator_and_publisher_type(self):
"""
Checks the creator_type and publisher_type global attributes with
the following values:
Empty: Valid, defaults to "person" when not specified, which is
contained in the list of valid values.
Bad values: Invalid, not contained in list of valid values.
Good values: Valid, contained in list.
"""
ds = MockTimeSeries()
# values which are not set/specified default to person, which is valid
result_list = self.ioos.check_creator_and_publisher_type(ds)
self.assertTrue(all(res.value for res in result_list))
# create invalid values for attribute
ds.setncattr("creator_type", "PI")
ds.setncattr("publisher_type", "Funder")
result_list = self.ioos.check_creator_and_publisher_type(ds)
err_regex = (
r"^If specified, \w+_type must be in value list "
r"\(\['group', 'institution', 'person', 'position'\]\)$"
)
for res in result_list:
self.assertFalse(res.value)
self.assertRegex(res.msgs[0], err_regex)
# good values
ds.setncattr("creator_type", "person")
ds.setncattr("publisher_type", "institution")
result_list = self.ioos.check_creator_and_publisher_type(ds)
self.assertTrue(all(res.value for res in result_list))
def test_check_gts_ingest_global(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no gts_ingest_requirements, should pass
result = self.ioos.check_gts_ingest_global(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
# passing value
ds.setncattr("gts_ingest", "true")
result = self.ioos.check_gts_ingest_global(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
ds.setncattr("gts_ingest", "false")
result = self.ioos.check_gts_ingest_global(ds)
self.assertTrue(result.value)
ds.setncattr("gts_ingest", "notgood")
result = self.ioos.check_gts_ingest_global(ds)
self.assertFalse(result.value)
def test_check_gts_ingest_requirements(self):
ds = MockTimeSeries() # time, lat, lon, depth
# NOTE: this check will always have a "failing" result; see
# https://github.com/ioos/compliance-checker/issues/759#issuecomment-625356938
# and subsequent discussion
# no gts_ingest_requirements, should pass
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
# flag for ingest, no variables flagged - default pass
ds.setncattr("gts_ingest", "true")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
# give one variable the gts_ingest attribute
# no standard_name or ancillary vars, should fail
ds.variables["time"].setncattr("gts_ingest", "true")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
# no ancillary vars, should fail
ds.variables["time"].setncattr("gts_ingest", "true")
ds.variables["time"].setncattr("standard_name", "time")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
self.assertIn(
"The following variables did not qualify for NDBC/GTS Ingest: time\n",
result.msgs,
)
# set ancillary var with bad standard name
tmp = ds.createVariable("tmp", np.byte, ("time",))
tmp.setncattr("standard_name", "bad")
ds.variables["time"].setncattr("ancillary_variables", "tmp")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
self.assertIn(
"The following variables did not qualify for NDBC/GTS Ingest: time\n",
result.msgs,
)
# good ancillary var standard name, time units are bad
tmp.setncattr("standard_name", "aggregate_quality_flag")
ds.variables["time"].setncattr("units", "bad since bad")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
self.assertIn(
"The following variables did not qualify for NDBC/GTS Ingest: time\n",
result.msgs,
)
# good ancillary var stdname, good units, pass
tmp.setncattr("standard_name", "aggregate_quality_flag")
ds.variables["time"].setncattr("units", "seconds since 1970-01-01T00:00:00Z")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
self.assertIn(
"The following variables qualified for NDBC/GTS Ingest: time\n", result.msgs
)
def test_check_instrument_variables(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no instrument variable, should pass
results = self.ioos.check_instrument_variables(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
temp = ds.createVariable("temp", np.float64, dimensions=("time",))
temp.setncattr("cf_role", "timeseries")
temp.setncattr("standard_name", "sea_surface_temperature")
temp.setncattr("units", "degree_C")
temp.setncattr("axis", "Y")
temp.setncattr("instrument", "myInstrument")
temp[:] = 45.0
instr = ds.createVariable("myInstrument", np.float64, dimensions=("time",))
# give instrument variable with component
instr.setncattr("component", "someComponent")
results = self.ioos.check_instrument_variables(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
# give discriminant
instr.setncattr("discriminant", "someDiscriminant")
results = self.ioos.check_instrument_variables(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
# bad component
instr.setncattr("component", 45)
results = self.ioos.check_instrument_variables(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
def test_check_wmo_platform_code(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no wmo_platform_code, pass
result = self.ioos.check_wmo_platform_code(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
# valid code
ds.setncattr("wmo_platform_code", "12345")
result = self.ioos.check_wmo_platform_code(ds)
self.assertTrue(result.value)
# valid code
ds.setncattr("wmo_platform_code", "7654321")
result = self.ioos.check_wmo_platform_code(ds)
self.assertTrue(result.value)
# alphanumeric, valid
ds.setncattr("wmo_platform_code", "abcd1")
result = self.ioos.check_wmo_platform_code(ds)
self.assertTrue(result.value)
# invalid length, fail
ds.setncattr("wmo_platform_code", "123")
result = self.ioos.check_wmo_platform_code(ds)
self.assertFalse(result.value)
# alphanumeric len 7, fail
ds.setncattr("wmo_platform_code", "1a2b3c7")
result = self.ioos.check_wmo_platform_code(ds)
self.assertFalse(result.value)
def test_check_standard_name(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no standard names
results = self.ioos.check_standard_name(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
# give standard names to all variables
ds.variables["time"].setncattr("standard_name", "time")
ds.variables["lon"].setncattr("standard_name", "longitude")
ds.variables["lat"].setncattr("standard_name", "latitude")
ds.variables["depth"].setncattr("standard_name", "depth")
results = self.ioos.check_standard_name(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
# add a QARTOD variable, no standard name - should fail
qr = ds.createVariable("depth_qc", np.byte)
qr.setncattr("flag_meanings", "blah")
results = self.ioos.check_standard_name(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
# bad standard name
qr.setncattr("standard_name", "blah")
results = self.ioos.check_standard_name(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
# good standard name
qr.setncattr("standard_name", "spike_test_quality_flag")
results = self.ioos.check_standard_name(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
def test_naming_authority_validation(self):
test_attr_name = "naming_authority"
validator = NamingAuthorityValidator()
# check URL - should pass
self.assertTrue(validator.validate(test_attr_name, "https://ioos.us")[0])
# check reverse DNS - should pass
self.assertTrue(validator.validate(test_attr_name, "edu.ucar.unidata")[0])
# email address is neither of the above, so should fail
bad_result = validator.validate(test_attr_name, "<EMAIL>")
self.assertFalse(bad_result[0])
self.assertEqual(
bad_result[1],
[
"naming_authority should either be a URL or a "
'reversed DNS name (e.g "edu.ucar.unidata")'
],
)
def test_platform_id_validation(self):
attn = "platform_id"
attv = "alphaNum3R1C"
v = IOOS1_2_PlatformIDValidator()
self.assertTrue(v.validate(attn, attv)[0])
attv = "alpha"
v = IOOS1_2_PlatformIDValidator()
self.assertTrue(v.validate(attn, attv)[0])
attv = "311123331112"
v = IOOS1_2_PlatformIDValidator()
self.assertTrue(v.validate(attn, attv)[0])
attv = "---fail---"
v = IOOS1_2_PlatformIDValidator()
self.assertFalse(v.validate(attn, attv)[0])
def test_check_platform_cf_role(self):
"""
Check that cf_role inside platform variables only allows certain
values, namely "profile_id", "timeseries_id", or "trajectory_id"
"""
ds = MockTimeSeries()
plat_var = ds.createVariable("platform", np.int8, ())
ds.variables["depth"].platform = "platform"
self.ioos.setup(ds)
results = self.ioos.check_platform_variable_cf_role(ds)
# don't set attribute, should raise error about attribute not
# existing
self.assertEqual(len(results), 1)
score, out_of = results[0].value
self.assertLess(score, out_of)
# set to invalid value
plat_var.setncattr("cf_role", "bad_value")
results = self.ioos.check_platform_variable_cf_role(ds)
self.assertLess(score, out_of)
expected_vals = {"profile_id", "timeseries_id", "trajectory_id"}
expect_msg = (
'Platform variable "platform" must have a cf_role attribute '
"with one of the values {}".format(sorted(expected_vals))
)
self.assertEqual(results[0].msgs, [expect_msg])
# set to valid value
plat_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_platform_variable_cf_role(ds)
score, out_of = results[0].value
self.assertEqual(score, out_of)
def test_check_platform_global(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no global attr, fail
self.assertFalse(self.ioos.check_platform_global(ds).value)
# bad global attr, fail
ds.setncattr("platform", "bad value")
self.assertFalse(self.ioos.check_platform_global(ds).value)
# another bad value
ds.setncattr("platform", " bad")
self.assertFalse(self.ioos.check_platform_global(ds).value)
# good value
ds.setncattr("platform", "single_string")
res = self.ioos.check_platform_global(ds)
self.assertTrue(res.value)
self.assertEqual(res.msgs, [])
def test_check_single_platform(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no global attr but also no platform variables, should pass
result = self.ioos.check_single_platform(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
# give platform global, no variables, fail
ds.setncattr("platform", "buoy")
result = self.ioos.check_single_platform(ds)
self.assertFalse(result.value)
# global platform, one platform variable, pass
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
result = self.ioos.check_single_platform(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
# two platform variables, fail
temp2 = ds.createVariable("temp2", "d", ("time"))
temp2.setncattr("platform", "platform_var2")
plat = ds.createVariable("platform_var2", np.byte)
result = self.ioos.check_single_platform(ds)
self.assertFalse(result.value)
# no global attr, one variable, fail
ds = MockTimeSeries() # time, lat, lon, depth
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
result = self.ioos.check_single_platform(ds)
self.assertFalse(result.value)
def test_check_cf_dsg(self):
ds = MockTimeSeries() # time, lat, lon, depth
ds.setncattr("platform", "single_string")
# correct cf_role & featureType, pass
ds.setncattr("featureType", "profile")
ds.createDimension("profile", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("profile",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
self.assertTrue(all(r.value for r in results))
self.assertTrue(all(r.msgs == [] for r in results))
# correct featureType, incorrect cf_role var dimension
ds = MockTimeSeries() # time, lat, lon, depth
ds.setncattr("featureType", "trajectoryprofile")
ds.createDimension("trajectory", 2) # should only be 1
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectory",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==timeSeries, cf_role=timeseries_id
ds = MockTimeSeries()
ds.setncattr("featureType", "timeSeries")
ds.createDimension("station", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("station",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
# check should pass with no results
self.assertEqual(results, [])
# featureType==timeSeriesProfile, cf_role==timeseries_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "timeSeriesProfile")
ds.createDimension("station", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("station",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==timeSeriesProfile, cf_role==timeseries_id, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("platform", "platform")
ds.setncattr("featureType", "timeSeriesProfile")
ds.createDimension("station", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("station",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==trajectory, cf_role==trajectory_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectory")
ds.createDimension("trajectory", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectory",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==trajectory, cf_role==trajectory, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectory")
ds.createDimension("trajectory", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectory",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==trajectoryProfile, cf_role==trajectory_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectoryProfile")
ds.createDimension("trajectoryprof", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectoryprof",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==trajectoryProfile, cf_role==trajectory_id, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectoryProfile")
ds.createDimension("trajectoryprof", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectoryprof",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==profile, cf_role==profile_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "profile")
ds.createDimension("prof", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("prof",))
cf_role_var.setncattr("cf_role", "profile_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==profile, cf_role==profile_id, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("featureType", "profile")
ds.createDimension("prof", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("prof",))
cf_role_var.setncattr("cf_role", "profile_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==point -- do nothing
ds = MockTimeSeries()
ds.setncattr("featureType", "point")
ds.createDimension("blah", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("blah",))
cf_role_var.setncattr("cf_role", "profile_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
def test_check_platform_vocabulary(self):
ds = MockTimeSeries() # time, lat, lon, depth
ds.setncattr("platform_vocabulary", "http://google.com")
result = self.ioos.check_platform_vocabulary(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
ds.setncattr("platform_vocabulary", "bad")
self.assertFalse(self.ioos.check_platform_vocabulary(ds).value)
def test_check_qartod_variables_flags(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no QARTOD variables
results = self.ioos.check_qartod_variables_flags(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
# QARTOD variable without flag_values, flag_meanings (fail)
qr = ds.createVariable("depth_qc", np.byte)
qr.setncattr("standard_name", "spike_test_quality_flag")
results = self.ioos.check_qartod_variables_flags(ds)
self.assertTrue(not any(r.value for r in results)) # all False
# QARTOD variable with flag meanings, without flag_meanings
qr.setncattr("flag_values", np.array([0, 1, 2], dtype=np.byte))
results = self.ioos.check_qartod_variables_flags(ds)
self.assertEqual(results[0].value[0], results[0].value[1]) # should pass
self.assertFalse(results[1].value) # still fail
# QARTOD variable with flag meanings, flag_values
qr.setncattr("flag_meanings", "x y z") # alphanumeric, space-separated
results = self.ioos.check_qartod_variables_flags(ds)
self.assertEqual(results[0].value[0], results[0].value[1]) # pass
self.assertEqual(results[1].value[0], results[1].value[1]) # pass
# flag_values array not equal to length of flag_meanings
qr.setncattr("flag_values", np.array([0, 1], dtype=np.byte))
results = self.ioos.check_qartod_variables_flags(ds)
self.assertLess(results[0].value[0], results[0].value[1]) # should fail
self.assertEqual(results[1].value[0], results[1].value[1]) # pass
# flag_values right length, wrong type
qr.setncattr("flag_values", np.array([0, 1, 2], dtype=np.float64))
results = self.ioos.check_qartod_variables_flags(ds)
self.assertLess(results[0].value[0], results[0].value[1]) # should fail
self.assertEqual(results[1].value[0], results[1].value[1]) # pass
def test_check_qartod_variables_references(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no QARTOD variables
results = self.ioos.check_qartod_variables_references(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
# QARTOD variable without references (fail)
qr = ds.createVariable("depth_qc", np.byte)
qr.setncattr("flag_meanings", "blah")
qr.setncattr("standard_name", "spike_test_quality_flag")
results = self.ioos.check_qartod_variables_references(ds)
self.assertFalse(all(r.value for r in results))
# QARTOD variable with references (pass)
qr.setncattr("references", "http://services.cormp.org/quality.php")
results = self.ioos.check_qartod_variables_references(ds)
self.assertTrue(all(r.value for r in results))
self.assertEqual(results[0].msgs, []) # only one Result to test
# QARTOD variable with bad references (fail)
qr.setncattr(
"references", r"p9q384ht09q38@@####???????////??//\/\/\/\//\/\74ht"
)
results = self.ioos.check_qartod_variables_references(ds)
self.assertFalse(all(r.value for r in results))
def test_check_ioos_ingest(self):
ds = MockTimeSeries()
# no value, pass
res = self.ioos.check_ioos_ingest(ds)
self.assertTrue(res.value)
self.assertEqual(res.msgs, [])
# value false
ds.setncattr("ioos_ingest", "false")
self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
# value true
ds.setncattr("ioos_ingest", "true")
self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
# case insensitive
ds.setncattr("ioos_ingest", "True")
self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
ds.setncattr("ioos_ingest", "False")
self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
# anything else fails
ds.setncattr("ioos_ingest", "badval")
self.assertFalse(self.ioos.check_ioos_ingest(ds).value)
ds.setncattr("ioos_ingest", 0)
self.assertFalse(self.ioos.check_ioos_ingest(ds).value)
def test_vertical_dimension(self):
# MockTimeSeries has a depth variable, with axis of 'Z', units of 'm',
# and positive = 'down'
nc_obj = MockTimeSeries()
result = self.ioos.check_vertical_coordinates(nc_obj)[0]
self.assertEqual(*result.value)
nc_obj.variables["depth"].positive = "upwards"
result = self.ioos.check_vertical_coordinates(nc_obj)[0]
self.assertNotEqual(*result.value)
nc_obj.variables["depth"].positive = "up"
result = self.ioos.check_vertical_coordinates(nc_obj)[0]
self.assertEqual(*result.value)
# test units
nc_obj.variables["depth"].units = "furlong"
result = self.ioos.check_vertical_coordinates(nc_obj)[0]
expected_msg = (
"depth's units attribute furlong is not equivalent to "
"one of ('meter', 'inch', 'foot', 'yard', "
"'US_survey_foot', 'mile', 'fathom')"
)
self.assertEqual(result.msgs[0], expected_msg)
self.assertNotEqual(*result.value)
accepted_units = (
"meter",
"meters",
"inch",
"foot",
"yard",
"mile",
"miles",
"US_survey_foot",
"US_survey_feet",
"fathom",
"fathoms",
"international_inch",
"international_inches",
"international_foot",
"international_feet",
"international_yard",
"international_yards",
"international_mile",
"international_miles",
"inches",
"in",
"feet",
"ft",
"yd",
"mi",
)
for units in accepted_units:
nc_obj.variables["depth"].units = units
result = self.ioos.check_vertical_coordinates(nc_obj)[0]
self.assertEqual(*result.value)
|
[
"netCDF4.Dataset",
"compliance_checker.ioos.IOOS1_1Check",
"compliance_checker.tests.test_cf.get_results",
"compliance_checker.ioos.IOOS0_1Check",
"compliance_checker.ioos.IOOS1_2_PlatformIDValidator",
"compliance_checker.ioos.IOOS1_2Check",
"compliance_checker.ioos.NamingAuthorityValidator",
"numpy.array",
"compliance_checker.tests.helpers.MockTimeSeries"
] |
[((737, 751), 'compliance_checker.ioos.IOOS0_1Check', 'IOOS0_1Check', ([], {}), '()\n', (749, 751), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((1166, 1205), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (1173, 1205), False, 'from netCDF4 import Dataset\n'), ((2066, 2105), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (2073, 2105), False, 'from netCDF4 import Dataset\n'), ((3288, 3327), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (3295, 3327), False, 'from netCDF4 import Dataset\n'), ((4375, 4414), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (4382, 4414), False, 'from netCDF4 import Dataset\n'), ((5147, 5161), 'compliance_checker.ioos.IOOS1_1Check', 'IOOS1_1Check', ([], {}), '()\n', (5159, 5161), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((6114, 6153), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (6121, 6153), False, 'from netCDF4 import Dataset\n'), ((7210, 7249), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (7217, 7249), False, 'from netCDF4 import Dataset\n'), ((8551, 8590), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (8558, 8590), False, 'from netCDF4 import Dataset\n'), ((9859, 9898), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (9866, 9898), False, 'from netCDF4 import Dataset\n'), ((11045, 11059), 'compliance_checker.ioos.IOOS1_2Check', 'IOOS1_2Check', ([], {}), '()\n', (11057, 11059), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((11167, 11183), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (11181, 11183), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((11413, 11433), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (11424, 11433), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((11527, 11574), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {'default_fill_value': '(9999999999.0)'}), '(default_fill_value=9999999999.0)\n', (11541, 11574), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((12197, 12217), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (12208, 12217), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((12365, 12381), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (12379, 12381), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((12610, 12630), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (12621, 12630), False, 'from compliance_checker.tests.test_cf import 
get_results\n'), ((12749, 12796), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {'default_fill_value': '(9999999999.0)'}), '(default_fill_value=9999999999.0)\n', (12763, 12796), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((13324, 13344), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (13335, 13344), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((13458, 13474), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (13472, 13474), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((16070, 16086), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (16084, 16086), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((17106, 17122), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (17120, 17122), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((17880, 17896), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (17894, 17896), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((20546, 20562), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (20560, 20562), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((20729, 20749), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (20740, 20749), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((21421, 21441), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (21432, 21441), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((21666, 21686), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (21677, 21686), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((21888, 21908), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (21899, 21908), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((22007, 22023), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (22021, 22023), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((23143, 23159), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (23157, 23159), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((23301, 23321), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (23312, 23321), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((23762, 23782), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (23773, 23782), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((24074, 24094), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (24085, 24094), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((24297, 24317), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (24308, 24317), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((24540, 
24560), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (24551, 24560), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((24715, 24741), 'compliance_checker.ioos.NamingAuthorityValidator', 'NamingAuthorityValidator', ([], {}), '()\n', (24739, 24741), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((25486, 25515), 'compliance_checker.ioos.IOOS1_2_PlatformIDValidator', 'IOOS1_2_PlatformIDValidator', ([], {}), '()\n', (25513, 25515), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((25603, 25632), 'compliance_checker.ioos.IOOS1_2_PlatformIDValidator', 'IOOS1_2_PlatformIDValidator', ([], {}), '()\n', (25630, 25632), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((25727, 25756), 'compliance_checker.ioos.IOOS1_2_PlatformIDValidator', 'IOOS1_2_PlatformIDValidator', ([], {}), '()\n', (25754, 25756), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((25849, 25878), 'compliance_checker.ioos.IOOS1_2_PlatformIDValidator', 'IOOS1_2_PlatformIDValidator', ([], {}), '()\n', (25876, 25878), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((26158, 26174), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (26172, 26174), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((27368, 27384), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (27382, 27384), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((28048, 28064), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (28062, 28064), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((29193, 29209), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (29207, 29209), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((29540, 29556), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (29554, 29556), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((30305, 30321), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (30319, 30321), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((30931, 30947), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (30945, 30947), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((31560, 31576), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (31574, 31576), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((32151, 32167), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (32165, 32167), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((32785, 32801), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), 
'()\n', (32799, 32801), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((33365, 33381), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (33379, 33381), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((33960, 33976), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (33974, 33976), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((34565, 34581), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (34579, 34581), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((35162, 35178), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (35176, 35178), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((35721, 35737), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (35735, 35737), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((36263, 36279), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (36277, 36279), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((36801, 36817), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (36815, 36817), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((37232, 37248), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (37246, 37248), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((37401, 37421), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (37412, 37421), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((39233, 39249), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (39247, 39249), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((39407, 39427), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (39418, 39427), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((40467, 40483), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (40481, 40483), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((41545, 41561), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (41559, 41561), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((37887, 37921), 'numpy.array', 'np.array', (['[0, 1, 2]'], {'dtype': 'np.byte'}), '([0, 1, 2], dtype=np.byte)\n', (37895, 37921), True, 'import numpy as np\n'), ((38575, 38606), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.byte'}), '([0, 1], dtype=np.byte)\n', (38583, 38606), True, 'import numpy as np\n'), ((38909, 38946), 'numpy.array', 'np.array', (['[0, 1, 2]'], {'dtype': 'np.float64'}), '([0, 1, 2], dtype=np.float64)\n', (38917, 38946), True, 'import numpy as np\n')]
|
import multiprocessing as mp
from multiprocessing.sharedctypes import RawArray
from ctypes import c_bool, c_double
import numpy as np
import pandas as pd
def standardize(X):
"""
Standardize each row in X to mean = 0 and SD = 1.
"""
X_m = np.ma.masked_invalid(X)
return ((X.T - X_m.mean(axis=1)) / X_m.std(axis=1)).T.data
mask = None
X_s = None
X = None
k = None
def knn_init(k_, mask_, X_, X_s_):
global k, mask, X_s, X
mask = from_shared(mask_)
X_s = from_shared(X_s_)
X = from_shared(X_)
k = k_
def knn_work(i):
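    # Impute row i in place: score every row by its dot product with row i on the
    # standardized, zero-filled copy X_s, normalised by the number of mutually
    # observed columns; then fill each NaN in row i with the mean of that column
    # over the k most similar rows that have an observed value there.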
print(i)
dx = X_s.dot(X_s[i,:]) / ((~ mask) & (~ mask[i,:])).sum(axis=1)
ix = (-dx).argsort()
for j in np.isnan(X[i,:]).nonzero()[0]:
v = X[ix,j]
v = v[np.invert(np.isnan(v))]
X[i,j] = v[:k].mean()
def ctype_to_dtype(ctype):
if ctype == c_double:
return np.float64
elif ctype == c_bool:
        return np.bool_
else:
raise Exception
def to_shared(arr, type=c_double):
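    # Copy arr into a ctypes RawArray (unlocked shared memory) and return a
    # (buffer, dtype, shape) triple; from_shared() rebuilds a NumPy view over the
    # same buffer, so pool workers operate on shared data instead of per-task copies.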
shared = RawArray(type, arr.flat)
return (shared, ctype_to_dtype(type), arr.shape)
def from_shared(args):
arr, dtype, shape = args
return np.frombuffer(arr, dtype=dtype).reshape(shape)
class KNNImputer(object):
def __init__(self, k=50):
self._k = k
def fit_transform(self, X, axis=0):
assert(axis in (0,1))
if isinstance(X, pd.DataFrame):
X = X.dropna(axis=0, how="all").dropna(axis=1, thresh=self._k)
return pd.DataFrame(
self.fit_transform(X.as_matrix(), axis=axis),
index=X.index,
columns=X.columns)
if axis==0:
return self.fit_transform(X.T, axis=1).T
X_s = standardize(X)
mask = np.ma.masked_invalid(X_s).mask
X_s[np.isnan(X_s)] = 0
mask_shared = to_shared(mask, c_bool)
X_shared = to_shared(X)
X_s_shared = to_shared(X_s)
pool = mp.Pool(initializer=knn_init,
initargs=(self._k, mask_shared, X_shared, X_s_shared))
pool.map(knn_work, range(X.shape[0]))
return from_shared(X_shared)
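# A minimal usage sketch, assuming missing values are encoded as NaN and enough
# observations remain per column:
#
#   X = np.random.rand(200, 30)
#   X[np.random.rand(*X.shape) < 0.1] = np.nan
#   X_imputed = KNNImputer(k=10).fit_transform(X, axis=1)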
|
[
"numpy.frombuffer",
"multiprocessing.sharedctypes.RawArray",
"numpy.ma.masked_invalid",
"numpy.isnan",
"multiprocessing.Pool"
] |
[((255, 278), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['X'], {}), '(X)\n', (275, 278), True, 'import numpy as np\n'), ((1008, 1032), 'multiprocessing.sharedctypes.RawArray', 'RawArray', (['type', 'arr.flat'], {}), '(type, arr.flat)\n', (1016, 1032), False, 'from multiprocessing.sharedctypes import RawArray\n'), ((1948, 2036), 'multiprocessing.Pool', 'mp.Pool', ([], {'initializer': 'knn_init', 'initargs': '(self._k, mask_shared, X_shared, X_s_shared)'}), '(initializer=knn_init, initargs=(self._k, mask_shared, X_shared,\n X_s_shared))\n', (1955, 2036), True, 'import multiprocessing as mp\n'), ((1150, 1181), 'numpy.frombuffer', 'np.frombuffer', (['arr'], {'dtype': 'dtype'}), '(arr, dtype=dtype)\n', (1163, 1181), True, 'import numpy as np\n'), ((1739, 1764), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['X_s'], {}), '(X_s)\n', (1759, 1764), True, 'import numpy as np\n'), ((1782, 1795), 'numpy.isnan', 'np.isnan', (['X_s'], {}), '(X_s)\n', (1790, 1795), True, 'import numpy as np\n'), ((677, 694), 'numpy.isnan', 'np.isnan', (['X[i, :]'], {}), '(X[i, :])\n', (685, 694), True, 'import numpy as np\n'), ((752, 763), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (760, 763), True, 'import numpy as np\n')]
|
from __future__ import print_function
import torch, PIL.Image, cv2, pickle, sys, argparse
import numpy as np
import openmesh as om
from tqdm import trange
sys.path.append("../src/")
from network import shading_net
import renderer as rd
from utility import subdiv_mesh_x4
from utility import CamPara
from utility import make_trimesh
from utility import flatten_naval
from utility import smpl_detoe
from matplotlib import pyplot as plt
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--num', type = int, required = True,
help = 'data_num')
parser.add_argument('--set', type = str, required = True,
help = 'recon or syn')
opt = parser.parse_args()
assert opt.set in ["recon", "syn"], \
"set must be one of [recon, syn]"
# prepare
data_num = int(opt.num)
model_file = "../demo/pretrained_model/pretrained_shading.pth"
device = torch.device("cuda:0")
net_shading = shading_net().to(device).eval()
net_shading.load_state_dict(torch.load(model_file, map_location='cuda:0'))
renderer = rd.SMPLRenderer(face_path =
"../predef/smpl_faces.npy")
cam_para = CamPara(K = np.array([[1000, 0, 224],
[0, 1000, 224],
[0, 0, 1]]))
with open ('../predef/exempt_vert_list.pkl', 'rb') as fp:
exempt_vert_list = pickle.load(fp)
tr = trange(data_num, desc='Bar desc', leave=True)
for test_num in tr:
# read mesh
mesh = om.read_trimesh("./eval_data/%s_set/pred_save/a_%03d.obj" % \
(opt.set, test_num))
proj_sil = renderer.silhouette(verts = mesh.points())
proj_sil_l = cv2.resize(proj_sil, dsize=(448, 448))
proj_sil_l[proj_sil_l<0.5] = 0
proj_sil_l[proj_sil_l>=0.5] = 1
# load data
src_img = np.array(PIL.Image.open("./eval_data/%s_set/input_img/%03d_img.png"%\
(opt.set, test_num)))
src_img_l = cv2.resize(src_img, dsize=(448, 448))
input_arr = np.rollaxis(src_img_l, 2, 0)
input_arr = np.expand_dims(input_arr, 0)
input_arr = torch.tensor(input_arr).float().to(device)
input_arr = input_arr/255.0
proj_sil_l = np.expand_dims(proj_sil_l, 0)
proj_sil_l = np.expand_dims(proj_sil_l, 0)
proj_sil_l = torch.tensor(proj_sil_l)
proj_sil_l = proj_sil_l.float().to(device)
# predict
pred = net_shading(input_arr, proj_sil_l)
pred_depth = np.array(pred.data.cpu()[0][0])
# pred_depth = np.load('/home/zhangtianyi/github/hmd/eval/eval_data/syn_set/pred_depth/' + '%03d_img.npy'%\
# (test_num))
# pred_depth = pred_depth*5.0
#show_img_arr(src_img)
mesh = flatten_naval(mesh)
# remove toes
mesh = smpl_detoe(mesh)
# subdivide the mesh to x4
subdiv_mesh = subdiv_mesh_x4(mesh)
# genrate boundary buffering mask
sil_img = rd.render_sil(subdiv_mesh)
bound_img = rd.render_bound(subdiv_mesh)
radius = 10
circ_template = np.zeros((radius*2+1, radius*2+1))
for i in range(radius):
cv2.circle(img = circ_template,
center = (radius, radius),
radius = i+2,
color = (radius-i)*0.1,
thickness = 2)
img_size = bound_img.shape
draw_img = np.zeros(img_size, dtype=np.float)
draw_img = np.pad(draw_img, radius, 'edge')
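    # stamp the radial falloff template onto every boundary pixel (keeping the
    # elementwise max where stamps overlap) so the mask fades out near the silhouette edge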
for y in range(img_size[0]):
for x in range(img_size[1]):
if bound_img[y, x] == 0:
continue
win = draw_img[y:y+2*radius+1, x:x+2*radius+1]
win[circ_template>win] = circ_template[circ_template>win]
draw_img[y:y+2*radius+1, x:x+2*radius+1] = win
final_mask = sil_img - draw_img[10:10+img_size[0], 10:10+img_size[1]]
final_mask[sil_img==0] = 0
# apply bias
d_max = np.max(pred_depth[pred_depth!=0])
d_min = np.min(pred_depth[pred_depth!=0])
bias = -(d_max - d_min)/2.
pred_depth = pred_depth + bias
# apply bright scale
weight_map = np.dot(src_img_l[...,:3], [0.299, 0.587, 0.114])
pred_depth = pred_depth * weight_map / 255.
pred_depth = pred_depth * 0.001
pred_depth = pred_depth * final_mask
# plt.imshow(pred_depth)
# plt.show()
# project mesh to depth and merge with depth difference
proj_depth, visi_map = rd.render_depth(subdiv_mesh, require_visi = True)
# get all visible vertex index
verts = subdiv_mesh.points()
faces = subdiv_mesh.face_vertex_indices()
visi_vert_inds = []
for y in range(visi_map.shape[0]):
for x in range(visi_map.shape[1]):
f_ind = visi_map[y, x]
if f_ind >= len(faces):
continue
else:
fv = faces[f_ind]
visi_vert_inds.append(fv[0])
visi_vert_inds.append(fv[1])
visi_vert_inds.append(fv[2])
visi_vert_inds = set(visi_vert_inds)
    # filter out exempt vertices
visi_vert_inds = list(set(visi_vert_inds).difference(exempt_vert_list))
visi_vert_inds_m = []
for i in visi_vert_inds:
xy = cam_para.project(verts[i])
x = int(round(xy[1]))
y = int(round(xy[0]))
if x<0 or y<0 or x>=448 or y>=448:
continue
if np.absolute(proj_depth[x, y] - verts[i,2])<0.01:
visi_vert_inds_m.append(i)
for i in visi_vert_inds_m:
xy = cam_para.project(verts[i])
x = int(round(xy[1]))
y = int(round(xy[0]))
depth = proj_depth[x, y] + pred_depth[x, y]
#print(depth, verts[i])
if depth>8.:
continue
verts[i][2] = depth
deformed_mesh = make_trimesh(verts, faces)
om.write_mesh("./eval_data/%s_set/pred_save/s_%03d.obj" % \
(opt.set, test_num), deformed_mesh)
|
[
"numpy.absolute",
"network.shading_net",
"argparse.ArgumentParser",
"pickle.load",
"utility.subdiv_mesh_x4",
"torch.device",
"sys.path.append",
"numpy.pad",
"torch.load",
"utility.flatten_naval",
"utility.smpl_detoe",
"numpy.max",
"utility.make_trimesh",
"numpy.rollaxis",
"cv2.resize",
"openmesh.write_mesh",
"renderer.SMPLRenderer",
"cv2.circle",
"tqdm.trange",
"numpy.min",
"numpy.dot",
"openmesh.read_trimesh",
"renderer.render_bound",
"renderer.render_sil",
"numpy.zeros",
"numpy.expand_dims",
"renderer.render_depth",
"numpy.array",
"torch.tensor"
] |
[((155, 181), 'sys.path.append', 'sys.path.append', (['"""../src/"""'], {}), "('../src/')\n", (170, 181), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((463, 488), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (486, 488), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((902, 924), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (914, 924), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((1057, 1110), 'renderer.SMPLRenderer', 'rd.SMPLRenderer', ([], {'face_path': '"""../predef/smpl_faces.npy"""'}), "(face_path='../predef/smpl_faces.npy')\n", (1072, 1110), True, 'import renderer as rd\n'), ((1388, 1433), 'tqdm.trange', 'trange', (['data_num'], {'desc': '"""Bar desc"""', 'leave': '(True)'}), "(data_num, desc='Bar desc', leave=True)\n", (1394, 1433), False, 'from tqdm import trange\n'), ((999, 1044), 'torch.load', 'torch.load', (['model_file'], {'map_location': '"""cuda:0"""'}), "(model_file, map_location='cuda:0')\n", (1009, 1044), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((1366, 1381), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1377, 1381), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((1477, 1562), 'openmesh.read_trimesh', 'om.read_trimesh', (["('./eval_data/%s_set/pred_save/a_%03d.obj' % (opt.set, test_num))"], {}), "('./eval_data/%s_set/pred_save/a_%03d.obj' % (opt.set, test_num)\n )\n", (1492, 1562), True, 'import openmesh as om\n'), ((1672, 1710), 'cv2.resize', 'cv2.resize', (['proj_sil'], {'dsize': '(448, 448)'}), '(proj_sil, dsize=(448, 448))\n', (1682, 1710), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((1963, 2000), 'cv2.resize', 'cv2.resize', (['src_img'], {'dsize': '(448, 448)'}), '(src_img, dsize=(448, 448))\n', (1973, 2000), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((2017, 2045), 'numpy.rollaxis', 'np.rollaxis', (['src_img_l', '(2)', '(0)'], {}), '(src_img_l, 2, 0)\n', (2028, 2045), True, 'import numpy as np\n'), ((2062, 2090), 'numpy.expand_dims', 'np.expand_dims', (['input_arr', '(0)'], {}), '(input_arr, 0)\n', (2076, 2090), True, 'import numpy as np\n'), ((2200, 2229), 'numpy.expand_dims', 'np.expand_dims', (['proj_sil_l', '(0)'], {}), '(proj_sil_l, 0)\n', (2214, 2229), True, 'import numpy as np\n'), ((2247, 2276), 'numpy.expand_dims', 'np.expand_dims', (['proj_sil_l', '(0)'], {}), '(proj_sil_l, 0)\n', (2261, 2276), True, 'import numpy as np\n'), ((2294, 2318), 'torch.tensor', 'torch.tensor', (['proj_sil_l'], {}), '(proj_sil_l)\n', (2306, 2318), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((2699, 2718), 'utility.flatten_naval', 'flatten_naval', (['mesh'], {}), '(mesh)\n', (2712, 2718), False, 'from utility import flatten_naval\n'), ((2749, 2765), 'utility.smpl_detoe', 'smpl_detoe', (['mesh'], {}), '(mesh)\n', (2759, 2765), False, 'from utility import smpl_detoe\n'), ((2816, 2836), 'utility.subdiv_mesh_x4', 'subdiv_mesh_x4', (['mesh'], {}), '(mesh)\n', (2830, 2836), False, 'from utility import subdiv_mesh_x4\n'), ((2890, 2916), 'renderer.render_sil', 'rd.render_sil', (['subdiv_mesh'], {}), '(subdiv_mesh)\n', (2903, 2916), True, 'import renderer as rd\n'), ((2933, 2961), 'renderer.render_bound', 'rd.render_bound', (['subdiv_mesh'], {}), '(subdiv_mesh)\n', (2948, 2961), True, 'import renderer as rd\n'), ((2999, 3041), 'numpy.zeros', 'np.zeros', (['(radius * 2 + 1, radius * 2 + 1)'], {}), '((radius * 2 + 1, radius * 2 + 1))\n', (3007, 3041), True, 'import 
numpy as np\n'), ((3313, 3347), 'numpy.zeros', 'np.zeros', (['img_size'], {'dtype': 'np.float'}), '(img_size, dtype=np.float)\n', (3321, 3347), True, 'import numpy as np\n'), ((3363, 3395), 'numpy.pad', 'np.pad', (['draw_img', 'radius', '"""edge"""'], {}), "(draw_img, radius, 'edge')\n", (3369, 3395), True, 'import numpy as np\n'), ((3860, 3895), 'numpy.max', 'np.max', (['pred_depth[pred_depth != 0]'], {}), '(pred_depth[pred_depth != 0])\n', (3866, 3895), True, 'import numpy as np\n'), ((3906, 3941), 'numpy.min', 'np.min', (['pred_depth[pred_depth != 0]'], {}), '(pred_depth[pred_depth != 0])\n', (3912, 3941), True, 'import numpy as np\n'), ((4053, 4102), 'numpy.dot', 'np.dot', (['src_img_l[..., :3]', '[0.299, 0.587, 0.114]'], {}), '(src_img_l[..., :3], [0.299, 0.587, 0.114])\n', (4059, 4102), True, 'import numpy as np\n'), ((4371, 4418), 'renderer.render_depth', 'rd.render_depth', (['subdiv_mesh'], {'require_visi': '(True)'}), '(subdiv_mesh, require_visi=True)\n', (4386, 4418), True, 'import renderer as rd\n'), ((5701, 5727), 'utility.make_trimesh', 'make_trimesh', (['verts', 'faces'], {}), '(verts, faces)\n', (5713, 5727), False, 'from utility import make_trimesh\n'), ((5732, 5829), 'openmesh.write_mesh', 'om.write_mesh', (["('./eval_data/%s_set/pred_save/s_%03d.obj' % (opt.set, test_num))", 'deformed_mesh'], {}), "('./eval_data/%s_set/pred_save/s_%03d.obj' % (opt.set,\n test_num), deformed_mesh)\n", (5745, 5829), True, 'import openmesh as om\n'), ((1164, 1217), 'numpy.array', 'np.array', (['[[1000, 0, 224], [0, 1000, 224], [0, 0, 1]]'], {}), '([[1000, 0, 224], [0, 1000, 224], [0, 0, 1]])\n', (1172, 1217), True, 'import numpy as np\n'), ((3070, 3182), 'cv2.circle', 'cv2.circle', ([], {'img': 'circ_template', 'center': '(radius, radius)', 'radius': '(i + 2)', 'color': '((radius - i) * 0.1)', 'thickness': '(2)'}), '(img=circ_template, center=(radius, radius), radius=i + 2, color=\n (radius - i) * 0.1, thickness=2)\n', (3080, 3182), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((5306, 5349), 'numpy.absolute', 'np.absolute', (['(proj_depth[x, y] - verts[i, 2])'], {}), '(proj_depth[x, y] - verts[i, 2])\n', (5317, 5349), True, 'import numpy as np\n'), ((939, 952), 'network.shading_net', 'shading_net', ([], {}), '()\n', (950, 952), False, 'from network import shading_net\n'), ((2107, 2130), 'torch.tensor', 'torch.tensor', (['input_arr'], {}), '(input_arr)\n', (2119, 2130), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n')]
|
import ovito
print("Hello, this is OVITO %i.%i.%i" % ovito.version)
# Import OVITO modules.
from ovito.io import *
from ovito.modifiers import *
from ovito.data import *
from collections import Counter
# Import standard Python and NumPy modules.
import sys
import numpy
import os
from ovito.pipeline import StaticSource, Pipeline
from ovito.io.ase import ase_to_ovito
from ase.atoms import Atoms
from ase.db import connect
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib.pyplot as plt
import itertools
##################
# run with
# conda activate ovito
# ~/apps/ovito-3.0.0-dev284-x86_64/bin/ovitos benchmarking_ovito.py
####################
def read_ase_db(db_path):
"""From the path to an ASE database file, return a list of ASE atom object contained in it.
.. codeauthor:: <NAME> <<EMAIL>>
"""
db = connect(db_path)
ase_list = []
for idx_db in range(len(db)):
atoms = db.get_atoms(selection=idx_db + 1, add_additional_information=True)
        # put info from atoms.info['data'] back in its original place (atoms.info);
        # when an ASE Atoms object is saved into the SQLite database, ASE does not
        # automatically store atoms.info, so its contents end up under
        # atoms.info['data'] instead
if 'data' in atoms.info.keys():
atoms.info = atoms.info['data']
ase_list.append(atoms)
return ase_list
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.show()
#filepath = '/home/ziletti/Documents/calc_nomadml/rot_inv_3d/structures_for_paper/four_grains/four_grains_poly.xyz'
#node = import_file(filepath, columns=["Particle Type", "Position.X", "Position.Y", "Position.Z"])
ase_db_dataset_dir = '/home/ziletti/Documents/calc_nomadml/rot_inv_3d/db_ase'
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_pristine' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-0.1%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-0.2%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-0.6%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-1%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-2%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-4%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-5%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-8%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-10%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-12%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-20%' + '.db')
ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-30%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-50%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-1%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-2%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-5%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-10%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-20%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-50%' + '.db')
ase_atoms_list = read_ase_db(db_path=ase_db)
y_pred = []
y_true = []
atom_classes_list = []
for idx, atoms in enumerate(ase_atoms_list):
if idx % 1000 == 0:
print(idx)
# if str(atoms.info['target']) == '227':
if str(atoms.info['target']) == '227' or str(atoms.info['target']) == '221':
pass
# if False:
# pass
else:
# atoms = atoms*(2, 2, 2)
data = ase_to_ovito(atoms)
node = Pipeline(source=StaticSource(data=data))
# node.modifiers.append(CommonNeighborAnalysisModifier(mode=CommonNeighborAnalysisModifier.Mode.FixedCutoff))
# node.modifiers.append(CommonNeighborAnalysisModifier(mode=CommonNeighborAnalysisModifier.Mode.AdaptiveCutoff))
node.modifiers.append(AcklandJonesModifier())
# node.modifiers.append(BondAngleAnalysisModifier())
# node.modifiers.append(PolyhedralTemplateMatchingModifier(rmsd_cutoff=0.0))
# Let OVITO's data pipeline do the heavy work.
node.compute()
# A two-dimensional array containing the three CNA indices
# computed for each bond in the system.
atom_classes = list(node.output.particle_properties['Structure Type'].array)
#AcklandJonesModifier.Type.OTHER(0)
#AcklandJonesModifier.Type.FCC(1)
#AcklandJonesModifier.Type.HCP(2)
#AcklandJonesModifier.Type.BCC(3)
#AcklandJonesModifier.Type.ICO(4)
# CommonNeighborAnalysisModifier.Type.OTHER(0)
# CommonNeighborAnalysisModifier.Type.FCC(1)
# CommonNeighborAnalysisModifier.Type.HCP(2)
# CommonNeighborAnalysisModifier.Type.BCC(3)
# CommonNeighborAnalysisModifier.Type.ICO(4)
#
classes = dict(ack_jones=['None', '225', '194', '229', 'Ic'], cna=['None', '225', '194', '229', 'Ic'],
ptm=['None', '225', '194', '229', 'Ic', '221', '227', '227'],
baa=['None', '225', '194', '229', 'Ic'])
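        # each list maps the modifier's integer structure type (the index) to a
        # space-group label: 0 -> unidentified, 1 -> fcc (225), 2 -> hcp (194),
        # 3 -> bcc (229), 4 -> icosahedral; the ptm list adds sc (221) and the diamonds (227)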
# ovito 3.0.0
# Type.OTHER(0)
# PolyhedralTemplateMatchingModifier.Type.FCC(1)
# PolyhedralTemplateMatchingModifier.Type.HCP(2)
# PolyhedralTemplateMatchingModifier.Type.BCC(3)
# PolyhedralTemplateMatchingModifier.Type.ICO(4)
# PolyhedralTemplateMatchingModifier.Type.SC(5)
# PolyhedralTemplateMatchingModifier.Type.CUBIC_DIAMOND(6)
# PolyhedralTemplateMatchingModifier.Type.HEX_DIAMOND(7)
y_pred_i = [classes['cna'][item] for item in atom_classes]
#y_pred_acna = [acna_classes[item] for item in y_pred]
# y_pred_baa = [baa_classes[item] for item in y_pred]
#print(y_pred_this)
#atoms = atoms * (2, 2, 2)
atom_class_true = [str(atoms.info['target'])] * len(atoms)
y_true.extend(atom_class_true)
y_pred.extend(y_pred_i)
atom_classes_list.extend(atom_classes)
print(len(y_true))
print('y_true', Counter(y_true))
print('y_pred', Counter(y_pred))
#print(Counter(y_true), Counter(y_pred))
print('Accuracy: {}'.format(accuracy_score(y_true, y_pred)))
cnf_matrix = confusion_matrix(y_true, y_pred)
np.set_printoptions(precision=4)
print(cnf_matrix)
# y_pred Counter({'194': 583828, '229': 116999, '225': 115152, 'None': 968})
ack_jones_classes = ['194', '229', '225', 'None']
# plot_confusion_matrix(cnf_matrix, classes=ack_jones_classes,
# normalize=False, title='Confusion matrix, without normalization')
# Loop over particles and print their CNA indices.
#for idx_particle, particle_index in enumerate(range(node.output.number_of_particles)):
#pass
# Print particle index (1-based).
#sys.stdout.write("%i " % (particle_index + 1))
#outname = 'BondAngleAnalysis.counts.'
#print(node.output.particle_properties['Structure Type'].array[idx_particle])
# print(y_pred[idx_particle])
# Create local list with CNA indices of the bonds of the current particle.
#bond_index_list = list(bond_enumerator.bonds_of_particle(particle_index))
#local_cna_indices = cna_indices[bond_index_list]
# Count how often each type of CNA triplet occurred.
#unique_triplets, triplet_counts = row_histogram(local_cna_indices)
# Print list of triplets with their respective counts.
#for triplet, count in zip(unique_triplets, triplet_counts):
# sys.stdout.write("%s:%i " % (triplet, count))
# End of particle line
#sys.stdout.write("\n")
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"ovito.pipeline.StaticSource",
"matplotlib.pyplot.imshow",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylabel",
"collections.Counter",
"matplotlib.pyplot.colorbar",
"ovito.io.ase.ase_to_ovito",
"ase.db.connect",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.xticks",
"os.path.join",
"matplotlib.pyplot.xlabel"
] |
[((4035, 4120), 'os.path.join', 'os.path.join', (['ase_db_dataset_dir', "('hcp-sc-fcc-diam-bcc_displacement-30%' + '.db')"], {}), "(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-30%' + '.db'\n )\n", (4047, 4120), False, 'import os\n'), ((7813, 7845), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (7829, 7845), False, 'from sklearn.metrics import confusion_matrix\n'), ((7846, 7878), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)'}), '(precision=4)\n', (7865, 7878), True, 'import numpy as np\n'), ((904, 920), 'ase.db.connect', 'connect', (['db_path'], {}), '(db_path)\n', (911, 920), False, 'from ase.db import connect\n'), ((2018, 2068), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (2028, 2068), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2089), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2082, 2089), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2108), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2106, 2108), True, 'import matplotlib.pyplot as plt\n'), ((2154, 2198), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (2164, 2198), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2234), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (2213, 2234), True, 'import matplotlib.pyplot as plt\n'), ((2540, 2564), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (2550, 2564), True, 'import matplotlib.pyplot as plt\n'), ((2569, 2598), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (2579, 2598), True, 'import matplotlib.pyplot as plt\n'), ((2603, 2621), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2619, 2621), True, 'import matplotlib.pyplot as plt\n'), ((2626, 2636), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2634, 2636), True, 'import matplotlib.pyplot as plt\n'), ((7645, 7660), 'collections.Counter', 'Counter', (['y_true'], {}), '(y_true)\n', (7652, 7660), False, 'from collections import Counter\n'), ((7678, 7693), 'collections.Counter', 'Counter', (['y_pred'], {}), '(y_pred)\n', (7685, 7693), False, 'from collections import Counter\n'), ((5154, 5173), 'ovito.io.ase.ase_to_ovito', 'ase_to_ovito', (['atoms'], {}), '(atoms)\n', (5166, 5173), False, 'from ovito.io.ase import ase_to_ovito\n'), ((7767, 7797), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (7781, 7797), False, 'from sklearn.metrics import accuracy_score\n'), ((5205, 5228), 'ovito.pipeline.StaticSource', 'StaticSource', ([], {'data': 'data'}), '(data=data)\n', (5217, 5228), False, 'from ovito.pipeline import StaticSource, Pipeline\n')]
|
import subprocess as sbp
import sys
import os
import numpy as np
import numpy.linalg as la
import pandas as pd
import time
import math
from ast import literal_eval
from pdb import set_trace as pst
'''
decfreq01: The original optimization, with the negative frequency as the first mode.
decfreq02: Move the atoms along the negative-frequency direction, then the normal all-positive frequencies are obtained.
decfreq03: Extracted from decfreq02's last optimized geometry.
All indexes start with 0!
The carbon count should be even, specifically double an odd number (why?); otherwise cellsnumber needs to be checked.
'''
#=========================prefix setup part==============================#
#filesname_FC2fchk = 'decfreq02'
filesname_FC3fchk = 'C14H30Freq'
filesname_FC2fchk = 'C34H70Freq'
#filesname_FC2fchk = 'C34H70HFixed'
#filesname_com = 'decfreq03cart'
filesname_FC3com = 'C14H30Freq'
filesname_FC2com ='C34H70Freq'
filesname_FC3csv = 'FC3_C14AnaHess.csv'
#++++++++++++++constant setting++++++++++++++++++++++++++++++++++++++
meconstant = 1822.888486
Ang_bohr = 1.8897259886
au_cm = 4.359743E-18/(1.660538E-27 * 0.5292E-10 * 0.5292E-10/meconstant)#hartree/(me*bohr*bohr) converted to SI units (amu/meconstant = electron mass)
len_a = 2.567381*Ang_bohr #The average cell length (converted to bohr)
massau = [12.0107*meconstant,1.00794*meconstant] #From NIST database
#XXX K depends on how many cells are used. Here the FC only includes 1 neighboring cell on each side, so 3 cells in total
#klist= np.linspace(0,1,K//2+1)#XXX here still use 0-1 but later should times pi/len_a when using
#FC4klist = np.linspace(0,2,K4+1)[:K4//2+1]#XXX here still use 0-1 but later should times pi/len_a when using
#XXX: plus cellsnumber and endidx
#........................... 2nd
#XXX these global variables can be mutated inside functions, but not rebound without a global declaration.
FC2Coef_kp = {}
#............................3rd
FC2atomsname= []
#coordA = []
cal_method = '#p freq B3LYP/6-31G(d)'
#atomcharge = 0
#atommult = 1
'''
H13 H14
\/
C4-- C1 -- C2 -- C3
/\
H11 H12
'''
#===============================HARMONIC PART===============================#
def harmFreq_per_k():
for i in range(len(FC2klist)):
        getCoef_w_perk(i, FC2)
    print("The w (omega) in cm^-1 is:\n")
    print(w_omgkpcm[0])
#XXX the following is to check Coeficient is right
#eigvaltest = np.zeros((len(FC2klist),P),dtype = np.complex_)
#for _p in range(P):
# for kidx in range(len(FC2klist)):
# for kappa in range(P):
# atom1 = 3*(cellsnumber[0][kappa//3] - 1) + kappa%3
# for gamma in range(P):
# for midx in range(-endidx,endidx + 1):
# atom2 = 3*(cellsnumber[midx][gamma//3] - 1) + gamma%3
# eigvaltest[kidx][_p] += FC2[getidx(atom1,atom2)] * Coef_kp[kidx][kappa][_p] * Coef_kp[kidx][gamma][_p].conjugate()* math.e**(- 1j * midx * klistFC2[kidx] * math.pi) / (math.sqrt(massau[int(kappa>5)]*massau[int(gamma>5)]))
#print(w_omgkp)
#print(eigvaltest)
#For now I just calculate the neaby cells
#Fuvk is 18*18 for atoms in first cell but Force constant was store in 96*96 but in lower dense triangular form.
#XXX: u and v is the uth vth Cartesian Coordinates!!!
def getCoef_w_perk(kidx,Fcc):
kk = FC2klist[kidx] + 0.1
Fuvk = np.zeros((P,P),dtype = np.complex_)
#XXX: m is just -1 0 1 for decane
for u in range(P):
atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
for v in range(P):
eachterm = 0.0
for midx in range(-FC2endidx,FC2endidx+1):
# F u(0)v(m) :
# Cell[m] [v//3] give us the atoms number in FC matrix XXX:which started with 1!
# atom2 is the nth coordinates of each atoms XXX: which started with 0!
atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
# transfer to k space
eachterm += Fcc[getidx(atom1,atom2)]* math.e ** (-1j * kk * midx*math.pi)#/(math.sqrt(massau[int(u>5)]*massau[int(v>5)]))
#eachterm += Fcc[atom1,atom2]* math.e ** (-1j * kk * midx * len_a)
            # mass weighting: coordinate indexes > 5 belong to hydrogens rather than carbons
Fuvk[u][v] = eachterm /(math.sqrt(massau[int(u>5)]*massau[int(v>5)]))
    eigval, eigvector = la.eigh(Fuvk)#hermitian matrix gives real eigenvalues
#print(eigval)
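    # eigval is in hartree/(me*bohr^2) since massau is in electron-mass units;
    # multiplying by au_cm gives omega^2 in s^-2, and dividing the resulting omega
    # by 2*pi*c (c in cm/s) converts it to a wavenumber in cm^-1.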
for i in range(P):
w_omgkp[kidx][i] = math.sqrt(abs(eigval[i]))
w_omgkpcm[kidx][i] = math.sqrt(abs(eigval[i]*au_cm))/(2.99792458E10 * 2 * math.pi)
print(w_omgkpcm[kidx])
    FC2Coef_kp[kidx] = eigvector.conjugate() #eigvector is a P*P matrix (P = branch count = 3 * atoms per cell)
return eigvector, Fuvk ,eigval
#df = pd.DataFrame(w_omgkpcm)
#df.to_csv('./w_omgkpcmNorm.csv')
def cleanFC2():
#XXX My way
#test = []
#u = 1
#atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
#v = 3
#for midx in range(-FC2endidx,FC2endidx+1):
# atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
# test.append(FC2[getidx(atom1,atom2)])
#print(test)
#print(w)
#XXX Sode way
#mass weighted first
FCinput = FC2.copy()
for u in range(P):
atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
for v in range(P):
for midx in range(-FC2endidx,FC2endidx+1):
atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
FCinput[getidx(atom1,atom2)] = FC2[getidx(atom1,atom2)]/(math.sqrt(massau[int(u>5)]*massau[int(v>5)]))
L,D0,k = getCoef_w_perk(0,FCinput.copy())
print(L[:,0])
print(L[:,1])
print(L[:,2])
print(L[:,3])
#I = np.eye(P)
#L1 = np.outer(L[:,0],L[:,0])
#L2 = np.outer(L[:,1],L[:,1])
#L3 = np.outer(L[:,2],L[:,2])
#L4 = np.outer(L[:,3],L[:,3])
##Pp = (I - L1@L1)@(I - L2@L2)@(I - L3@L3)@(I - L4@L4)
#Pp = (I - L4@L4)
#corrct = (Pp@D0@Pp - D0)/(15)
##print(corrct.shape)
#FC2new = np.zeros(FC2.shape,dtype = np.complex_)
#for u in range(P):
# atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
# for v in range(P):
# for midx in range(-FC2endidx,FC2endidx+1):
# atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
# FC2new[getidx(atom1,atom2)] = FCinput[getidx(atom1,atom2)] + corrct[u,v]
#FCinput = FC2new.copy()
#L,D0,k = getCoef_w_perk(1,FCinput.copy())
#return Fnew
#XXX:Works really well! Check!
#def C14harmonicFreqCheck():
# eigvaltestOriginFC3 = np.zeros((len(FC3klist),P),dtype = np.complex_)
# for kk in range(len(FC3klist)):
# Fuvk = np.zeros((P,P),dtype = np.complex_)
# #XXX: m is just -1 0 1 for decane
# #Carbon 1
# for kappa in range(P):
# atom1 = 3*(FC3cellsnumber[0][kappa//3] - 1) + kappa%3
# for gamma in range(P):
# eachterm = 0.0
# for midx in range(-FC3endidx,FC3endidx+1):
# # F u(0)v(m) :
# # Cell[m] [v//3] give us the atoms number in FC matrix XXX:which started with 1!
# # atom2 is the nth coordinates of each atoms XXX: which started with 0!
# atom2 = 3*(FC3cellsnumber[midx][gamma//3] - 1) + gamma%3
# # transfer to k space
# eachterm += FC3FC2[getidx(atom1,atom2)]* math.e ** (-1j * klistFC3[kk] * midx * math.pi)
# # mass weighted : if u and v is > 5 so it is not Carbon's coordinates
# Fuvk[kappa][gamma] = eachterm /(math.sqrt(massau[int(kappa>5)]*massau[int(gamma>5)]))
# eigval, eigvector = la.eigh(Fuvk)#hermition matrix to get real eigenvalue
# for i in range(P):
# eigvaltestOriginFC3[kk][i] = math.sqrt(abs(eigval[i]*au_cm))/(2.99792458E10 * 2 * math.pi)
# print(eigvaltestOriginFC3)
# eigvaltestFC3 = np.zeros((len(FC3klist),P),dtype = np.complex_)
# for _p in range(P):
# for kidx in range(len(FC3klist)):
# for kappa in range(P):
# atom1 = 3*(FC3cellsnumber[0][kappa//3] - 1) + kappa%3
# for gamma in range(P):
# for midx in range(-FC3endidx,FC3endidx + 1):
# atom2 = 3*(FC3cellsnumber[midx][gamma//3] - 1) + gamma%3
# eigvaltestFC3[kidx][_p] += FC3FC2[getidx(atom1,atom2)] * FC2Coef_kp[3*kidx][kappa][_p] * FC2Coef_kp[3*kidx][gamma][_p].conjugate()* math.e**(- 1j * midx * FC2klist[3* kidx] * math.pi) / (math.sqrt(massau[int(kappa>5)]*massau[int(gamma>5)]))
# eigvaltestFC3[kidx][_p] = math.sqrt(abs(eigvaltestFC3[kidx][_p] * au_cm))/(2.99792458E10 * 2 * math.pi)
# print(eigvaltestFC3)
#===============================ANHARM PART=============================#
#read in the csv file for force constant directly.
#TODO:Finish the code for polyethylene (already have FC)
#TODO:- readin FC - transfer FC to k space - diagrams - find root - last step.
"""
FC3 is stored in a csv file and needs to be read in.
"""
#===============================HELPER FUNCTION========================#
"""
#helper function to read in the fchk FC2, store it in an array, and return a copy
"""
def readFC2(filename):
for fname in os.listdir('.'):
if fname == filename + '.fchk':
with open(fname) as f:
search = f.readlines()
for fcidx in range(len(search)):
eachline = search[fcidx].split()
if eachline and eachline[0] == "Cartesian" and eachline[1] == "Force":
fcnum = int(eachline[5])
break
tempFC2 = [0]*fcnum
i = 0
plus = int(fcnum%5==0)
for itr in range(fcidx+1, fcidx+int(fcnum)//5+2- plus):
for ele in search[itr].split():
tempFC2[i] = float(ele)
i+=1
return tempFC2
"""
#get idx of FCs
"""
def getidx(*args):#XXX:started with 0!
output = list(args)
if len(args)==2:
output.sort()
return int(output[1]*(output[1]+1)/2 + output[0])
elif len(output) == 3:
output.sort()
return str(output[0]) + '_' + str(output[1]) + '_' + str(output[2])
elif len(output) == 4:
output.sort()
return str(output[0]) + '_' + str(output[1]) + '_' + str(output[2]) +'_' + str(output[3])
sys.exit("wrong input for idx()")
return 0
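# Worked example of the packed lower-triangular indexing used for FC2:
# getidx(2, 5) sorts the pair and returns 5*6//2 + 2 = 17, i.e. element
# (row 5, col 2) of the symmetric Cartesian Hessian stored in lower-triangular order.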
"""
#cells setting
#return a numpy array of the index of the cell atoms
"""
def cellsetting():
##totalnum = len(FC2atomsname)
##assert (totalnum-2)%3 == 0
###eg carbon_num is 10
FC2carbon_num = int((len(FC2atomsname)-2)/3)
###eg numcellused is 3
FC2numcellused = int((FC2carbon_num-4)/2)
##assert (carbon_num-4)%2 == 0
global FC2cellsnumber
FC2cellsnumber = np.zeros((FC2numcellused,6))#XXX:we use EVEN number of carbon here!!! and cut off the end 4 carbons
FC2cellsnumber = FC2cellsnumber.astype(int)
FC2cellsnumber[:2,:2] = np.array([[1,2],[3,5]])
FC2cellsnumber[FC2numcellused//2 + 1,:2] = np.array([FC2carbon_num - 4,FC2carbon_num - 6])
for i in range(FC2numcellused):
if i > 1 and i < FC2numcellused//2 + 1:
FC2cellsnumber[i,:2] = FC2cellsnumber[i-1,:2] + 4
elif i > FC2numcellused//2 + 1 :
FC2cellsnumber[i,:2] = FC2cellsnumber[i-1,:2] - 4
for j in range(1,3):
FC2cellsnumber[i,2*j] = 2*(FC2cellsnumber[i,j-1]-1) + FC2carbon_num +1
FC2cellsnumber[i,2*j+1] = 2*(FC2cellsnumber[i,j-1]-1) + FC2carbon_num +2
FC2cellused = len(FC2cellsnumber)#XXX should be odd
global FC2endidx
FC2endidx = FC2cellused//2# if cellused is 3 then endidx is 3//2 = 1 so the range is (-1, 2)
print("For FC2 number of cells used is", FC2numcellused,"and the endidx is", FC2endidx)
print(FC2cellsnumber)
'''
#get atoms name, charge, multi num, coordA(actually no use here)
'''
def init_para():
with open(filesname_FC2com + ".com") as f:
read = f.readlines()
for idx in range(len(read)):
eachline = read[idx].split()
if eachline and eachline[0] == "calculation":
break
idx += 3 #move from the title section to coordinates part
while read[idx]!= '\n':
eachline = read[idx].split()
FC2atomsname.append(eachline[0])
#for cdidx in range(1, len(eachline)):
#coordA.append(float(eachline[cdidx]))
idx+=1
print("The number of FC2 atoms is",len(FC2atomsname))
#readin the FC2 of the oject
global FC2
FC2 = np.array(readFC2(filesname_FC2fchk))
global K
K = 15
global K2
K2 = 15 # number of cells harmonic
K3 = 5 # number of cells FC3
K4 = 3 # number of cells FC4
N = 6
global P
P = 3*N #branch number of normal modes in first BZ
global FC2klist
FC2klist = np.linspace(0,2,K2+1)[:K2//2+1]#XXX here still use 0-1 but later should times pi/len_a when using
#FC3klist = np.linspace(0,2,K3+1)[:K3//2+1]#XXX here still use 0-1 but later should times pi/len_a when using
#global FC3FC2
#FC3FC2 = readFC2(filesname_FC3fchk)
global w_omgkp
global w_omgkpcm
w_omgkp = np.zeros((len(FC2klist),P))#We just store the half BZ plus zero's w_omg since they are symmetric
w_omgkpcm = np.zeros((len(FC2klist),P))
#===================================TEST PART ==============================#
t1 = time.time()
init_para()
cellsetting()
#cleanFC2()
L,D0,k = getCoef_w_perk(0,FC2)
#do mass-weighted back
#print(L.real)
for i in range(4):
temp = L[i,:].real.copy()
print(temp)
#for a in range(len(temp)):
# temp[a] *= (math.sqrt(massau[int(a>5)]*massau[int(i>5)]))
#print("cellsnumber is ,",FC2cellsnumber)#,FC3cellsnumber)
print(time.time()-t1)
#testpart(0)
|
[
"numpy.zeros",
"time.time",
"numpy.linalg.eigh",
"numpy.array",
"numpy.linspace",
"os.listdir",
"sys.exit"
] |
[((13279, 13290), 'time.time', 'time.time', ([], {}), '()\n', (13288, 13290), False, 'import time\n'), ((3268, 3303), 'numpy.zeros', 'np.zeros', (['(P, P)'], {'dtype': 'np.complex_'}), '((P, P), dtype=np.complex_)\n', (3276, 3303), True, 'import numpy as np\n'), ((4268, 4281), 'numpy.linalg.eigh', 'la.eigh', (['Fuvk'], {}), '(Fuvk)\n', (4275, 4281), True, 'import numpy.linalg as la\n'), ((9183, 9198), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (9193, 9198), False, 'import os\n'), ((10224, 10257), 'sys.exit', 'sys.exit', (['"""wrong input for idx()"""'], {}), "('wrong input for idx()')\n", (10232, 10257), False, 'import sys\n'), ((10672, 10701), 'numpy.zeros', 'np.zeros', (['(FC2numcellused, 6)'], {}), '((FC2numcellused, 6))\n', (10680, 10701), True, 'import numpy as np\n'), ((10849, 10875), 'numpy.array', 'np.array', (['[[1, 2], [3, 5]]'], {}), '([[1, 2], [3, 5]])\n', (10857, 10875), True, 'import numpy as np\n'), ((10920, 10968), 'numpy.array', 'np.array', (['[FC2carbon_num - 4, FC2carbon_num - 6]'], {}), '([FC2carbon_num - 4, FC2carbon_num - 6])\n', (10928, 10968), True, 'import numpy as np\n'), ((12725, 12750), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(K2 + 1)'], {}), '(0, 2, K2 + 1)\n', (12736, 12750), True, 'import numpy as np\n'), ((13629, 13640), 'time.time', 'time.time', ([], {}), '()\n', (13638, 13640), False, 'import time\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
import os
import pickle
from absl import flags
from absl import logging
import gym
import numpy as np
import tensorflow.compat.v1 as tf
from tf_agents.environments import suite_mujoco
from tf_agents.specs import array_spec
flags.DEFINE_integer('checkpoint_iterations', 50, 'Periodicity of checkpoints.')
flags.DEFINE_integer('eval_iterations', 50, 'Periodicity of evaluations.')
flags.DEFINE_integer('num_evals', 10, 'Number of evaluations.')
FLAGS = flags.FLAGS
_CHECKPOINT_FILENAME = 'model.ckpt'
def get_state_and_action_specs(gym_env, action_bounds=None):
"""Returns state and action specs for a Gym environment.
Args:
gym_env: gym.core.Env. A Gym environment.
action_bounds: list of strings. Min and max values in string for action
variables.
Returns:
(BoundedArraySpec, BoundedArraySpec). The first is a state spec and the
    second is an action spec.
"""
if isinstance(gym_env.observation_space, gym.spaces.Box):
state_spec = array_spec.BoundedArraySpec(
shape=gym_env.observation_space.shape,
dtype=gym_env.observation_space.dtype,
minimum=gym_env.observation_space.low,
maximum=gym_env.observation_space.high)
else:
raise NotImplementedError(type(gym_env.observation_space))
if action_bounds:
assert len(action_bounds) == 2
action_min = np.tile(float(action_bounds[0]), gym_env.action_space.shape)
action_max = np.tile(float(action_bounds[1]), gym_env.action_space.shape)
else:
action_min = gym_env.action_space.low
action_max = gym_env.action_space.high
if isinstance(gym_env.action_space, gym.spaces.Box):
action_spec = array_spec.BoundedArraySpec(
shape=gym_env.action_space.shape,
dtype=gym_env.action_space.dtype,
minimum=action_min,
maximum=action_max)
else:
raise NotImplementedError(type(gym_env.action_space))
return state_spec, action_spec
def create_env(env_name):
"""Creates Environment."""
if env_name == 'Pendulum':
env = gym.make('Pendulum-v0')
elif env_name == 'Hopper':
env = suite_mujoco.load('Hopper-v2')
elif env_name == 'Walker2D':
env = suite_mujoco.load('Walker2d-v2')
elif env_name == 'HalfCheetah':
env = suite_mujoco.load('HalfCheetah-v2')
elif env_name == 'Ant':
env = suite_mujoco.load('Ant-v2')
elif env_name == 'Humanoid':
env = suite_mujoco.load('Humanoid-v2')
else:
raise ValueError('Unsupported environment: %s' % env_name)
return env
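# The helpers below smooth over the API difference between plain Gym environments
# and TF-Agents environments: the latter expose time_step_spec and return
# TimeStep objects from reset()/step().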
def _env_reset(env):
if hasattr(env, 'time_step_spec'):
return env.reset().observation
else:
return env.reset()
def _env_step(env, action):
if hasattr(env, 'time_step_spec'):
ts = env.step(action)
return ts.observation, ts.reward, env.done, env.get_info()
else:
return env.step(action)
def warm_up_replay_memory(session, behavior_policy, time_out, discount_factor,
replay_memory):
  # The number of events in an episode could be less than the maximum episode
# length (i.e., time_out) when the environment has a termination state.
min_replay_memory_size = FLAGS.batch_size * FLAGS.train_steps_per_iteration
while replay_memory.size < min_replay_memory_size:
num_events = min_replay_memory_size - replay_memory.size
num_episodes = int(num_events / time_out) + 1
collect_experience_parallel(num_episodes, session, behavior_policy,
time_out, discount_factor, replay_memory)
def collect_experience_parallel(num_episodes,
session,
behavior_policy,
time_out,
discount_factor,
replay_memory,
collect_init_state_step=False):
"""Executes threads for data collection."""
old_size = replay_memory.size
if num_episodes > 1:
with futures.ThreadPoolExecutor(
max_workers=FLAGS.collect_experience_parallelism) as executor:
for _ in range(num_episodes):
executor.submit(collect_experience, session, behavior_policy, time_out,
discount_factor, replay_memory, collect_init_state_step)
else:
collect_experience(session, behavior_policy, time_out, discount_factor,
replay_memory, collect_init_state_step)
return replay_memory.size - old_size
def collect_experience(session,
behavior_policy,
time_out,
discount_factor,
replay_memory,
collect_init_state_step=False):
"""Adds experiences into replay memory.
Generates an episode, computes Q targets for state and action pairs in the
episode, and adds them into the replay memory.
"""
with session.as_default():
with session.graph.as_default():
env = create_env(FLAGS.env_name)
episode, _, _ = _collect_episode(env, time_out, discount_factor,
behavior_policy, collect_init_state_step)
replay_memory.extend(episode)
if hasattr(env, 'close'):
env.close()
def _collect_episode(env, time_out, discount_factor, behavior_policy,
collect_init_state_step=False):
"""Collects episodes of trajectories by following a behavior policy."""
episode = []
episode_lengths = []
episode_rewards = []
state = _env_reset(env)
init_state = _env_reset(env)
done = False
episode_step_count = 0
e_reward = 0
for _ in range(time_out):
# First, sample an action
action = behavior_policy.action(state, use_action_function=True)
if action is None:
break
next_state, reward, done, info = _env_step(env, action)
reward = reward if not done else 0.0
# Save the experience to our buffer
if collect_init_state_step:
episode.append([
init_state, state, action, reward, next_state, episode_step_count,
done, info
])
else:
episode.append([state, action, reward, next_state, done, info])
# update state, e_reward and step count
state = next_state
if discount_factor < 1:
e_reward += (discount_factor**episode_step_count) * reward
else:
e_reward += reward
episode_step_count += 1
if done:
break
if episode_step_count > 0:
episode_lengths.append(episode_step_count)
episode_rewards.append(e_reward)
return (episode, episode_lengths, episode_rewards)
def periodic_updates(iteration,
train_step,
replay_memories,
greedy_policy,
saver,
sess,
time_out,
use_action_function=True,
tf_summary=None):
"""Evaluates the algorithm."""
if (FLAGS.checkpoint_dir and FLAGS.checkpoint_iterations and
iteration % FLAGS.checkpoint_iterations == 0):
logging.info('Iteration: %d, writing checkpoints..', iteration)
if not tf.gfile.Exists(FLAGS.checkpoint_dir):
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
checkpoint_file = os.path.join(FLAGS.checkpoint_dir, _CHECKPOINT_FILENAME)
saver.save(
sess, checkpoint_file, global_step=train_step, write_meta_graph=False)
for replay_memory in replay_memories:
replay_memory.save(FLAGS.checkpoint_dir, delete_old=True)
logging.info('Iteration: %d, completed writing checkpoints.', iteration)
if FLAGS.eval_iterations and iteration % FLAGS.eval_iterations == 0:
logging.info('Iteration: %d, evaluating the model..', iteration)
scores = []
action_magnitudes = []
episode_lens = []
future_list = []
with futures.ThreadPoolExecutor(max_workers=FLAGS.num_evals) as executor:
for _ in range(FLAGS.num_evals):
future_list.append(
executor.submit(
_evaluate_model,
time_out,
greedy_policy,
use_action_function=use_action_function,
render=False))
for future in futures.as_completed(future_list):
score, action_magnitude, episode_len = future.result()
scores.append(score)
action_magnitudes.append(action_magnitude)
episode_lens.append(episode_len)
avg_score = np.mean(scores)
avg_action_magitude = np.mean(action_magnitudes)
avg_episode_len = np.mean(episode_lens)
logging.info(
'Iteration: %d, avg_score: %.3f, avg_episode_len: %.3f, '
'avg_action_magnitude: %.3f', iteration, avg_score, avg_episode_len,
avg_action_magitude)
if tf_summary:
tf_summary.value.extend([
tf.Summary.Value(tag='avg_score', simple_value=avg_score),
tf.Summary.Value(
tag='avg_action_magnitude', simple_value=avg_action_magitude),
tf.Summary.Value(tag='avg_episode_len', simple_value=avg_episode_len)
])
def _evaluate_model(time_out,
greedy_policy,
use_action_function=False,
render=False):
"""Evaluates the model."""
env = create_env(FLAGS.env_name)
state = _env_reset(env)
total_reward = 0.0
total_action_magnitude = 0.0
episode_len = 0
for _ in range(time_out):
if render:
env.render()
action = greedy_policy.action(
np.reshape(state, [1, -1]), use_action_function)
if action is None:
break
next_state, reward, done, _ = _env_step(env, action)
state = next_state
total_reward += reward
if greedy_policy.continuous_action:
total_action_magnitude += np.linalg.norm(action, np.inf)
episode_len += 1
if done:
break
return total_reward, total_action_magnitude / episode_len, episode_len
def save_hparam_config(dict_to_save, config_dir):
"""Saves config file of hparam."""
filename = os.path.join(config_dir, 'hparam.pickle')
print('Saving results to %s' % filename)
if not tf.gfile.Exists(config_dir):
tf.gfile.MakeDirs(config_dir)
with tf.gfile.GFile(filename, 'w') as f:
pickle.dump(dict_to_save, f, protocol=2)
def action_projection(action, action_spec, softmax=False):
"""Projects action tensor onto a bound."""
if isinstance(action, np.ndarray):
if softmax:
e_x = np.exp(action - np.max(action, axis=1))
return e_x / np.sum(e_x, axis=1)
else:
return np.minimum(action_spec.maximum,
np.maximum(action_spec.minimum, action))
else:
# TF version
if softmax:
return tf.nn.softmax(action, axis=1)
else:
return tf.minimum(action_spec.maximum,
tf.maximum(action_spec.minimum, action))
def create_placeholders_for_q_net(tf_vars):
"""Creates placeholders for feeding values to TF variables.
Args:
tf_vars: list. A list of TF variables. These are variables for a neural
network approximating a Q function.
Returns:
dict. A dictionary mapping a string to a tf.placeholder.
"""
ph_dict = {}
for var in tf_vars:
ph_dict['{}_ph'.format(var.name)] = tf.placeholder(
dtype=var.dtype, shape=var.shape)
return ph_dict
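# The placeholders created above are paired with build_dummy_q_net below, which
# reconstructs the Q network's forward pass from fed-in weight values.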
def build_dummy_q_net(state, action, ph_dict, q_net_vars):
"""Builds a dummy Q network.
This function builds a neural network where parameters are given by
placeholders.
Args:
state: TF Tensor. State tensor.
action: TF Tensor. Action tensor.
ph_dict: dict. A dictionary mapping a TF variable's name to a
tf.placeholder. There is one placeholder for each variable in
`q_net_vars`.
    q_net_vars: list. A list of TF variables. The list should have an even number
      of variables: one for weights and the other for bias for each layer of a
      neural network.
Returns:
TF Tensor. Output tensor of a Q network.
"""
assert bool(q_net_vars) and len(q_net_vars) % 2 == 0
net = tf.concat([state, action], axis=1)
# Specific for MLP
for itr, var in enumerate(q_net_vars):
if itr % 2 == 0:
# even itr, multiplicative weights
net = tf.einsum('ij,jk->ik', net, ph_dict['{}_ph'.format(var.name)])
else:
# odd itr, additive weights
net = tf.nn.bias_add(net, ph_dict['{}_ph'.format(var.name)])
# Output layer doesn't have an activation function.
if itr < len(q_net_vars) - 1:
net = tf.nn.relu(net)
return net
def make_tf_summary_histogram(values, num_bins=10):
"""Constructs a tf Summary of type histogram from a np array of values.
Args:
values: list or np.array.
num_bins: int. Number of histogram bins.
Returns:
tf.HistogramProto.
"""
values = np.reshape(values, [-1])
counts, limits = np.histogram(values, bins=num_bins)
return tf.HistogramProto(
min=np.amin(values),
max=np.amax(values),
num=values.size,
sum=np.sum(values),
sum_squares=np.sum(values**2),
bucket_limit=limits.tolist()[1:],
bucket=counts.tolist())
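# Example usage of make_tf_summary_histogram (a minimal sketch, not part of the
# original module; the tag and values are illustrative):
#   hist = make_tf_summary_histogram(np.random.randn(1000), num_bins=20)
#   summary = tf.Summary(value=[tf.Summary.Value(tag='q_values', histo=hist)])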
|
[
"tf_agents.specs.array_spec.BoundedArraySpec",
"pickle.dump",
"numpy.sum",
"numpy.amin",
"numpy.maximum",
"absl.logging.info",
"tensorflow.compat.v1.Summary.Value",
"numpy.histogram",
"numpy.mean",
"tensorflow.compat.v1.gfile.Exists",
"numpy.linalg.norm",
"os.path.join",
"tensorflow.compat.v1.placeholder",
"tf_agents.environments.suite_mujoco.load",
"numpy.max",
"absl.flags.DEFINE_integer",
"numpy.reshape",
"concurrent.futures.ThreadPoolExecutor",
"tensorflow.compat.v1.nn.relu",
"tensorflow.compat.v1.gfile.GFile",
"concurrent.futures.as_completed",
"tensorflow.compat.v1.nn.softmax",
"gym.make",
"tensorflow.compat.v1.gfile.MakeDirs",
"tensorflow.compat.v1.concat",
"numpy.amax",
"tensorflow.compat.v1.maximum"
] |
[((1009, 1094), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""checkpoint_iterations"""', '(50)', '"""Periodicity of checkpoints."""'], {}), "('checkpoint_iterations', 50, 'Periodicity of checkpoints.'\n )\n", (1029, 1094), False, 'from absl import flags\n'), ((1090, 1164), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""eval_iterations"""', '(50)', '"""Periodicity of evaluations."""'], {}), "('eval_iterations', 50, 'Periodicity of evaluations.')\n", (1110, 1164), False, 'from absl import flags\n'), ((1165, 1228), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_evals"""', '(10)', '"""Number of evaluations."""'], {}), "('num_evals', 10, 'Number of evaluations.')\n", (1185, 1228), False, 'from absl import flags\n'), ((10639, 10680), 'os.path.join', 'os.path.join', (['config_dir', '"""hparam.pickle"""'], {}), "(config_dir, 'hparam.pickle')\n", (10651, 10680), False, 'import os\n'), ((12642, 12676), 'tensorflow.compat.v1.concat', 'tf.concat', (['[state, action]'], {'axis': '(1)'}), '([state, action], axis=1)\n', (12651, 12676), True, 'import tensorflow.compat.v1 as tf\n'), ((13389, 13413), 'numpy.reshape', 'np.reshape', (['values', '[-1]'], {}), '(values, [-1])\n', (13399, 13413), True, 'import numpy as np\n'), ((13433, 13468), 'numpy.histogram', 'np.histogram', (['values'], {'bins': 'num_bins'}), '(values, bins=num_bins)\n', (13445, 13468), True, 'import numpy as np\n'), ((1757, 1950), 'tf_agents.specs.array_spec.BoundedArraySpec', 'array_spec.BoundedArraySpec', ([], {'shape': 'gym_env.observation_space.shape', 'dtype': 'gym_env.observation_space.dtype', 'minimum': 'gym_env.observation_space.low', 'maximum': 'gym_env.observation_space.high'}), '(shape=gym_env.observation_space.shape, dtype=\n gym_env.observation_space.dtype, minimum=gym_env.observation_space.low,\n maximum=gym_env.observation_space.high)\n', (1784, 1950), False, 'from tf_agents.specs import array_spec\n'), ((2425, 2565), 'tf_agents.specs.array_spec.BoundedArraySpec', 'array_spec.BoundedArraySpec', ([], {'shape': 'gym_env.action_space.shape', 'dtype': 'gym_env.action_space.dtype', 'minimum': 'action_min', 'maximum': 'action_max'}), '(shape=gym_env.action_space.shape, dtype=gym_env\n .action_space.dtype, minimum=action_min, maximum=action_max)\n', (2452, 2565), False, 'from tf_agents.specs import array_spec\n'), ((2789, 2812), 'gym.make', 'gym.make', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (2797, 2812), False, 'import gym\n'), ((7736, 7799), 'absl.logging.info', 'logging.info', (['"""Iteration: %d, writing checkpoints.."""', 'iteration'], {}), "('Iteration: %d, writing checkpoints..', iteration)\n", (7748, 7799), False, 'from absl import logging\n'), ((7918, 7974), 'os.path.join', 'os.path.join', (['FLAGS.checkpoint_dir', '_CHECKPOINT_FILENAME'], {}), '(FLAGS.checkpoint_dir, _CHECKPOINT_FILENAME)\n', (7930, 7974), False, 'import os\n'), ((8181, 8253), 'absl.logging.info', 'logging.info', (['"""Iteration: %d, completed writing checkpoints."""', 'iteration'], {}), "('Iteration: %d, completed writing checkpoints.', iteration)\n", (8193, 8253), False, 'from absl import logging\n'), ((8330, 8394), 'absl.logging.info', 'logging.info', (['"""Iteration: %d, evaluating the model.."""', 'iteration'], {}), "('Iteration: %d, evaluating the model..', iteration)\n", (8342, 8394), False, 'from absl import logging\n'), ((9090, 9105), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (9097, 9105), True, 'import numpy as np\n'), ((9132, 9158), 'numpy.mean', 'np.mean', (['action_magnitudes'], 
{}), '(action_magnitudes)\n', (9139, 9158), True, 'import numpy as np\n'), ((9181, 9202), 'numpy.mean', 'np.mean', (['episode_lens'], {}), '(episode_lens)\n', (9188, 9202), True, 'import numpy as np\n'), ((9207, 9374), 'absl.logging.info', 'logging.info', (['"""Iteration: %d, avg_score: %.3f, avg_episode_len: %.3f, avg_action_magnitude: %.3f"""', 'iteration', 'avg_score', 'avg_episode_len', 'avg_action_magitude'], {}), "(\n 'Iteration: %d, avg_score: %.3f, avg_episode_len: %.3f, avg_action_magnitude: %.3f'\n , iteration, avg_score, avg_episode_len, avg_action_magitude)\n", (9219, 9374), False, 'from absl import logging\n'), ((10733, 10760), 'tensorflow.compat.v1.gfile.Exists', 'tf.gfile.Exists', (['config_dir'], {}), '(config_dir)\n', (10748, 10760), True, 'import tensorflow.compat.v1 as tf\n'), ((10766, 10795), 'tensorflow.compat.v1.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['config_dir'], {}), '(config_dir)\n', (10783, 10795), True, 'import tensorflow.compat.v1 as tf\n'), ((10803, 10832), 'tensorflow.compat.v1.gfile.GFile', 'tf.gfile.GFile', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (10817, 10832), True, 'import tensorflow.compat.v1 as tf\n'), ((10843, 10883), 'pickle.dump', 'pickle.dump', (['dict_to_save', 'f'], {'protocol': '(2)'}), '(dict_to_save, f, protocol=2)\n', (10854, 10883), False, 'import pickle\n'), ((11849, 11897), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'var.dtype', 'shape': 'var.shape'}), '(dtype=var.dtype, shape=var.shape)\n', (11863, 11897), True, 'import tensorflow.compat.v1 as tf\n'), ((2852, 2882), 'tf_agents.environments.suite_mujoco.load', 'suite_mujoco.load', (['"""Hopper-v2"""'], {}), "('Hopper-v2')\n", (2869, 2882), False, 'from tf_agents.environments import suite_mujoco\n'), ((4690, 4766), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'FLAGS.collect_experience_parallelism'}), '(max_workers=FLAGS.collect_experience_parallelism)\n', (4716, 4766), False, 'from concurrent import futures\n'), ((7811, 7848), 'tensorflow.compat.v1.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (7826, 7848), True, 'import tensorflow.compat.v1 as tf\n'), ((7856, 7895), 'tensorflow.compat.v1.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (7873, 7895), True, 'import tensorflow.compat.v1 as tf\n'), ((8490, 8545), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'FLAGS.num_evals'}), '(max_workers=FLAGS.num_evals)\n', (8516, 8545), False, 'from concurrent import futures\n'), ((8854, 8887), 'concurrent.futures.as_completed', 'futures.as_completed', (['future_list'], {}), '(future_list)\n', (8874, 8887), False, 'from concurrent import futures\n'), ((10122, 10148), 'numpy.reshape', 'np.reshape', (['state', '[1, -1]'], {}), '(state, [1, -1])\n', (10132, 10148), True, 'import numpy as np\n'), ((10386, 10416), 'numpy.linalg.norm', 'np.linalg.norm', (['action', 'np.inf'], {}), '(action, np.inf)\n', (10400, 10416), True, 'import numpy as np\n'), ((11308, 11337), 'tensorflow.compat.v1.nn.softmax', 'tf.nn.softmax', (['action'], {'axis': '(1)'}), '(action, axis=1)\n', (11321, 11337), True, 'import tensorflow.compat.v1 as tf\n'), ((13507, 13522), 'numpy.amin', 'np.amin', (['values'], {}), '(values)\n', (13514, 13522), True, 'import numpy as np\n'), ((13534, 13549), 'numpy.amax', 'np.amax', (['values'], {}), '(values)\n', (13541, 13549), True, 'import numpy as np\n'), ((13584, 13598), 
'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (13590, 13598), True, 'import numpy as np\n'), ((13618, 13637), 'numpy.sum', 'np.sum', (['(values ** 2)'], {}), '(values ** 2)\n', (13624, 13637), True, 'import numpy as np\n'), ((2924, 2956), 'tf_agents.environments.suite_mujoco.load', 'suite_mujoco.load', (['"""Walker2d-v2"""'], {}), "('Walker2d-v2')\n", (2941, 2956), False, 'from tf_agents.environments import suite_mujoco\n'), ((11114, 11133), 'numpy.sum', 'np.sum', (['e_x'], {'axis': '(1)'}), '(e_x, axis=1)\n', (11120, 11133), True, 'import numpy as np\n'), ((11213, 11252), 'numpy.maximum', 'np.maximum', (['action_spec.minimum', 'action'], {}), '(action_spec.minimum, action)\n', (11223, 11252), True, 'import numpy as np\n'), ((11417, 11456), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['action_spec.minimum', 'action'], {}), '(action_spec.minimum, action)\n', (11427, 11456), True, 'import tensorflow.compat.v1 as tf\n'), ((13096, 13111), 'tensorflow.compat.v1.nn.relu', 'tf.nn.relu', (['net'], {}), '(net)\n', (13106, 13111), True, 'import tensorflow.compat.v1 as tf\n'), ((3001, 3036), 'tf_agents.environments.suite_mujoco.load', 'suite_mujoco.load', (['"""HalfCheetah-v2"""'], {}), "('HalfCheetah-v2')\n", (3018, 3036), False, 'from tf_agents.environments import suite_mujoco\n'), ((9455, 9512), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""avg_score"""', 'simple_value': 'avg_score'}), "(tag='avg_score', simple_value=avg_score)\n", (9471, 9512), True, 'import tensorflow.compat.v1 as tf\n'), ((9524, 9602), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""avg_action_magnitude"""', 'simple_value': 'avg_action_magitude'}), "(tag='avg_action_magnitude', simple_value=avg_action_magitude)\n", (9540, 9602), True, 'import tensorflow.compat.v1 as tf\n'), ((9629, 9698), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""avg_episode_len"""', 'simple_value': 'avg_episode_len'}), "(tag='avg_episode_len', simple_value=avg_episode_len)\n", (9645, 9698), True, 'import tensorflow.compat.v1 as tf\n'), ((11071, 11093), 'numpy.max', 'np.max', (['action'], {'axis': '(1)'}), '(action, axis=1)\n', (11077, 11093), True, 'import numpy as np\n'), ((3073, 3100), 'tf_agents.environments.suite_mujoco.load', 'suite_mujoco.load', (['"""Ant-v2"""'], {}), "('Ant-v2')\n", (3090, 3100), False, 'from tf_agents.environments import suite_mujoco\n'), ((3142, 3174), 'tf_agents.environments.suite_mujoco.load', 'suite_mujoco.load', (['"""Humanoid-v2"""'], {}), "('Humanoid-v2')\n", (3159, 3174), False, 'from tf_agents.environments import suite_mujoco\n')]
|
import os
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, CSVLogger, Callback
START_CHAR = '\b'
END_CHAR = '\t'
PADDING_CHAR = '\a'
chars = set([START_CHAR, '\n', END_CHAR])
input_frame = 'shakespeare_short.txt'
model_fname = 'model_keras'
output_fname = 'output.txt'
batchout_fname = 'batch_out.txt'
USE_SIMPLE_MODEL = False
with open(input_frame) as f:
for line in f:
chars.update(list(line.strip().lower()))
char_indicies = {c: i for i, c in enumerate(sorted(list(chars)))}
char_indicies[PADDING_CHAR] = 0
indicies_to_chars = {i: c for c, i in char_indicies.items()}
num_chars = len(chars)
print(num_chars)
def get_one(i, sz):
res = np.zeros(sz)
res[i] = 1
return res
char_vectors = {
c: (np.zeros(num_chars) if c == PADDING_CHAR else get_one(v, num_chars)) for c, v in char_indicies.items()
}
sentence_end_markers = set('.!?')
sentences = []
current_sentence = ''
with open(input_frame, 'r') as f:
for line in f:
s = line.strip().lower()
if len(s) > 0:
current_sentence += s + '\n'
if len(s) == 0 or s[-1] in sentence_end_markers:
current_sentence = current_sentence.strip()
if len(current_sentence) > 10:
sentences.append(current_sentence)
current_sentence = ''
def get_matrices(sentences, max_sentence_len):
X = np.zeros((len(sentences), max_sentence_len, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), max_sentence_len, len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
char_seq = (START_CHAR + sentence + END_CHAR).ljust(max_sentence_len + 1, PADDING_CHAR)
for t in range(max_sentence_len):
X[i, t, :] = char_vectors[char_seq[t]]
y[i, t, :] = char_vectors[char_seq[t + 1]]
return X, y
test_indicies = np.random.choice(range(len(sentences)), int(len(sentences) * 0.05))
sentences_train = [sentences[x] for x in set(range(len(sentences))) - set(test_indicies)]
sentences_test = [sentences[x] for x in test_indicies]
max_sentence_len = np.max([len(x) for x in sentences])
sentences_train = sorted(sentences_train, key=lambda x: len(x))
X_test, y_test = get_matrices(sentences_test, max_sentence_len)
batch_size = 16
print(sentences_train[1])
print(sentences_test[1])
print(X_test.shape)
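# Generator that endlessly yields (X, y) batches built from the length-sorted
# training sentences; consumed by fit_generator at the bottom of the script.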
def generate_batch():
while True:
for i in range(int(len(sentences_train) / batch_size)):
sentences_batch = sentences_train[i * batch_size:(i + 1) * batch_size]
yield get_matrices(sentences_batch, max_sentence_len)
class CharSampler(Callback):
def __init__(self, char_vectors, model):
self.char_vectors = char_vectors
self.model = model
def on_train_begin(self, logs={}):
self.epoch = 0
if os.path.isfile(output_fname):
os.remove(output_fname)
def sample(self, preds, temperature=1.0):
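        # Temperature sampling: rescale the log-probabilities by 1/temperature,
        # re-normalise, and draw a single character index from the result.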
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
def sample_one(self, T):
result = START_CHAR
while len(result) < 500:
Xsampled = np.zeros((1, len(result), num_chars)) # max_sentence_len
for t, c in enumerate(list(result)):
Xsampled[0, t, :] = self.char_vectors[c]
ysampled = self.model.predict(Xsampled, batch_size=1)[0, :]
yv = ysampled[len(result) - 1, :]
selected_char = indicies_to_chars[self.sample(yv, T)]
if selected_char == END_CHAR:
break
result = result + selected_char
return result
def on_epoch_end(self, epoch, logs=None):
self.epoch = self.epoch + 1
if self.epoch % 1 == 0:
print('\nEpoch: %d text sampling:' % self.epoch)
with open(output_fname, 'a') as outf:
outf.write('\n========= Epoch %d =========' % self.epoch)
for T in [.3, .5, .7, .9, 1.1]:
print('\tsampling, T= %.1f...' % T)
for _ in range(5):
self.model.reset_states()
res = self.sample_one(T)
outf.write('\nT=%.1f \n%s \n' % (T, res[1:]))
def on_batch_end(self, batch, logs={}):
if (batch + 1) % 10 == 0:
print('\nBatch %d text sampling: ' % batch)
with open(output_fname, 'a') as outf:
outf.write('\n========= Batch %d =========' % batch)
for T in [.3, .5, .7, .9, 1.1]:
print('\tsampling, T= %.1f...' % T)
for _ in range(5):
self.model.reset_states()
res = self.sample_one(T)
outf.write(res + '\n')
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.loss = []
self.acc = []
def on_batch_end(self, batch, logs={}):
self.loss.append(logs.get('loss'))
self.acc.append(logs.get('acc'))
if (batch + 1) % 100 == 0:
with open(batchout_fname, 'a') as outf:
for i in range(100):
outf.write('%d\t%.6f\t%.6f\n' %
(batch + i - 99, self.loss[i - 100], self.acc[i - 100]))
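# Build either a single-LSTM baseline or a deeper three-LSTM variant in which the
# input one-hot vectors are concatenated back in before the second and third layers.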
if USE_SIMPLE_MODEL:
# simple model
vec = Input(shape=(None, num_chars))
l1 = LSTM(128, activation='tanh', return_sequences=True)(vec)
l1_d = Dropout(0.2)(l1)
dense = TimeDistributed(Dense(num_chars))(l1_d)
output_res = Activation('softmax')(dense)
model = Model(input=vec, outputs=output_res)
else:
# deep model
vec = Input(shape=(None, num_chars))
l1 = LSTM(128, activation='tanh', return_sequences=True)(vec)
l1_d = Dropout(0.2)(l1)
input2 = concatenate([vec, l1_d])
l2 = LSTM(128, activation='tanh', return_sequences=True)(input2)
l2_d = Dropout(0.2)(l2)
input3 = concatenate([vec, l2_d])
l3 = LSTM(128, activation='tanh', return_sequences=True)(input3)
    l3_d = Dropout(0.2)(l3)
input_d = concatenate([l1_d, l2_d, l3_d])
dense3 = TimeDistributed(Dense(num_chars))(input_d)
output_res = Activation('softmax')(dense3)
model = Model(input=vec, outputs=output_res)
model.compile(loss='categorical_crossentropy', optimizer=Adam(clipnorm=1.), metrics=['accuracy'])
cb_sampler = CharSampler(char_vectors, model)
cb_logger = CSVLogger(model_fname + '.log')
cb_checkpoint = ModelCheckpoint("model.hdf5", monitor='val_acc', save_best_only=True, save_weights_only=False)
model.fit_generator(generate_batch(),
int(len(sentences_train) / batch_size) * batch_size,
epochs=10,
verbose=True,
validation_data=(X_test, y_test),
callbacks=[cb_logger, cb_sampler, cb_checkpoint])
|
[
"os.remove",
"numpy.sum",
"numpy.argmax",
"numpy.random.multinomial",
"keras.models.Model",
"os.path.isfile",
"numpy.exp",
"keras.layers.Input",
"keras.layers.concatenate",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Dropout",
"numpy.asarray",
"keras.optimizers.Adam",
"numpy.log",
"keras.layers.Activation",
"keras.layers.LSTM",
"numpy.zeros",
"keras.layers.Dense",
"keras.callbacks.CSVLogger"
] |
[((6724, 6755), 'keras.callbacks.CSVLogger', 'CSVLogger', (["(model_fname + '.log')"], {}), "(model_fname + '.log')\n", (6733, 6755), False, 'from keras.callbacks import ModelCheckpoint, CSVLogger, Callback\n'), ((6772, 6870), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model.hdf5"""'], {'monitor': '"""val_acc"""', 'save_best_only': '(True)', 'save_weights_only': '(False)'}), "('model.hdf5', monitor='val_acc', save_best_only=True,\n save_weights_only=False)\n", (6787, 6870), False, 'from keras.callbacks import ModelCheckpoint, CSVLogger, Callback\n'), ((857, 869), 'numpy.zeros', 'np.zeros', (['sz'], {}), '(sz)\n', (865, 869), True, 'import numpy as np\n'), ((5665, 5695), 'keras.layers.Input', 'Input', ([], {'shape': '(None, num_chars)'}), '(shape=(None, num_chars))\n', (5670, 5695), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((5900, 5936), 'keras.models.Model', 'Model', ([], {'input': 'vec', 'outputs': 'output_res'}), '(input=vec, outputs=output_res)\n', (5905, 5936), False, 'from keras.models import Sequential, Model\n'), ((5970, 6000), 'keras.layers.Input', 'Input', ([], {'shape': '(None, num_chars)'}), '(shape=(None, num_chars))\n', (5975, 6000), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6109, 6133), 'keras.layers.concatenate', 'concatenate', (['[vec, l1_d]'], {}), '([vec, l1_d])\n', (6120, 6133), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6245, 6269), 'keras.layers.concatenate', 'concatenate', (['[vec, l2_d]'], {}), '([vec, l2_d])\n', (6256, 6269), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6382, 6413), 'keras.layers.concatenate', 'concatenate', (['[l1_d, l2_d, l3_d]'], {}), '([l1_d, l2_d, l3_d])\n', (6393, 6413), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6529, 6565), 'keras.models.Model', 'Model', ([], {'input': 'vec', 'outputs': 'output_res'}), '(input=vec, outputs=output_res)\n', (6534, 6565), False, 'from keras.models import Sequential, Model\n'), ((927, 946), 'numpy.zeros', 'np.zeros', (['num_chars'], {}), '(num_chars)\n', (935, 946), True, 'import numpy as np\n'), ((2984, 3012), 'os.path.isfile', 'os.path.isfile', (['output_fname'], {}), '(output_fname)\n', (2998, 3012), False, 'import os\n'), ((3213, 3226), 'numpy.exp', 'np.exp', (['preds'], {}), '(preds)\n', (3219, 3226), True, 'import numpy as np\n'), ((3290, 3324), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'preds', '(1)'], {}), '(1, preds, 1)\n', (3311, 3324), True, 'import numpy as np\n'), ((3340, 3357), 'numpy.argmax', 'np.argmax', (['probas'], {}), '(probas)\n', (3349, 3357), True, 'import numpy as np\n'), ((5705, 5756), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'activation': '"""tanh"""', 'return_sequences': '(True)'}), "(128, activation='tanh', return_sequences=True)\n", (5709, 5756), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((5773, 5785), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (5780, 5785), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((5859, 5880), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (5869, 
5880), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6010, 6061), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'activation': '"""tanh"""', 'return_sequences': '(True)'}), "(128, activation='tanh', return_sequences=True)\n", (6014, 6061), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6078, 6090), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (6085, 6090), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6143, 6194), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'activation': '"""tanh"""', 'return_sequences': '(True)'}), "(128, activation='tanh', return_sequences=True)\n", (6147, 6194), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6214, 6226), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (6221, 6226), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6279, 6330), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'activation': '"""tanh"""', 'return_sequences': '(True)'}), "(128, activation='tanh', return_sequences=True)\n", (6283, 6330), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6350, 6362), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (6357, 6362), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6487, 6508), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (6497, 6508), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6624, 6642), 'keras.optimizers.Adam', 'Adam', ([], {'clipnorm': '(1.0)'}), '(clipnorm=1.0)\n', (6628, 6642), False, 'from keras.optimizers import Adam\n'), ((3026, 3049), 'os.remove', 'os.remove', (['output_fname'], {}), '(output_fname)\n', (3035, 3049), False, 'import os\n'), ((3165, 3178), 'numpy.log', 'np.log', (['preds'], {}), '(preds)\n', (3171, 3178), True, 'import numpy as np\n'), ((3255, 3272), 'numpy.sum', 'np.sum', (['exp_preds'], {}), '(exp_preds)\n', (3261, 3272), True, 'import numpy as np\n'), ((5818, 5834), 'keras.layers.Dense', 'Dense', (['num_chars'], {}), '(num_chars)\n', (5823, 5834), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((6443, 6459), 'keras.layers.Dense', 'Dense', (['num_chars'], {}), '(num_chars)\n', (6448, 6459), False, 'from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input\n'), ((3113, 3130), 'numpy.asarray', 'np.asarray', (['preds'], {}), '(preds)\n', (3123, 3130), True, 'import numpy as np\n')]
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
import itertools
import numpy as np
import pandas as pd
import xgboost as xgb
import matplotlib.pyplot as plt
import sklearn.metrics as metrics
from sklearn.model_selection import GridSearchCV, train_test_split
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
def eval_models(models, data):
"""Calculates the root mean squared error (RMSE) and the coefficient of
determination (R^2) for each of the models.
:param models: Dictionary of the error model for each state vector
component
:type models: {str: xgboost.XGBRegressor}
:param data: Dictionary containing the training and test datasets
:type data: {str: numpy.array}
:return: Returns a DataFrame containing the evaluation metric results
:rtype: pandas.DataFrame
"""
evals = []
for target_col, reg in models.items():
y_hat = reg.predict(data['X_test'])
y = data['y_test'][target_col]
rmse = metrics.mean_squared_error(y, y_hat, squared=False)
r2 = metrics.r2_score(y, y_hat)
eval_dict = {'Error': target_col, 'RMSE': rmse, 'R^2': r2}
evals.append(eval_dict)
return pd.DataFrame(evals)
def plot_feat_impts(models, data):
"""Plots the feature importances for each of the error models.
For use in an interactive jupyter session.
:param models: Dictionary of the error model for each state vector
component
:type models: {str: xgboost.XGBRegressor}
:param data: Dictionary containing the training and test datasets
:type data: {str: numpy.array}
"""
feat_names = data['X_train'].columns
fig, axs = plt.subplots(2, 3, figsize=(10, 10))
for (target_col, model), ax in zip(models.items(), axs.flat):
feat_imp = pd.Series(model.feature_importances_, index=feat_names)
feat_imp.sort_values(ascending=False, inplace=True)
feat_imp.plot(kind='bar', ax=ax, title=target_col)
plt.ylabel('Feature Importance Score')
plt.tight_layout()
def get_state_vect_cols(prefix):
"""Get the column names of the state vector components with the
provided `prefix`.
:param prefix: The prefix that is used in front of the state vector
components in the column names, examples are `physics_pred` and
`physics_err`
:type prefix: str
:return: A list of the 6 names of the prefixed state vector components
:rtype: [str]
"""
vectors = ['r', 'v']
components = ['x', 'y', 'z']
col_names = [f'{prefix}_{v}_{c}'
for v, c
in itertools.product(vectors, components)]
return col_names
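# For example, get_state_vect_cols('physics_err') returns
# ['physics_err_r_x', 'physics_err_r_y', 'physics_err_r_z',
#  'physics_err_v_x', 'physics_err_v_y', 'physics_err_v_z'].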
def load_models(models_dir):
"""Loads previously trained XGBoost models from the `models_dir`
:param models_dir: The path to where the serialized XGBoost JSON files are
:type models_dir: str
:return: A list of the loaded XGBoost models
:rtype: [xgboost.XGBRegressor]
"""
ml_models = []
model_names = get_state_vect_cols('physics_err')
for mn in model_names:
model = xgb.XGBRegressor()
model_path = os.path.join(models_dir, f'{mn}.json')
model.load_model(model_path)
ml_models.append(model)
return ml_models
def save_models(models, models_dir):
"""Saves the error estimations models as JSON representations.
:param models: Dictionary of the error model for each state vector
component
:type models: {str: xgboost.XGBRegressor}
:param models_dir: The path to save the serialized XGBoost JSON files to
:type models_dir: str
"""
for model_name, err_model in models.items():
file_name = f'{model_name}.json'
file_path = os.path.join(models_dir, file_name)
err_model.save_model(file_path)
def predict_err(models, physics_preds):
"""Uses the provide ML models to predict the error in the physics
model orbit prediction.
    :param models: The ML models to use to estimate the error in each
        of the predicted state vector components.
    :type models: [xgboost.XGBRegressor]
    :param physics_preds: The elapsed time in seconds and the predicted
        state vectors to estimate the errors for
    :type physics_preds: numpy.array
:return: The estimated errors
:rtype: numpy.array
"""
# Each model predicts the error for its respective state vector component
err_preds = [m.predict(physics_preds) for m in models]
# Orient the error estimates as column vectors
err_preds = np.stack(err_preds, axis=1)
return err_preds
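# Example usage (a minimal sketch, not part of the original module; the directory
# and DataFrame names are illustrative):
#   ml_models = load_models('trained_models')
#   feature_cols = ['elapsed_seconds'] + get_state_vect_cols('physics_pred')
#   err_estimates = predict_err(ml_models, physics_pred_df[feature_cols])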
def build_train_test_sets(df, test_size=0.2):
"""Builds training and testing sets from the provided DataFrame.
:param df: The DataFrame to use to build training and test sets from
:type df: pandas.DataFrame
:param test_size: The percentage size of the DataFrame that should be used
to build the test set
:type test_size: float
:return: A dictionary containing the feature and target training/test sets
:rtype: dict[str, pandas.DataFrame]
"""
# Features are the physics predicted state vectors and the amount of
# time in seconds into the future the prediction was made
feature_cols = ['elapsed_seconds'] + get_state_vect_cols('physics_pred')
# The target values are the errors between the physical model predictions
# and the ground truth observations
target_cols = get_state_vect_cols('physics_err')
# Create feature and target matrices
X = df[feature_cols]
y = df[target_cols]
# Split feature and target data into training and test sets
data_keys = ['X_train', 'X_test', 'y_train', 'y_test']
data_vals = train_test_split(X, y, test_size=test_size)
train_test_data = dict(zip(data_keys, data_vals))
return train_test_data
def train_models(data, params={}, eval_metric='rmse'):
"""Trains gradient boosted regression tree models to estimate the error in
each of the six state vector components in the physical model prediction
:param data: Dictionary containing the training and test datasets
:type data: {str: numpy.array}
:param params: A dictionary of parameters to pass to the XGBRegressor
constructor
:type params: dict
:param eval_metric: The loss function to use in model training
:type eval_metric: str
:return: Dictionary containing the trained models for each state vector
component
:rtype: {str: xgboost.XGBRegressor}
"""
default_params = {
'booster': 'gbtree',
'tree_method': 'gpu_hist',
'gpu_id': 0
}
default_params.update(params)
X, ys = data['X_train'], data['y_train']
models = {}
for target_col in ys.columns:
y = ys[target_col]
reg = xgb.XGBRegressor(**default_params)
reg.fit(X, y, eval_metric=eval_metric)
models[target_col] = reg
return models
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=('Train baseline XGBoost models to estimate physical '
'prediction error'),
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--input_path',
help=('The path to the parquet file containing the physical model '
'prediction training data'),
type=str,
required=True
)
parser.add_argument(
'--use_gpu',
help='Use a GPU in model training',
action='store_true'
)
parser.add_argument(
'--out_dir',
help=('The directory to serialize the models to'),
type=str,
required=True
)
args = parser.parse_args()
logger.info('Loading physical model orbit prediction training data...')
physics_pred_df = pd.read_parquet(args.input_path)
logger.info('Building training and test sets...')
train_test_data = build_train_test_sets(physics_pred_df)
if args.use_gpu:
params = {}
else:
params = {'tree_method': 'hist'}
logger.info('Training Error Models...')
err_models = train_models(train_test_data, params=params)
logger.info(eval_models(err_models, train_test_data))
logger.info('Serializing Error Models...')
save_models(err_models, args.out_dir)
|
[
"pandas.DataFrame",
"matplotlib.pyplot.tight_layout",
"numpy.stack",
"argparse.ArgumentParser",
"os.path.join",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.r2_score",
"os.environ.get",
"pandas.read_parquet",
"pandas.Series",
"xgboost.XGBRegressor",
"matplotlib.pyplot.ylabel",
"itertools.product",
"matplotlib.pyplot.subplots",
"sklearn.metrics.mean_squared_error",
"logging.getLogger"
] |
[((901, 928), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (918, 928), False, 'import logging\n'), ((1798, 1817), 'pandas.DataFrame', 'pd.DataFrame', (['evals'], {}), '(evals)\n', (1810, 1817), True, 'import pandas as pd\n'), ((2275, 2311), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(10, 10)'}), '(2, 3, figsize=(10, 10))\n', (2287, 2311), True, 'import matplotlib.pyplot as plt\n'), ((2576, 2614), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Feature Importance Score"""'], {}), "('Feature Importance Score')\n", (2586, 2614), True, 'import matplotlib.pyplot as plt\n'), ((2619, 2637), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2635, 2637), True, 'import matplotlib.pyplot as plt\n'), ((5118, 5145), 'numpy.stack', 'np.stack', (['err_preds'], {'axis': '(1)'}), '(err_preds, axis=1)\n', (5126, 5145), True, 'import numpy as np\n'), ((6266, 6309), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size'}), '(X, y, test_size=test_size)\n', (6282, 6309), False, 'from sklearn.model_selection import GridSearchCV, train_test_split\n'), ((7524, 7695), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train baseline XGBoost models to estimate physical prediction error"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Train baseline XGBoost models to estimate physical prediction error',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (7547, 7695), False, 'import argparse\n'), ((8354, 8386), 'pandas.read_parquet', 'pd.read_parquet', (['args.input_path'], {}), '(args.input_path)\n', (8369, 8386), True, 'import pandas as pd\n'), ((856, 890), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""INFO"""'], {}), "('LOGLEVEL', 'INFO')\n", (870, 890), False, 'import os\n'), ((1596, 1647), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y', 'y_hat'], {'squared': '(False)'}), '(y, y_hat, squared=False)\n', (1622, 1647), True, 'import sklearn.metrics as metrics\n'), ((1661, 1687), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y', 'y_hat'], {}), '(y, y_hat)\n', (1677, 1687), True, 'import sklearn.metrics as metrics\n'), ((2397, 2452), 'pandas.Series', 'pd.Series', (['model.feature_importances_'], {'index': 'feat_names'}), '(model.feature_importances_, index=feat_names)\n', (2406, 2452), True, 'import pandas as pd\n'), ((3671, 3689), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '()\n', (3687, 3689), True, 'import xgboost as xgb\n'), ((3711, 3749), 'os.path.join', 'os.path.join', (['models_dir', 'f"""{mn}.json"""'], {}), "(models_dir, f'{mn}.json')\n", (3723, 3749), False, 'import os\n'), ((4304, 4339), 'os.path.join', 'os.path.join', (['models_dir', 'file_name'], {}), '(models_dir, file_name)\n', (4316, 4339), False, 'import os\n'), ((7349, 7383), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '(**default_params)\n', (7365, 7383), True, 'import xgboost as xgb\n'), ((3196, 3234), 'itertools.product', 'itertools.product', (['vectors', 'components'], {}), '(vectors, components)\n', (3213, 3234), False, 'import itertools\n')]
|
from brewgorithm import beer2vec, beer_emb, word_weighter
import numpy as np
import unittest
from sklearn.metrics.pairwise import cosine_similarity
class TestBeer2vec(unittest.TestCase):
def test_most_similar_test(self):
beers = beer2vec.get_beer2vec()
embeddings = beer_emb.embed_doc("apricot peach fruity", word_weighter.is_beer_related)
emb = np.average(embeddings, axis=0)
sims = cosine_similarity([emb], [beer['vector'] for beer in beers]).reshape(-1)
candidates = []
for i, sim in enumerate(sims):
candidates.append((sim, i))
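    # rank the candidate beers by cosine similarity and keep the second-closest match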
    result = sorted(candidates, key=lambda c: c[0], reverse=True)[:2][1]
self.assertEqual(beers[result[1]]['BeerNamePlain'].strip(), "delirium tremens")
self.assertEqual(float(beers[result[1]]['Alcohol']), 8.5)
self.assertEqual(int(beers[result[1]]['OverallPctl']), 93)
desc = [a[0] for a in beer_emb.most_similar(positive=[beers[result[1]]['vector']], negative=[])]
self.assertIn("fruity", desc)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.average",
"brewgorithm.beer_emb.embed_doc",
"brewgorithm.beer2vec.get_beer2vec",
"brewgorithm.beer_emb.most_similar"
] |
[((1029, 1044), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1042, 1044), False, 'import unittest\n'), ((237, 260), 'brewgorithm.beer2vec.get_beer2vec', 'beer2vec.get_beer2vec', ([], {}), '()\n', (258, 260), False, 'from brewgorithm import beer2vec, beer_emb, word_weighter\n'), ((279, 352), 'brewgorithm.beer_emb.embed_doc', 'beer_emb.embed_doc', (['"""apricot peach fruity"""', 'word_weighter.is_beer_related'], {}), "('apricot peach fruity', word_weighter.is_beer_related)\n", (297, 352), False, 'from brewgorithm import beer2vec, beer_emb, word_weighter\n'), ((363, 393), 'numpy.average', 'np.average', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (373, 393), True, 'import numpy as np\n'), ((405, 465), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['[emb]', "[beer['vector'] for beer in beers]"], {}), "([emb], [beer['vector'] for beer in beers])\n", (422, 465), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((889, 962), 'brewgorithm.beer_emb.most_similar', 'beer_emb.most_similar', ([], {'positive': "[beers[result[1]]['vector']]", 'negative': '[]'}), "(positive=[beers[result[1]]['vector']], negative=[])\n", (910, 962), False, 'from brewgorithm import beer2vec, beer_emb, word_weighter\n')]
|
import numpy as np
import pandas as pd
import statsmodels.api as sm
from sklearn.preprocessing import OneHotEncoder
import statistics
import math
import sys
import itertools
import time
np.seterr(over='raise', under="ignore")
def batch_pp(df, covariates, batch_column, ignore):
"""This function takes in a df, the name of the covariate columns, and the batch column
and it outputs a feature count matrix, feature zero inflation matrix,
batch dummy matrix (one hot vectors as rows), covariate matrix (concatenated one hot vectors )
(covariates coefficient matrix [X_ij], batch dummy matrix [X_batch],
the zero inflation matrix [I_ijk], and count matrix [Y])
NOTE: this df can be a combination of datasets, or an individual dataset"""
# df: [dataframe] input with rows as samples and columns as feature counts.
# should only have OTU names ,covariates, and batch_column in keyspace
# covariates: [List] of the covariates to retain and estimate betas for
# batch_column: [string] column that defines the batches in this dataframe
# ignore: [List] of column names to ignore
################################### Check proper input ###################################
if (batch_column not in df.keys()):
raise ValueError("Column name " + str(batch_column) + " not found")
if (not set(covariates) <= set(df.keys())):
raise ValueError("Covariate columns not found in dataframe")
################################### Turn batch column to one hot vector ###################################
# note: for all features, batch matrix and covariate matrix will be the same.
X_batch = pd.get_dummies(df[batch_column], drop_first=False)
################################### Turn covariate columns covariate matrix ###################################
# number of columns is the number of betas to estimate
X_cov = pd.get_dummies(df[covariates], drop_first=True)
intercept = [1 for _ in range(X_cov.shape[0])]
# adding intercept term
X_cov.insert(0, "intercept", intercept)
################################### Build the feature zero inflation matrix ###################################
# turn numbers to 1 and keep zeroes the way they are
otu_keys = df.keys().drop(ignore)
I = df[otu_keys].replace('0.0', False).astype(bool).replace(False, 0).replace(True, 1)
df_dict = {"X_cov": X_cov,
"X_batch": X_batch,
"I": I,
"Y": df[otu_keys],
"ignore": df[ignore]}
return df_dict
def reduce_batch_effects(Y, I, X_cov, X_batch, verbose=False):
"""This function takes in the output of batch_pp and does the feature-wise batch reduction"""
# INPUT:
    # Y: matrix of feature counts with features as columns and sample counts as rows
# I: matrix of feature zero inflation (1s where values are >=1, 0s o.w.)
# X_cov: covariance matrix (this will give us the betas we need to estimate)
# X_batch: dummy matrix of batch values
# OUTPUT:
# corrected matrix
# merge the dummy variables for the covariates and also for the batch to get the whole design matrix
X_mat = pd.concat([X_cov, X_batch], axis=1).astype(float)
# type conversions and index storing
Y = Y.astype(float)
num_beta_cov = X_cov.shape[1]
num_beta_batch = X_batch.shape[1]
num_features = len(Y.keys())
num_samples = Y.shape[0]
Z = pd.DataFrame(index=Y.index, columns=Y.columns)
# for each of the features, we will calculate the batch reduction coefficients, then reduce the batch effects
count = 0
otu_names = list(Y.keys())
otu_names = [x for x in otu_names if Y[x][Y[x] > 0].count() > 2]
sigma_p_store = {}
beta_params_store = pd.DataFrame(columns=Y.columns, index=X_mat.columns)
beta_cov_store = pd.DataFrame(columns=Y.columns, index=X_cov.columns)
beta_batch_store = {}
start = time.time()
for p in otu_names:
# select only the feature as a row
y_ijp = Y[p]
y_store = Y[p] # storing the original column(unchanged)
I_ijp = I[p].astype(float)
if (count % 100 == 0 and verbose):
print("Estimating β_cov, β_batch, and σ_p for feature {}".format(count))
# --------- Estimate beta_p and beta_batch through OLS regression --------------
# ignore the keys with zero counts and only fit with non zero samples
fit_index = list(y_ijp.to_numpy().astype(float).nonzero()[0])
zero_index = list(set(range(num_samples)) - set(fit_index))
zero_keys = y_store.keys()[zero_index]
# use only non zero counts for index to fit our OLS
y_ijp = y_ijp.iloc[fit_index]
# y_ijp = y_ijp[fit_index] # PREVIOUS VERSION
X_design_mat = X_mat.iloc[fit_index, :]
X_cov_mat = X_cov.iloc[fit_index, :]
X_batch_mat = X_batch.iloc[fit_index, :]
# fit ols
model = sm.OLS(y_ijp, X_design_mat)
res = model.fit()
############# Calculate sigma_p using the standard deviation of previous regression ###########
residuals = y_ijp - X_cov_mat.dot(res.params[:num_beta_cov])
sigma_hat_p = statistics.stdev(residuals)
# store in feature keyed dictionary of standard deviations
sigma_p_store[p] = sigma_hat_p
# separate the beta cov from the beta batch
beta_params = res.params
beta_cov = res.params[:num_beta_cov]
beta_batch = res.params[num_beta_cov:]
# store list of beta parameters indexed by feature
beta_params_store[p] = beta_params
beta_cov_store[p] = beta_cov
beta_batch_store[p] = beta_batch
####################################### Calculate Z_ijp #######################################
z_ijp = (y_ijp - X_cov_mat.dot(res.params[:num_beta_cov])) / sigma_hat_p
Z[p] = z_ijp
count += 1
if count % 25 == 0:
end = time.time()
print('{}/{} completed in: {}s'.format(count, len(otu_names), round(end - start, 2)))
# ------------ LOOP END -----------------------------------------------------------------
end = time.time()
print('Total OLS time: {}s'.format(round(end - start, 2)))
Z = Z.fillna(0)
beta_params_store = beta_params_store.astype(float)
# return X_mat.dot(beta_params_store)
estimates = eb_estimator(X_batch, Z, sigma_p=sigma_p_store, X_add=X_cov.dot(beta_cov_store), verbose=verbose)
return estimates
def eb_estimator(X_batch, Z, sigma_p, X_add, max_itt=6000, verbose=False):
"""This function returns the empirical bayes estimates for gamma_star_p and delta_star_p
    as well as the standardized OTU counts"""
# X_batch: Batch effects dummy matrix (n x alpha) matrix
    # Z: Matrix of standardized data (n x p) matrix
# sigma_p: Vec of OTU variances
# X_add: matrix to add back after parameter estimation
# max_itt: Maximum number of iterations until convergence
# Standardized matrix init
Z_out = pd.DataFrame(index=Z.index, columns=Z.columns)
# number of genes/otus
G = Z.shape[1]
# number of samples in each batch
N = X_batch.sum(axis=0)
    # sample mean for each OTU in each batch (p X alpha) matrix
gamma_hat = Z.T.dot(X_batch) / N
# parameter estimates for batch effect location - gamma
gamma_bar = gamma_hat.mean(axis=0).astype(float)
tau_bar = ((gamma_hat.sub(gamma_bar) ** 2).sum(axis=0)) / (G - 1)
# parameter estimates for batch effect scale - delta (p X alpha) matrix
delta_hat = (((Z - X_batch.dot(gamma_hat.T)) ** 2).T.dot(X_batch)) / (N - 1)
v_bar = delta_hat.sum(axis=0) / G
s_bar = ((delta_hat.sub(v_bar) ** 2).sum(axis=0)) / (G - 1)
lambda_bar = (v_bar + (2 * s_bar)) / (s_bar)
theta_bar = (v_bar ** 3 + v_bar * s_bar) / (s_bar)
# iteratively solve for gamma_star_ip and delta_star_ip
# initialize the keyed matrices
gamma_star_mat = pd.DataFrame(index=gamma_hat.index, columns=gamma_hat.columns)
delta_star_mat = pd.DataFrame(index=gamma_hat.index, columns=gamma_hat.columns)
batches = gamma_hat.keys()
genes = list(gamma_hat.T.keys())
genes = [x for x in genes if Z[x].max() != 0]
start = time.time()
count = 0
for i in batches:
# get individual variables to focus on
theta_i = theta_bar[i]
lambda_i = lambda_bar[i]
n = N[i]
tau_i = tau_bar[i]
gamma_bar_i = gamma_bar[i]
for p in genes:
gene_counts_in_batch = X_batch[i] * Z[p]
gene_counts_in_batch = gene_counts_in_batch[gene_counts_in_batch != 0]
changed_samples = gene_counts_in_batch.keys()
gamma_hat_ip = gamma_hat[i][p]
# initial iteration values
delta_star_ip_init = delta_hat[i][p]
gamma_star_ip_init = f_gamma_star_ip(tau_i, gamma_bar_i, gamma_hat_ip, delta_star_ip_init, n)
# calculate the next step in the iteration
delta_star_ip_next = f_delta_star_ip(theta_i, lambda_i, gene_counts_in_batch, gamma_star_ip_init, n)
gamma_star_ip_next = f_gamma_star_ip(tau_i, gamma_bar_i, gamma_hat_ip, delta_star_ip_next, n)
conv_delta = abs(delta_star_ip_next - delta_star_ip_init)
conv_gamma = abs(gamma_star_ip_next - gamma_star_ip_init)
itt = 1
while ((conv_delta + conv_gamma) > 1e-8):
# store previous iteration of the values
delta_star_ip_init = delta_star_ip_next
gamma_star_ip_init = gamma_star_ip_next
# take our next "guess" for the values
delta_star_ip_next = f_delta_star_ip(theta_i, lambda_i, gene_counts_in_batch, gamma_star_ip_init, n)
gamma_star_ip_next = f_gamma_star_ip(tau_i, gamma_bar_i, gamma_hat_ip, delta_star_ip_init, n)
# calculate how close we are to convergence
conv_delta = abs(delta_star_ip_next - delta_star_ip_init)
conv_gamma = abs(gamma_star_ip_next - gamma_star_ip_init)
itt += 1
if (itt == max_itt):
raise ValueError("Maximum iteration reached for convergence. Try setting a higher limit")
if (verbose):
print("OTU {} on dataset {} Convergence took {} steps".format(p[-15:], i, itt))
# store found values in the relevant matrices
gamma_star_mat[i][p] = gamma_star_ip_next
delta_star_mat[i][p] = delta_star_ip_next
a = (sigma_p[p] / delta_star_ip_next)
b = (Z[p][changed_samples] - gamma_star_ip_next)
c = X_add[p]
Z_out[p][changed_samples] = (a * b + c)[changed_samples]
count += 1
end = time.time()
print('{}/{} completed in: {}s'.format(count, len(batches), round(end - start, 2)))
# ------------ LOOP END -----------------------------------------------------------------
end = time.time()
print('Total Batch Reduction Parameter Estimation time: {}s'.format(round(end - start, 2)))
Z_out = Z_out.fillna(0)
return {"gamma_star": gamma_star_mat,
"delta_star": delta_star_mat,
"BR": Z_out}
def f_delta_star_ip(theta_bar, lambda_bar, Z_in_batch, gamma_star, n):
"""This is the function to calculate delta star given gamma_star """
# INPUT
# theta_bar: theta estimate for batch i (scale estimate for delta star_ip)
    # lambda_bar: lambda estimate for batch i (shape estimate for delta_star_ip)
    # Z_in_batch: vector of corrected counts for OTU p in batch i
# gamma_star: posterior mean for location parameter of OTU p in batch i
# n: number of samples in batch i
# OUTPUT
    # delta_star_ip: posterior mean for the scale parameter of OTU p in batch i
return (theta_bar + 0.5 * (((Z_in_batch - gamma_star) ** 2).sum())) / ((n / 2) + lambda_bar - 1)
def f_gamma_star_ip(tau_bar, gamma_bar, gamma_hat, delta_star, n):
"""This is the function to calculate gamma star given delta_star"""
# INPUT
# tau_bar: tau estimate in batch i
# gamma_bar: gamma mean estimate for batch i
# gamma_hat: sample mean for each OTU p in batch i
# delta_star: posterior mean for scale parameter of OTU p in batch i
# n: number of samples in batch i
# OUTPUT
# gamma_star_ip: posterior mean for location parameter of OTU p in batch i
return (n * tau_bar * gamma_hat + delta_star * gamma_bar) / (n * tau_bar + delta_star)
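# Illustrative sketch (not part of the original pipeline): the two helpers above
# implement the empirical-Bayes posterior updates that reduce_batch_effects()
# iterates to a fixed point. As the batch size n grows, gamma_star is pulled
# towards the sample mean gamma_hat and away from the prior mean gamma_bar.
# The numbers below are made up purely to show the direction of that shrinkage.
def _demo_posterior_shrinkage():
    tau_bar, gamma_bar, gamma_hat, delta_star = 0.5, 0.0, 1.0, 1.0
    small_n = f_gamma_star_ip(tau_bar, gamma_bar, gamma_hat, delta_star, n=2)    # 0.5
    large_n = f_gamma_star_ip(tau_bar, gamma_bar, gamma_hat, delta_star, n=200)  # ~0.99, close to gamma_hat
    return small_n, large_n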
def combat(in_df, covariates, batches, ignore, verbose=False):
df = in_df.copy()
for i in range(len(batches.keys())):
print("Performing ComBat Batch Correction for {}".format(batches[i].upper()))
df[df.columns.difference(ignore)] = df[df.columns.difference(ignore)]
t = batch_pp(df, covariates=covariates,batch_column=batches[i], ignore=ignore)
r = reduce_batch_effects(Y=t['Y'], X_cov=t['X_cov'], I=t['I'], X_batch=t['X_batch'], verbose=verbose)
try:
df = pd.concat([r["BR"], t['ignore']], axis=1)
        except Exception:
            print('Error occurred while re-attaching ignored columns - returning batch-reduced data only')
return ("error", r["BR"])
return df
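# Minimal usage sketch, kept behind a __main__ guard so importing this module has
# no side effects. Every name below (the CSV file and its columns) is hypothetical;
# the exact table layout expected by batch_pp(), defined earlier in this module,
# is assumed rather than documented here.
if __name__ == "__main__":
    abundance = pd.read_csv("abundance_table.csv")       # hypothetical samples x features table
    corrected = combat(abundance,
                       covariates=["phenotype"],         # biological signal to preserve
                       batches={0: "sequencing_run"},    # batch variable(s) to remove
                       ignore=["sample_id"],             # columns left untouched
                       verbose=True)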
|
[
"pandas.DataFrame",
"statsmodels.api.OLS",
"numpy.seterr",
"pandas.get_dummies",
"statistics.stdev",
"time.time",
"pandas.concat"
] |
[((187, 226), 'numpy.seterr', 'np.seterr', ([], {'over': '"""raise"""', 'under': '"""ignore"""'}), "(over='raise', under='ignore')\n", (196, 226), True, 'import numpy as np\n'), ((1670, 1720), 'pandas.get_dummies', 'pd.get_dummies', (['df[batch_column]'], {'drop_first': '(False)'}), '(df[batch_column], drop_first=False)\n', (1684, 1720), True, 'import pandas as pd\n'), ((1909, 1956), 'pandas.get_dummies', 'pd.get_dummies', (['df[covariates]'], {'drop_first': '(True)'}), '(df[covariates], drop_first=True)\n', (1923, 1956), True, 'import pandas as pd\n'), ((3459, 3505), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'Y.index', 'columns': 'Y.columns'}), '(index=Y.index, columns=Y.columns)\n', (3471, 3505), True, 'import pandas as pd\n'), ((3782, 3834), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'Y.columns', 'index': 'X_mat.columns'}), '(columns=Y.columns, index=X_mat.columns)\n', (3794, 3834), True, 'import pandas as pd\n'), ((3856, 3908), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'Y.columns', 'index': 'X_cov.columns'}), '(columns=Y.columns, index=X_cov.columns)\n', (3868, 3908), True, 'import pandas as pd\n'), ((3948, 3959), 'time.time', 'time.time', ([], {}), '()\n', (3957, 3959), False, 'import time\n'), ((6193, 6204), 'time.time', 'time.time', ([], {}), '()\n', (6202, 6204), False, 'import time\n'), ((7143, 7189), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'Z.index', 'columns': 'Z.columns'}), '(index=Z.index, columns=Z.columns)\n', (7155, 7189), True, 'import pandas as pd\n'), ((8077, 8139), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'gamma_hat.index', 'columns': 'gamma_hat.columns'}), '(index=gamma_hat.index, columns=gamma_hat.columns)\n', (8089, 8139), True, 'import pandas as pd\n'), ((8161, 8223), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'gamma_hat.index', 'columns': 'gamma_hat.columns'}), '(index=gamma_hat.index, columns=gamma_hat.columns)\n', (8173, 8223), True, 'import pandas as pd\n'), ((8356, 8367), 'time.time', 'time.time', ([], {}), '()\n', (8365, 8367), False, 'import time\n'), ((11118, 11129), 'time.time', 'time.time', ([], {}), '()\n', (11127, 11129), False, 'import time\n'), ((4960, 4987), 'statsmodels.api.OLS', 'sm.OLS', (['y_ijp', 'X_design_mat'], {}), '(y_ijp, X_design_mat)\n', (4966, 4987), True, 'import statsmodels.api as sm\n'), ((5210, 5237), 'statistics.stdev', 'statistics.stdev', (['residuals'], {}), '(residuals)\n', (5226, 5237), False, 'import statistics\n'), ((10905, 10916), 'time.time', 'time.time', ([], {}), '()\n', (10914, 10916), False, 'import time\n'), ((3201, 3236), 'pandas.concat', 'pd.concat', (['[X_cov, X_batch]'], {'axis': '(1)'}), '([X_cov, X_batch], axis=1)\n', (3210, 3236), True, 'import pandas as pd\n'), ((5975, 5986), 'time.time', 'time.time', ([], {}), '()\n', (5984, 5986), False, 'import time\n'), ((13169, 13210), 'pandas.concat', 'pd.concat', (["[r['BR'], t['ignore']]"], {'axis': '(1)'}), "([r['BR'], t['ignore']], axis=1)\n", (13178, 13210), True, 'import pandas as pd\n')]
|
from google.protobuf.symbol_database import Default
import nltk
import random
import pickle
from nltk.corpus.reader.chasen import test
from pandas.core.indexes import period
from statsmodels.tsa.seasonal import _extrapolate_trend
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
from numpy.lib.function_base import append, select
lemmatizer = WordNetLemmatizer()
import pandas as pd
import yfinance as yf
import streamlit as st
import statsmodels.api as sm
import datetime as dt
import plotly.graph_objects as go
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
import requests
import json
import numpy as np
from keras.models import load_model
from bs4 import BeautifulSoup
import csv
from requests.exceptions import ConnectionError, ReadTimeout
words=pickle.load(open('words.pkl','rb'))
classes=pickle.load(open('classes.pkl','rb'))
model = load_model("stock_model.h5")
intents=json.loads(open('training.json').read())
def calcMovingAverage(data, size):
df = data.copy()
df['sma'] = df['Adj Close'].rolling(size).mean()
df['ema'] = df['Adj Close'].ewm(span=size, min_periods=size).mean()
df.dropna(inplace=True)
return df
def calc_macd(data):
df = data.copy()
df['ema12'] = df['Adj Close'].ewm(span=12, min_periods=12).mean()
df['ema26'] = df['Adj Close'].ewm(span=26, min_periods=26).mean()
df['macd'] = df['ema12'] - df['ema26']
df['signal'] = df['macd'].ewm(span=9, min_periods=9).mean()
df.dropna(inplace=True)
return df
def calcBollinger(data, size):
df = data.copy()
df["sma"] = df['Adj Close'].rolling(size).mean()
df["bolu"] = df["sma"] + 2*df['Adj Close'].rolling(size).std(ddof=0)
df["bold"] = df["sma"] - 2*df['Adj Close'].rolling(size).std(ddof=0)
df["width"] = df["bolu"] - df["bold"]
df.dropna(inplace=True)
return df
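# Optional demo of the three indicator helpers above. It is wrapped in a function
# that the app never calls, so Streamlit behaviour is unchanged; the ticker symbol
# and look-back window are arbitrary examples.
def _demo_indicators(ticker_symbol="AAPL", years=1, window=20):
    start = dt.datetime.today() - dt.timedelta(days=365 * years)
    end = dt.datetime.today()
    prices = yf.download(ticker_symbol, start, end)
    sma_ema = calcMovingAverage(prices, window)  # adds 'sma' and 'ema' columns
    macd = calc_macd(prices)                     # adds 'macd' and 'signal' columns
    bands = calcBollinger(prices, window)        # adds 'bolu', 'bold' and 'width' columns
    return sma_ema, macd, bands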
def graphMyStock(finalvar,a,b,col):
stock2 = yf.Ticker(finalvar)
info2=stock2.info
ln2=info2['longName']
opt1b, opt2b = st.beta_columns(2)
with opt1b:
numYearMAb = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=a)
with opt2b:
windowSizeMAb = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=b)
start2 = dt.datetime.today()-dt.timedelta(numYearMAb * 365)
end2 = dt.datetime.today()
livedata2 = yf.download(finalvar,start2,end2)
df_ma2 = calcMovingAverage(livedata2, windowSizeMAb)
df_ma2 = df_ma2.reset_index()
fig2 = go.Figure()
fig2.add_trace(
go.Scatter(
x = df_ma2['Date'],
y = df_ma2['Adj Close'],
name = '('+ finalvar+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col)
)
)
fig2.update_layout(showlegend=True,legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
))
fig2.update_layout(legend_title_text='Trend')
fig2.update_yaxes(tickprefix="$")
st.plotly_chart(fig2, use_container_width=True)
def graphAllStocks(stocka,stockb,stockc,a,b,col1,col2,col3):
stock2 = yf.Ticker(stocka)
info2=stock2.info
ln2=info2['longName']
st.write('')
st.subheader('**Graph of optimal stocks:** ')
opt1b, opt2b = st.beta_columns(2)
with opt1b:
numYearMAb = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=a)
with opt2b:
windowSizeMAb = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=b)
start2 = dt.datetime.today()-dt.timedelta(numYearMAb * 365)
end2 = dt.datetime.today()
livedata2 = yf.download(stocka,start2,end2)
df_ma2 = calcMovingAverage(livedata2, windowSizeMAb)
df_ma2 = df_ma2.reset_index()
fig2 = go.Figure()
fig2.add_trace(
go.Scatter(
x = df_ma2['Date'],
y = df_ma2['Adj Close'],
name = '('+ stocka+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col1)
)
)
livedata2=yf.download(stockb,start2,end2)
df_ma2= calcMovingAverage(livedata2, windowSizeMAb)
df_ma2= df_ma2.reset_index()
fig2.add_trace(
go.Scatter(
x=df_ma2['Date'],
y=df_ma2['Adj Close'],
name = '('+ stockb+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col2)
))
livedata3=yf.download(stockc,start2,end2)
df_ma3= calcMovingAverage(livedata3, windowSizeMAb)
df_ma3= df_ma3.reset_index()
fig2.add_trace(
go.Scatter(
x=df_ma3['Date'],
y=df_ma3['Adj Close'],
name = '('+ stockc+ ') '+ "Prices Over Last " + str(numYearMAb) + " Year(s)",
mode='lines',
line=dict(color=col3)
))
fig2.update_layout(showlegend=True,legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
))
fig2.update_layout(legend_title_text='Trend')
fig2.update_yaxes(tickprefix="$")
st.plotly_chart(fig2, use_container_width=True)
def RootWordGen(lw):
j=nltk.word_tokenize(lw)
j= [lemmatizer.lemmatize(word.lower()) for word in j]
return(j)
def matrix(sentence, words, show_details=True):
sentence_words= RootWordGen(sentence)
    # sentence_words: lemmatized tokens of the input sentence
bag = [0]*len(words)
    # bag-of-words vector with one slot per vocabulary word, initialised to 0
for s in sentence_words:
#traverses root words
for i,w in enumerate(words):
            # i is the index of the vocabulary word, w is the word itself.
            # The vocabulary acts like a directory: each unique word has a fixed
            # position, and the output vector holds a 1 at the position of every
            # vocabulary word that appears in the input sentence, 0 elsewhere.
if w == s:
# assign 1 if current word is in the vocabulary position
bag[i] = 1
if show_details:
                    # report which vocabulary word the input token matched
print ("found in bag: %s" % w)
#removes commas from list, returns matrix
return(np.array(bag))
def predict_class(sentence, model):
# filter out predictions below a threshold probability
pred= matrix(sentence, words,show_details=False)
res = model.predict(np.array([pred]))[0]
ERROR_THRESHOLD = 0.25
global results
results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]
global results1
results1 = [[i,r] for i,r in enumerate(res)]
print(results)
#for guesses above threshold
#f=open('r.txt','w')
#for all guesses
#f1=open('s.txt','w')
# sort by strength of probability
results.sort(key=lambda x: x[1], reverse=True)
results1.sort(key=lambda x: x[1], reverse=True)
pr=results1[0]
global pp
pp=pr[1]
print(pp)
global return_list
return_list = []
global return_list1
return_list1=[]
for r in results1:
return_list1.append({"intent": classes[r[0]], "probability": str(r[1])})
for r in results:
return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
#for x in return_list1:
# f1.write(str(x))
#for x in return_list:
#print(x)
#f.write(str(x))
return return_list[0]
def getResponse(ints, intents_json):
global tag
tag = ints[0]['intent']
print(tag)
list_of_intents = intents_json['intents']
for i in list_of_intents:
if(i['tag']== tag):
result = random.choice(i['responses'])
break
return result
def FinalPrediction(msg):
ints = predict_class(msg, model)
res = getResponse(ints, intents)
return res
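# Illustrative sketch of how the intent pipeline above fits together; it is only a
# function definition and is never invoked by the app. The query text is
# hypothetical: matrix() turns it into a bag-of-words vector, predict_class()
# scores it with the Keras model, and getResponse() returns a canned reply for
# the best-matching intent tag.
def _demo_intent_pipeline():
    sample_query = "will technology stocks rise this year"  # hypothetical input
    return FinalPrediction(sample_query)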
stockdata = pd.read_csv("SP500.csv")
symbols = stockdata['Symbol'].sort_values().tolist()
st.title('Investment Optimizer and Stock Growth Predictor')
#We'll add this when we come up with something
expander=st.beta_expander(label='',expanded=False)
expander.write("This application aims at evaluating stock trends and current news to predict its future growth. It provides a clean and efficient user interface to view current prices and fluctuation history. It also provides a tool to identify an ideal combination of stocks that one should invest in based on the given budget, using our machine learning and optimization algorithm. We have named our ML model 'ATHENA', which stands for Algorithmic Enhancer")
st.write("")
st.write("")
st.write('**Would you like to know where to invest or understand each Stock?**')
a=st.radio("", ("Invest", "Understand"))
if(a=="Invest"):
budget=st.sidebar.number_input("Enter your budget ($): ")
if(st.sidebar.button("Enter")):
st.header("")
st.header("**Following is the combination of stocks you should invest in: ** ")
st.write("")
st.write('Processing...')
invest=[]
invstock_sym=[]
invstock_name=[]
f= open("SP500.csv",'r')
rd=csv.reader(f)
for x in rd:
if x!=[]:
if x[2]=='badboy':
invstock_sym.append(x[0])
invstock_name.append(x[1])
invstock_price=[]
for ticker in invstock_sym:
ticker_yahoo = yf.Ticker(ticker)
data = ticker_yahoo.history()
last_quote = (data.tail(1)['Close'].iloc[0])
invstock_price.append(float(last_quote))
invstock_conf=[]
st.markdown("""
<style>
.stProgress .st-bo {
background-color: green;
}
</style>
""", unsafe_allow_html=True)
my_bar=st.progress(0)
progresscount=10
for badgirl in invstock_name:
checkerb=0
try:
send="https://www.google.com/search?q=should+you+invest+in+ "+badgirl.lower()+" stock"
res=requests.get(send)
except ReadTimeout:
checkerb=checkerb+1
            except (ConnectionError, ConnectionAbortedError, ConnectionRefusedError):
checkerb=checkerb+1
else:
soup=BeautifulSoup(res.content, "html.parser")
all_links=[]
count=0
for i in soup.select("a"):
if count==1:
break
link=i.get("href")
if("/url?q=https://" in link):
if(("/url?q=https://support.google.com" not in link) and ("/url?q=https://accounts.google.com" not in link)):
x=link.split("https://")
y=x[1].split("&sa")
new="https://"+y[0]
all_links.append(new)
z=i.text
if("..." in z):
type2=z.split("...")
name=type2[0]
else:
type1=z.split(" › ")
name=type1[0]
count+=1
list1=[]
c=0
for i in all_links:
if c==1:
break
option=requests.get(i)
soup=BeautifulSoup(option.content, "html.parser")
pageinfo=soup.select("p")
for j in pageinfo:
m=j.text
n=m.split(' ')
for i in n:
list1.append(i)
c=c+1
tex=' '.join(list1)
find=predict_class(tex,model)
varun=[]
varun.append(float(find['probability']))
varun.append(find['intent'])
invstock_conf.append(varun)
progresscount=progresscount+10
my_bar.progress(progresscount)
stocks={}
for i in range(len(invstock_name)):
temp=[]
if invstock_conf[i][1]=='up':
temp.append(invstock_conf[i][0])
temp.append(invstock_price[i])
temp.append(invstock_name[i])
temp.append(invstock_sym[i])
length= len(stocks)
stocks[length]=temp
###### NEED TO GET "STOCKS" DICTIONARY DATA FROM ########
all_stocks={}
for i in range(len(stocks)):
if((budget >= stocks[i][1]) and (stocks[i][0]>0.5)):
n=len(all_stocks)
all_stocks[n]=[stocks[i][0], stocks[i][1], stocks[i][2], stocks[i][3]]
if len(all_stocks)>=3:
st.balloons()
quad1={}
quad2={}
quad3={}
quad4={}
for i in range(len(all_stocks)):
if((all_stocks[i][0]>=0.8) and (all_stocks[i][1]<=100)):
quad1[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
elif((all_stocks[i][0]>=0.8) and (all_stocks[i][1]>100)):
quad2[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
elif((all_stocks[i][0]<0.8) and (all_stocks[i][1]<=100)):
quad3[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
else:
quad4[i]=[all_stocks[i][0], all_stocks[i][1], all_stocks[i][2],all_stocks[i][3]]
def inputs(quad):
global invest
spq=[]
for i in quad:
spq.append(quad[i][1])
length=len(spq)
for i in range(length):
if(len(invest)==3):
break
minval=min(spq)
for i in quad:
if(quad[i][1]==minval):
invest.append(quad[i])
spq.remove(minval)
inputs(quad1)
if(len(invest)<3):
inputs(quad2)
if(len(invest)<3):
inputs(quad3)
if(len(invest)<3):
inputs(quad4)
#stock1 should get 60%
#stock2 should get 30%
#stock3 should get 10%
s1=budget*0.6
s2=budget*0.3
s3=budget*0.1
n_s1=s1//invest[0][1]
n_s2=s2//invest[1][1]
n_s3=s3//invest[2][1]
left=budget-invest[0][1]*n_s1-invest[1][1]*n_s2-invest[2][1]*n_s3
invest_val=[]
for i in range(3):
invest_val.append(invest[i][1])
a_s1=0
a_s2=0
a_s3=0
a_s3=left//invest[2][1]
left=left-a_s3*invest[2][1]
a_s2=left//invest[1][1]
left=left-a_s2*invest[1][1]
a_s1=left//invest[0][1]
left=left-a_s1*invest[0][1]
t_s1=n_s1+a_s1
t_s2=n_s2+a_s2
t_s3=n_s3+a_s3
st.write("")
st.subheader('**Summary:** ')
summary_table={}
names=[]
prices=[]
nstocks=[]
totalcosts=[]
confidences=[]
for i in range(len(invest)):
names.append(invest[i][2])
prices.append(invest[i][1])
if(i==0):
nstocks.append(t_s1)
tcost=t_s1*invest[i][1]
totalcosts.append(tcost)
if(i==1):
nstocks.append(t_s2)
tcost=t_s2*invest[i][1]
totalcosts.append(tcost)
if(i==2):
nstocks.append(t_s3)
tcost=t_s3*invest[i][1]
totalcosts.append(tcost)
confidences.append(invest[i][0])
summary_table["Stock Name"]=names
summary_table["Cost per Stock"]=prices
summary_table["Number to Purchase"]=nstocks
summary_table["Total Cost"]=totalcosts
summary_table["Our Confidence"]=confidences
column_order=["Stock Name", "Cost per Stock", "Number to Purchase", "Total Cost", "Our Confidence"]
summary_df=pd.DataFrame(data=summary_table)
st.dataframe(summary_df)
st.write("")
bala='**Your balance:** '+ '_$' + str(left) +'_'
st.write(bala)
graphAllStocks(invest[0][3],invest[1][3],invest[2][3],14,15,'royalblue','springgreen','indianred')
st.header('**In depth review:** ')
st.write('')
text1='Your first stock: ' + '_' + str(invest[0][2]) + '_'
st.header(text1)
graphMyStock(invest[0][3],1,2,'royalblue')
text1a='**Price:** '+ '_$'+ str(invest[0][1]) + '_'
st.write(text1a)
text1b='**Number of stocks you should buy:** '+ '_' + str(t_s1) + '_'
st.write(text1b)
text1c="**Athena's confidence: **"+'_'+ str(100*invest[0][0])+'%' + '_'
st.write(text1c)
st.write('')
st.write('')
text2='Your second stock: ' +'_'+ str(invest[1][2])+ '_'
st.header(text2)
graphMyStock(invest[1][3],3,4,'springgreen')
text2a='**Price:** '+ '_$'+ str(invest[1][1])+ '_'
st.write(text2a)
text2b='**Number of stocks you should buy:** '+'_'+ str(t_s2)+ '_'
st.write(text2b)
text2c="**Athena's confidence:** "+'_'+ str(100*invest[1][0]) + '%'+'_'
st.write(text2c)
st.write('')
st.write('')
text3= 'Your third stock: '+'_'+ str(invest[2][2])+ '_'
st.header(text3)
graphMyStock(invest[2][3],5,6,'indianred')
text3a='**Price:** '+ '_$'+ str(invest[2][1])+ '_'
st.write(text3a)
text3b='**Number of stocks you should buy: **'+'_'+ str(t_s3)+'_'
st.write(text3b)
text3c="**Athena's confidence: **"+'_'+ str(100*invest[2][0]) + '%'+'_'
st.write(text3c)
st.write('')
st.write('')
st.header("")
st.header("")
st.write("Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.")
else:
st.write('Budget too low to diversify')
if a=='Understand':
ticker = st.sidebar.selectbox(
'Choose a Stock',symbols)
stock = yf.Ticker(ticker)
info=stock.info
ln=info['longName']
st.title(info['longName'])
st.title(ticker)
opt1, opt2 = st.beta_columns(2)
with opt1:
numYearMA = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=0)
with opt2:
windowSizeMA = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=1)
start = dt.datetime.today()-dt.timedelta(numYearMA * 365)
end = dt.datetime.today()
livedata = yf.download(ticker,start,end)
df_ma = calcMovingAverage(livedata, windowSizeMA)
df_ma = df_ma.reset_index()
fig = go.Figure()
fig.add_trace(
go.Scatter(
x = df_ma['Date'],
y = df_ma['Adj Close'],
name = '('+ ticker+ ') '+ "Prices Over Last " + str(numYearMA) + " Year(s)",
mode='lines',
line=dict(color='royalblue')
)
)
compstock2=st.selectbox('Choose stock to compare with: ', symbols)
st.info("If you don't wish to compare, select the same stock again")
livedata2=yf.download(compstock2,start,end)
df_ma2= calcMovingAverage(livedata2, windowSizeMA)
df_ma2= df_ma2.reset_index()
fig.add_trace(
go.Scatter(
x=df_ma2['Date'],
y=df_ma2['Adj Close'],
name = '('+ compstock2+ ') '+ "Prices Over Last " + str(numYearMA) + " Year(s)",
mode='lines',
line=dict(color='firebrick')
))
fig.update_layout(showlegend=True,legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
))
fig.update_layout(legend_title_text='Trend')
fig.update_yaxes(tickprefix="$")
st.plotly_chart(fig, use_container_width=True)
livedata3 = yf.download(ticker,start,end)
df_ma3 = calcMovingAverage(livedata3, windowSizeMA)
    df_ma3 = df_ma3.reset_index()
train_data, test_data = df_ma3[0:int(len(df_ma3)*0.7)], df_ma3[int(len(df_ma3)*0.7):]
training_data = train_data['Adj Close'].values
test_data = test_data['Adj Close'].values
history = [x for x in training_data]
model_predictions = []
N_test_observations = len(test_data)
abcd=0
for time_point in range(N_test_observations):
        # use a local name so the Keras intent model loaded above is not shadowed
        arima_model = ARIMA(history, order=(4,1,0))
        model_fit = arima_model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
model_predictions.append(yhat[0])
true_test_value = test_data[time_point]
history.append(true_test_value)
abcd=abcd+1
af=time_point
MSE_error = mean_squared_error(test_data, model_predictions)
test_set_range = df_ma3[int(len(df_ma3)*0.7):]
dts=df_ma3.loc[:,['Date']]
new = pd.date_range(test_set_range.Date.iloc[-1], periods=30)
df1 = pd.DataFrame(new[1:], columns=['Date'])
df_fin = test_set_range.append(df1, ignore_index=True)
mps=[]
for i in range(30):
        arima_model = ARIMA(history, order=(4,1,0))
        fitted = arima_model.fit(disp=0)
ou=fitted.forecast()
yha = ou[0]
mps.append(yha[0])
history.append(yha[0])
future_dates=[]
dat=[]
for row in df_fin.itertuples():
dat.append(row[2])
mxq=dat[-1]-dt.timedelta(days=29)
future_dates.append(mxq)
for i in range (30):
date=future_dates[-1]+dt.timedelta(days=1)
future_dates.append(date)
myseries=pd.Series(mps)
st.subheader('Future Graph Trend for '+ info['longName']+' using Time Series Analysis')
figtsa=go.Figure()
figtsa.add_trace(
go.Scatter(
x=df_fin['Date'],
y=model_predictions,
name = 'Predicted Prices',
mode='lines'
)
)
figtsa.add_trace(
go.Scatter(
x=df_fin['Date'],
y=test_data,
mode='lines',
name='Previous model prediction graph'
)
)
figtsa.add_trace(
go.Scatter(
x=future_dates,
y=mps,
mode='lines',
name='Future Price Trend'
)
)
st.plotly_chart(figtsa, use_container_width=True)
st.subheader('Bollinger Band')
opta, optb = st.beta_columns(2)
with opta:
numYearBoll = st.number_input('Insert period (Year): ', min_value=1, max_value=10, value=2, key=6)
with optb:
windowSizeBoll = st.number_input('Window Size (Day): ', min_value=5, max_value=500, value=20, key=7)
startBoll= dt.datetime.today()-dt.timedelta(numYearBoll * 365)
endBoll = dt.datetime.today()
dataBoll = yf.download(ticker,startBoll,endBoll)
df_boll = calcBollinger(dataBoll, windowSizeBoll)
df_boll = df_boll.reset_index()
figBoll = go.Figure()
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['bolu'],
name = "Upper Band"
)
)
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['sma'],
name = "SMA" + str(windowSizeBoll) + " Over Last " + str(numYearBoll) + " Year(s)"
)
)
figBoll.add_trace(
go.Scatter(
x = df_boll['Date'],
y = df_boll['bold'],
name = "Lower Band"
)
)
figBoll.update_layout(showlegend=True,legend=dict(
orientation="h",
yanchor="bottom",
y=1,
xanchor="left",
x=0
))
figBoll.update_yaxes(tickprefix="$")
st.plotly_chart(figBoll, use_container_width=True)
st.sidebar.title("Stock News")
send="https://www.google.com/search?q=should+you+invest+in+ "+ln.lower()+" stock"
res=requests.get(send)
soup=BeautifulSoup(res.content, "html.parser")
all_links=[]
all_titles=[]
count=0
for i in soup.select("a"):
if count==5:
break
link=i.get("href")
if("/url?q=https://" in link):
if(("/url?q=https://support.google.com" not in link) and ("/url?q=https://accounts.google.com" not in link)):
x=link.split("https://")
y=x[1].split("&sa")
new="https://"+y[0]
all_links.append(new)
z=i.text
if("..." in z):
type2=z.split("...")
name=type2[0]
else:
type1=z.split(" › ")
name=type1[0]
all_titles.append(name)
count+=1
for i in range(len(all_titles)):
make="["+str(all_titles[i])+"]"+" "+"("+str(all_links[i])+")"
st.sidebar.markdown(make)
st.sidebar.write("")
st.sidebar.write("")
list1=[]
c=0
alllinksind=len(all_links)
for x in range(alllinksind):
checkera=0
if c==10:
break
try:
option=requests.get(all_links[x], timeout=3)
except ReadTimeout:
checkera=checkera+1
        except (ConnectionError, ConnectionAbortedError, ConnectionRefusedError):
checkera=checkera+1
else:
if checkera==0:
soup=BeautifulSoup(option.content, "html.parser")
pageinfo=soup.select('p')
paglen=len(pageinfo)
for j in range(paglen):
m=pageinfo[j].text
n=m.split(' ')
for i in n:
list1.append(i)
c=c+1
tex=' '.join(list1)
understand_prob=predict_class(tex,model)
finint=understand_prob['intent']
finprob=100*float(understand_prob['probability'])
if finint=='up':
fininta='Stock prices will go up'
elif finint=='down':
fininta='Stock prices will go down'
fina='**Stock trend prediction: **' + '_'+ str(fininta)+ '_'
finb="**Athena's confidence: **"+ '_'+ str(finprob)+'%' +'_'
    st.subheader(fina)
st.subheader(finb)
st.header("")
st.header("")
st.markdown("""
<style>
.small-font {
font-size:10px !important;
}
</style>
""", unsafe_allow_html=True)
st.markdown('<p class="small-font">Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.</p>', unsafe_allow_html=True)
|
[
"keras.models.load_model",
"streamlit.balloons",
"streamlit.selectbox",
"csv.reader",
"statsmodels.tsa.arima_model.ARIMA",
"streamlit.sidebar.write",
"pandas.read_csv",
"streamlit.radio",
"streamlit.title",
"streamlit.sidebar.title",
"streamlit.sidebar.selectbox",
"nltk.download",
"streamlit.sidebar.button",
"streamlit.beta_expander",
"nltk.word_tokenize",
"pandas.DataFrame",
"streamlit.subheader",
"streamlit.progress",
"nltk.stem.WordNetLemmatizer",
"yfinance.download",
"streamlit.info",
"datetime.timedelta",
"streamlit.sidebar.markdown",
"requests.get",
"streamlit.beta_columns",
"sklearn.metrics.mean_squared_error",
"plotly.graph_objects.Scatter",
"streamlit.plotly_chart",
"datetime.datetime.today",
"pandas.date_range",
"streamlit.header",
"plotly.graph_objects.Figure",
"pandas.Series",
"bs4.BeautifulSoup",
"streamlit.sidebar.number_input",
"streamlit.markdown",
"streamlit.dataframe",
"random.choice",
"streamlit.write",
"numpy.array",
"yfinance.Ticker",
"streamlit.number_input"
] |
[((237, 259), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (250, 259), False, 'import nltk\n'), ((261, 285), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (274, 285), False, 'import nltk\n'), ((393, 412), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (410, 412), False, 'from nltk.stem import WordNetLemmatizer\n'), ((946, 974), 'keras.models.load_model', 'load_model', (['"""stock_model.h5"""'], {}), "('stock_model.h5')\n", (956, 974), False, 'from keras.models import load_model\n'), ((8682, 8706), 'pandas.read_csv', 'pd.read_csv', (['"""SP500.csv"""'], {}), "('SP500.csv')\n", (8693, 8706), True, 'import pandas as pd\n'), ((8766, 8825), 'streamlit.title', 'st.title', (['"""Investment Optimizer and Stock Growth Predictor"""'], {}), "('Investment Optimizer and Stock Growth Predictor')\n", (8774, 8825), True, 'import streamlit as st\n'), ((8886, 8928), 'streamlit.beta_expander', 'st.beta_expander', ([], {'label': '""""""', 'expanded': '(False)'}), "(label='', expanded=False)\n", (8902, 8928), True, 'import streamlit as st\n'), ((9394, 9406), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (9402, 9406), True, 'import streamlit as st\n'), ((9408, 9420), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (9416, 9420), True, 'import streamlit as st\n'), ((9422, 9507), 'streamlit.write', 'st.write', (['"""**Would you like to know where to invest or understand each Stock?**"""'], {}), "('**Would you like to know where to invest or understand each Stock?**'\n )\n", (9430, 9507), True, 'import streamlit as st\n'), ((9506, 9544), 'streamlit.radio', 'st.radio', (['""""""', "('Invest', 'Understand')"], {}), "('', ('Invest', 'Understand'))\n", (9514, 9544), True, 'import streamlit as st\n'), ((2001, 2020), 'yfinance.Ticker', 'yf.Ticker', (['finalvar'], {}), '(finalvar)\n', (2010, 2020), True, 'import yfinance as yf\n'), ((2105, 2123), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (2120, 2123), True, 'import streamlit as st\n'), ((2474, 2493), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (2491, 2493), True, 'import datetime as dt\n'), ((2511, 2546), 'yfinance.download', 'yf.download', (['finalvar', 'start2', 'end2'], {}), '(finalvar, start2, end2)\n', (2522, 2546), True, 'import yfinance as yf\n'), ((2676, 2687), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (2685, 2687), True, 'import plotly.graph_objects as go\n'), ((3362, 3409), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig2'], {'use_container_width': '(True)'}), '(fig2, use_container_width=True)\n', (3377, 3409), True, 'import streamlit as st\n'), ((3489, 3506), 'yfinance.Ticker', 'yf.Ticker', (['stocka'], {}), '(stocka)\n', (3498, 3506), True, 'import yfinance as yf\n'), ((3562, 3574), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (3570, 3574), True, 'import streamlit as st\n'), ((3580, 3625), 'streamlit.subheader', 'st.subheader', (['"""**Graph of optimal stocks:** """'], {}), "('**Graph of optimal stocks:** ')\n", (3592, 3625), True, 'import streamlit as st\n'), ((3662, 3680), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (3677, 3680), True, 'import streamlit as st\n'), ((4031, 4050), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (4048, 4050), True, 'import datetime as dt\n'), ((4068, 4101), 'yfinance.download', 'yf.download', (['stocka', 'start2', 'end2'], {}), '(stocka, start2, end2)\n', (4079, 4101), True, 'import yfinance as 
yf\n'), ((4231, 4242), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (4240, 4242), True, 'import plotly.graph_objects as go\n'), ((4631, 4664), 'yfinance.download', 'yf.download', (['stockb', 'start2', 'end2'], {}), '(stockb, start2, end2)\n', (4642, 4664), True, 'import yfinance as yf\n'), ((5082, 5115), 'yfinance.download', 'yf.download', (['stockc', 'start2', 'end2'], {}), '(stockc, start2, end2)\n', (5093, 5115), True, 'import yfinance as yf\n'), ((5846, 5893), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig2'], {'use_container_width': '(True)'}), '(fig2, use_container_width=True)\n', (5861, 5893), True, 'import streamlit as st\n'), ((5926, 5948), 'nltk.word_tokenize', 'nltk.word_tokenize', (['lw'], {}), '(lw)\n', (5944, 5948), False, 'import nltk\n'), ((7048, 7061), 'numpy.array', 'np.array', (['bag'], {}), '(bag)\n', (7056, 7061), True, 'import numpy as np\n'), ((9577, 9627), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Enter your budget ($): """'], {}), "('Enter your budget ($): ')\n", (9600, 9627), True, 'import streamlit as st\n'), ((9636, 9662), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Enter"""'], {}), "('Enter')\n", (9653, 9662), True, 'import streamlit as st\n'), ((19822, 19869), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Choose a Stock"""', 'symbols'], {}), "('Choose a Stock', symbols)\n", (19842, 19869), True, 'import streamlit as st\n'), ((19902, 19919), 'yfinance.Ticker', 'yf.Ticker', (['ticker'], {}), '(ticker)\n', (19911, 19919), True, 'import yfinance as yf\n'), ((19977, 20003), 'streamlit.title', 'st.title', (["info['longName']"], {}), "(info['longName'])\n", (19985, 20003), True, 'import streamlit as st\n'), ((20009, 20025), 'streamlit.title', 'st.title', (['ticker'], {}), '(ticker)\n', (20017, 20025), True, 'import streamlit as st\n'), ((20058, 20076), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (20073, 20076), True, 'import streamlit as st\n'), ((20451, 20470), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (20468, 20470), True, 'import datetime as dt\n'), ((20487, 20518), 'yfinance.download', 'yf.download', (['ticker', 'start', 'end'], {}), '(ticker, start, end)\n', (20498, 20518), True, 'import yfinance as yf\n'), ((20634, 20645), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (20643, 20645), True, 'import plotly.graph_objects as go\n'), ((21019, 21074), 'streamlit.selectbox', 'st.selectbox', (['"""Choose stock to compare with: """', 'symbols'], {}), "('Choose stock to compare with: ', symbols)\n", (21031, 21074), True, 'import streamlit as st\n'), ((21080, 21148), 'streamlit.info', 'st.info', (['"""If you don\'t wish to compare, select the same stock again"""'], {}), '("If you don\'t wish to compare, select the same stock again")\n', (21087, 21148), True, 'import streamlit as st\n'), ((21164, 21199), 'yfinance.download', 'yf.download', (['compstock2', 'start', 'end'], {}), '(compstock2, start, end)\n', (21175, 21199), True, 'import yfinance as yf\n'), ((21925, 21971), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {'use_container_width': '(True)'}), '(fig, use_container_width=True)\n', (21940, 21971), True, 'import streamlit as st\n'), ((21997, 22028), 'yfinance.download', 'yf.download', (['ticker', 'start', 'end'], {}), '(ticker, start, end)\n', (22008, 22028), True, 'import yfinance as yf\n'), ((22840, 22888), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['test_data', 'model_predictions'], {}), '(test_data, 
model_predictions)\n', (22858, 22888), False, 'from sklearn.metrics import mean_squared_error\n'), ((22990, 23045), 'pandas.date_range', 'pd.date_range', (['test_set_range.Date.iloc[-1]'], {'periods': '(30)'}), '(test_set_range.Date.iloc[-1], periods=30)\n', (23003, 23045), True, 'import pandas as pd\n'), ((23057, 23096), 'pandas.DataFrame', 'pd.DataFrame', (['new[1:]'], {'columns': "['Date']"}), "(new[1:], columns=['Date'])\n", (23069, 23096), True, 'import pandas as pd\n'), ((23700, 23714), 'pandas.Series', 'pd.Series', (['mps'], {}), '(mps)\n', (23709, 23714), True, 'import pandas as pd\n'), ((23724, 23818), 'streamlit.subheader', 'st.subheader', (["('Future Graph Trend for ' + info['longName'] + ' using Time Series Analysis')"], {}), "('Future Graph Trend for ' + info['longName'] +\n ' using Time Series Analysis')\n", (23736, 23818), True, 'import streamlit as st\n'), ((23824, 23835), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (23833, 23835), True, 'import plotly.graph_objects as go\n'), ((24465, 24514), 'streamlit.plotly_chart', 'st.plotly_chart', (['figtsa'], {'use_container_width': '(True)'}), '(figtsa, use_container_width=True)\n', (24480, 24514), True, 'import streamlit as st\n'), ((24536, 24566), 'streamlit.subheader', 'st.subheader', (['"""Bollinger Band"""'], {}), "('Bollinger Band')\n", (24548, 24566), True, 'import streamlit as st\n'), ((24585, 24603), 'streamlit.beta_columns', 'st.beta_columns', (['(2)'], {}), '(2)\n', (24600, 24603), True, 'import streamlit as st\n'), ((24970, 24989), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (24987, 24989), True, 'import datetime as dt\n'), ((25006, 25045), 'yfinance.download', 'yf.download', (['ticker', 'startBoll', 'endBoll'], {}), '(ticker, startBoll, endBoll)\n', (25017, 25045), True, 'import yfinance as yf\n'), ((25151, 25162), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (25160, 25162), True, 'import plotly.graph_objects as go\n'), ((26377, 26427), 'streamlit.plotly_chart', 'st.plotly_chart', (['figBoll'], {'use_container_width': '(True)'}), '(figBoll, use_container_width=True)\n', (26392, 26427), True, 'import streamlit as st\n'), ((26433, 26463), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Stock News"""'], {}), "('Stock News')\n", (26449, 26463), True, 'import streamlit as st\n'), ((26560, 26578), 'requests.get', 'requests.get', (['send'], {}), '(send)\n', (26572, 26578), False, 'import requests\n'), ((26589, 26630), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.content', '"""html.parser"""'], {}), "(res.content, 'html.parser')\n", (26602, 26630), False, 'from bs4 import BeautifulSoup\n'), ((28915, 28936), 'streamlit.subheader', 'st.subheader', (['fininta'], {}), '(fininta)\n', (28927, 28936), True, 'import streamlit as st\n'), ((28942, 28960), 'streamlit.subheader', 'st.subheader', (['finb'], {}), '(finb)\n', (28954, 28960), True, 'import streamlit as st\n'), ((28966, 28979), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (28975, 28979), True, 'import streamlit as st\n'), ((28985, 28998), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (28994, 28998), True, 'import streamlit as st\n'), ((29004, 29166), 'streamlit.markdown', 'st.markdown', (['"""\n <style>\n .small-font {\n font-size:10px !important;\n }\n </style>\n """'], {'unsafe_allow_html': '(True)'}), '(\n """\n <style>\n .small-font {\n font-size:10px !important;\n }\n </style>\n """\n , unsafe_allow_html=True)\n', (29015, 29166), True, 'import streamlit as st\n'), ((29170, 29342), 
'streamlit.markdown', 'st.markdown', (['"""<p class="small-font">Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.</p>"""'], {'unsafe_allow_html': '(True)'}), '(\n \'<p class="small-font">Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.</p>\'\n , unsafe_allow_html=True)\n', (29181, 29342), True, 'import streamlit as st\n'), ((2163, 2252), 'streamlit.number_input', 'st.number_input', (['"""Insert period (Year): """'], {'min_value': '(1)', 'max_value': '(10)', 'value': '(2)', 'key': 'a'}), "('Insert period (Year): ', min_value=1, max_value=10, value=\n 2, key=a)\n", (2178, 2252), True, 'import streamlit as st\n'), ((2312, 2399), 'streamlit.number_input', 'st.number_input', (['"""Window Size (Day): """'], {'min_value': '(5)', 'max_value': '(500)', 'value': '(20)', 'key': 'b'}), "('Window Size (Day): ', min_value=5, max_value=500, value=20,\n key=b)\n", (2327, 2399), True, 'import streamlit as st\n'), ((2411, 2430), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (2428, 2430), True, 'import datetime as dt\n'), ((2431, 2461), 'datetime.timedelta', 'dt.timedelta', (['(numYearMAb * 365)'], {}), '(numYearMAb * 365)\n', (2443, 2461), True, 'import datetime as dt\n'), ((3720, 3809), 'streamlit.number_input', 'st.number_input', (['"""Insert period (Year): """'], {'min_value': '(1)', 'max_value': '(10)', 'value': '(2)', 'key': 'a'}), "('Insert period (Year): ', min_value=1, max_value=10, value=\n 2, key=a)\n", (3735, 3809), True, 'import streamlit as st\n'), ((3869, 3956), 'streamlit.number_input', 'st.number_input', (['"""Window Size (Day): """'], {'min_value': '(5)', 'max_value': '(500)', 'value': '(20)', 'key': 'b'}), "('Window Size (Day): ', min_value=5, max_value=500, value=20,\n key=b)\n", (3884, 3956), True, 'import streamlit as st\n'), ((3968, 3987), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (3985, 3987), True, 'import datetime as dt\n'), ((3988, 4018), 'datetime.timedelta', 'dt.timedelta', (['(numYearMAb * 365)'], {}), '(numYearMAb * 365)\n', (4000, 4018), True, 'import datetime as dt\n'), ((9674, 9687), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (9683, 9687), True, 'import streamlit as st\n'), ((9697, 9782), 'streamlit.header', 'st.header', (['"""**Following is the combination of stocks you should invest in: ** """'], {}), "('**Following is the combination of stocks you should invest in: ** '\n )\n", (9706, 9782), True, 'import streamlit as st\n'), ((9787, 9799), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (9795, 9799), True, 'import streamlit as st\n'), ((9809, 9834), 'streamlit.write', 'st.write', (['"""Processing..."""'], {}), "('Processing...')\n", (9817, 9834), True, 'import streamlit as st\n'), ((9951, 9964), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (9961, 9964), False, 'import csv\n'), ((10445, 10640), 'streamlit.markdown', 'st.markdown', (['"""\n <style>\n .stProgress .st-bo {\n background-color: green;\n }\n </style>\n """'], {'unsafe_allow_html': '(True)'}), '(\n """\n <style>\n .stProgress .st-bo {\n background-color: green;\n }\n </style>\n """\n , unsafe_allow_html=True)\n', (10456, 10640), True, 'import streamlit as st\n'), ((10653, 10667), 'streamlit.progress', 'st.progress', (['(0)'], {}), '(0)\n', (10664, 10667), True, 'import streamlit as st\n'), ((20128, 20217), 'streamlit.number_input', 'st.number_input', (['"""Insert period (Year): """'], {'min_value': '(1)', 'max_value': '(10)', 'value': 
'(2)', 'key': '(0)'}), "('Insert period (Year): ', min_value=1, max_value=10, value=\n 2, key=0)\n", (20143, 20217), True, 'import streamlit as st\n'), ((20271, 20358), 'streamlit.number_input', 'st.number_input', (['"""Window Size (Day): """'], {'min_value': '(5)', 'max_value': '(500)', 'value': '(20)', 'key': '(1)'}), "('Window Size (Day): ', min_value=5, max_value=500, value=20,\n key=1)\n", (20286, 20358), True, 'import streamlit as st\n'), ((20390, 20409), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (20407, 20409), True, 'import datetime as dt\n'), ((20410, 20439), 'datetime.timedelta', 'dt.timedelta', (['(numYearMA * 365)'], {}), '(numYearMA * 365)\n', (20422, 20439), True, 'import datetime as dt\n'), ((22502, 22533), 'statsmodels.tsa.arima_model.ARIMA', 'ARIMA', (['history'], {'order': '(4, 1, 0)'}), '(history, order=(4, 1, 0))\n', (22507, 22533), False, 'from statsmodels.tsa.arima_model import ARIMA\n'), ((23219, 23250), 'statsmodels.tsa.arima_model.ARIMA', 'ARIMA', (['history'], {'order': '(4, 1, 0)'}), '(history, order=(4, 1, 0))\n', (23224, 23250), False, 'from statsmodels.tsa.arima_model import ARIMA\n'), ((23521, 23542), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(29)'}), '(days=29)\n', (23533, 23542), True, 'import datetime as dt\n'), ((23868, 23960), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "df_fin['Date']", 'y': 'model_predictions', 'name': '"""Predicted Prices"""', 'mode': '"""lines"""'}), "(x=df_fin['Date'], y=model_predictions, name='Predicted Prices',\n mode='lines')\n", (23878, 23960), True, 'import plotly.graph_objects as go\n'), ((24067, 24167), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "df_fin['Date']", 'y': 'test_data', 'mode': '"""lines"""', 'name': '"""Previous model prediction graph"""'}), "(x=df_fin['Date'], y=test_data, mode='lines', name=\n 'Previous model prediction graph')\n", (24077, 24167), True, 'import plotly.graph_objects as go\n'), ((24287, 24361), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'future_dates', 'y': 'mps', 'mode': '"""lines"""', 'name': '"""Future Price Trend"""'}), "(x=future_dates, y=mps, mode='lines', name='Future Price Trend')\n", (24297, 24361), True, 'import plotly.graph_objects as go\n'), ((24643, 24732), 'streamlit.number_input', 'st.number_input', (['"""Insert period (Year): """'], {'min_value': '(1)', 'max_value': '(10)', 'value': '(2)', 'key': '(6)'}), "('Insert period (Year): ', min_value=1, max_value=10, value=\n 2, key=6)\n", (24658, 24732), True, 'import streamlit as st\n'), ((24789, 24876), 'streamlit.number_input', 'st.number_input', (['"""Window Size (Day): """'], {'min_value': '(5)', 'max_value': '(500)', 'value': '(20)', 'key': '(7)'}), "('Window Size (Day): ', min_value=5, max_value=500, value=20,\n key=7)\n", (24804, 24876), True, 'import streamlit as st\n'), ((24903, 24922), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (24920, 24922), True, 'import datetime as dt\n'), ((24923, 24954), 'datetime.timedelta', 'dt.timedelta', (['(numYearBoll * 365)'], {}), '(numYearBoll * 365)\n', (24935, 24954), True, 'import datetime as dt\n'), ((25208, 25275), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "df_boll['Date']", 'y': "df_boll['bolu']", 'name': '"""Upper Band"""'}), "(x=df_boll['Date'], y=df_boll['bolu'], name='Upper Band')\n", (25218, 25275), True, 'import plotly.graph_objects as go\n'), ((25858, 25925), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "df_boll['Date']", 'y': "df_boll['bold']", 'name': 
'"""Lower Band"""'}), "(x=df_boll['Date'], y=df_boll['bold'], name='Lower Band')\n", (25868, 25925), True, 'import plotly.graph_objects as go\n'), ((27521, 27546), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['make'], {}), '(make)\n', (27540, 27546), True, 'import streamlit as st\n'), ((27556, 27576), 'streamlit.sidebar.write', 'st.sidebar.write', (['""""""'], {}), "('')\n", (27572, 27576), True, 'import streamlit as st\n'), ((27586, 27606), 'streamlit.sidebar.write', 'st.sidebar.write', (['""""""'], {}), "('')\n", (27602, 27606), True, 'import streamlit as st\n'), ((7241, 7257), 'numpy.array', 'np.array', (['[pred]'], {}), '([pred])\n', (7249, 7257), True, 'import numpy as np\n'), ((8478, 8507), 'random.choice', 'random.choice', (["i['responses']"], {}), "(i['responses'])\n", (8491, 8507), False, 'import random\n'), ((10233, 10250), 'yfinance.Ticker', 'yf.Ticker', (['ticker'], {}), '(ticker)\n', (10242, 10250), True, 'import yfinance as yf\n'), ((13805, 13818), 'streamlit.balloons', 'st.balloons', ([], {}), '()\n', (13816, 13818), True, 'import streamlit as st\n'), ((16261, 16273), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (16269, 16273), True, 'import streamlit as st\n'), ((16287, 16316), 'streamlit.subheader', 'st.subheader', (['"""**Summary:** """'], {}), "('**Summary:** ')\n", (16299, 16316), True, 'import streamlit as st\n'), ((17538, 17570), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'summary_table'}), '(data=summary_table)\n', (17550, 17570), True, 'import pandas as pd\n'), ((17584, 17608), 'streamlit.dataframe', 'st.dataframe', (['summary_df'], {}), '(summary_df)\n', (17596, 17608), True, 'import streamlit as st\n'), ((17622, 17634), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (17630, 17634), True, 'import streamlit as st\n'), ((17710, 17724), 'streamlit.write', 'st.write', (['bala'], {}), '(bala)\n', (17718, 17724), True, 'import streamlit as st\n'), ((17852, 17886), 'streamlit.header', 'st.header', (['"""**In depth review:** """'], {}), "('**In depth review:** ')\n", (17861, 17886), True, 'import streamlit as st\n'), ((17900, 17912), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (17908, 17912), True, 'import streamlit as st\n'), ((17998, 18014), 'streamlit.header', 'st.header', (['text1'], {}), '(text1)\n', (18007, 18014), True, 'import streamlit as st\n'), ((18164, 18180), 'streamlit.write', 'st.write', (['text1a'], {}), '(text1a)\n', (18172, 18180), True, 'import streamlit as st\n'), ((18277, 18293), 'streamlit.write', 'st.write', (['text1b'], {}), '(text1b)\n', (18285, 18293), True, 'import streamlit as st\n'), ((18392, 18408), 'streamlit.write', 'st.write', (['text1c'], {}), '(text1c)\n', (18400, 18408), True, 'import streamlit as st\n'), ((18422, 18434), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (18430, 18434), True, 'import streamlit as st\n'), ((18448, 18460), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (18456, 18460), True, 'import streamlit as st\n'), ((18546, 18562), 'streamlit.header', 'st.header', (['text2'], {}), '(text2)\n', (18555, 18562), True, 'import streamlit as st\n'), ((18698, 18714), 'streamlit.write', 'st.write', (['text2a'], {}), '(text2a)\n', (18706, 18714), True, 'import streamlit as st\n'), ((18808, 18824), 'streamlit.write', 'st.write', (['text2b'], {}), '(text2b)\n', (18816, 18824), True, 'import streamlit as st\n'), ((18923, 18939), 'streamlit.write', 'st.write', (['text2c'], {}), '(text2c)\n', (18931, 18939), True, 'import streamlit as st\n'), ((18953, 
18965), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (18961, 18965), True, 'import streamlit as st\n'), ((18979, 18991), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (18987, 18991), True, 'import streamlit as st\n'), ((19076, 19092), 'streamlit.header', 'st.header', (['text3'], {}), '(text3)\n', (19085, 19092), True, 'import streamlit as st\n'), ((19227, 19243), 'streamlit.write', 'st.write', (['text3a'], {}), '(text3a)\n', (19235, 19243), True, 'import streamlit as st\n'), ((19336, 19352), 'streamlit.write', 'st.write', (['text3b'], {}), '(text3b)\n', (19344, 19352), True, 'import streamlit as st\n'), ((19451, 19467), 'streamlit.write', 'st.write', (['text3c'], {}), '(text3c)\n', (19459, 19467), True, 'import streamlit as st\n'), ((19481, 19493), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (19489, 19493), True, 'import streamlit as st\n'), ((19507, 19519), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (19515, 19519), True, 'import streamlit as st\n'), ((19533, 19546), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (19542, 19546), True, 'import streamlit as st\n'), ((19560, 19573), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (19569, 19573), True, 'import streamlit as st\n'), ((19587, 19706), 'streamlit.write', 'st.write', (['"""Disclaimer: We are not liable for the results or actions taken on the basis of these predictions."""'], {}), "(\n 'Disclaimer: We are not liable for the results or actions taken on the basis of these predictions.'\n )\n", (19595, 19706), True, 'import streamlit as st\n'), ((19727, 19766), 'streamlit.write', 'st.write', (['"""Budget too low to diversify"""'], {}), "('Budget too low to diversify')\n", (19735, 19766), True, 'import streamlit as st\n'), ((23630, 23650), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (23642, 23650), True, 'import datetime as dt\n'), ((27809, 27846), 'requests.get', 'requests.get', (['all_links[x]'], {'timeout': '(3)'}), '(all_links[x], timeout=3)\n', (27821, 27846), False, 'import requests\n'), ((10903, 10921), 'requests.get', 'requests.get', (['send'], {}), '(send)\n', (10915, 10921), False, 'import requests\n'), ((11162, 11203), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.content', '"""html.parser"""'], {}), "(res.content, 'html.parser')\n", (11175, 11203), False, 'from bs4 import BeautifulSoup\n'), ((28126, 28170), 'bs4.BeautifulSoup', 'BeautifulSoup', (['option.content', '"""html.parser"""'], {}), "(option.content, 'html.parser')\n", (28139, 28170), False, 'from bs4 import BeautifulSoup\n'), ((12329, 12344), 'requests.get', 'requests.get', (['i'], {}), '(i)\n', (12341, 12344), False, 'import requests\n'), ((12371, 12415), 'bs4.BeautifulSoup', 'BeautifulSoup', (['option.content', '"""html.parser"""'], {}), "(option.content, 'html.parser')\n", (12384, 12415), False, 'from bs4 import BeautifulSoup\n')]
|
import spacy
import sys
import numpy as np
import operator
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import preprocess_data
MAX_SEQUENCE_LENGTH = 100
EMBEDDING_DIM = 300
model = load_model('models/bidirectional_lstm/model.h5')
nlp = spacy.load('en')
print('Test your sentences.')
print('> ', end='', flush=True)
intents = preprocess_data.load_intents()
for line in sys.stdin:
doc = nlp(line)
embedding_matrix = np.zeros((1, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM))
for index, word in enumerate(doc):
embedding_matrix[0][index] = word.vector
prediction = model.predict(embedding_matrix)
scores = {}
for (x, y), score in np.ndenumerate(prediction):
scores[intents[y]] = score
sorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
print(sorted_scores)
print('> ', end='', flush=True)
|
[
"keras.models.load_model",
"numpy.ndenumerate",
"numpy.zeros",
"spacy.load",
"operator.itemgetter",
"preprocess_data.load_intents"
] |
[((279, 327), 'keras.models.load_model', 'load_model', (['"""models/bidirectional_lstm/model.h5"""'], {}), "('models/bidirectional_lstm/model.h5')\n", (289, 327), False, 'from keras.models import load_model\n'), ((334, 350), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (344, 350), False, 'import spacy\n'), ((425, 455), 'preprocess_data.load_intents', 'preprocess_data.load_intents', ([], {}), '()\n', (453, 455), False, 'import preprocess_data\n'), ((523, 572), 'numpy.zeros', 'np.zeros', (['(1, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM)'], {}), '((1, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM))\n', (531, 572), True, 'import numpy as np\n'), ((751, 777), 'numpy.ndenumerate', 'np.ndenumerate', (['prediction'], {}), '(prediction)\n', (765, 777), True, 'import numpy as np\n'), ((862, 884), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (881, 884), False, 'import operator\n')]
|
import numpy as np
def train_test(
X: np.ndarray,
y: np.ndarray,
test_size: float,
random_seed: int = 0,
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Split input data randomly after shuffling
Args:
X (np.ndarray): decision matrix
y (np.ndarray): ground-truth labels
test_size (float): fraction of test split
random_seed (int): number to initialize a pseudorandom number generator
Returns:
tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: X_train, y_train,
X_test, y_test
"""
np.random.seed(random_seed)
num_samples = X.shape[0]
num_train_samples = int(num_samples * (1 - test_size))
permuted_ids = np.random.permutation(np.arange(num_samples))
train_ids = permuted_ids[:num_train_samples]
test_ids = permuted_ids[num_train_samples:]
X_test = X[test_ids]
X_train = X[train_ids]
y_test = y[test_ids]
y_train = y[train_ids]
return X_train, y_train, X_test, y_test
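# Small usage sketch with made-up data: an 80/20 split of ten samples.
# It is only a function definition, shown here to illustrate the call signature.
def _demo_train_test():
    X = np.arange(20).reshape(10, 2)
    y = np.arange(10)
    X_train, y_train, X_test, y_test = train_test(X, y, test_size=0.2, random_seed=42)
    return X_train.shape, X_test.shape  # (8, 2) and (2, 2)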
class _KFoldIterator:
def __init__(self, kfold):
self._kfold = kfold
self._counter = 0
def __next__(self):
if self._counter < self._kfold.num_folds:
item = self._kfold.__getitem__(self._counter)
self._counter += 1
return item
else:
raise StopIteration
class KFold:
"""Iterable cross-validation object
Args:
X (np.ndarray): samples decision matrix
y (np.ndarray): samples ground-truth value
num_folds (int): number of cross-validation folds
random_seed (int): value for numpy random number generator initialization
Methods:
__getitem__(key): returns X_train, y_train, X_test, y_test
"""
def __init__(self, X: np.ndarray, y: np.ndarray, num_folds: int, random_seed: int):
self.num_samples = X.shape[0]
self.num_folds = num_folds
np.random.seed(random_seed)
permuted_ids = np.random.permutation(np.arange(self.num_samples))
self.X = X[permuted_ids]
self.y = y[permuted_ids]
def __getitem__(self, key: int):
assert key < self.num_folds, "Key must be lower than number of folds"
assert key >= 0, "Key must be not negative"
test_start_id = int(key * self.num_samples / self.num_folds)
test_end_id = int((key + 1) * self.num_samples / self.num_folds)
X_test = self.X[test_start_id: test_end_id]
X_train = np.concatenate([
self.X[: test_start_id],
self.X[test_end_id:],
],
axis=0,
)
y_test = self.y[test_start_id: test_end_id]
y_train = np.concatenate([
self.y[: test_start_id],
self.y[test_end_id:],
],
axis=0,
)
return X_train, y_train, X_test, y_test
def __iter__(self):
return _KFoldIterator(self)
def cross_val(
X: np.ndarray,
y: np.ndarray,
num_folds: int,
random_seed: int = 0,
) -> KFold:
"""
Make cross-validation split randomly after shuffling
Args:
X (np.ndarray): decision matrix
y (np.ndarray): ground-truth labels
num_folds (int): number of train/test folds
random_seed (int): number to initialize a pseudorandom number generator
Returns:
KFold: object containing data with __getitem__ method for getting splits
"""
kfold = KFold(X, y, num_folds, random_seed)
return kfold
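# Usage sketch for the cross-validation objects above, again with made-up data.
# Each iteration yields one train/test split; with 12 samples and 3 folds every
# test fold holds 4 samples and every training fold holds 8.
def _demo_cross_val():
    X = np.arange(24).reshape(12, 2)
    y = np.arange(12)
    shapes = []
    for X_train, y_train, X_test, y_test in cross_val(X, y, num_folds=3, random_seed=0):
        shapes.append((X_train.shape[0], X_test.shape[0]))  # (8, 4) for every fold
    return shapes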
|
[
"numpy.random.seed",
"numpy.arange",
"numpy.concatenate"
] |
[((605, 632), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (619, 632), True, 'import numpy as np\n'), ((762, 784), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (771, 784), True, 'import numpy as np\n'), ((1939, 1966), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1953, 1966), True, 'import numpy as np\n'), ((2487, 2557), 'numpy.concatenate', 'np.concatenate', (['[self.X[:test_start_id], self.X[test_end_id:]]'], {'axis': '(0)'}), '([self.X[:test_start_id], self.X[test_end_id:]], axis=0)\n', (2501, 2557), True, 'import numpy as np\n'), ((2698, 2768), 'numpy.concatenate', 'np.concatenate', (['[self.y[:test_start_id], self.y[test_end_id:]]'], {'axis': '(0)'}), '([self.y[:test_start_id], self.y[test_end_id:]], axis=0)\n', (2712, 2768), True, 'import numpy as np\n'), ((2012, 2039), 'numpy.arange', 'np.arange', (['self.num_samples'], {}), '(self.num_samples)\n', (2021, 2039), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from tqdm import tqdm
def map_prediction_to_emergence_label(results, training_values, test_values, predictors_to_run, test_terms,
emergence_linear_thresholds=(
('rapidly emergent', 0.1),
('emergent', 0.02),
('stationary', -0.02),
('declining', None)
)):
def __map_helper(normalised_counts_to_trend, predicted_emergence, predictor_name, test_term,
emergence_linear_thresholds):
if np.isnan(sum(normalised_counts_to_trend)):
predicted_emergence[predictor_name][test_term] = 'Fail'
return
x_data = range(len(normalised_counts_to_trend))
trend = np.polyfit(x_data, normalised_counts_to_trend, 1)
emergence = emergence_linear_thresholds[-1][0]
for emergence_threshold in emergence_linear_thresholds[:-1]:
if trend[0] > emergence_threshold[1]:
emergence = emergence_threshold[0]
break
predicted_emergence[predictor_name][test_term] = emergence
predicted_emergence = {}
if test_values:
predictor_name = 'Actual'
predicted_emergence[predictor_name] = {}
for test_term in tqdm(test_terms, unit='term', desc='Labelling prediction ' + predictor_name):
counts_to_trend = test_values[test_term]
max_training_value = max(training_values[test_term])
normalised_counts_to_trend = [x / max_training_value for x in counts_to_trend]
__map_helper(normalised_counts_to_trend, predicted_emergence, predictor_name, test_term,
emergence_linear_thresholds)
for predictor_name in predictors_to_run:
predicted_emergence[predictor_name] = {}
for test_term in tqdm(test_terms, unit='term', desc='Labelling prediction ' + predictor_name):
(none, configuration, predicted_values, num_training_values) = results[predictor_name][test_term]
counts_to_trend = predicted_values.ravel().tolist()
max_training_value = max(training_values[test_term])
normalised_counts_to_trend = [x / max_training_value for x in counts_to_trend]
__map_helper(normalised_counts_to_trend, predicted_emergence, predictor_name, test_term,
emergence_linear_thresholds)
return predicted_emergence
def report_predicted_emergence_labels_html(predicted_emergence, emergence_colours={
    'rapidly emergent': 'lime',
'emergent': 'green',
'stationary': 'black',
'declining': 'red'}):
html_string = f'''
<h2>Emergence Label Prediction</h2>
'''
# df = pd.DataFrame(predicted_emergence, index=[0])
test_terms = list(predicted_emergence[list(predicted_emergence.keys())[0]].keys())
df_results = pd.DataFrame({'terms': test_terms})
predictor_display_names = []
for predictor_name in predicted_emergence:
term_results = []
for test_term in predicted_emergence[predictor_name]:
result = predicted_emergence[predictor_name][test_term]
term_results.append(result)
predictor_display_name = predictor_name.replace('-', '<br/>')
predictor_display_names.append(predictor_display_name)
df_term_column = pd.DataFrame({predictor_display_name: term_results})
df_results = df_results.join(df_term_column)
df_summary_table = df_results.style.hide_index()
df_summary_table = df_summary_table.set_table_styles([
dict(selector='table', props=[('border-collapse', 'collapse')]),
dict(selector='td', props=[('border', '2px solid black'),
('text-align', 'right'),
('padding-left', '15px'),
('padding-right', '15px')])
])
def colour_emergence(val):
colour = 'black'
if val in emergence_colours:
colour = emergence_colours[val]
return f'color: {colour}'
df_summary_table = df_summary_table.applymap(colour_emergence)
# for predictor_name in predictor_names:
# df_summary_table = df_summary_table.format({predictor_name: predictor_style})
# df_summary_table = df_summary_table.highlight_min(axis=1)
html_string += '<style type="text/css">table {border-collapse: collapse;} </style>\n'
html_string += df_summary_table.render()
return html_string
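# Minimal illustrative sketch (not part of the original module) of the labelling
# rule used in __map_helper above: the slope of a first-order polyfit over the
# normalised counts is compared against the linear thresholds. The series below
# is made up for demonstration.
if __name__ == '__main__':
    demo_counts = [0.1, 0.2, 0.35, 0.5, 0.7]
    demo_slope = np.polyfit(range(len(demo_counts)), demo_counts, 1)[0]
    demo_label = 'declining'
    for name, threshold in (('rapidly emergent', 0.1), ('emergent', 0.02), ('stationary', -0.02)):
        if demo_slope > threshold:
            demo_label = name
            break
    print(demo_slope, demo_label)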
|
[
"pandas.DataFrame",
"tqdm.tqdm",
"numpy.polyfit"
] |
[((3031, 3066), 'pandas.DataFrame', 'pd.DataFrame', (["{'terms': test_terms}"], {}), "({'terms': test_terms})\n", (3043, 3066), True, 'import pandas as pd\n'), ((919, 968), 'numpy.polyfit', 'np.polyfit', (['x_data', 'normalised_counts_to_trend', '(1)'], {}), '(x_data, normalised_counts_to_trend, 1)\n', (929, 968), True, 'import numpy as np\n'), ((1446, 1522), 'tqdm.tqdm', 'tqdm', (['test_terms'], {'unit': '"""term"""', 'desc': "('Labelling prediction ' + predictor_name)"}), "(test_terms, unit='term', desc='Labelling prediction ' + predictor_name)\n", (1450, 1522), False, 'from tqdm import tqdm\n'), ((2011, 2087), 'tqdm.tqdm', 'tqdm', (['test_terms'], {'unit': '"""term"""', 'desc': "('Labelling prediction ' + predictor_name)"}), "(test_terms, unit='term', desc='Labelling prediction ' + predictor_name)\n", (2015, 2087), False, 'from tqdm import tqdm\n'), ((3504, 3556), 'pandas.DataFrame', 'pd.DataFrame', (['{predictor_display_name: term_results}'], {}), '({predictor_display_name: term_results})\n', (3516, 3556), True, 'import pandas as pd\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Data Series Sonification
========================
Functionality for sonifying data series.
"""
import warnings
from inspect import signature, Parameter
import numpy as np
from astropy.table import Table, MaskedColumn
from astropy.time import Time
import pyo
from ..utils.pitch_mapping import data_to_pitch
from ..utils.exceptions import InputWarning
__all__ = ['PitchMap', 'SoniSeries']
class PitchMap():
def __init__(self, pitch_func=data_to_pitch, **pitch_args):
"""
Class that encapsulates the data value to pitch function
and associated arguments.
Parameters
----------
pitch_func : function
Optional. Defaults to `~astronify.utils.data_to_pitch`.
If supplying a function it should take a data array as the first
parameter, and all other parameters should be optional.
**pitch_args
Default parameters and values for the pitch function. Should include
all necessary arguments other than the data values.
"""
# Setting up the default arguments
if (not pitch_args) and (pitch_func == data_to_pitch):
pitch_args = {"pitch_range": [100, 10000],
"center_pitch": 440,
"zero_point": "median",
"stretch": "linear"}
self.pitch_map_func = pitch_func
self.pitch_map_args = pitch_args
def _check_func_args(self):
"""
Make sure the pitch mapping function and argument dictionary match.
        Note: This function does not check that the function receives all the required arguments.
"""
# Only test if both pitch func and args are set
if hasattr(self, "pitch_map_func") and hasattr(self, "pitch_map_args"):
# Only check parameters if there is no kwargs argument
param_types = [x.kind for x in signature(self.pitch_map_func).parameters.values()]
if Parameter.VAR_KEYWORD not in param_types:
for arg_name in list(self.pitch_map_args):
if arg_name not in signature(self.pitch_map_func).parameters:
wstr = "{} is not accepted by the pitch mapping function and will be ignored".format(arg_name)
warnings.warn(wstr, InputWarning)
del self.pitch_map_args[arg_name]
def __call__(self, data):
"""
        Apply the pitch mapping function to the given data values.
"""
self._check_func_args()
return self.pitch_map_func(data, **self.pitch_map_args)
@property
def pitch_map_func(self):
"""
The pitch mapping function.
"""
return self._pitch_map_func
@pitch_map_func.setter
def pitch_map_func(self, new_func):
assert callable(new_func), "Pitch mapping function must be a function."
self._pitch_map_func = new_func
self._check_func_args()
@property
def pitch_map_args(self):
"""
Dictionary of additional arguments (other than the data array)
for the pitch mapping function.
"""
return self._pitch_map_args
@pitch_map_args.setter
def pitch_map_args(self, new_args):
assert isinstance(new_args, dict), "Pitch mapping function args must be in a dictionary."
self._pitch_map_args = new_args
self._check_func_args()
class SoniSeries():
def __init__(self, data, time_col="time", val_col="flux"):
"""
Class that encapsulates a sonified data series.
Parameters
----------
data : `astropy.table.Table`
The table of data to be sonified.
time_col : str
Optional, default "time". The data column to be mapped to time.
val_col : str
Optional, default "flux". The data column to be mapped to pitch.
"""
self.time_col = time_col
self.val_col = val_col
self.data = data
# Default specs
self.note_duration = 0.5 # note duration in seconds
self.note_spacing = 0.01 # spacing between notes in seconds
self.gain = 0.05 # default gain in the generated sine wave. pyo multiplier, -1 to 1.
self.pitch_mapper = PitchMap(data_to_pitch)
self._init_pyo()
def _init_pyo(self):
self.server = pyo.Server()
self.streams = None
@property
def data(self):
""" The data table (~astropy.table.Table). """
return self._data
@data.setter
def data(self, data_table):
assert isinstance(data_table, Table), 'Data must be a Table.'
# Removing any masked values as they interfere with the sonification
if isinstance(data_table[self.val_col], MaskedColumn):
data_table = data_table[~data_table[self.val_col].mask]
if isinstance(data_table[self.time_col], MaskedColumn):
data_table = data_table[~data_table[self.time_col].mask]
# Removing any nans as they interfere with the sonification
data_table = data_table[~np.isnan(data_table[self.val_col])]
# making sure we have a float column for time
if isinstance(data_table[self.time_col], Time):
float_col = "asf_time"
data_table[float_col] = data_table[self.time_col].jd
self.time_col = float_col
self._data = data_table
@property
def time_col(self):
""" The data column mappend to time when sonifying. """
return self._time_col
@time_col.setter
def time_col(self, value):
assert isinstance(value, str), 'Time column name must be a string.'
self._time_col = value
@property
def val_col(self):
""" The data column mappend to putch when sonifying. """
return self._val_col
@val_col.setter
def val_col(self, value):
assert isinstance(value, str), 'Value column name must be a string.'
self._val_col = value
@property
def pitch_mapper(self):
""" The pitch mapping object that takes data values to pitch values (Hz). """
return self._pitch_mapper
@pitch_mapper.setter
def pitch_mapper(self, value):
self._pitch_mapper = value
@property
def gain(self):
""" Adjustable gain for output. """
return self._gain
@gain.setter
def gain(self, value):
self._gain = value
@property
def note_duration(self):
""" How long each individual note will be in seconds."""
return self._note_duration
@note_duration.setter
def note_duration(self, value):
# Add in min value check
self._note_duration = value
@property
def note_spacing(self):
""" The spacing of the notes on average (will adjust based on time) in seconds. """
return self._note_spacing
@note_spacing.setter
def note_spacing(self, value):
# Add in min value check
self._note_spacing = value
def sonify(self):
"""
        Perform the sonification; two columns will be added to the data table: asf_pitch and asf_onsets.
The asf_pitch column will contain the sonified data in Hz.
The asf_onsets column will contain the start time for each note in seconds from the first note.
Metadata will also be added to the table giving information about the duration and spacing
of the sonified pitches, as well as an adjustable gain.
"""
data = self.data
exptime = np.median(np.diff(data[self.time_col]))
data.meta["asf_exposure_time"] = exptime
data.meta["asf_note_duration"] = self.note_duration
data.meta["asf_spacing"] = self.note_spacing
data["asf_pitch"] = self.pitch_mapper(data[self.val_col])
data["asf_onsets"] = [x for x in (data[self.time_col] - data[self.time_col][0])/exptime*self.note_spacing]
def play(self):
"""
Play the data sonification.
"""
# Making sure we have a clean server
if self.server.getIsBooted():
self.server.shutdown()
self.server.boot()
self.server.start()
# Getting data ready
duration = self.data.meta["asf_note_duration"]
pitches = np.repeat(self.data["asf_pitch"], 2)
delays = np.repeat(self.data["asf_onsets"], 2)
# TODO: This doesn't seem like the best way to do this, but I don't know
# how to make it better
env = pyo.Linseg(list=[(0, 0), (0.01, 1), (duration - 0.1, 1),
(duration - 0.05, 0.5), (duration - 0.005, 0)],
mul=[self.gain for i in range(len(pitches))]).play(
delay=list(delays), dur=duration)
self.streams = pyo.Sine(list(pitches), 0, env).out(delay=list(delays),
dur=duration)
def stop(self):
"""
Stop playing the data sonification.
"""
self.streams.stop()
def write(self, filepath):
"""
Save data sonification to the given file.
Currently the only output option is a wav file.
Parameters
----------
filepath : str
The path to the output file.
"""
# Getting data ready
duration = self.data.meta["asf_note_duration"]
pitches = np.repeat(self.data["asf_pitch"], 2)
delays = np.repeat(self.data["asf_onsets"], 2)
# Making sure we have a clean server
if self.server.getIsBooted():
self.server.shutdown()
self.server.reinit(audio="offline")
self.server.boot()
self.server.recordOptions(dur=delays[-1]+duration, filename=filepath)
env = pyo.Linseg(list=[(0, 0), (0.1, 1), (duration - 0.1, 1),
(duration - 0.05, 0.5), (duration - 0.005, 0)],
mul=[self.gain for i in range(len(pitches))]).play(
delay=list(delays), dur=duration)
sine = pyo.Sine(list(pitches), 0, env).out(delay=list(delays), dur=duration) # noqa: F841
self.server.start()
# Clean up
self.server.shutdown()
self.server.reinit(audio="portaudio")
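# Illustrative usage sketch (not part of the original module, and it assumes a
# working pyo installation): sonify a small synthetic light curve and render it
# to a wav file. The table contents and output path are made up.
if __name__ == '__main__':
    demo_times = np.arange(0, 10, 0.5)
    demo_table = Table({'time': demo_times, 'flux': np.sin(demo_times)})
    demo_series = SoniSeries(demo_table)
    demo_series.sonify()
    demo_series.write('demo_sonification.wav')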
|
[
"numpy.isnan",
"pyo.Server",
"numpy.diff",
"inspect.signature",
"warnings.warn",
"numpy.repeat"
] |
[((4450, 4462), 'pyo.Server', 'pyo.Server', ([], {}), '()\n', (4460, 4462), False, 'import pyo\n'), ((8386, 8422), 'numpy.repeat', 'np.repeat', (["self.data['asf_pitch']", '(2)'], {}), "(self.data['asf_pitch'], 2)\n", (8395, 8422), True, 'import numpy as np\n'), ((8440, 8477), 'numpy.repeat', 'np.repeat', (["self.data['asf_onsets']", '(2)'], {}), "(self.data['asf_onsets'], 2)\n", (8449, 8477), True, 'import numpy as np\n'), ((9522, 9558), 'numpy.repeat', 'np.repeat', (["self.data['asf_pitch']", '(2)'], {}), "(self.data['asf_pitch'], 2)\n", (9531, 9558), True, 'import numpy as np\n'), ((9576, 9613), 'numpy.repeat', 'np.repeat', (["self.data['asf_onsets']", '(2)'], {}), "(self.data['asf_onsets'], 2)\n", (9585, 9613), True, 'import numpy as np\n'), ((7644, 7672), 'numpy.diff', 'np.diff', (['data[self.time_col]'], {}), '(data[self.time_col])\n', (7651, 7672), True, 'import numpy as np\n'), ((5171, 5205), 'numpy.isnan', 'np.isnan', (['data_table[self.val_col]'], {}), '(data_table[self.val_col])\n', (5179, 5205), True, 'import numpy as np\n'), ((2394, 2427), 'warnings.warn', 'warnings.warn', (['wstr', 'InputWarning'], {}), '(wstr, InputWarning)\n', (2407, 2427), False, 'import warnings\n'), ((2208, 2238), 'inspect.signature', 'signature', (['self.pitch_map_func'], {}), '(self.pitch_map_func)\n', (2217, 2238), False, 'from inspect import signature, Parameter\n'), ((2001, 2031), 'inspect.signature', 'signature', (['self.pitch_map_func'], {}), '(self.pitch_map_func)\n', (2010, 2031), False, 'from inspect import signature, Parameter\n')]
|
import pickle
import numpy as np
import pandas as pd
from keras.utils import np_utils
from keras.utils.vis_utils import plot_model
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.layers import LSTM, Dense, Embedding, Dropout
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from tensorflow.keras.callbacks import TensorBoard
import time
NAME = 'lstm-{}'.format(int(time.time()))
tensorboard = TensorBoard(log_dir='./logs/{}'.format(NAME))
# Load the data
# In the data file, the features are in 'evaluation' and the classes are in 'label'.
def load_data(filepath, input_shape=20):
df = pd.read_csv(filepath)
    # Labels and vocabulary
labels, vocabulary = list(df['label'].unique()), list(df['CONTENT'].unique())
    # Build character-level features
string = ''
for word in vocabulary:
string += word
vocabulary = set(string)
    # Dictionary lookup tables
word_dictionary = {word: i+1 for i, word in enumerate(vocabulary)}
with open('./data/lstm/word_dict.pk', 'wb') as f:
pickle.dump(word_dictionary, f)
inverse_word_dictionary = {i+1: word for i, word in enumerate(vocabulary)}
label_dictionary = {label: i for i, label in enumerate(labels)}
with open('./data/lstm/label_dict.pk', 'wb') as f:
pickle.dump(label_dictionary, f)
output_dictionary = {i: labels for i, labels in enumerate(labels)}
    vocab_size = len(word_dictionary.keys())  # vocabulary size
    label_size = len(label_dictionary.keys())  # number of label classes
    # Pad sequences to input_shape; shorter sequences are padded with 0
x = [[word_dictionary[word] for word in sent] for sent in df['CONTENT']]
x = pad_sequences(maxlen=input_shape, sequences=x, padding='post', value=0)
y = [[label_dictionary[sent]] for sent in df['label']]
y = [np_utils.to_categorical(label, num_classes=label_size) for label in y]
y = np.array([list(_[0]) for _ in y])
return x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary
# Build the deep learning model: Embedding + LSTM + Softmax.
def create_LSTM(n_units, input_shape, output_dim, filepath):
x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary = load_data(filepath)
model = Sequential()
model.add(Embedding(input_dim=vocab_size + 1, output_dim=output_dim,
input_length=input_shape, mask_zero=True))
model.add(LSTM(n_units, input_shape=(x.shape[0], x.shape[1])))
model.add(Dropout(0.2))
model.add(Dense(label_size, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
plot_model(model, to_file='./data/img/model_lstm.png', show_shapes=True)
model.summary()
return model
# Model training
def model_train(input_shape, filepath, model_save_path):
    # Split the dataset into training and test sets with a 9:1 ratio
# input_shape = 100
x, y, output_dictionary, vocab_size, label_size, inverse_word_dictionary = load_data(filepath, input_shape)
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size = 0.1, random_state = 42)
    # Model input parameters; adjust to your needs
n_units = 100
batch_size = 32
epochs = 5
output_dim = 20
    # Train the model
lstm_model = create_LSTM(n_units, input_shape, output_dim, filepath)
lstm_model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=1,callbacks=[tensorboard])
    # Save the model
lstm_model.save(model_save_path)
    N = test_x.shape[0]  # number of test samples
predict = []
label = []
for start, end in zip(range(0, N, 1), range(1, N+1, 1)):
sentence = [inverse_word_dictionary[i] for i in test_x[start] if i != 0]
y_predict = lstm_model.predict(test_x[start:end])
label_predict = output_dictionary[np.argmax(y_predict[0])]
label_true = output_dictionary[np.argmax(test_y[start:end])]
        print(''.join(sentence), label_true, label_predict)  # print the prediction result
predict.append(label_predict)
label.append(label_true)
    acc = accuracy_score(predict, label)  # prediction accuracy
    print('Accuracy of the model on the test set: %s.' % acc)
if __name__ == '__main__':
filepath = './data/comment_trainset_2class.csv'
input_shape = 140
model_save_path = './data/lstm/douban_lstm.model'
model_train(input_shape, filepath, model_save_path)
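# Illustrative inference sketch (not part of the original script): load the model
# and dictionaries saved by model_train() above and label a single new sentence.
# The encoding mirrors load_data(); characters missing from the vocabulary are skipped.
def predict_single(sentence, input_shape=140):
    from keras.models import load_model
    model = load_model('./data/lstm/douban_lstm.model')
    with open('./data/lstm/word_dict.pk', 'rb') as f:
        word_dictionary = pickle.load(f)
    with open('./data/lstm/label_dict.pk', 'rb') as f:
        label_dictionary = pickle.load(f)
    inverse_label_dictionary = {v: k for k, v in label_dictionary.items()}
    x = [[word_dictionary[w] for w in sentence if w in word_dictionary]]
    x = pad_sequences(maxlen=input_shape, sequences=x, padding='post', value=0)
    return inverse_label_dictionary[np.argmax(model.predict(x))]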
|
[
"pickle.dump",
"numpy.argmax",
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"sklearn.model_selection.train_test_split",
"keras.utils.vis_utils.plot_model",
"sklearn.metrics.accuracy_score",
"keras.layers.LSTM",
"keras.layers.Dropout",
"time.time",
"keras.utils.np_utils.to_categorical",
"keras.layers.Dense",
"keras.layers.Embedding",
"keras.models.Sequential"
] |
[((632, 653), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath)\n', (643, 653), True, 'import pandas as pd\n'), ((1586, 1657), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', ([], {'maxlen': 'input_shape', 'sequences': 'x', 'padding': '"""post"""', 'value': '(0)'}), "(maxlen=input_shape, sequences=x, padding='post', value=0)\n", (1599, 1657), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2137, 2149), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2147, 2149), False, 'from keras.models import Sequential\n'), ((2536, 2608), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'to_file': '"""./data/img/model_lstm.png"""', 'show_shapes': '(True)'}), "(model, to_file='./data/img/model_lstm.png', show_shapes=True)\n", (2546, 2608), False, 'from keras.utils.vis_utils import plot_model\n'), ((2915, 2969), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(x, y, test_size=0.1, random_state=42)\n', (2931, 2969), False, 'from sklearn.model_selection import train_test_split\n'), ((3869, 3899), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['predict', 'label'], {}), '(predict, label)\n', (3883, 3899), False, 'from sklearn.metrics import accuracy_score\n'), ((466, 477), 'time.time', 'time.time', ([], {}), '()\n', (475, 477), False, 'import time\n'), ((1009, 1040), 'pickle.dump', 'pickle.dump', (['word_dictionary', 'f'], {}), '(word_dictionary, f)\n', (1020, 1040), False, 'import pickle\n'), ((1251, 1283), 'pickle.dump', 'pickle.dump', (['label_dictionary', 'f'], {}), '(label_dictionary, f)\n', (1262, 1283), False, 'import pickle\n'), ((1726, 1780), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['label'], {'num_classes': 'label_size'}), '(label, num_classes=label_size)\n', (1749, 1780), False, 'from keras.utils import np_utils\n'), ((2164, 2269), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': '(vocab_size + 1)', 'output_dim': 'output_dim', 'input_length': 'input_shape', 'mask_zero': '(True)'}), '(input_dim=vocab_size + 1, output_dim=output_dim, input_length=\n input_shape, mask_zero=True)\n', (2173, 2269), False, 'from keras.layers import LSTM, Dense, Embedding, Dropout\n'), ((2304, 2355), 'keras.layers.LSTM', 'LSTM', (['n_units'], {'input_shape': '(x.shape[0], x.shape[1])'}), '(n_units, input_shape=(x.shape[0], x.shape[1]))\n', (2308, 2355), False, 'from keras.layers import LSTM, Dense, Embedding, Dropout\n'), ((2371, 2383), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2378, 2383), False, 'from keras.layers import LSTM, Dense, Embedding, Dropout\n'), ((2399, 2438), 'keras.layers.Dense', 'Dense', (['label_size'], {'activation': '"""softmax"""'}), "(label_size, activation='softmax')\n", (2404, 2438), False, 'from keras.layers import LSTM, Dense, Embedding, Dropout\n'), ((3624, 3647), 'numpy.argmax', 'np.argmax', (['y_predict[0]'], {}), '(y_predict[0])\n', (3633, 3647), True, 'import numpy as np\n'), ((3688, 3716), 'numpy.argmax', 'np.argmax', (['test_y[start:end]'], {}), '(test_y[start:end])\n', (3697, 3716), True, 'import numpy as np\n')]
|
# Python Standard Libraries
import warnings
import time
import os
import sys
from pathlib import Path
# Third party imports
# fancy prints
import numpy as np
from tqdm import tqdm
# grAdapt package
import grAdapt.utils.math
import grAdapt.utils.misc
import grAdapt.utils.sampling
from grAdapt import surrogate as sur, optimizer as opt, escape as esc
from grAdapt.space.transformer import Transformer
from grAdapt.sampling import initializer as init, equidistributed as equi
class Asynchronous:
def __init__(self, bounds, surrogate=None, optimizer=None, sampling_method=None,
escape=None, training=None, random_state=1,
n_evals='auto', eps=1e-3, f_min=-np.inf, f_min_eps=1e-2, n_random_starts='auto',
auto_checkpoint=False, show_progressbar=True, prints=True):
"""
Parameters
----------
bounds : list
list of tuples e.g. [(-5, 5), (-5, 5)]
surrogate : grAdapt Surrogate object
optimizer : grAdapt Optimizer object
sampling_method : Sampling Method to be used. static method from utils
escape : grAdapt Escape object
training : (X, y) with X shape (n, m) and y shape (n,)
random_state : integer
random_state integer sets numpy seed
"""
# Stock module settings
self.bounds = bounds
# seed
self.random_state = random_state
np.random.seed(self.random_state)
if surrogate is None:
self.surrogate = sur.GPRSlidingWindow()
else:
self.surrogate = surrogate
if optimizer is None:
self.optimizer = opt.AMSGradBisection(surrogate=self.surrogate)
else:
self.optimizer = optimizer
if surrogate is None:
raise Exception('If optimizer is passed, then surrogate must be passed, too.')
if sampling_method is None:
self.sampling_method = equi.MaximalMinDistance()
else:
self.sampling_method = sampling_method
if escape is None:
self.escape = esc.NormalDistributionDecay(surrogate=self.surrogate, sampling_method=self.sampling_method)
else:
self.escape = escape
if surrogate is None or sampling_method is None:
raise Exception('When passing an escape function, surrogate and sampling_method must be passed, too.')
# other settings
# continue optimizing
self.training = training
if training is not None:
self.X = list(training[0])
self.y = list(training[1])
if len(self.X) != len(self.y):
raise AssertionError('Training data not valid. Length of X and y must be the same.')
# self.fit(self.X, self.y)
else:
self.X = list(grAdapt.utils.sampling.sample_points_bounds(self.bounds, 11))
self.y = []
self.n_evals = n_evals
self.eps = eps
self.f_min = f_min
self.f_min_eps = f_min_eps
self.n_random_starts = n_random_starts
# keep track of checkpoint files
self.checkpoint_file = None
self.auto_checkpoint = auto_checkpoint
# results
self.res = None
self.show_progressbar = show_progressbar
self.prints = prints
# save current iteration
if training is not None:
self.iteration = len(self.X) - 1
else:
self.iteration = 0
def escape_x_criteria(self, x_train, iteration):
"""Checks whether new point is different than the latest point by the euclidean distance
Checks whether new point is inside the defined search space/bounds.
Returns True if one of the conditions above are fulfilled.
Parameters
----------
x_train : ndarray (n, d)
iteration : integer
Returns
-------
boolean
"""
# x convergence
# escape_convergence = (np.linalg.norm(x_train[iteration - 1] - x_train[iteration])) < self.eps
n_hist = 2
escape_convergence_history = any(
(np.linalg.norm(x_train[iteration - (n_hist + 1):] - x_train[iteration - 1], axis=1)) < self.eps)
# check whether point is inside bounds
escape_valid = not (grAdapt.utils.sampling.inside_bounds(self.bounds, x_train[iteration - 1]))
# escape_x = escape_convergence or escape_valid
escape_x = escape_convergence_history or escape_valid
return escape_x
@staticmethod
def escape_y_criteria(y_train, iteration, pct):
"""
Parameters
----------
y_train : array-like (n, d)
iteration : integer
pct : numeric
pct should be less than 1.
Returns
-------
boolean
"""
try:
return grAdapt.utils.misc.is_inside_relative_range(y_train[iteration - 1], y_train[iteration - 2], pct)
except:
return False
def dummy(self):
return 0
def ask(self):
if len(self.X) > len(self.y): # initial points
self.iteration += 1
# if user asks consecutively without telling
if self.iteration == len(self.y) + 2:
self.iteration -= 1
warnings.warn("Tell the optimizer/model after you ask.", RuntimeWarning)
return self.X[self.iteration - 1]
else:
# gradient parameters specific for the surrogate model
surrogate_grad_params = [np.array(self.X[:self.iteration]), np.array(self.y[:self.iteration]),
self.dummy, self.bounds]
# apply optimizer
return_x = self.optimizer.run(self.X[self.iteration - 1],
grAdapt.utils.misc.epochs(self.iteration),
surrogate_grad_params)
# escape indicator variables
escape_x_criteria_boolean = self.escape_x_criteria(np.array(self.X), self.iteration)
escape_y_criteria_boolean = self.escape_y_criteria(self.y, self.iteration, self.f_min_eps)
escape_boolean = escape_x_criteria_boolean or escape_y_criteria_boolean
# sample new point if must escape or bounds not valid
if escape_boolean:
return_x = self.escape.get_point(self.X[:self.iteration], self.y[:self.iteration],
self.iteration, self.bounds)
self.iteration += 1
# save current training data
return return_x
def tell(self, next_x, f_val):
if len(self.X) > len(self.y):
# no need to append x
self.y.append(f_val)
elif len(self.X) == len(self.y):
# append
self.X.append(next_x)
self.y.append(f_val)
else:
raise RuntimeError('More function values available than x values/parameter sets.')
# Fit data on surrogate model
        self.surrogate.fit(np.array(self.X[:self.iteration]), np.array(self.y[:self.iteration]))
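# Illustrative ask/tell usage sketch (not part of the original class): minimise a
# toy quadratic over made-up bounds. Each iteration asks for a candidate point,
# evaluates the objective, and tells the result back to the optimizer.
if __name__ == '__main__':
    demo_optimizer = Asynchronous(bounds=[(-5, 5), (-5, 5)])
    for _ in range(30):
        next_x = demo_optimizer.ask()
        f_val = float(np.sum(np.array(next_x) ** 2))
        demo_optimizer.tell(next_x, f_val)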
|
[
"numpy.random.seed",
"grAdapt.sampling.equidistributed.MaximalMinDistance",
"grAdapt.surrogate.GPRSlidingWindow",
"grAdapt.optimizer.AMSGradBisection",
"numpy.array",
"numpy.linalg.norm",
"warnings.warn",
"grAdapt.escape.NormalDistributionDecay"
] |
[((1506, 1539), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (1520, 1539), True, 'import numpy as np\n'), ((1600, 1622), 'grAdapt.surrogate.GPRSlidingWindow', 'sur.GPRSlidingWindow', ([], {}), '()\n', (1620, 1622), True, 'from grAdapt import surrogate as sur, optimizer as opt, escape as esc\n'), ((1736, 1782), 'grAdapt.optimizer.AMSGradBisection', 'opt.AMSGradBisection', ([], {'surrogate': 'self.surrogate'}), '(surrogate=self.surrogate)\n', (1756, 1782), True, 'from grAdapt import surrogate as sur, optimizer as opt, escape as esc\n'), ((2037, 2062), 'grAdapt.sampling.equidistributed.MaximalMinDistance', 'equi.MaximalMinDistance', ([], {}), '()\n', (2060, 2062), True, 'from grAdapt.sampling import initializer as init, equidistributed as equi\n'), ((2182, 2278), 'grAdapt.escape.NormalDistributionDecay', 'esc.NormalDistributionDecay', ([], {'surrogate': 'self.surrogate', 'sampling_method': 'self.sampling_method'}), '(surrogate=self.surrogate, sampling_method=self.\n sampling_method)\n', (2209, 2278), True, 'from grAdapt import surrogate as sur, optimizer as opt, escape as esc\n'), ((7183, 7216), 'numpy.array', 'np.array', (['self.X[:self.iteration]'], {}), '(self.X[:self.iteration])\n', (7191, 7216), True, 'import numpy as np\n'), ((7218, 7251), 'numpy.array', 'np.array', (['self.X[:self.iteration]'], {}), '(self.X[:self.iteration])\n', (7226, 7251), True, 'import numpy as np\n'), ((4235, 4322), 'numpy.linalg.norm', 'np.linalg.norm', (['(x_train[iteration - (n_hist + 1):] - x_train[iteration - 1])'], {'axis': '(1)'}), '(x_train[iteration - (n_hist + 1):] - x_train[iteration - 1],\n axis=1)\n', (4249, 4322), True, 'import numpy as np\n'), ((5411, 5483), 'warnings.warn', 'warnings.warn', (['"""Tell the optimizer/model after you ask."""', 'RuntimeWarning'], {}), "('Tell the optimizer/model after you ask.', RuntimeWarning)\n", (5424, 5483), False, 'import warnings\n'), ((5650, 5683), 'numpy.array', 'np.array', (['self.X[:self.iteration]'], {}), '(self.X[:self.iteration])\n', (5658, 5683), True, 'import numpy as np\n'), ((5685, 5718), 'numpy.array', 'np.array', (['self.y[:self.iteration]'], {}), '(self.y[:self.iteration])\n', (5693, 5718), True, 'import numpy as np\n'), ((6138, 6154), 'numpy.array', 'np.array', (['self.X'], {}), '(self.X)\n', (6146, 6154), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
"""modelling.py
Various utility functions for modelling
"""
__author__ = "<NAME>"
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Activation, BatchNormalization, \
Bidirectional, concatenate, Conv1D, Dense, Dropout, \
GlobalAveragePooling1D, GRU, Input, LSTM, Masking, \
SpatialDropout1D
from tensorflow.keras.losses import MeanAbsoluteError, \
SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from nicu_los.src.utils.evaluation import evaluate_classification_model, \
evaluate_regression_model
from nicu_los.src.utils.custom_keras_layers import ApplyMask, \
squeeze_excite_block, Slice
def construct_rnn(input_dimension, output_dimension, model_type='lstm',
n_cells=1, dropout=0.3, hid_dimension=64, model_name=""):
"""Construct an RNN model (either LSTM or GRU)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
n_cells (int): Number of RNN cells
dropout (float): Amount of dropout to apply after each RNN cell
hid_dimension (int): Dimension of the hidden layer (i.e. # of unit in
the RNN cell)
Returns:
model (tf.keras.Model): Constructed RNN model
"""
inputs = Input(shape=(None, input_dimension))
# Skip timestep if all values of the input tensor are 0
X = Masking()(inputs)
num_hid_units = hid_dimension
for layer in range(n_cells - 1):
num_hid_units = num_hid_units // 2
if model_type == 'lstm':
cell = LSTM(units=num_hid_units, activation='tanh',
return_sequences=True, recurrent_dropout=0.0,
dropout=dropout)
elif model_type == 'gru':
cell = GRU(units=num_hid_units, activation='tanh',
return_sequences=True, recurrent_dropout=0.0,
dropout=dropout)
else:
raise ValueError("Parameter 'model_type' should be one of " +
"'lstm' or 'gru'.")
X = Bidirectional(cell)(X)
# There always has to be at least one cell
if model_type == 'lstm':
X = LSTM(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
return_sequences=False, units=hid_dimension)(X)
elif model_type == 'gru':
X = GRU(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
return_sequences=False, units=hid_dimension)(X)
else:
raise ValueError("Parameter 'model_type' should be one of " +
"'lstm' or 'gru'.")
if dropout:
X = Dropout(dropout)(X)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_fcn(input_dimension, output_dimension, dropout=0.5,
model_name=""):
"""Construct an FCN model for multivariate time series classification
(Karim et al. 2019 - Multivariate LSTM-FCNs for time series classification)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
dropout (float): Amount of dropout to apply in the first two
convolutional blocks
model_name (str): Name of the model
Returns:
        model (tf.keras.Model): Constructed FCN model
"""
inputs = Input(shape=(None, input_dimension))
mask = Masking().compute_mask(inputs)
X = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
X = Activation('relu')(X)
X = BatchNormalization()(X)
X = SpatialDropout1D(dropout)(X)
X = ApplyMask()(X, mask)
X = squeeze_excite_block(X, mask)
X = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X)
X = Activation('relu')(X)
X = BatchNormalization()(X)
X = SpatialDropout1D(dropout)(X)
X = ApplyMask()(X, mask)
X = squeeze_excite_block(X, mask)
X = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X)
X = Activation('relu')(X)
X = BatchNormalization()(X)
X = GlobalAveragePooling1D()(X, mask)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_fcn_originial(input_dimension, output_dimension, model_name=""):
"""Construct an FCN model for multivariate time series classification
(Karim et al. 2019 - Multivariate LSTM-FCNs for time series classification)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
model_name (str): Name of the model
Returns:
        model (tf.keras.Model): Constructed FCN model
"""
inputs = Input(shape=(None, input_dimension))
X = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    X = GlobalAveragePooling1D()(X)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_lstm_fcn_original(input_dimension, output_dimension, dropout=0.8,
hid_dimension_lstm=8, model_name=""):
"""Construct an LSTM-FCN model
Architecture as described in:
Karim et al. 2019 - Multivariate LSTM-FCNs for time series classification
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
dropout (float): Amount of dropout to apply after the LSTM cell
hid_dimension (int): Dimension of the hidden layer (i.e. # of unit in
the RNN cell)
model_name (str): Name of the model
Returns:
model (tf.keras.Model): Constructed LSTM-FCN model
"""
inputs = Input(shape=(None, input_dimension))
X1 = Masking()(inputs)
X1 = LSTM(hid_dimension_lstm)(X1)
X1 = Dropout(dropout)(X1)
X2 = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
X2 = BatchNormalization()(X2)
X2 = Activation('relu')(X2)
X2 = squeeze_excite_block(X2)
X2 = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X2)
X2 = BatchNormalization()(X2)
X2 = Activation('relu')(X2)
X2 = squeeze_excite_block(X2)
X2 = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X2)
X2 = BatchNormalization()(X2)
X2 = Activation('relu')(X2)
X2 = GlobalAveragePooling1D()(X2)
X = concatenate([X1, X2])
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_lstm_fcn(input_dimension, output_dimension, dropout=0.5,
hid_dimension_lstm=16, model_name=""):
"""Construct a (modified) LSTM-FCN model
Modified architecture:
- Perform batch normalization after ReLU activation
- Use SpatialDropout1D in the convolutional blocks to reduce overfitting
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
dropout (float): Amount of dropout to apply after the LSTM cell
hid_dimension (int): Dimension of the hidden layer (i.e. # of unit in
the RNN cell)
model_name (str): Name of the model
Returns:
model (tf.keras.Model): Constructed LSTM-FCN model
"""
inputs = Input(shape=(None, input_dimension))
mask = Masking().compute_mask(inputs)
X1 = Masking()(inputs)
X1 = LSTM(hid_dimension_lstm)(X1)
X1 = Dropout(dropout)(X1)
X2 = Conv1D(128, 8, padding='same',
kernel_initializer='he_uniform')(inputs)
X2 = Activation('relu')(X2)
X2 = BatchNormalization()(X2)
X2 = SpatialDropout1D(0.5)(X2)
X2 = ApplyMask()(X2, mask)
X2 = squeeze_excite_block(X2, mask)
X2 = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(X2)
X2 = Activation('relu')(X2)
X2 = BatchNormalization()(X2)
X2 = SpatialDropout1D(0.5)(X2)
X2 = ApplyMask()(X2, mask)
X2 = squeeze_excite_block(X2, mask)
X2 = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(X2)
X2 = Activation('relu')(X2)
X2 = BatchNormalization()(X2)
X2 = GlobalAveragePooling1D()(X2, mask)
X = concatenate([X1, X2])
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_channel_wise_rnn(input_dimension, output_dimension,
model_type='lstm_cw', dropout=0.0, global_dropout=0.0, hid_dimension=16,
multiplier=4, model_name=""):
"""Construct an RNN model (either LSTM or GRU)
Args:
input_dimension (int): Input dimension of the model
output_dimension (int): Output dimension of the model
dropout (float): Amount of dropout to apply after each RNN cell
global_dropout (float): Amount of dropout to apply before the output
hid_dimension (int): Dimension of the hidden layer (i.e. # of unit in
the RNN cell)
multiplier (int): Multiplier for the hidden dimension of the global LSTM
Returns:
model (tf.keras.Model): Constructed channel-wise RNN model
"""
inputs = Input(shape=(None, input_dimension))
# Skip timestep if all values of the input tensor are 0
mask = Masking().compute_mask(inputs)
X = Masking()(inputs)
# Train LSTMs over the channels, and append them
cXs = []
for feature in range(int(input_dimension/2)):
mask_var = int(feature+input_dimension/2)
channel_slice = Slice(feature, mask_var)(X)
num_hid_units = hid_dimension // 2
cell = LSTM(units=num_hid_units, activation='tanh',
return_sequences=True, recurrent_dropout=dropout,
dropout=dropout)
cX = Bidirectional(cell)(channel_slice)
cX = ApplyMask()(cX, mask)
cXs.append(cX)
# Concatenate the channels
X = concatenate(cXs, axis=2)
X = Masking()(X)
# There always has to be at least one cell
if model_type == 'lstm_cw':
X = LSTM(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
return_sequences=False, units=multiplier*hid_dimension)(X)
elif model_type == 'gru_cw':
X = GRU(activation='tanh', dropout=dropout, recurrent_dropout=0.0,
return_sequences=False, units=multiplier*hid_dimension)(X)
else:
raise ValueError("Parameter 'model_type' should be one of " +
"'lstm_cw' or 'gru_cw'.")
if global_dropout:
X = Dropout(global_dropout)(X)
if output_dimension != 1:
# Classification
outputs = Dense(units=output_dimension, activation='softmax')(X)
else:
# Regression
outputs = Dense(units=output_dimension)(X)
model = Model(inputs=inputs, outputs=outputs, name=model_name)
return model
def construct_and_compile_model(model_type, model_name, task, checkpoint_file,
checkpoints_dir, model_params={}):
"""Construct and compile a model of a specific type
Args:
model_type (str): The type of model to be constructed
model_name (str): The name of model to be constructed
task (str): Either 'regression' or 'classification'
checkpoint_file (str): Name of a checkpoint file
checkpoints_dir (str): Path to the checkpoints directory
model_params (dict): Possible hyper-parameters for the model to be
constructed
Returns:
model (tf.keras.Model): Constructed and compiled model
"""
n_cells = model_params['n_cells']
input_dimension = model_params['input_dimension']
output_dimension = model_params['output_dimension']
dropout = model_params['dropout']
global_dropout = model_params['global_dropout']
hid_dimension = model_params['hidden_dimension']
multiplier = model_params['multiplier']
if task == 'classification':
loss_fn = SparseCategoricalCrossentropy()
metrics = ['accuracy']
elif task == 'regression':
loss_fn = MeanAbsoluteError()
metrics = ['mse']
output_dimension = 1
else:
raise ValueError('Argument "task" must be one of "classification" ' \
'or "regression"')
if model_type == 'lstm' or model_type == 'gru':
model = construct_rnn(input_dimension, output_dimension, model_type,
n_cells, dropout, hid_dimension, model_name)
elif model_type == 'lstm_cw' or model_type == 'gru_cw':
model = construct_channel_wise_rnn(input_dimension, output_dimension,
model_type, dropout, global_dropout, hid_dimension, multiplier,
model_name)
elif model_type == 'fcn':
model = construct_fcn(input_dimension, output_dimension, dropout,
model_name)
elif model_type == 'lstm_fcn':
model = construct_lstm_fcn(input_dimension, output_dimension, dropout,
hid_dimension, model_name)
else:
raise ValueError(f'Model type {model_type} is not supported.')
if checkpoint_file:
print(f"=> Loading weights from checkpoint: {checkpoint_file}")
model.load_weights(os.path.join(checkpoints_dir, checkpoint_file))
model.compile(optimizer=Adam(), loss=loss_fn, metrics=metrics)
model.summary()
return model
class MetricsCallback(Callback):
def __init__(self, model, task, training_data, validation_data,
training_steps, validation_steps):
"""Callback to compute metrics after an epoch has ended
Args:
model (tf.keras.model): TensorFlow (Keras) model
task (str): Classification or regression
training_data (tf.data.Dataset)
validation_data (tf.data.Dataset)
training_steps (int)
validation_steps (int)
"""
self.model = model
self.task = task
self.training_data = training_data
self.validation_data = validation_data
self.training_steps = training_steps
self.validation_steps = validation_steps
def on_epoch_end(self, epoch, logs=None):
"""The callback
Args:
epoch (int): Identifier of the current epoch
"""
print('\n=> Predict on training data:\n')
y_true, y_pred = [], []
for batch, (x, y) in enumerate(self.training_data):
if batch > self.training_steps:
break
if self.task == 'classification':
y_pred.append(np.argmax(self.model.predict_on_batch(x), axis=1))
else:
y_pred.append(self.model.predict_on_batch(x))
y_true.append(y.numpy())
if self.task == 'classification':
evaluate_classification_model(np.concatenate(y_true, axis=0),
np.concatenate(y_pred, axis=0))
else:
evaluate_regression_model(np.concatenate(y_true, axis=0),
np.concatenate(y_pred, axis=0))
print('\n=> Predict on validation data:\n')
y_true, y_pred = [], []
for batch, (x, y) in enumerate(self.validation_data):
if batch > self.validation_steps:
break
if self.task == 'classification':
y_pred.append(np.argmax(self.model.predict_on_batch(x), axis=1))
else:
y_pred.append(self.model.predict_on_batch(x))
y_true.append(y.numpy())
if self.task == 'classification':
evaluate_classification_model(np.concatenate(y_true, axis=0),
np.concatenate(y_pred, axis=0))
else:
evaluate_regression_model(np.concatenate(y_true, axis=0),
np.concatenate(y_pred, axis=0))
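# Illustrative usage sketch (not part of the original module): the hyper-parameter
# values below are placeholders chosen only to show the expected call shape of
# construct_and_compile_model for a plain LSTM classifier.
if __name__ == '__main__':
    demo_params = {'n_cells': 1, 'input_dimension': 28, 'output_dimension': 10,
                   'dropout': 0.3, 'global_dropout': 0.0, 'hidden_dimension': 64,
                   'multiplier': 4}
    demo_model = construct_and_compile_model(model_type='lstm', model_name='demo_lstm',
                                              task='classification', checkpoint_file=None,
                                              checkpoints_dir='', model_params=demo_params)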
|
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.layers.Masking",
"os.path.join",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.BatchNormalization",
"nicu_los.src.utils.custom_keras_layers.ApplyMask",
"tensorflow.keras.losses.MeanAbsoluteError",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.GRU",
"tensorflow.keras.models.Model",
"nicu_los.src.utils.custom_keras_layers.Slice",
"tensorflow.keras.layers.SpatialDropout1D",
"numpy.concatenate",
"nicu_los.src.utils.custom_keras_layers.squeeze_excite_block",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Bidirectional"
] |
[((1471, 1507), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (1476, 1507), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3053, 3107), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (3058, 3107), False, 'from tensorflow.keras.models import Model\n'), ((3759, 3795), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (3764, 3795), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4067, 4096), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X', 'mask'], {}), '(X, mask)\n', (4087, 4096), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((4309, 4338), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X', 'mask'], {}), '(X, mask)\n', (4329, 4338), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((4745, 4799), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (4750, 4799), False, 'from tensorflow.keras.models import Model\n'), ((5324, 5360), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (5329, 5360), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((6062, 6116), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (6067, 6116), False, 'from tensorflow.keras.models import Model\n'), ((6885, 6921), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (6890, 6921), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7187, 7211), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X2'], {}), '(X2)\n', (7207, 7211), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((7365, 7389), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X2'], {}), '(X2)\n', (7385, 7389), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((7582, 7603), 'tensorflow.keras.layers.concatenate', 'concatenate', (['[X1, X2]'], {}), '([X1, X2])\n', (7593, 7603), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7829, 7883), 'tensorflow.keras.models.Model', 'Model', ([], 
{'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (7834, 7883), False, 'from tensorflow.keras.models import Model\n'), ((8703, 8739), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (8708, 8739), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9114, 9144), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X2', 'mask'], {}), '(X2, mask)\n', (9134, 9144), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((9364, 9394), 'nicu_los.src.utils.custom_keras_layers.squeeze_excite_block', 'squeeze_excite_block', (['X2', 'mask'], {}), '(X2, mask)\n', (9384, 9394), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((9593, 9614), 'tensorflow.keras.layers.concatenate', 'concatenate', (['[X1, X2]'], {}), '([X1, X2])\n', (9604, 9614), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9840, 9894), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (9845, 9894), False, 'from tensorflow.keras.models import Model\n'), ((10739, 10775), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, input_dimension)'}), '(shape=(None, input_dimension))\n', (10744, 10775), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11475, 11499), 'tensorflow.keras.layers.concatenate', 'concatenate', (['cXs'], {'axis': '(2)'}), '(cXs, axis=2)\n', (11486, 11499), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((12345, 12399), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': 'model_name'}), '(inputs=inputs, outputs=outputs, name=model_name)\n', (12350, 12399), False, 'from tensorflow.keras.models import Model\n'), ((1578, 1587), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (1585, 1587), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3847, 3910), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(8)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 8, padding='same', kernel_initializer='he_uniform')\n", (3853, 3910), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3939, 3957), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3949, 3957), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, 
GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3969, 3989), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3987, 3989), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4001, 4026), 'tensorflow.keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['dropout'], {}), '(dropout)\n', (4017, 4026), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4038, 4049), 'nicu_los.src.utils.custom_keras_layers.ApplyMask', 'ApplyMask', ([], {}), '()\n', (4047, 4049), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((4106, 4169), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(256)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(256, 5, padding='same', kernel_initializer='he_uniform')\n", (4112, 4169), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4181, 4199), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4191, 4199), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4211, 4231), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4229, 4231), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4243, 4268), 'tensorflow.keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['dropout'], {}), '(dropout)\n', (4259, 4268), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4280, 4291), 'nicu_los.src.utils.custom_keras_layers.ApplyMask', 'ApplyMask', ([], {}), '()\n', (4289, 4291), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((4348, 4411), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 3, padding='same', kernel_initializer='he_uniform')\n", (4354, 4411), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4423, 4441), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4433, 4441), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4453, 4473), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4471, 4473), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, 
Input, LSTM, Masking, SpatialDropout1D\n'), ((4486, 4510), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (4508, 4510), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5370, 5433), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(8)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 8, padding='same', kernel_initializer='he_uniform')\n", (5376, 5433), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5461, 5481), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5479, 5481), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5494, 5512), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5504, 5512), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5526, 5589), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(256)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(256, 5, padding='same', kernel_initializer='he_uniform')\n", (5532, 5589), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5602, 5622), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5620, 5622), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5635, 5653), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5645, 5653), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5667, 5730), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 3, padding='same', kernel_initializer='he_uniform')\n", (5673, 5730), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5743, 5763), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5761, 5763), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5776, 5794), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5786, 5794), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), 
((5808, 5832), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (5830, 5832), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((6932, 6941), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (6939, 6941), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((6959, 6983), 'tensorflow.keras.layers.LSTM', 'LSTM', (['hid_dimension_lstm'], {}), '(hid_dimension_lstm)\n', (6963, 6983), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((6997, 7013), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (7004, 7013), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7028, 7091), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(8)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 8, padding='same', kernel_initializer='he_uniform')\n", (7034, 7091), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7121, 7141), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7139, 7141), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7155, 7173), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7165, 7173), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7222, 7285), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(256)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(256, 5, padding='same', kernel_initializer='he_uniform')\n", (7228, 7285), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7299, 7319), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7317, 7319), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7333, 7351), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7343, 7351), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7400, 7463), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 3, padding='same', 
kernel_initializer='he_uniform')\n", (7406, 7463), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7477, 7497), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7495, 7497), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7511, 7529), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7521, 7529), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7544, 7568), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (7566, 7568), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8793, 8802), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (8800, 8802), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8820, 8844), 'tensorflow.keras.layers.LSTM', 'LSTM', (['hid_dimension_lstm'], {}), '(hid_dimension_lstm)\n', (8824, 8844), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8858, 8874), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (8865, 8874), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8889, 8952), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(8)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 8, padding='same', kernel_initializer='he_uniform')\n", (8895, 8952), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8982, 9000), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8992, 9000), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9014, 9034), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9032, 9034), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9048, 9069), 'tensorflow.keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['(0.5)'], {}), '(0.5)\n', (9064, 9069), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9083, 9094), 
'nicu_los.src.utils.custom_keras_layers.ApplyMask', 'ApplyMask', ([], {}), '()\n', (9092, 9094), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((9155, 9218), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(256)', '(5)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(256, 5, padding='same', kernel_initializer='he_uniform')\n", (9161, 9218), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9232, 9250), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9242, 9250), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9264, 9284), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9282, 9284), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9298, 9319), 'tensorflow.keras.layers.SpatialDropout1D', 'SpatialDropout1D', (['(0.5)'], {}), '(0.5)\n', (9314, 9319), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9333, 9344), 'nicu_los.src.utils.custom_keras_layers.ApplyMask', 'ApplyMask', ([], {}), '()\n', (9342, 9344), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((9405, 9468), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_uniform"""'}), "(128, 3, padding='same', kernel_initializer='he_uniform')\n", (9411, 9468), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9482, 9500), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9492, 9500), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9514, 9534), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9532, 9534), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9549, 9573), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (9571, 9573), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((10888, 10897), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (10895, 10897), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11185, 11300), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': 
'num_hid_units', 'activation': '"""tanh"""', 'return_sequences': '(True)', 'recurrent_dropout': 'dropout', 'dropout': 'dropout'}), "(units=num_hid_units, activation='tanh', return_sequences=True,\n recurrent_dropout=dropout, dropout=dropout)\n", (11189, 11300), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11508, 11517), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (11515, 11517), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((13503, 13534), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'SparseCategoricalCrossentropy', ([], {}), '()\n', (13532, 13534), False, 'from tensorflow.keras.losses import MeanAbsoluteError, SparseCategoricalCrossentropy\n'), ((1765, 1876), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': 'num_hid_units', 'activation': '"""tanh"""', 'return_sequences': '(True)', 'recurrent_dropout': '(0.0)', 'dropout': 'dropout'}), "(units=num_hid_units, activation='tanh', return_sequences=True,\n recurrent_dropout=0.0, dropout=dropout)\n", (1769, 1876), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((2254, 2273), 'tensorflow.keras.layers.Bidirectional', 'Bidirectional', (['cell'], {}), '(cell)\n', (2267, 2273), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((2366, 2478), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'activation': '"""tanh"""', 'dropout': 'dropout', 'recurrent_dropout': '(0.0)', 'return_sequences': '(False)', 'units': 'hid_dimension'}), "(activation='tanh', dropout=dropout, recurrent_dropout=0.0,\n return_sequences=False, units=hid_dimension)\n", (2370, 2478), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((2808, 2824), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (2815, 2824), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((2902, 2953), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (2907, 2953), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3007, 3036), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (3012, 3036), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((3807, 3816), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (3814, 3816), False, 'from tensorflow.keras.layers import 
Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4594, 4645), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (4599, 4645), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((4699, 4728), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (4704, 4728), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((5911, 5962), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (5916, 5962), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((6016, 6045), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (6021, 6045), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7678, 7729), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (7683, 7729), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((7783, 7812), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (7788, 7812), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((8752, 8761), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (8759, 8761), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9689, 9740), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (9694, 9740), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((9794, 9823), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (9799, 9823), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((10849, 10858), 'tensorflow.keras.layers.Masking', 'Masking', ([], {}), '()\n', (10856, 10858), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, 
Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11097, 11121), 'nicu_los.src.utils.custom_keras_layers.Slice', 'Slice', (['feature', 'mask_var'], {}), '(feature, mask_var)\n', (11102, 11121), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((11342, 11361), 'tensorflow.keras.layers.Bidirectional', 'Bidirectional', (['cell'], {}), '(cell)\n', (11355, 11361), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11390, 11401), 'nicu_los.src.utils.custom_keras_layers.ApplyMask', 'ApplyMask', ([], {}), '()\n', (11399, 11401), False, 'from nicu_los.src.utils.custom_keras_layers import ApplyMask, squeeze_excite_block, Slice\n'), ((11613, 11738), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'activation': '"""tanh"""', 'dropout': 'dropout', 'recurrent_dropout': '(0.0)', 'return_sequences': '(False)', 'units': '(multiplier * hid_dimension)'}), "(activation='tanh', dropout=dropout, recurrent_dropout=0.0,\n return_sequences=False, units=multiplier * hid_dimension)\n", (11617, 11738), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((12093, 12116), 'tensorflow.keras.layers.Dropout', 'Dropout', (['global_dropout'], {}), '(global_dropout)\n', (12100, 12116), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((12194, 12245), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension', 'activation': '"""softmax"""'}), "(units=output_dimension, activation='softmax')\n", (12199, 12245), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((12299, 12328), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'output_dimension'}), '(units=output_dimension)\n', (12304, 12328), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((13615, 13634), 'tensorflow.keras.losses.MeanAbsoluteError', 'MeanAbsoluteError', ([], {}), '()\n', (13632, 13634), False, 'from tensorflow.keras.losses import MeanAbsoluteError, SparseCategoricalCrossentropy\n'), ((14744, 14790), 'os.path.join', 'os.path.join', (['checkpoints_dir', 'checkpoint_file'], {}), '(checkpoints_dir, checkpoint_file)\n', (14756, 14790), False, 'import os\n'), ((14821, 14827), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (14825, 14827), False, 'from tensorflow.keras.optimizers import Adam\n'), ((1966, 2076), 'tensorflow.keras.layers.GRU', 'GRU', ([], {'units': 'num_hid_units', 'activation': '"""tanh"""', 'return_sequences': '(True)', 'recurrent_dropout': '(0.0)', 'dropout': 'dropout'}), "(units=num_hid_units, activation='tanh', return_sequences=True,\n recurrent_dropout=0.0, dropout=dropout)\n", (1969, 2076), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, 
GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((2536, 2647), 'tensorflow.keras.layers.GRU', 'GRU', ([], {'activation': '"""tanh"""', 'dropout': 'dropout', 'recurrent_dropout': '(0.0)', 'return_sequences': '(False)', 'units': 'hid_dimension'}), "(activation='tanh', dropout=dropout, recurrent_dropout=0.0,\n return_sequences=False, units=hid_dimension)\n", (2539, 2647), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((11797, 11921), 'tensorflow.keras.layers.GRU', 'GRU', ([], {'activation': '"""tanh"""', 'dropout': 'dropout', 'recurrent_dropout': '(0.0)', 'return_sequences': '(False)', 'units': '(multiplier * hid_dimension)'}), "(activation='tanh', dropout=dropout, recurrent_dropout=0.0,\n return_sequences=False, units=multiplier * hid_dimension)\n", (11800, 11921), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Bidirectional, concatenate, Conv1D, Dense, Dropout, GlobalAveragePooling1D, GRU, Input, LSTM, Masking, SpatialDropout1D\n'), ((16352, 16382), 'numpy.concatenate', 'np.concatenate', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (16366, 16382), True, 'import numpy as np\n'), ((16404, 16434), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (16418, 16434), True, 'import numpy as np\n'), ((16488, 16518), 'numpy.concatenate', 'np.concatenate', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (16502, 16518), True, 'import numpy as np\n'), ((16540, 16570), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (16554, 16570), True, 'import numpy as np\n'), ((17118, 17148), 'numpy.concatenate', 'np.concatenate', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (17132, 17148), True, 'import numpy as np\n'), ((17170, 17200), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (17184, 17200), True, 'import numpy as np\n'), ((17254, 17284), 'numpy.concatenate', 'np.concatenate', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (17268, 17284), True, 'import numpy as np\n'), ((17306, 17336), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (17320, 17336), True, 'import numpy as np\n')]
|
from vmad.lib import linalg, mpi
from vmad.testing import BaseScalarTest
from mpi4py import MPI
import numpy
from pprint import pprint
class Test_allreduce(BaseScalarTest):
to_scalar = staticmethod(linalg.to_scalar)
comm = MPI.COMM_WORLD
x = comm.rank + 1.0
y = comm.allreduce(x) ** 2
x_ = numpy.eye(1)
# self.x is distributed, thus allreduce along the rank axis.
def inner(self, a, b):
return self.comm.allreduce(numpy.sum(a * b))
def model(self, x):
return mpi.allreduce(x, self.comm)
class Test_allbcast(BaseScalarTest):
to_scalar = staticmethod(lambda x: x)
comm = MPI.COMM_WORLD
x = 2.0
y = comm.allreduce(x * (comm.rank + 1))
x_ = numpy.eye(1)
# self.x is universal, thus no special allreduce here.
def inner(self, a, b):
return numpy.sum(a*b)
def model(self, x):
x = mpi.allbcast(x, self.comm)
x = x * (self.comm.rank + 1)
return mpi.allreduce(x, comm=self.comm)
|
[
"vmad.lib.mpi.allreduce",
"numpy.eye",
"vmad.lib.mpi.allbcast",
"numpy.sum"
] |
[((315, 327), 'numpy.eye', 'numpy.eye', (['(1)'], {}), '(1)\n', (324, 327), False, 'import numpy\n'), ((715, 727), 'numpy.eye', 'numpy.eye', (['(1)'], {}), '(1)\n', (724, 727), False, 'import numpy\n'), ((514, 541), 'vmad.lib.mpi.allreduce', 'mpi.allreduce', (['x', 'self.comm'], {}), '(x, self.comm)\n', (527, 541), False, 'from vmad.lib import linalg, mpi\n'), ((830, 846), 'numpy.sum', 'numpy.sum', (['(a * b)'], {}), '(a * b)\n', (839, 846), False, 'import numpy\n'), ((882, 908), 'vmad.lib.mpi.allbcast', 'mpi.allbcast', (['x', 'self.comm'], {}), '(x, self.comm)\n', (894, 908), False, 'from vmad.lib import linalg, mpi\n'), ((961, 993), 'vmad.lib.mpi.allreduce', 'mpi.allreduce', (['x'], {'comm': 'self.comm'}), '(x, comm=self.comm)\n', (974, 993), False, 'from vmad.lib import linalg, mpi\n'), ((456, 472), 'numpy.sum', 'numpy.sum', (['(a * b)'], {}), '(a * b)\n', (465, 472), False, 'import numpy\n')]
|
import torch.nn as nn
import numpy as np
import torch
class DQN(nn.Module):
'''
pytorch CNN model for Atari games
'''
def __init__(self,img_shape,num_actions):
super(DQN,self).__init__()
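        # three Conv2d + BatchNorm feature-extraction blocks; the 4 input channels are typically a stack of 4 grayscale frames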
self._conv=nn.Sequential(
nn.Conv2d(4,16,kernel_size=5,stride=2),
nn.BatchNorm2d(16),
nn.Conv2d(16,32,kernel_size=5,stride=2),
nn.BatchNorm2d(32),
nn.Conv2d(32,64,kernel_size=5,stride=2),
nn.BatchNorm2d(64)
)
convw=img_shape[0]
convh=img_shape[1]
for i in range(3):
convw=self._getConvSize(convw,5,2)
convh=self._getConvSize(convh,5,2)
linear_input_size=convh*convw*64
self._linear=nn.Sequential(
nn.Linear(linear_input_size,512),
nn.ReLU(),
nn.Linear(512,num_actions)
)
self.num_actions=num_actions
def _getConvSize(self,size,size_kernal,stride):
'''
get the tensor size after Conv operation
:param size:
:param size_kernal:
:param stride:
:return:
'''
return (size-(size_kernal-1)-1)//stride+1
def forward(self,img_in):
'''
        :param img_in: input image, shape N*C*W*H
:return:Q-values of actions N*num_actions
'''
x=self._conv(img_in)
x=x.view(x.size(0),-1)
return self._linear(x)
def _selectAction(self,img_in,eps_threshold):
'''
select action according to Q values,
:param img_in:input images
:return:action selected
'''
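        # epsilon-greedy: act greedily on the predicted Q-values with probability 1 - eps_threshold, otherwise act randomly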
sample=np.random.random()
if(sample>eps_threshold):
with torch.no_grad():
q_value = self.forward(img_in)
return q_value.max(1)[1].item()
else:
return np.random.randint(0,self.num_actions)
def main():
'''
    unit test
:return:
'''
import torch
import numpy as np
dqn=DQN((100,100,3),4)
dqn.eval()
img=torch.Tensor(np.zeros((1,4,100,100)))
q=dqn.forward(img)
print(q)
print(q.max(1))
print(dqn._selectAction(img,0.01))
print("finish test")
if __name__=="__main__":
main()
|
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"numpy.zeros",
"torch.nn.BatchNorm2d",
"numpy.random.random",
"numpy.random.randint",
"torch.nn.Linear",
"torch.no_grad"
] |
[((1618, 1636), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1634, 1636), True, 'import numpy as np\n'), ((2027, 2053), 'numpy.zeros', 'np.zeros', (['(1, 4, 100, 100)'], {}), '((1, 4, 100, 100))\n', (2035, 2053), True, 'import numpy as np\n'), ((257, 298), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(16)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(4, 16, kernel_size=5, stride=2)\n', (266, 298), True, 'import torch.nn as nn\n'), ((309, 327), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (323, 327), True, 'import torch.nn as nn\n'), ((341, 383), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(16, 32, kernel_size=5, stride=2)\n', (350, 383), True, 'import torch.nn as nn\n'), ((394, 412), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (408, 412), True, 'import torch.nn as nn\n'), ((426, 468), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(32, 64, kernel_size=5, stride=2)\n', (435, 468), True, 'import torch.nn as nn\n'), ((479, 497), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (493, 497), True, 'import torch.nn as nn\n'), ((772, 805), 'torch.nn.Linear', 'nn.Linear', (['linear_input_size', '(512)'], {}), '(linear_input_size, 512)\n', (781, 805), True, 'import torch.nn as nn\n'), ((818, 827), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (825, 827), True, 'import torch.nn as nn\n'), ((841, 868), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'num_actions'], {}), '(512, num_actions)\n', (850, 868), True, 'import torch.nn as nn\n'), ((1830, 1868), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.num_actions'], {}), '(0, self.num_actions)\n', (1847, 1868), True, 'import numpy as np\n'), ((1689, 1704), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1702, 1704), False, 'import torch\n')]
|
# coding:utf-8
from load_data import load_data, timer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
import numpy as np
import pandas as pd
@timer
def use_logistic_regression(X_train, y_train, X_test, y_test):
model = LogisticRegression()
print("Start to train a logistic regression model.")
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print("Score of logistic regression:", score)
@timer
def use_naive_bayes(X_train, y_train, X_test, y_test):
model = GaussianNB()
print("Start to train a naive bayes model.")
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print("Score of naive bayes:", score)
@timer
def use_SVM(X_train, y_train, X_test, y_test, kernel="linear"):
try:
model = SVC(kernel=kernel, C=10.0, gamma=0.001)
print("Start to train a SVM model(kernel: {0}).".format(kernel))
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print("Score of SVM(kernel: {0}):".format(kernel), score)
    except Exception as exc:
        print("Error!", exc)
def optimize_SVM(X_train, y_train, X_test, y_test):
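    # exhaustive grid search over C, gamma and the kernel; only the first 100 samples are used to keep the search fast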
C_range = np.logspace(-4, 3, 8)
gamma_range = np.logspace(-4, 3, 8)
kernel_range = ["linear", "rbf"]
param_grid = dict(gamma=gamma_range, C=C_range, kernel=kernel_range)
grid = GridSearchCV(SVC(),
param_grid=param_grid, n_jobs=-1,)
grid.fit(X_train[:100], y_train[:100])
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
if __name__ == '__main__':
X_train, y_train, X_test, y_test = load_data()
#use_logistic_regression(X_train, y_train, X_test, y_test)
#use_naive_bayes(X_train, y_train, X_test, y_test)
SVM_kernels = ["linear", "rbf", "sigmoid"]
for kernel in SVM_kernels:
use_SVM(X_train, y_train, X_test, y_test, kernel)
#optimize_SVM(X_train, y_train, X_test, y_test)
'''Sample Output:
Start to load training data from file.
Runtime:54.356s
Start to load testing data from file.
Runtime:13.156s
Start to load training data from feature file.
Runtime:0.276s
Start to load testinging data from feature file.
Runtime:0.068s
Start to train a logistic regression model.
Score of logistic regression: 0.75
Runtime:0.026s
Start to train a naive bayes model.
Score of naive bayes: 0.720543806647
Runtime:0.016s
Start to train a SVM model(kernel: linear).
Score of SVM(kernel: linear): 0.730362537764
Runtime:6.807s
Start to train a SVM model(kernel: rbf).
Score of SVM(kernel: rbf): 0.690332326284
Runtime:2.324s
Start to train a SVM model(kernel: sigmoid).
Score of SVM(kernel: sigmoid): 0.615558912387
Runtime:1.207s
'''
# The best parameters are {'C': 1, 'gamma': 0.125, 'kernel': 'linear'} with a score of 0.78
|
[
"sklearn.naive_bayes.GaussianNB",
"numpy.logspace",
"load_data.load_data",
"sklearn.linear_model.LogisticRegression",
"sklearn.svm.SVC"
] |
[((409, 429), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (427, 429), False, 'from sklearn.linear_model import LogisticRegression\n'), ((685, 697), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (695, 697), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((1322, 1343), 'numpy.logspace', 'np.logspace', (['(-4)', '(3)', '(8)'], {}), '(-4, 3, 8)\n', (1333, 1343), True, 'import numpy as np\n'), ((1362, 1383), 'numpy.logspace', 'np.logspace', (['(-4)', '(3)', '(8)'], {}), '(-4, 3, 8)\n', (1373, 1383), True, 'import numpy as np\n'), ((1807, 1818), 'load_data.load_data', 'load_data', ([], {}), '()\n', (1816, 1818), False, 'from load_data import load_data, timer\n'), ((959, 998), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': 'kernel', 'C': '(10.0)', 'gamma': '(0.001)'}), '(kernel=kernel, C=10.0, gamma=0.001)\n', (962, 998), False, 'from sklearn.svm import SVC\n'), ((1518, 1523), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (1521, 1523), False, 'from sklearn.svm import SVC\n')]
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence tagging module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from research.cvt_text.corpus_processing import minibatching
from research.cvt_text.model import model_helpers
from research.cvt_text.model import task_module
class TaggingModule(task_module.SemiSupervisedModule):
def __init__(self, config, task_name, n_classes, inputs,
encoder):
super(TaggingModule, self).__init__()
self.task_name = task_name
self.n_classes = n_classes
self.labels = labels = tf.placeholder(tf.float32, [None, None, None],
name=task_name + '_labels')
class PredictionModule(object):
def __init__(self, name, input_reprs, roll_direction=0, activate=True):
self.name = name
with tf.variable_scope(name + '/predictions'):
projected = model_helpers.project(input_reprs, config.projection_size)
if activate:
projected = tf.nn.relu(projected)
self.logits = tf.layers.dense(projected, n_classes, name='predict')
targets = labels
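        # label smoothing: interpolate the one-hot targets with a uniform distribution over the classes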
targets *= (1 - inputs.label_smoothing)
targets += inputs.label_smoothing / n_classes
self.loss = model_helpers.masked_ce_loss(
self.logits, targets, inputs.mask, roll_direction=roll_direction)
primary = PredictionModule('primary',
([encoder.uni_reprs, encoder.bi_reprs]))
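    # auxiliary prediction modules over restricted views of the encoder; their summed losses form the unsupervised objective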
ps = [
PredictionModule('full', ([encoder.uni_reprs, encoder.bi_reprs]),
activate=False),
PredictionModule('forwards', [encoder.uni_fw]),
PredictionModule('backwards', [encoder.uni_bw]),
PredictionModule('future', [encoder.uni_fw], roll_direction=1),
PredictionModule('past', [encoder.uni_bw], roll_direction=-1),
]
self.unsupervised_loss = sum(p.loss for p in ps)
self.supervised_loss = primary.loss
self.probs = tf.nn.softmax(primary.logits)
self.preds = tf.argmax(primary.logits, axis=-1)
def update_feed_dict(self, feed, mb):
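    # use teacher predictions as targets when available; for labeled batches build padded one-hot label arrays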
if self.task_name in mb.teacher_predictions:
feed[self.labels] = mb.teacher_predictions[self.task_name]
elif mb.task_name != 'unlabeled':
labels = minibatching.build_array(
[[0] + e.labels + [0] for e in mb.examples])
feed[self.labels] = np.eye(self.n_classes)[labels]
|
[
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"research.cvt_text.corpus_processing.minibatching.build_array",
"tensorflow.argmax",
"tensorflow.layers.dense",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"research.cvt_text.model.model_helpers.project",
"research.cvt_text.model.model_helpers.masked_ce_loss",
"numpy.eye"
] |
[((1326, 1400), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, None]'], {'name': "(task_name + '_labels')"}), "(tf.float32, [None, None, None], name=task_name + '_labels')\n", (1340, 1400), True, 'import tensorflow as tf\n'), ((2901, 2930), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['primary.logits'], {}), '(primary.logits)\n', (2914, 2930), True, 'import tensorflow as tf\n'), ((2952, 2986), 'tensorflow.argmax', 'tf.argmax', (['primary.logits'], {'axis': '(-1)'}), '(primary.logits, axis=-1)\n', (2961, 2986), True, 'import tensorflow as tf\n'), ((2118, 2216), 'research.cvt_text.model.model_helpers.masked_ce_loss', 'model_helpers.masked_ce_loss', (['self.logits', 'targets', 'inputs.mask'], {'roll_direction': 'roll_direction'}), '(self.logits, targets, inputs.mask,\n roll_direction=roll_direction)\n', (2146, 2216), False, 'from research.cvt_text.model import model_helpers\n'), ((3217, 3288), 'research.cvt_text.corpus_processing.minibatching.build_array', 'minibatching.build_array', (['[([0] + e.labels + [0]) for e in mb.examples]'], {}), '([([0] + e.labels + [0]) for e in mb.examples])\n', (3241, 3288), False, 'from research.cvt_text.corpus_processing import minibatching\n'), ((1626, 1666), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name + '/predictions')"], {}), "(name + '/predictions')\n", (1643, 1666), True, 'import tensorflow as tf\n'), ((1700, 1758), 'research.cvt_text.model.model_helpers.project', 'model_helpers.project', (['input_reprs', 'config.projection_size'], {}), '(input_reprs, config.projection_size)\n', (1721, 1758), False, 'from research.cvt_text.model import model_helpers\n'), ((1884, 1937), 'tensorflow.layers.dense', 'tf.layers.dense', (['projected', 'n_classes'], {'name': '"""predict"""'}), "(projected, n_classes, name='predict')\n", (1899, 1937), True, 'import tensorflow as tf\n'), ((3336, 3358), 'numpy.eye', 'np.eye', (['self.n_classes'], {}), '(self.n_classes)\n', (3342, 3358), True, 'import numpy as np\n'), ((1828, 1849), 'tensorflow.nn.relu', 'tf.nn.relu', (['projected'], {}), '(projected)\n', (1838, 1849), True, 'import tensorflow as tf\n')]
|
# A simple MDP where agent has to traverse a specific path
# in gridworld - wrong action will throw player back to start or do nothing.
# Player is rewarded for reaching new maximum length in the episode.
#
# State is represented by a positive ndim vector that tells
# where the player is. This is designed to mimic coordinate-systems
# and also deliberately confuse networks (e.g. might think higher value
# on axis 0 means we should take one specific action always)
#
import random
import numpy as np
import gym
# Fix for older gym versions
import gym.spaces
def generate_path(game_length: int, ndim: int, num_mines: int, seed: int = 42):
"""Generate the path player has to follow.
Args:
game_length: Length of the path to generate
ndim: Number of dimensions in the environment
num_mines: Number of mines per step
seed: Seed used to generate path
Returns:
path: List of ints, representing actions player should take in each state.
mines: List of List of ints, representing which actions are mines in each state.
"""
path = []
mines = []
gen = np.random.default_rng(seed)
for i in range(game_length):
action_ordering = gen.permutation(ndim)
# First item goes to path, next num_mines go to mines
path.append(action_ordering[0].item())
mines.append(action_ordering[1:1 + num_mines].tolist())
return path, mines
class DangerousPathEnv(gym.Env):
"""
A N-dimensional environment where player has to choose
the exact correct action at any given location (follow
a very specific path). Otherwise game terminates or player stays
still, depending on if they hit a mine or not.
If `discrete_obs` is True, observation space tells location
of player in path. If False, uses continuous observations
that tell coordinate-like information of location of the player.
`mine_ratio` specifies the amount of mines (terminal states) versus
no-move moves per state.
"""
def __init__(
self,
game_length=100,
ndim=2,
seed=42,
discrete_obs=False,
random_action_p=0.0,
mine_ratio=1.0
):
super().__init__()
self.game_length = game_length
self.ndim = ndim
self.mine_ratio = mine_ratio
        self.num_mines_per_step = int(np.floor(ndim * mine_ratio))
        self.path, self.mines = generate_path(game_length, ndim, self.num_mines_per_step, seed)
        # Empirically found to be a necessary adjustment
self.step_size = 1.0
self.discrete_obs = discrete_obs
self.random_action_p = random_action_p
if discrete_obs:
self.observation_space = gym.spaces.Discrete(n=self.game_length)
else:
self.observation_space = gym.spaces.Box(0, 1, shape=(self.ndim,))
self.action_space = gym.spaces.Discrete(n=self.ndim)
self.path_location = 0
self.max_path_location = 0
self.num_steps = 0
self.player_location = np.zeros((self.ndim,))
def step(self, action):
if self.random_action_p > 0.0 and random.random() < self.random_action_p:
action = self.action_space.sample()
done = False
reward = 0
action = int(action)
if action == self.path[self.path_location]:
# You chose wisely
self.path_location += 1
# Only reward progressing once
if self.path_location > self.max_path_location:
reward = 1
self.max_path_location += 1
# Small step sizes
self.player_location[action] += self.step_size
if self.path_location == (self.game_length - 1):
done = True
else:
# You chose poorly
reward = 0
if action in self.mines[self.path_location]:
# You chose very poorly, back to start
self.path_location = 0
self.player_location = np.zeros((self.ndim,))
self.num_steps += 1
if self.num_steps >= self.game_length:
done = True
return self.path_location if self.discrete_obs else self.player_location, reward, done, {}
def reset(self):
self.path_location = 0
self.max_path_location = 0
self.num_steps = 0
self.player_location = np.zeros((self.ndim,))
return self.path_location if self.discrete_obs else self.player_location
def seed(self, seed):
        self.path, self.mines = generate_path(self.game_length, self.ndim, self.num_mines_per_step, seed)
|
[
"numpy.floor",
"gym.spaces.Discrete",
"numpy.zeros",
"numpy.random.default_rng",
"random.random",
"gym.spaces.Box"
] |
[((1139, 1166), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (1160, 1166), True, 'import numpy as np\n'), ((2372, 2399), 'numpy.floor', 'np.floor', (['(ndim * mine_ratio)'], {}), '(ndim * mine_ratio)\n', (2380, 2399), True, 'import numpy as np\n'), ((2869, 2901), 'gym.spaces.Discrete', 'gym.spaces.Discrete', ([], {'n': 'self.ndim'}), '(n=self.ndim)\n', (2888, 2901), False, 'import gym\n'), ((3027, 3049), 'numpy.zeros', 'np.zeros', (['(self.ndim,)'], {}), '((self.ndim,))\n', (3035, 3049), True, 'import numpy as np\n'), ((4374, 4396), 'numpy.zeros', 'np.zeros', (['(self.ndim,)'], {}), '((self.ndim,))\n', (4382, 4396), True, 'import numpy as np\n'), ((2709, 2748), 'gym.spaces.Discrete', 'gym.spaces.Discrete', ([], {'n': 'self.game_length'}), '(n=self.game_length)\n', (2728, 2748), False, 'import gym\n'), ((2800, 2840), 'gym.spaces.Box', 'gym.spaces.Box', (['(0)', '(1)'], {'shape': '(self.ndim,)'}), '(0, 1, shape=(self.ndim,))\n', (2814, 2840), False, 'import gym\n'), ((3121, 3136), 'random.random', 'random.random', ([], {}), '()\n', (3134, 3136), False, 'import random\n'), ((4005, 4027), 'numpy.zeros', 'np.zeros', (['(self.ndim,)'], {}), '((self.ndim,))\n', (4013, 4027), True, 'import numpy as np\n')]
|
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
class Naca_4_digit(object):
def __init__(self, int_4, attack_angle_deg, resolution, quasi_equidistant=True, length_adjust=False, from5digit=False):
if from5digit == False:
self.m = float(int_4[0]) / 100 # maximum camber
self.p = float(int_4[1]) / 10 # position of the maximum camber
self.t = float(int_4[2:4]) / 100 # maximum thickness
self.load_setting(attack_angle_deg, resolution, quasi_equidistant, length_adjust)
            # 4-digit camber line; 5-digit wings compute their own camber before calling this initializer
            self.__y_c()
            self.__dyc_dx()
self.__y_t()
self.theta = np.arctan(self.dyc_dx)
self.get_surface()
if quasi_equidistant == True:
self.get_quasi_equidistant_line()
def load_setting(self, attack_angle_deg, resolution, quasi_equidistant=True, length_adjust=False):
self.use_quasi_equidistant = quasi_equidistant
self.reshape = length_adjust
if quasi_equidistant == True:
self.resolution = 100 * resolution
else:
self.resolution = resolution
self.new_resolution = resolution
self.attack_angle = attack_angle_deg
self.x = np.linspace(start = 0, stop = 1, num = self.resolution)
def __y_c(self):
x_lt_p = lambda m, p, x: m / (p ** 2) * (2.0 * p * x - x ** 2)
x_ge_p = lambda m, p, x: m / ((1 - p) ** 2) * ((1.0 - 2.0 * p) + 2.0 * p * x - x ** 2)
m = self.m
p = self.p
x = self.x
if ((p != 0) and (p != 1)):
self.y_c = np.where(x < p, x_lt_p(m, p, x), x_ge_p(m, p, x))
elif (p == 0):
self.y_c = m * (1.0 - x**2)
elif (p == 1):
self.y_c = m * (2.0 * x - x ** 2)
def __y_t(self):
t = self.t
x = self.x
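        # NACA 4-digit half-thickness distribution, scaled by the maximum thickness t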
self.y_t = t / 0.2 * (0.2969 * np.sqrt(x) - 0.1260 * x - 0.3516 * x**2 + 0.2843 * x**3 - 0.1015 * x**4)
def __dyc_dx(self):
x_lt_p = lambda m, p, x: 2.0 * m / (p ** 2) * (p - x)
x_ge_p = lambda m, p, x: 2.0 * m / ((1.0 - p) ** 2) * (p - x)
m = self.m
p = self.p
x = self.x
if ((p != 0) and (p != 1)):
self.dyc_dx = np.where(x < p, x_lt_p(m, p, x), x_ge_p(m, p, x))
elif (p == 0):
self.dyc_dx = - 2.0 * m * x
elif (p == 1):
self.dyc_dx = 2.0 * m * (1.0 - x)
def get_surface(self):
# original NACA-4digit wings
# upper
vec_l = np.full((3, self.resolution), 1.0)
vec_u = np.full((3, self.resolution), 1.0)
vec_u[0] = self.x - self.y_t * np.sin(self.theta) - 0.5
vec_u[1] = self.y_c + self.y_t * np.cos(self.theta)
# lower
vec_l[0] = self.x + self.y_t * np.sin(self.theta) - 0.5
vec_l[1] = self.y_c - self.y_t * np.cos(self.theta)
attack_angle = self.attack_angle / 180 * (np.pi)
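        # rotate the surface coordinates (shifted so the mid-chord is at the origin) by the attack angle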
rotMat = np.array([[np.cos(attack_angle), np.sin(attack_angle), 0], [- np.sin(attack_angle), np.cos(attack_angle), 0], [0, 0, 1]])
rot_l = rotMat.dot(vec_l)
rot_u = rotMat.dot(vec_u)
if self.reshape == True:
x_min = min(np.min(rot_l[0]), np.min(rot_u[0]))
x_max = max(np.max(rot_l[0]), np.max(rot_u[0]))
rate = 1.0 / (x_max - x_min)
if rate != 1.0:
expMat = np.array([[rate, 0, 0], [0, rate, 0], [0, 0, 1]])
rot_l = expMat.dot(rot_l)
rot_u = expMat.dot(rot_u)
self.x_l = rot_l[0] + 0.5
self.y_l = rot_l[1] + 0.5
self.x_u = rot_u[0] + 0.5
self.y_u = rot_u[1] + 0.5
def plot(self):
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.plot(self.x_u, self.y_u)
plt.plot(self.x_l, self.y_l)
plt.show()
def get_quasi_equidistant_line(self):
new_resolution = self.new_resolution
x_min = min(np.min(self.x_u), np.min(self.x_l))
x_max = max(np.max(self.x_u), np.max(self.x_l))
if self.reshape == False:
self.equidistant_x = np.linspace(start = 0, stop = 1, num = new_resolution)
else:
self.equidistant_x = np.linspace(start=x_min, stop=x_max, num=new_resolution)
self.equidistant_y_l = np.zeros(new_resolution)
self.equidistant_y_u = np.zeros(new_resolution)
for index in range(new_resolution):
if ((x_min <= self.equidistant_x[index]) and (x_max >= self.equidistant_x[index])):
self.equidistant_y_l[index] = self.y_l[np.argmin(np.abs(self.x_l - self.equidistant_x[index]))]
self.equidistant_y_u[index] = self.y_u[np.argmin(np.abs(self.x_u - self.equidistant_x[index]))]
else:
                self.equidistant_y_l[index] = -1.0  # outlier marker (point outside the chord range)
self.equidistant_y_u[index] = -1.0
def plot_quasi_equidistant_shape(self):
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.plot(self.equidistant_x, self.equidistant_y_u, "o")
plt.plot(self.equidistant_x, self.equidistant_y_l, "o")
plt.show()
def transform2complex(self):
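        # build a closed contour in the complex plane: upper surface from trailing to leading edge, then lower surface, then back to the first point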
z_u_reverse = (self.x_u + 1j * self.y_u)[::-1]
z_l = self.x_l + 1j * self.y_l
if self.use_quasi_equidistant == True:
return np.concatenate([z_u_reverse[::100], z_l[::100], z_u_reverse[0].reshape(-1)])
else:
if z_u_reverse[self.resolution - 1] == z_l[0]:
return np.concatenate([z_u_reverse, z_l[1:], z_u_reverse[0].reshape(-1)])
else:
return np.concatenate([z_u_reverse, z_l, z_u_reverse[0].reshape(-1)])
class Naca_5_digit(Naca_4_digit):
def __init__(self, int_5, attack_angle_deg, resolution, quasi_equidistant = True, length_adjust = False, from5digit = True):
self.cl = float(int_5[0])*(3.0/2.0) / 10 # designed lift_coefficient
        self.p = float(int_5[1]) / 2.0 / 10  # position of the maximum camber (e.g. "3" -> 0.15 chord)
        self.ref = int_5[2]  # enable / disable reflex camber
self.t = float(int_5[3:5]) / 100.0 # maximum thickness
self.camberline_plofile = int(int_5[0:3])
self.camberline_plofile_table()
self.load_setting(attack_angle_deg, resolution, quasi_equidistant, length_adjust)
self.__y_c()
self.__dyc_dx()
super(Naca_5_digit, self).__init__(int_5, attack_angle_deg, resolution, quasi_equidistant = quasi_equidistant, length_adjust = length_adjust, from5digit = True)
def __y_c(self):
x_lt_m_nr = lambda m, k1, x: k1 / 6.0 * (x ** 3 - 3.0 * m * x ** 2 + m ** 2 * (3.0 - m) * x)
x_gt_m_nr = lambda m, k1, x: k1 / 6.0 * m ** 3 * (1.0 - x)
x_lt_m_rf = lambda m, k1, k2_k1, x: k1 / 6.0 * ((x - m)**3 - k2_k1 * (1.0-m)**3 * x - m**3 * x + m**3)
x_gt_m_rf = lambda m, k1, k2_k1, x: k1 / 6.0 * (k2_k1 * (x - m)**3 - k2_k1 * (1.0 - m)**3 * x - m**3 * x + m**3)
m = self.m
k1 = self.k1
x = self.x
if int(self.ref) == 0: # not reflected
self.y_c = np.where(x < m, x_lt_m_nr(m, k1, x), x_gt_m_nr(m, k1, x))
else:
k2_k1 = self.k2byk1
self.y_c = np.where(x < m, x_lt_m_rf(m, k1, k2_k1, x), x_gt_m_rf(m, k1, k2_k1, x))
def __dyc_dx(self):
x_lt_m_nr = lambda m, k1, x: k1 / 6.0 * (3.0 * x ** 2 - 6.0 * m * x + m ** 2 * (3.0 - m))
x_gt_m_nr = lambda m, k1, x: - k1 / 6.0 * m ** 3
x_lt_m_rf = lambda m, k1, k2_k1, x: k1 / 6.0 * (3.0 * (x - m) ** 2 - k2_k1 * (1.0 - m) ** 3 - m ** 3)
x_gt_m_rf = lambda m, k1, k2_k1, x: k1 / 6.0 * (3.0 * k2_k1 * (x - m) ** 2 - k2_k1 * (1.0 - m) ** 3 - m ** 3)
m = self.m
k1 = self.k1
x = self.x
if int(self.ref) == 0: # not reflected
self.dyc_dx = np.where(x < m, x_lt_m_nr(m, k1, x), x_gt_m_nr(m, k1, x))
else:
k2_k1 = self.k2byk1
self.dyc_dx = np.where(x < m, x_lt_m_rf(m, k1, k2_k1, x), x_gt_m_rf(m, k1, k2_k1, x))
def camberline_plofile_table(self):
if self.camberline_plofile == 210:
self.m = 0.058
self.k1 = 361.4
elif self.camberline_plofile == 220:
self.m = 0.126
self.k1 = 51.64
elif self.camberline_plofile == 230:
self.m = 0.2025
self.k1 = 15.957
elif self.camberline_plofile == 240:
self.m = 0.29
self.k1 = 6.643
elif self.camberline_plofile == 250:
self.m = 0.391
self.k1 = 3.230
elif self.camberline_plofile == 221:
self.m = 0.130
self.k1 = 51.990
self.k2byk1 = 0.000764
elif self.camberline_plofile == 231:
self.m = 0.217
self.k1 = 15.793
self.k2byk1 = 0.00677
elif self.camberline_plofile == 241:
self.m = 0.318
self.k1 = 6.520
self.k2byk1 = 0.0303
elif self.camberline_plofile == 251:
self.m = 0.441
self.k1 = 3.191
self.k2byk1 = 0.1355
else:
print("this type wing is not defined")
exit()
def main():
deg = 0.0
naca = Naca_4_digit(int_4="0012", attack_angle_deg=deg, resolution=100, quasi_equidistant=True, length_adjust=True)
naca.plot()
naca.plot_quasi_equidistant_shape()
naca = Naca_5_digit(int_5="23012", attack_angle_deg=deg, resolution=100, quasi_equidistant=True, length_adjust=True)
naca.plot()
naca.plot_quasi_equidistant_shape()
if __name__ == '__main__':
main()
|
[
"numpy.full",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.zeros",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"numpy.arctan",
"numpy.sqrt"
] |
[((646, 668), 'numpy.arctan', 'np.arctan', (['self.dyc_dx'], {}), '(self.dyc_dx)\n', (655, 668), True, 'import numpy as np\n'), ((1227, 1276), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(1)', 'num': 'self.resolution'}), '(start=0, stop=1, num=self.resolution)\n', (1238, 1276), True, 'import numpy as np\n'), ((2548, 2582), 'numpy.full', 'np.full', (['(3, self.resolution)', '(1.0)'], {}), '((3, self.resolution), 1.0)\n', (2555, 2582), True, 'import numpy as np\n'), ((2599, 2633), 'numpy.full', 'np.full', (['(3, self.resolution)', '(1.0)'], {}), '((3, self.resolution), 1.0)\n', (2606, 2633), True, 'import numpy as np\n'), ((3735, 3751), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (3743, 3751), True, 'import matplotlib.pyplot as plt\n'), ((3760, 3776), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (3768, 3776), True, 'import matplotlib.pyplot as plt\n'), ((3785, 3813), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x_u', 'self.y_u'], {}), '(self.x_u, self.y_u)\n', (3793, 3813), True, 'import matplotlib.pyplot as plt\n'), ((3822, 3850), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x_l', 'self.y_l'], {}), '(self.x_l, self.y_l)\n', (3830, 3850), True, 'import matplotlib.pyplot as plt\n'), ((3859, 3869), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3867, 3869), True, 'import matplotlib.pyplot as plt\n'), ((4336, 4360), 'numpy.zeros', 'np.zeros', (['new_resolution'], {}), '(new_resolution)\n', (4344, 4360), True, 'import numpy as np\n'), ((4392, 4416), 'numpy.zeros', 'np.zeros', (['new_resolution'], {}), '(new_resolution)\n', (4400, 4416), True, 'import numpy as np\n'), ((4962, 4978), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (4970, 4978), True, 'import matplotlib.pyplot as plt\n'), ((4987, 5003), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (4995, 5003), True, 'import matplotlib.pyplot as plt\n'), ((5012, 5067), 'matplotlib.pyplot.plot', 'plt.plot', (['self.equidistant_x', 'self.equidistant_y_u', '"""o"""'], {}), "(self.equidistant_x, self.equidistant_y_u, 'o')\n", (5020, 5067), True, 'import matplotlib.pyplot as plt\n'), ((5076, 5131), 'matplotlib.pyplot.plot', 'plt.plot', (['self.equidistant_x', 'self.equidistant_y_l', '"""o"""'], {}), "(self.equidistant_x, self.equidistant_y_l, 'o')\n", (5084, 5131), True, 'import matplotlib.pyplot as plt\n'), ((5140, 5150), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5148, 5150), True, 'import matplotlib.pyplot as plt\n'), ((3986, 4002), 'numpy.min', 'np.min', (['self.x_u'], {}), '(self.x_u)\n', (3992, 4002), True, 'import numpy as np\n'), ((4004, 4020), 'numpy.min', 'np.min', (['self.x_l'], {}), '(self.x_l)\n', (4010, 4020), True, 'import numpy as np\n'), ((4042, 4058), 'numpy.max', 'np.max', (['self.x_u'], {}), '(self.x_u)\n', (4048, 4058), True, 'import numpy as np\n'), ((4060, 4076), 'numpy.max', 'np.max', (['self.x_l'], {}), '(self.x_l)\n', (4066, 4076), True, 'import numpy as np\n'), ((4146, 4194), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(1)', 'num': 'new_resolution'}), '(start=0, stop=1, num=new_resolution)\n', (4157, 4194), True, 'import numpy as np\n'), ((4248, 4304), 'numpy.linspace', 'np.linspace', ([], {'start': 'x_min', 'stop': 'x_max', 'num': 'new_resolution'}), '(start=x_min, stop=x_max, num=new_resolution)\n', (4259, 4304), True, 'import numpy as np\n'), ((2748, 2766), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (2754, 2766), True, 'import numpy as np\n'), 
((2888, 2906), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (2894, 2906), True, 'import numpy as np\n'), ((3239, 3255), 'numpy.min', 'np.min', (['rot_l[0]'], {}), '(rot_l[0])\n', (3245, 3255), True, 'import numpy as np\n'), ((3257, 3273), 'numpy.min', 'np.min', (['rot_u[0]'], {}), '(rot_u[0])\n', (3263, 3273), True, 'import numpy as np\n'), ((3299, 3315), 'numpy.max', 'np.max', (['rot_l[0]'], {}), '(rot_l[0])\n', (3305, 3315), True, 'import numpy as np\n'), ((3317, 3333), 'numpy.max', 'np.max', (['rot_u[0]'], {}), '(rot_u[0])\n', (3323, 3333), True, 'import numpy as np\n'), ((3430, 3479), 'numpy.array', 'np.array', (['[[rate, 0, 0], [0, rate, 0], [0, 0, 1]]'], {}), '([[rate, 0, 0], [0, rate, 0], [0, 0, 1]])\n', (3438, 3479), True, 'import numpy as np\n'), ((2682, 2700), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (2688, 2700), True, 'import numpy as np\n'), ((2822, 2840), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (2828, 2840), True, 'import numpy as np\n'), ((2993, 3013), 'numpy.cos', 'np.cos', (['attack_angle'], {}), '(attack_angle)\n', (2999, 3013), True, 'import numpy as np\n'), ((3015, 3035), 'numpy.sin', 'np.sin', (['attack_angle'], {}), '(attack_angle)\n', (3021, 3035), True, 'import numpy as np\n'), ((3066, 3086), 'numpy.cos', 'np.cos', (['attack_angle'], {}), '(attack_angle)\n', (3072, 3086), True, 'import numpy as np\n'), ((3044, 3064), 'numpy.sin', 'np.sin', (['attack_angle'], {}), '(attack_angle)\n', (3050, 3064), True, 'import numpy as np\n'), ((4622, 4666), 'numpy.abs', 'np.abs', (['(self.x_l - self.equidistant_x[index])'], {}), '(self.x_l - self.equidistant_x[index])\n', (4628, 4666), True, 'import numpy as np\n'), ((4734, 4778), 'numpy.abs', 'np.abs', (['(self.x_u - self.equidistant_x[index])'], {}), '(self.x_u - self.equidistant_x[index])\n', (4740, 4778), True, 'import numpy as np\n'), ((1898, 1908), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (1905, 1908), True, 'import numpy as np\n')]
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from argparse import ArgumentParser
from collections import defaultdict
from typing import Optional
import fastmri
import numpy as np
import torch
import torch.nn as nn
from fastmri import evaluate
from fastmri.data import transforms
from fastmri.data.transforms import VarNetSample
from fastmri.models.adaptive_varnet import AdaptiveSensitivityModel, AdaptiveVarNetBlock
from fastmri.models.varnet import NormUnet
from fastmri.pl_modules.mri_module import MriModule
from .metrics import DistributedMetricSum, DistributedArraySum
class VarNet(nn.Module):
"""
A full variational network model.
This model applies a combination of soft data consistency with a U-Net
regularizer. To use non-U-Net regularizers, use VarNetBlock.
"""
def __init__(
self,
num_cascades: int = 12,
sens_chans: int = 8,
sens_pools: int = 4,
chans: int = 18,
pools: int = 4,
num_sense_lines: Optional[int] = None,
hard_dc: bool = False,
dc_mode: str = "simul",
sparse_dc_gradients: bool = True,
):
"""
Args:
num_cascades: Number of cascades (i.e., layers) for variational
network.
sens_chans: Number of channels for sensitivity map U-Net.
            sens_pools: Number of downsampling and upsampling layers for
sensitivity map U-Net.
chans: Number of channels for cascade U-Net.
pools: Number of downsampling and upsampling layers for cascade
U-Net.
num_sense_lines: Number of low-frequency lines to use for
sensitivity map computation, must be even or `None`. Default
`None` will automatically compute the number from masks.
Default behaviour may cause some slices to use more
low-frequency lines than others, when used in conjunction with
e.g. the EquispacedMaskFunc defaults.
hard_dc: Whether to do hard DC layers instead of soft (learned).
dc_mode: str, whether to do DC before ('first'), after ('last') or
simultaneously ('simul') with Refinement step. Default 'simul'.
sparse_dc_gradients: Whether to sparsify the gradients in DC by
using torch.where() with the mask: this essentially removes
gradients for the policy on unsampled rows. This should change
nothing for the non-active VarNet.
"""
super().__init__()
self.num_sense_lines = num_sense_lines
self.hard_dc = hard_dc
self.dc_mode = dc_mode
self.sparse_dc_gradients = sparse_dc_gradients
self.sens_net = AdaptiveSensitivityModel(
sens_chans, sens_pools, num_sense_lines=num_sense_lines
)
self.cascades = nn.ModuleList(
[
AdaptiveVarNetBlock(
NormUnet(chans, pools),
hard_dc=hard_dc,
dc_mode=dc_mode,
sparse_dc_gradients=sparse_dc_gradients,
)
for _ in range(num_cascades)
]
)
def forward(
self, kspace: torch.Tensor, masked_kspace: torch.Tensor, mask: torch.Tensor
):
extra_outputs = defaultdict(list)
sens_maps = self.sens_net(masked_kspace, mask)
extra_outputs["sense"].append(sens_maps.detach().cpu())
kspace_pred = masked_kspace.clone()
extra_outputs["masks"].append(mask.detach().cpu())
# Store current reconstruction
current_recon = fastmri.complex_abs(
self.sens_reduce(masked_kspace, sens_maps)
).squeeze(1)
extra_outputs["recons"].append(current_recon.detach().cpu())
for cascade in self.cascades:
kspace_pred = cascade(
kspace_pred, masked_kspace, mask, sens_maps, kspace=kspace
)
# Store current reconstruction
current_recon = fastmri.complex_abs(
self.sens_reduce(masked_kspace, sens_maps)
).squeeze(1)
extra_outputs["recons"].append(current_recon.detach().cpu())
# Could presumably do complex_abs(complex_rss()) instead and get same result?
output = fastmri.rss(fastmri.complex_abs(fastmri.ifft2c(kspace_pred)), dim=1)
return output, extra_outputs
def sens_reduce(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
x = fastmri.ifft2c(x)
return fastmri.complex_mul(x, fastmri.complex_conj(sens_maps)).sum(
dim=1, keepdim=True
)
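# Illustrative usage sketch, not part of the original module. The tensor shapes
# below are assumptions for a single multicoil fastMRI slice and may need to be
# adapted to the actual data pipeline:
#
#     model = VarNet(num_cascades=2, sens_chans=4, chans=8)
#     kspace = torch.rand(1, 15, 640, 372, 2)       # (batch, coils, H, W, real/imag)
#     mask = torch.zeros(1, 1, 1, 372, 1) > 0        # boolean sampling mask over columns
#     mask[..., ::4, :] = True                       # hypothetical 4x undersampling pattern
#     output, extra_outputs = model(kspace, kspace * mask, mask)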
class VarNetModule(MriModule):
"""
VarNet training module.
This can be used to train variational networks from the paper:
    <NAME> et al. End-to-end variational networks for accelerated MRI
reconstruction. In International Conference on Medical Image Computing and
Computer-Assisted Intervention, 2020.
which was inspired by the earlier paper:
<NAME> et al. Learning a variational network for reconstruction of
    accelerated MRI data. Magnetic Resonance in Medicine, 79(6):3055–3071, 2018.
"""
def __init__(
self,
num_cascades: int = 12,
pools: int = 4,
chans: int = 18,
sens_pools: int = 4,
sens_chans: int = 8,
lr: float = 0.0003,
lr_step_size: int = 40,
lr_gamma: float = 0.1,
weight_decay: float = 0.0,
        num_sense_lines: Optional[int] = None,
hard_dc: bool = False,
dc_mode: str = "simul",
sparse_dc_gradients: bool = True,
**kwargs,
):
"""
Args:
num_cascades: Number of cascades (i.e., layers) for variational
network.
pools: Number of downsampling and upsampling layers for cascade
U-Net.
chans: Number of channels for cascade U-Net.
sens_pools: Number of downsampling and upsampling layers for
sensitivity map U-Net.
sens_chans: Number of channels for sensitivity map U-Net.
lr: Learning rate.
lr_step_size: Learning rate step size.
lr_gamma: Learning rate gamma decay.
weight_decay: Parameter for penalizing weights norm.
num_sense_lines: Number of low-frequency lines to use for sensitivity map
computation, must be even or `None`. Default `None` will automatically
compute the number from masks. Default behaviour may cause some slices to
use more low-frequency lines than others, when used in conjunction with
e.g. the EquispacedMaskFunc defaults.
hard_dc: Whether to do hard DC layers instead of soft (learned).
dc_mode: str, whether to do DC before ('first'), after ('last') or
simultaneously ('simul') with Refinement step. Default 'simul'.
sparse_dc_gradients: Whether to sparsify the gradients in DC by using torch.where()
with the mask: this essentially removes gradients for the policy on unsampled rows. This should
change nothing for the non-active VarNet.
"""
super().__init__()
self.save_hyperparameters()
self.num_cascades = num_cascades
self.pools = pools
self.chans = chans
self.sens_pools = sens_pools
self.sens_chans = sens_chans
self.lr = lr
self.lr_step_size = lr_step_size
self.lr_gamma = lr_gamma
self.weight_decay = weight_decay
self.num_sense_lines = num_sense_lines
self.hard_dc = hard_dc
self.dc_mode = dc_mode
self.sparse_dc_gradients = sparse_dc_gradients
# logging functions
self.NMSE = DistributedMetricSum()
self.SSIM = DistributedMetricSum()
self.PSNR = DistributedMetricSum()
self.ValLoss = DistributedMetricSum()
self.TotExamples = DistributedMetricSum()
self.TotSliceExamples = DistributedMetricSum()
self.ValMargDist = DistributedArraySum()
self.ValCondEnt = DistributedMetricSum()
self.TrainNMSE = DistributedMetricSum()
self.TrainSSIM = DistributedMetricSum()
self.TrainPSNR = DistributedMetricSum()
self.TrainLoss = DistributedMetricSum()
self.TrainTotExamples = DistributedMetricSum()
self.TrainTotSliceExamples = DistributedMetricSum()
self.TrainMargDist = DistributedArraySum()
self.TrainCondEnt = DistributedMetricSum()
self.varnet = VarNet(
num_cascades=self.num_cascades,
sens_chans=self.sens_chans,
sens_pools=self.sens_pools,
chans=self.chans,
pools=self.pools,
num_sense_lines=self.num_sense_lines,
hard_dc=self.hard_dc,
dc_mode=self.dc_mode,
sparse_dc_gradients=self.sparse_dc_gradients,
)
self.loss = fastmri.SSIMLoss()
def forward(self, kspace, masked_kspace, mask):
return self.varnet(kspace, masked_kspace, mask)
def training_step(self, batch, batch_idx):
output, extra_outputs = self(batch.kspace, batch.masked_kspace, batch.mask)
target, output = transforms.center_crop_to_smallest(batch.target, output)
# NOTE: Using max value here...
loss = self.loss(
output.unsqueeze(1), target.unsqueeze(1), data_range=batch.max_value
)
self.log("train_loss", loss)
# Return same stuff as on validation step here
return {
"batch_idx": batch_idx,
"fname": batch.fname,
"slice_num": batch.slice_num,
"max_value": batch.max_value,
"output": output,
"target": target,
"loss": loss,
"extra_outputs": extra_outputs,
}
def training_step_end(self, train_logs):
# check inputs
for k in (
"batch_idx",
"fname",
"slice_num",
"max_value",
"output",
"target",
"loss",
"extra_outputs",
):
if k not in train_logs.keys():
raise RuntimeError(
f"Expected key {k} in dict returned by training_step."
)
if train_logs["output"].ndim == 2:
train_logs["output"] = train_logs["output"].unsqueeze(0)
elif train_logs["output"].ndim != 3:
raise RuntimeError("Unexpected output size from training_step.")
if train_logs["target"].ndim == 2:
train_logs["target"] = train_logs["target"].unsqueeze(0)
elif train_logs["target"].ndim != 3:
raise RuntimeError("Unexpected output size from training_step.")
# compute evaluation metrics
mse_vals = defaultdict(dict)
target_norms = defaultdict(dict)
ssim_vals = defaultdict(dict)
max_vals = dict()
for i, fname in enumerate(train_logs["fname"]):
slice_num = int(train_logs["slice_num"][i].cpu())
maxval = train_logs["max_value"][i].cpu().numpy()
output = train_logs["output"][i].detach().cpu().numpy()
target = train_logs["target"][i].cpu().numpy()
mse_vals[fname][slice_num] = torch.tensor(
evaluate.mse(target, output)
).view(1)
target_norms[fname][slice_num] = torch.tensor(
evaluate.mse(target, np.zeros_like(target))
).view(1)
ssim_vals[fname][slice_num] = torch.tensor(
evaluate.ssim(target[None, ...], output[None, ...], maxval=maxval)
).view(1)
max_vals[fname] = maxval
return {
"loss": train_logs["loss"],
"mse_vals": mse_vals,
"target_norms": target_norms,
"ssim_vals": ssim_vals,
"max_vals": max_vals,
}
def validation_step(self, batch, batch_idx):
batch: VarNetSample
output, extra_outputs = self.forward(
batch.kspace, batch.masked_kspace, batch.mask
)
target, output = transforms.center_crop_to_smallest(batch.target, output)
return {
"batch_idx": batch_idx,
"fname": batch.fname,
"slice_num": batch.slice_num,
"max_value": batch.max_value,
"output": output,
"target": target,
"val_loss": self.loss(
output.unsqueeze(1), target.unsqueeze(1), data_range=batch.max_value
),
"extra_outputs": extra_outputs,
}
def validation_step_end(self, val_logs):
# check inputs
for k in (
"batch_idx",
"fname",
"slice_num",
"max_value",
"output",
"target",
"val_loss",
):
if k not in val_logs.keys():
raise RuntimeError(
f"Expected key {k} in dict returned by validation_step."
)
if val_logs["output"].ndim == 2:
val_logs["output"] = val_logs["output"].unsqueeze(0)
elif val_logs["output"].ndim != 3:
raise RuntimeError("Unexpected output size from validation_step.")
if val_logs["target"].ndim == 2:
val_logs["target"] = val_logs["target"].unsqueeze(0)
elif val_logs["target"].ndim != 3:
raise RuntimeError("Unexpected output size from validation_step.")
# pick a set of images to log if we don't have one already
if self.val_log_indices is None:
self.val_log_indices = list(
np.random.permutation(len(self.trainer.val_dataloaders[0]))[
: self.num_log_images
]
)
# log images to tensorboard
if isinstance(val_logs["batch_idx"], int):
batch_indices = [val_logs["batch_idx"]]
else:
batch_indices = val_logs["batch_idx"]
for i, batch_idx in enumerate(batch_indices):
if batch_idx in self.val_log_indices:
key = f"val_images_idx_{batch_idx}"
target = val_logs["target"][i].unsqueeze(0)
output = val_logs["output"][i].unsqueeze(0)
error = torch.abs(target - output)
output = output / output.max()
target = target / target.max()
error = error / error.max()
self.log_image(f"{key}/target", target)
self.log_image(f"{key}/reconstruction", output)
self.log_image(f"{key}/error", error)
# compute evaluation metrics
mse_vals = defaultdict(dict)
target_norms = defaultdict(dict)
ssim_vals = defaultdict(dict)
max_vals = dict()
for i, fname in enumerate(val_logs["fname"]):
slice_num = int(val_logs["slice_num"][i].cpu())
maxval = val_logs["max_value"][i].cpu().numpy()
output = val_logs["output"][i].cpu().numpy()
target = val_logs["target"][i].cpu().numpy()
mse_vals[fname][slice_num] = torch.tensor(
evaluate.mse(target, output)
).view(1)
target_norms[fname][slice_num] = torch.tensor(
evaluate.mse(target, np.zeros_like(target))
).view(1)
ssim_vals[fname][slice_num] = torch.tensor(
evaluate.ssim(target[None, ...], output[None, ...], maxval=maxval)
).view(1)
max_vals[fname] = maxval
return {
"val_loss": val_logs["val_loss"],
"mse_vals": mse_vals,
"target_norms": target_norms,
"ssim_vals": ssim_vals,
"max_vals": max_vals,
}
def training_epoch_end(self, train_logs):
losses = []
mse_vals = defaultdict(dict)
target_norms = defaultdict(dict)
ssim_vals = defaultdict(dict)
max_vals = dict()
# use dict updates to handle duplicate slices
for train_log in train_logs:
losses.append(train_log["loss"].data.view(-1))
for k in train_log["mse_vals"].keys():
mse_vals[k].update(train_log["mse_vals"][k])
for k in train_log["target_norms"].keys():
target_norms[k].update(train_log["target_norms"][k])
for k in train_log["ssim_vals"].keys():
ssim_vals[k].update(train_log["ssim_vals"][k])
for k in train_log["max_vals"]:
max_vals[k] = train_log["max_vals"][k]
# check to make sure we have all files in all metrics
assert (
mse_vals.keys()
== target_norms.keys()
== ssim_vals.keys()
== max_vals.keys()
)
# apply means across image volumes
metrics = {"nmse": 0, "ssim": 0, "psnr": 0}
local_examples = 0
for fname in mse_vals.keys():
local_examples = local_examples + 1
mse_val = torch.mean(
torch.cat([v.view(-1) for _, v in mse_vals[fname].items()])
)
target_norm = torch.mean(
torch.cat([v.view(-1) for _, v in target_norms[fname].items()])
)
metrics["nmse"] = metrics["nmse"] + mse_val / target_norm
metrics["psnr"] = (
metrics["psnr"]
+ 20
* torch.log10(
torch.tensor(
max_vals[fname], dtype=mse_val.dtype, device=mse_val.device
)
)
- 10 * torch.log10(mse_val)
)
metrics["ssim"] = metrics["ssim"] + torch.mean(
torch.cat([v.view(-1) for _, v in ssim_vals[fname].items()])
)
# reduce across ddp via sum
metrics["nmse"] = self.TrainNMSE(metrics["nmse"])
metrics["ssim"] = self.TrainSSIM(metrics["ssim"])
metrics["psnr"] = self.TrainPSNR(metrics["psnr"])
tot_examples = self.TrainTotExamples(torch.tensor(local_examples))
train_loss = self.TrainLoss(torch.sum(torch.cat(losses)))
tot_slice_examples = self.TrainTotSliceExamples(
torch.tensor(len(losses), dtype=torch.float)
)
self.log("training_loss", train_loss / tot_slice_examples, prog_bar=True)
for metric, value in metrics.items():
self.log(f"train_metrics/{metric}", value / tot_examples)
def validation_epoch_end(self, val_logs):
# aggregate losses
losses = []
mse_vals = defaultdict(dict)
target_norms = defaultdict(dict)
ssim_vals = defaultdict(dict)
max_vals = dict()
# use dict updates to handle duplicate slices
for val_log in val_logs:
losses.append(val_log["val_loss"].view(-1))
for k in val_log["mse_vals"].keys():
mse_vals[k].update(val_log["mse_vals"][k])
for k in val_log["target_norms"].keys():
target_norms[k].update(val_log["target_norms"][k])
for k in val_log["ssim_vals"].keys():
ssim_vals[k].update(val_log["ssim_vals"][k])
for k in val_log["max_vals"]:
max_vals[k] = val_log["max_vals"][k]
# check to make sure we have all files in all metrics
assert (
mse_vals.keys()
== target_norms.keys()
== ssim_vals.keys()
== max_vals.keys()
)
# apply means across image volumes
metrics = {"nmse": 0, "ssim": 0, "psnr": 0}
local_examples = 0
for fname in mse_vals.keys():
local_examples = local_examples + 1
mse_val = torch.mean(
torch.cat([v.view(-1) for _, v in mse_vals[fname].items()])
)
target_norm = torch.mean(
torch.cat([v.view(-1) for _, v in target_norms[fname].items()])
)
metrics["nmse"] = metrics["nmse"] + mse_val / target_norm
metrics["psnr"] = (
metrics["psnr"]
+ 20
* torch.log10(
torch.tensor(
max_vals[fname], dtype=mse_val.dtype, device=mse_val.device
)
)
- 10 * torch.log10(mse_val)
)
metrics["ssim"] = metrics["ssim"] + torch.mean(
torch.cat([v.view(-1) for _, v in ssim_vals[fname].items()])
)
# reduce across ddp via sum
metrics["nmse"] = self.NMSE(metrics["nmse"])
metrics["ssim"] = self.SSIM(metrics["ssim"])
metrics["psnr"] = self.PSNR(metrics["psnr"])
tot_examples = self.TotExamples(torch.tensor(local_examples))
val_loss = self.ValLoss(torch.sum(torch.cat(losses)))
tot_slice_examples = self.TotSliceExamples(
torch.tensor(len(losses), dtype=torch.float)
)
self.log("validation_loss", val_loss / tot_slice_examples, prog_bar=True)
for metric, value in metrics.items():
self.log(f"val_metrics/{metric}", value / tot_examples)
def test_step(self, batch, batch_idx):
kspace, masked_kspace, mask, _, fname, slice_num, _, crop_size = batch
crop_size = crop_size[0] # always have a batch size of 1 for varnet
output, extra_outputs = self(kspace, masked_kspace, mask)
# check for FLAIR 203
if output.shape[-1] < crop_size[1]:
crop_size = (output.shape[-1], output.shape[-1])
output = transforms.center_crop(output, crop_size)
return {
"fname": fname,
"slice": slice_num,
"output": output.cpu().numpy(),
}
def configure_optimizers(self):
        # This needs to be a class attribute for the gradient-storing workaround
self.optim = torch.optim.Adam(
self.parameters(), lr=self.lr, weight_decay=self.weight_decay
)
scheduler = torch.optim.lr_scheduler.StepLR(
self.optim, self.lr_step_size, self.lr_gamma
)
return [self.optim], [scheduler]
@staticmethod
def add_model_specific_args(parent_parser): # pragma: no-cover
"""
Define parameters that only apply to this model
"""
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser = MriModule.add_model_specific_args(parser)
# param overwrites
# network params
parser.add_argument(
"--num_cascades",
default=12,
type=int,
help="Number of VarNet cascades",
)
parser.add_argument(
"--pools",
default=4,
type=int,
help="Number of U-Net pooling layers in VarNet blocks",
)
parser.add_argument(
"--chans",
default=18,
type=int,
help="Number of channels for U-Net in VarNet blocks",
)
parser.add_argument(
"--sens_pools",
default=4,
type=int,
help="Number of pooling layers for sense map estimation U-Net in VarNet",
)
parser.add_argument(
"--sens_chans",
default=8,
            type=int,
help="Number of channels for sense map estimation U-Net in VarNet",
)
# training params (opt)
parser.add_argument(
"--lr", default=0.0003, type=float, help="Adam learning rate"
)
parser.add_argument(
"--lr_step_size",
default=40,
type=int,
help="Epoch at which to decrease step size",
)
parser.add_argument(
"--lr_gamma",
default=0.1,
type=float,
help="Extent to which step size should be decreased",
)
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Strength of weight decay regularization",
)
return parser
|
[
"fastmri.data.transforms.center_crop_to_smallest",
"numpy.zeros_like",
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"fastmri.ifft2c",
"torch.cat",
"collections.defaultdict",
"fastmri.data.transforms.center_crop",
"fastmri.pl_modules.mri_module.MriModule.add_model_specific_args",
"fastmri.models.varnet.NormUnet",
"torch.log10",
"fastmri.complex_conj",
"fastmri.SSIMLoss",
"fastmri.evaluate.ssim",
"fastmri.models.adaptive_varnet.AdaptiveSensitivityModel",
"torch.abs",
"fastmri.evaluate.mse",
"torch.tensor"
] |
[((2896, 2982), 'fastmri.models.adaptive_varnet.AdaptiveSensitivityModel', 'AdaptiveSensitivityModel', (['sens_chans', 'sens_pools'], {'num_sense_lines': 'num_sense_lines'}), '(sens_chans, sens_pools, num_sense_lines=\n num_sense_lines)\n', (2920, 2982), False, 'from fastmri.models.adaptive_varnet import AdaptiveSensitivityModel, AdaptiveVarNetBlock\n'), ((3490, 3507), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3501, 3507), False, 'from collections import defaultdict\n'), ((4684, 4701), 'fastmri.ifft2c', 'fastmri.ifft2c', (['x'], {}), '(x)\n', (4698, 4701), False, 'import fastmri\n'), ((9169, 9187), 'fastmri.SSIMLoss', 'fastmri.SSIMLoss', ([], {}), '()\n', (9185, 9187), False, 'import fastmri\n'), ((9455, 9511), 'fastmri.data.transforms.center_crop_to_smallest', 'transforms.center_crop_to_smallest', (['batch.target', 'output'], {}), '(batch.target, output)\n', (9489, 9511), False, 'from fastmri.data import transforms\n'), ((11059, 11076), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (11070, 11076), False, 'from collections import defaultdict\n'), ((11100, 11117), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (11111, 11117), False, 'from collections import defaultdict\n'), ((11138, 11155), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (11149, 11155), False, 'from collections import defaultdict\n'), ((12382, 12438), 'fastmri.data.transforms.center_crop_to_smallest', 'transforms.center_crop_to_smallest', (['batch.target', 'output'], {}), '(batch.target, output)\n', (12416, 12438), False, 'from fastmri.data import transforms\n'), ((14952, 14969), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (14963, 14969), False, 'from collections import defaultdict\n'), ((14993, 15010), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (15004, 15010), False, 'from collections import defaultdict\n'), ((15031, 15048), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (15042, 15048), False, 'from collections import defaultdict\n'), ((16131, 16148), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (16142, 16148), False, 'from collections import defaultdict\n'), ((16172, 16189), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (16183, 16189), False, 'from collections import defaultdict\n'), ((16210, 16227), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (16221, 16227), False, 'from collections import defaultdict\n'), ((18878, 18895), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (18889, 18895), False, 'from collections import defaultdict\n'), ((18919, 18936), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (18930, 18936), False, 'from collections import defaultdict\n'), ((18957, 18974), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (18968, 18974), False, 'from collections import defaultdict\n'), ((21879, 21920), 'fastmri.data.transforms.center_crop', 'transforms.center_crop', (['output', 'crop_size'], {}), '(output, crop_size)\n', (21901, 21920), False, 'from fastmri.data import transforms\n'), ((22314, 22391), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['self.optim', 'self.lr_step_size', 'self.lr_gamma'], {}), '(self.optim, self.lr_step_size, self.lr_gamma)\n', (22345, 22391), False, 'import torch\n'), ((22640, 22695), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'parents': 
'[parent_parser]', 'add_help': '(False)'}), '(parents=[parent_parser], add_help=False)\n', (22654, 22695), False, 'from argparse import ArgumentParser\n'), ((22713, 22754), 'fastmri.pl_modules.mri_module.MriModule.add_model_specific_args', 'MriModule.add_model_specific_args', (['parser'], {}), '(parser)\n', (22746, 22754), False, 'from fastmri.pl_modules.mri_module import MriModule\n'), ((18346, 18374), 'torch.tensor', 'torch.tensor', (['local_examples'], {}), '(local_examples)\n', (18358, 18374), False, 'import torch\n'), ((21050, 21078), 'torch.tensor', 'torch.tensor', (['local_examples'], {}), '(local_examples)\n', (21062, 21078), False, 'import torch\n'), ((4512, 4539), 'fastmri.ifft2c', 'fastmri.ifft2c', (['kspace_pred'], {}), '(kspace_pred)\n', (4526, 4539), False, 'import fastmri\n'), ((14556, 14582), 'torch.abs', 'torch.abs', (['(target - output)'], {}), '(target - output)\n', (14565, 14582), False, 'import torch\n'), ((18422, 18439), 'torch.cat', 'torch.cat', (['losses'], {}), '(losses)\n', (18431, 18439), False, 'import torch\n'), ((21122, 21139), 'torch.cat', 'torch.cat', (['losses'], {}), '(losses)\n', (21131, 21139), False, 'import torch\n'), ((3110, 3132), 'fastmri.models.varnet.NormUnet', 'NormUnet', (['chans', 'pools'], {}), '(chans, pools)\n', (3118, 3132), False, 'from fastmri.models.varnet import NormUnet\n'), ((4740, 4771), 'fastmri.complex_conj', 'fastmri.complex_conj', (['sens_maps'], {}), '(sens_maps)\n', (4760, 4771), False, 'import fastmri\n'), ((17904, 17924), 'torch.log10', 'torch.log10', (['mse_val'], {}), '(mse_val)\n', (17915, 17924), False, 'import torch\n'), ((20628, 20648), 'torch.log10', 'torch.log10', (['mse_val'], {}), '(mse_val)\n', (20639, 20648), False, 'import torch\n'), ((11561, 11589), 'fastmri.evaluate.mse', 'evaluate.mse', (['target', 'output'], {}), '(target, output)\n', (11573, 11589), False, 'from fastmri import evaluate\n'), ((11825, 11891), 'fastmri.evaluate.ssim', 'evaluate.ssim', (['target[None, ...]', 'output[None, ...]'], {'maxval': 'maxval'}), '(target[None, ...], output[None, ...], maxval=maxval)\n', (11838, 11891), False, 'from fastmri import evaluate\n'), ((15435, 15463), 'fastmri.evaluate.mse', 'evaluate.mse', (['target', 'output'], {}), '(target, output)\n', (15447, 15463), False, 'from fastmri import evaluate\n'), ((15699, 15765), 'fastmri.evaluate.ssim', 'evaluate.ssim', (['target[None, ...]', 'output[None, ...]'], {'maxval': 'maxval'}), '(target[None, ...], output[None, ...], maxval=maxval)\n', (15712, 15765), False, 'from fastmri import evaluate\n'), ((11708, 11729), 'numpy.zeros_like', 'np.zeros_like', (['target'], {}), '(target)\n', (11721, 11729), True, 'import numpy as np\n'), ((15582, 15603), 'numpy.zeros_like', 'np.zeros_like', (['target'], {}), '(target)\n', (15595, 15603), True, 'import numpy as np\n'), ((17743, 17816), 'torch.tensor', 'torch.tensor', (['max_vals[fname]'], {'dtype': 'mse_val.dtype', 'device': 'mse_val.device'}), '(max_vals[fname], dtype=mse_val.dtype, device=mse_val.device)\n', (17755, 17816), False, 'import torch\n'), ((20467, 20540), 'torch.tensor', 'torch.tensor', (['max_vals[fname]'], {'dtype': 'mse_val.dtype', 'device': 'mse_val.device'}), '(max_vals[fname], dtype=mse_val.dtype, device=mse_val.device)\n', (20479, 20540), False, 'import torch\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
loading_dataset.py
Created on Thu May 3 12:47:36 2018
@author: sungkyun
"""
import torch
from torch.utils.data.dataset import Dataset
#from torch import from_numpy
import numpy as np
import pandas as pd
#from sklearn import preprocessing
#from sklearn.preprocessing import StandardScaler
#from sklearn.externals import joblib
import glob
from nnmnkwii import minmax_scale, scale
DIM_INDEX = dict()
DIM_INDEX['linguistic'] = np.arange(0,420) # source: /linguistic
DIM_INDEX['f0'] = [0] # source: /pyworld
DIM_INDEX['log-f0'] = [1] # source: /pyworld
DIM_INDEX['vuv'] = [2] # source: /pyworld
DIM_INDEX['bap'] = [3] # source: /pyworld
DIM_INDEX['melcep'] = np.arange(4,64) # source: /pyworld
DIM_INDEX['pyspec'] = np.arange(64,577) # source: /pyworld
DIM_INDEX['melspec'] = np.arange(0, 128) # source: /melmfcc
DIM_INDEX['mfcc'] = np.arange(128,153) # source: /melmfcc
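# Example of how DIM_INDEX determines the conditioning dimensionality; the feature
# selection below is purely illustrative, not a required configuration:
#     cond_feature_select = ['melspec', 'mfcc']
#     cond_dim = len(DIM_INDEX['melspec']) + len(DIM_INDEX['mfcc'])  # 128 + 25 = 153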
class CmuArcticDataset(Dataset):
    def __init__(self, data_root_dir=None, random_zpad=False, cond_feature_select=None, transform=None):
#data_root_dir = 'data/processed_slt_arctic/TRAIN/'
#data_root_dir = 'data/processed_slt_arctic/TEST/'
self.mulaw_filepaths = sorted(glob.glob(data_root_dir + 'mulaw/*.npy'))
self.linguistic_filepaths = sorted(glob.glob(data_root_dir + 'linguistic/*.npy'))
self.melmfcc_filepaths = sorted(glob.glob(data_root_dir + 'melmfcc/*.npy'))
self.pyworld_filepaths = sorted(glob.glob(data_root_dir + 'pyworld/*.npy'))
self.file_ids = [path.split('/')[-1][:-4] for path in self.mulaw_filepaths]
self.random_zpad = random_zpad
self.cond_feature_select = cond_feature_select # ['linguistic', 'f0', 'log-f0', 'vuv','bap', 'melcep', 'pyspec', 'melspec', 'mfcc']
self.transform = transform
self.scale_factor = np.load(data_root_dir + '../scale_factors.npy')
# Construct conditional feature selection info
global DIM_INDEX
self.cond_info = dict()
self.cond_dim = 0 # total dimension of condition features
for sel in self.cond_feature_select:
self.cond_info[sel] = np.arange(self.cond_dim, self.cond_dim + len(DIM_INDEX[sel]))
self.cond_dim += len(DIM_INDEX[sel])
def __getitem__(self, index):
# Get 3 items: (file_id, mulaw, cond)
file_id = self.file_ids[index]
x = np.load(self.mulaw_filepaths[index]) # size(x) = (T,)
cond = np.empty((len(x),0), np.float16) # size(cond) = (T,d)
cond_linguistic, cond_pyworld, cond_melmfcc = [], [], []
if any(sel in self.cond_feature_select for sel in ['linguistic']):
cond_linguistic = np.load(self.linguistic_filepaths[index])
if any(sel in self.cond_feature_select for sel in ['f0', 'log-f0', 'vuv', 'bap', 'melcep', 'pyspec']):
cond_pyworld = np.load(self.pyworld_filepaths[index])
if any(sel in self.cond_feature_select for sel in ['melspec', 'mfcc']):
cond_melmfcc = np.load(self.melmfcc_filepaths[index])
global DIM_INDEX
for sel in self.cond_feature_select:
            if sel == 'linguistic':
cond = np.hstack((cond, cond_linguistic))
elif sel in ['f0', 'log-f0', 'vuv', 'bap', 'melcep', 'pyspec']:
cond = np.hstack((cond, cond_pyworld[:,DIM_INDEX[sel]]))
elif sel in ['melspec', 'mfcc']:
cond = np.hstack((cond, cond_melmfcc[:,DIM_INDEX[sel]]))
        assert cond.shape[1] == self.cond_dim  # stacked cond feature size must match the expected total
# Feature-scaling
cond = self.featScaler(cond)
# Transpose
cond = np.transpose(cond) # size(cond) = (T,d) --> (d, T): required for pytorch dataloading
# Random zeropadding 20~50%
if self.random_zpad is True:
zpad_sz = int(len(x) * np.random.uniform(0.2,0.5))
x[0:zpad_sz] = 128 # fill first <zpad_sz> samples with zeros (in mulaw-enc, 128)
cond[:,0:zpad_sz] = 0.
return file_id, torch.LongTensor(x), cond
def featScaler(self, feat):
for sel in self.cond_feature_select:
            if sel == 'linguistic':
feat[:,self.cond_info[sel]] = minmax_scale(feat[:,self.cond_info[sel]],
self.scale_factor['linguistic_min'], self.scale_factor['linguistic_max'], feature_range=(0.01, 0.99))
return feat
def __len__(self):
return len(self.file_ids) # return the number of examples that we have
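# Minimal DataLoader sketch; the directory and feature selection are illustrative
# assumptions (see the commented paths in __init__):
#     from torch.utils.data import DataLoader
#     dset = CmuArcticDataset(data_root_dir='data/processed_slt_arctic/TRAIN/',
#                             random_zpad=False,
#                             cond_feature_select=['melspec', 'mfcc'])
#     loader = DataLoader(dset, batch_size=1, shuffle=True)
#     file_id, x, cond = next(iter(loader))  # x: (1, T) LongTensor, cond: (1, d, T)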
class YesNoDataset(Dataset):
    def __init__(self, csv_path=None, zpad_target_len=None, transform=None):
# Internal variables
#csv_path = 'data/processed_yesno/test.csv'
#csv_path = 'data/processed_yesno/train.csv'
self.zpad_target_len = zpad_target_len
self.transform = transform
self.file_ids = None
self.mulaw_filepaths = None
self.mfcc_filepaths = None
# Reading .csv file
df = pd.read_csv(csv_path, index_col=0) # ['file_id', 'mulaw_filepath', 'mfcc_filepath']
self.file_ids = df.iloc[:,0]
self.mulaw_filepaths = df.iloc[:,1]
self.mfcc_filepaths = df.iloc[:,2]
def __getitem__(self, index):
# Get 3 items: (file_id, x = mulaw, cond = mfcc)
file_id = self.file_ids[index]
x = np.load(self.mulaw_filepaths[index]) # size = (T,)
cond = np.load(self.mfcc_filepaths[index]) # size = (25,T)
if self.zpad_target_len:
x_length = x.shape[0]
if x_length > self.zpad_target_len:
x = x[0:self.zpad_target_len]
elif x_length < self.zpad_target_len:
zpad_sz = self.zpad_target_len - x_length
                x = np.pad(x, (zpad_sz, 0), mode='constant', constant_values=128) # pad the first zpad_sz samples with mu-law zero (128)
cond_length = cond.shape[1]
if cond_length > self.zpad_target_len:
cond = cond[:, 0:self.zpad_target_len]
elif cond_length < self.zpad_target_len:
zpad_sz = self.zpad_target_len - cond_length
cond = np.pad(cond, ((0,0),(zpad_sz, 0)), mode='constant')
return file_id, torch.LongTensor(x), cond
def __len__(self):
return len(self.file_ids) # return the number of examples that we have
|
[
"numpy.pad",
"numpy.random.uniform",
"numpy.load",
"nnmnkwii.minmax_scale",
"torch.LongTensor",
"pandas.read_csv",
"numpy.transpose",
"numpy.hstack",
"numpy.arange",
"glob.glob"
] |
[((482, 499), 'numpy.arange', 'np.arange', (['(0)', '(420)'], {}), '(0, 420)\n', (491, 499), True, 'import numpy as np\n'), ((799, 815), 'numpy.arange', 'np.arange', (['(4)', '(64)'], {}), '(4, 64)\n', (808, 815), True, 'import numpy as np\n'), ((861, 879), 'numpy.arange', 'np.arange', (['(64)', '(577)'], {}), '(64, 577)\n', (870, 879), True, 'import numpy as np\n'), ((924, 941), 'numpy.arange', 'np.arange', (['(0)', '(128)'], {}), '(0, 128)\n', (933, 941), True, 'import numpy as np\n'), ((983, 1002), 'numpy.arange', 'np.arange', (['(128)', '(153)'], {}), '(128, 153)\n', (992, 1002), True, 'import numpy as np\n'), ((1964, 2011), 'numpy.load', 'np.load', (["(data_root_dir + '../scale_factors.npy')"], {}), "(data_root_dir + '../scale_factors.npy')\n", (1971, 2011), True, 'import numpy as np\n'), ((2538, 2574), 'numpy.load', 'np.load', (['self.mulaw_filepaths[index]'], {}), '(self.mulaw_filepaths[index])\n', (2545, 2574), True, 'import numpy as np\n'), ((3917, 3935), 'numpy.transpose', 'np.transpose', (['cond'], {}), '(cond)\n', (3929, 3935), True, 'import numpy as np\n'), ((5300, 5334), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {'index_col': '(0)'}), '(csv_path, index_col=0)\n', (5311, 5334), True, 'import pandas as pd\n'), ((5669, 5705), 'numpy.load', 'np.load', (['self.mulaw_filepaths[index]'], {}), '(self.mulaw_filepaths[index])\n', (5676, 5705), True, 'import numpy as np\n'), ((5735, 5770), 'numpy.load', 'np.load', (['self.mfcc_filepaths[index]'], {}), '(self.mfcc_filepaths[index])\n', (5742, 5770), True, 'import numpy as np\n'), ((1329, 1369), 'glob.glob', 'glob.glob', (["(data_root_dir + 'mulaw/*.npy')"], {}), "(data_root_dir + 'mulaw/*.npy')\n", (1338, 1369), False, 'import glob\n'), ((1414, 1459), 'glob.glob', 'glob.glob', (["(data_root_dir + 'linguistic/*.npy')"], {}), "(data_root_dir + 'linguistic/*.npy')\n", (1423, 1459), False, 'import glob\n'), ((1501, 1543), 'glob.glob', 'glob.glob', (["(data_root_dir + 'melmfcc/*.npy')"], {}), "(data_root_dir + 'melmfcc/*.npy')\n", (1510, 1543), False, 'import glob\n'), ((1585, 1627), 'glob.glob', 'glob.glob', (["(data_root_dir + 'pyworld/*.npy')"], {}), "(data_root_dir + 'pyworld/*.npy')\n", (1594, 1627), False, 'import glob\n'), ((2881, 2922), 'numpy.load', 'np.load', (['self.linguistic_filepaths[index]'], {}), '(self.linguistic_filepaths[index])\n', (2888, 2922), True, 'import numpy as np\n'), ((3061, 3099), 'numpy.load', 'np.load', (['self.pyworld_filepaths[index]'], {}), '(self.pyworld_filepaths[index])\n', (3068, 3099), True, 'import numpy as np\n'), ((3211, 3249), 'numpy.load', 'np.load', (['self.melmfcc_filepaths[index]'], {}), '(self.melmfcc_filepaths[index])\n', (3218, 3249), True, 'import numpy as np\n'), ((4308, 4327), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (4324, 4327), False, 'import torch\n'), ((6562, 6581), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (6578, 6581), False, 'import torch\n'), ((3388, 3422), 'numpy.hstack', 'np.hstack', (['(cond, cond_linguistic)'], {}), '((cond, cond_linguistic))\n', (3397, 3422), True, 'import numpy as np\n'), ((4517, 4671), 'nnmnkwii.minmax_scale', 'minmax_scale', (['feat[:, self.cond_info[sel]]', "self.scale_factor['linguistic_min']", "self.scale_factor['linguistic_max']"], {'feature_range': '(0.01, 0.99)'}), "(feat[:, self.cond_info[sel]], self.scale_factor[\n 'linguistic_min'], self.scale_factor['linguistic_max'], feature_range=(\n 0.01, 0.99))\n", (4529, 4671), False, 'from nnmnkwii import minmax_scale, scale\n'), ((3522, 3572), 'numpy.hstack', 
'np.hstack', (['(cond, cond_pyworld[:, DIM_INDEX[sel]])'], {}), '((cond, cond_pyworld[:, DIM_INDEX[sel]]))\n', (3531, 3572), True, 'import numpy as np\n'), ((4126, 4153), 'numpy.random.uniform', 'np.random.uniform', (['(0.2)', '(0.5)'], {}), '(0.2, 0.5)\n', (4143, 4153), True, 'import numpy as np\n'), ((6085, 6146), 'numpy.pad', 'np.pad', (['x', '(zpad_sz, 0)'], {'mode': '"""constant"""', 'constant_values': '(128)'}), "(x, (zpad_sz, 0), mode='constant', constant_values=128)\n", (6091, 6146), True, 'import numpy as np\n'), ((6486, 6539), 'numpy.pad', 'np.pad', (['cond', '((0, 0), (zpad_sz, 0))'], {'mode': '"""constant"""'}), "(cond, ((0, 0), (zpad_sz, 0)), mode='constant')\n", (6492, 6539), True, 'import numpy as np\n'), ((3640, 3690), 'numpy.hstack', 'np.hstack', (['(cond, cond_melmfcc[:, DIM_INDEX[sel]])'], {}), '((cond, cond_melmfcc[:, DIM_INDEX[sel]]))\n', (3649, 3690), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from rlkit.torch.core import PyTorchModule
from rlkit.torch.networks import Mlp
from rlkit.torch import pytorch_util as ptu
from rlkit.torch.torch_meta_irl_algorithm import np_to_pytorch_batch
from rlkit.torch.irl.encoders.aggregators import sum_aggregator_unmasked, tanh_sum_aggregator_unmasked
from rlkit.torch.irl.encoders.aggregators import sum_aggregator, tanh_sum_aggregator
from rlkit.torch.distributions import ReparamMultivariateNormalDiag
class TrivialR2ZMap(PyTorchModule):
def __init__(
self,
r_dim,
z_dim,
hid_dim,
        # this makes it closer to deterministic, which makes it easier to train
# before we turn on the KL regularization
LOG_STD_SUBTRACT_VALUE=2.0
):
self.save_init_params(locals())
super().__init__()
self.trunk = nn.Sequential(
nn.Linear(r_dim, hid_dim),
nn.BatchNorm1d(hid_dim),
nn.ReLU(),
nn.Linear(hid_dim, hid_dim),
nn.BatchNorm1d(hid_dim),
nn.ReLU()
)
self.mean_fc = nn.Linear(hid_dim, z_dim)
self.log_sig_fc = nn.Linear(hid_dim, z_dim)
self.LOG_STD_SUBTRACT_VALUE = LOG_STD_SUBTRACT_VALUE
        print('LOG STD SUBTRACT VALUE FOR APPROX POSTERIOR IS %f' % LOG_STD_SUBTRACT_VALUE)
def forward(self, r):
trunk_output = self.trunk(r)
mean = self.mean_fc(trunk_output)
log_sig = self.log_sig_fc(trunk_output) - self.LOG_STD_SUBTRACT_VALUE
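        # Subtracting LOG_STD_SUBTRACT_VALUE (2.0) shrinks sigma by exp(-2) ~= 0.14, keeping the posterior near-deterministic before KL regularization kicks in.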
return mean, log_sig
class TimestepBasedEncoder(PyTorchModule):
def __init__(
self,
input_dim, #(s,a,s') or (s,s') depending on state-only
r_dim,
z_dim,
enc_hid_dim,
r2z_hid_dim,
num_enc_layer_blocks,
hid_act='relu',
use_bn=True,
within_traj_agg='sum', # 'sum' or 'mean',
state_only=False # if state-only, we only condition on the states and not actions
):
self.save_init_params(locals())
super().__init__()
if hid_act == 'relu':
hid_act_class = nn.ReLU
elif hid_act == 'tanh':
hid_act_class = nn.Tanh
else:
raise NotImplementedError()
self.r_dim, self.z_dim = r_dim, z_dim
# build the timestep encoder
mod_list = nn.ModuleList([nn.Linear(input_dim, enc_hid_dim)])
if use_bn: mod_list.append(nn.BatchNorm1d(enc_hid_dim))
mod_list.append(hid_act_class())
for i in range(num_enc_layer_blocks - 1):
mod_list.append(nn.Linear(enc_hid_dim, enc_hid_dim))
if use_bn: mod_list.append(nn.BatchNorm1d(enc_hid_dim))
mod_list.append(hid_act_class())
mod_list.append(nn.Linear(enc_hid_dim, r_dim))
self.timestep_encoder = nn.Sequential(*mod_list)
assert within_traj_agg in ['sum', 'mean']
self.use_sum_for_traj_agg = within_traj_agg == 'sum'
print('\nWITHIN TRAJ AGG IS SUM: {}'.format(self.use_sum_for_traj_agg))
# aggregator
self.agg = sum_aggregator_unmasked
self.agg_masked = sum_aggregator
# build the r to z map
self.r2z_map = TrivialR2ZMap(r_dim, z_dim, r2z_hid_dim)
self.state_only = state_only
print('STATE-ONLY ENCODER: {}'.format(self.state_only))
def forward(self, context=None, mask=None, r=None):
if r is None:
obs = np.array([[d['observations'] for d in task_trajs] for task_trajs in context])
next_obs = np.array([[d['next_observations'] for d in task_trajs] for task_trajs in context])
if not self.state_only:
acts = np.array([[d['actions'] for d in task_trajs] for task_trajs in context])
all_timesteps = np.concatenate([obs, acts, next_obs], axis=-1)
else:
all_timesteps = np.concatenate([obs, next_obs], axis=-1)
# FOR DEBUGGING THE ENCODER
# all_timesteps = all_timesteps[:,:,-1:,:]
all_timesteps = Variable(ptu.from_numpy(all_timesteps), requires_grad=False)
# N_tasks x N_trajs x Len x Dim
N_tasks, N_trajs, Len, Dim = all_timesteps.size(0), all_timesteps.size(1), all_timesteps.size(2), all_timesteps.size(3)
all_timesteps = all_timesteps.view(-1, Dim)
embeddings = self.timestep_encoder(all_timesteps)
embeddings = embeddings.view(N_tasks, N_trajs, Len, self.r_dim)
if self.use_sum_for_traj_agg:
traj_embeddings = torch.sum(embeddings, dim=2)
else:
traj_embeddings = torch.mean(embeddings, dim=2)
# get r
if mask is None:
r = self.agg(traj_embeddings)
else:
r = self.agg_masked(traj_embeddings, mask)
post_mean, post_log_sig_diag = self.r2z_map(r)
return ReparamMultivariateNormalDiag(post_mean, post_log_sig_diag)
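# Illustrative usage sketch; the dimension values are assumptions, not defaults:
#     enc = TimestepBasedEncoder(input_dim=2 * obs_dim + act_dim, r_dim=64, z_dim=32,
#                                enc_hid_dim=128, r2z_hid_dim=128, num_enc_layer_blocks=2)
#     posterior = enc(context=context)  # context: per-task lists of trajectory dicts with
#                                       # 'observations', 'actions', 'next_observations'
#     # `posterior` is a ReparamMultivariateNormalDiag over the latent task variable z.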
|
[
"torch.mean",
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"rlkit.torch.pytorch_util.from_numpy",
"rlkit.torch.distributions.ReparamMultivariateNormalDiag",
"numpy.array",
"torch.nn.Linear",
"torch.sum",
"numpy.concatenate"
] |
[((1195, 1220), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'z_dim'], {}), '(hid_dim, z_dim)\n', (1204, 1220), True, 'import torch.nn as nn\n'), ((1247, 1272), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'z_dim'], {}), '(hid_dim, z_dim)\n', (1256, 1272), True, 'import torch.nn as nn\n'), ((2918, 2942), 'torch.nn.Sequential', 'nn.Sequential', (['*mod_list'], {}), '(*mod_list)\n', (2931, 2942), True, 'import torch.nn as nn\n'), ((5023, 5082), 'rlkit.torch.distributions.ReparamMultivariateNormalDiag', 'ReparamMultivariateNormalDiag', (['post_mean', 'post_log_sig_diag'], {}), '(post_mean, post_log_sig_diag)\n', (5052, 5082), False, 'from rlkit.torch.distributions import ReparamMultivariateNormalDiag\n'), ((975, 1000), 'torch.nn.Linear', 'nn.Linear', (['r_dim', 'hid_dim'], {}), '(r_dim, hid_dim)\n', (984, 1000), True, 'import torch.nn as nn\n'), ((1014, 1037), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hid_dim'], {}), '(hid_dim)\n', (1028, 1037), True, 'import torch.nn as nn\n'), ((1051, 1060), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1058, 1060), True, 'import torch.nn as nn\n'), ((1074, 1101), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'hid_dim'], {}), '(hid_dim, hid_dim)\n', (1083, 1101), True, 'import torch.nn as nn\n'), ((1115, 1138), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hid_dim'], {}), '(hid_dim)\n', (1129, 1138), True, 'import torch.nn as nn\n'), ((1152, 1161), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1159, 1161), True, 'import torch.nn as nn\n'), ((2855, 2884), 'torch.nn.Linear', 'nn.Linear', (['enc_hid_dim', 'r_dim'], {}), '(enc_hid_dim, r_dim)\n', (2864, 2884), True, 'import torch.nn as nn\n'), ((3537, 3614), 'numpy.array', 'np.array', (["[[d['observations'] for d in task_trajs] for task_trajs in context]"], {}), "([[d['observations'] for d in task_trajs] for task_trajs in context])\n", (3545, 3614), True, 'import numpy as np\n'), ((3638, 3724), 'numpy.array', 'np.array', (["[[d['next_observations'] for d in task_trajs] for task_trajs in context]"], {}), "([[d['next_observations'] for d in task_trajs] for task_trajs in\n context])\n", (3646, 3724), True, 'import numpy as np\n'), ((2452, 2485), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'enc_hid_dim'], {}), '(input_dim, enc_hid_dim)\n', (2461, 2485), True, 'import torch.nn as nn\n'), ((2523, 2550), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['enc_hid_dim'], {}), '(enc_hid_dim)\n', (2537, 2550), True, 'import torch.nn as nn\n'), ((2672, 2707), 'torch.nn.Linear', 'nn.Linear', (['enc_hid_dim', 'enc_hid_dim'], {}), '(enc_hid_dim, enc_hid_dim)\n', (2681, 2707), True, 'import torch.nn as nn\n'), ((3780, 3852), 'numpy.array', 'np.array', (["[[d['actions'] for d in task_trajs] for task_trajs in context]"], {}), "([[d['actions'] for d in task_trajs] for task_trajs in context])\n", (3788, 3852), True, 'import numpy as np\n'), ((3885, 3931), 'numpy.concatenate', 'np.concatenate', (['[obs, acts, next_obs]'], {'axis': '(-1)'}), '([obs, acts, next_obs], axis=-1)\n', (3899, 3931), True, 'import numpy as np\n'), ((3982, 4022), 'numpy.concatenate', 'np.concatenate', (['[obs, next_obs]'], {'axis': '(-1)'}), '([obs, next_obs], axis=-1)\n', (3996, 4022), True, 'import numpy as np\n'), ((4169, 4198), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['all_timesteps'], {}), '(all_timesteps)\n', (4183, 4198), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((4669, 4697), 'torch.sum', 'torch.sum', (['embeddings'], {'dim': '(2)'}), '(embeddings, dim=2)\n', (4678, 4697), False, 'import torch\n'), ((4750, 4779), 
'torch.mean', 'torch.mean', (['embeddings'], {'dim': '(2)'}), '(embeddings, dim=2)\n', (4760, 4779), False, 'import torch\n'), ((2748, 2775), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['enc_hid_dim'], {}), '(enc_hid_dim)\n', (2762, 2775), True, 'import torch.nn as nn\n')]
|
"""
Module for Magellan/FIRE specific methods.
Important Notes:
    - If you are reducing old FIRE data (before the breakage happened
in 2016), please change the ord_spat_pos array (see lines from
~220 to ~230)
.. include:: ../include/links.rst
"""
from pkg_resources import resource_filename
import numpy as np
from pypeit import msgs
from pypeit import telescopes
from pypeit.core import framematch
from pypeit.spectrographs import spectrograph
from pypeit.images import detector_container
class MagellanFIRESpectrograph(spectrograph.Spectrograph):
"""
Child to handle Magellan/FIRE specific code
.. note::
For FIRE Echelle, we usually use high gain and SUTR read mode.
The exposure time is usually around 900s. The detector
        parameters below are based on that mode. Standard star and
        calibration frames usually use Fowler 1 read mode, in which case
        the read noise is ~20 electrons.
"""
ndet = 1
telescope = telescopes.MagellanTelescopePar()
def init_meta(self):
"""
Define how metadata are derived from the spectrograph files.
That is, this associates the ``PypeIt``-specific metadata keywords
with the instrument-specific header cards using :attr:`meta`.
"""
self.meta = {}
# Required (core)
self.meta['ra'] = dict(ext=0, card='RA')
self.meta['dec'] = dict(ext=0, card='DEC')
self.meta['target'] = dict(ext=0, card='OBJECT')
self.meta['decker'] = dict(ext=0, card=None, default='default')
self.meta['dichroic'] = dict(ext=0, card=None, default='default')
self.meta['binning'] = dict(ext=0, card=None, default='1,1')
self.meta['mjd'] = dict(ext=0, card='ACQTIME')
self.meta['exptime'] = dict(ext=0, card='EXPTIME')
self.meta['airmass'] = dict(ext=0, card='AIRMASS')
# Extras for config and frametyping
self.meta['dispname'] = dict(ext=0, card='GRISM')
self.meta['idname'] = dict(ext=0, card='OBSTYPE')
class MagellanFIREEchelleSpectrograph(MagellanFIRESpectrograph):
"""
Child to handle Magellan/FIRE Echelle data
.. note::
For FIRE Echelle, we usually use high gain and SUTR read mode.
The exposure time is usually around 900s. The detector
        parameters below are based on that mode. Standard star and
        calibration frames usually use Fowler 1 read mode, in which case
        the read noise is ~20 electrons.
"""
name = 'magellan_fire'
camera = 'FIRE'
pypeline = 'Echelle'
supported = True
comment = 'Magellan/FIRE in echelle mode'
def get_detector_par(self, hdu, det):
"""
Return metadata for the selected detector.
Args:
hdu (`astropy.io.fits.HDUList`_):
The open fits file with the raw image of interest.
det (:obj:`int`):
1-indexed detector number.
Returns:
:class:`~pypeit.images.detector_container.DetectorContainer`:
Object with the detector metadata.
"""
# Detector 1
detector_dict = dict(
binning = '1,1',
det = 1,
dataext = 0,
specaxis = 1,
specflip = True,
spatflip = False,
platescale = 0.18,
darkcurr = 0.01,
#saturation = 20000., # high gain is 20000 ADU, low gain is 32000 ADU
saturation = 100000., # This is an arbitrary value.
nonlinear = 1.0, # high gain mode, low gain is 0.875
mincounts = -1e10,
numamplifiers = 1,
gain = np.atleast_1d(1.2), # high gain mode, low gain is 3.8 e-/DN
ronoise = np.atleast_1d(5.0), # for high gain mode and SUTR read modes with exptime ~ 900s
datasec = np.atleast_1d('[5:2044,5:2044]'),
oscansec = np.atleast_1d('[5:2044,:5]')
)
return detector_container.DetectorContainer(**detector_dict)
@classmethod
def default_pypeit_par(cls):
"""
Return the default parameters to use for this instrument.
Returns:
:class:`~pypeit.par.pypeitpar.PypeItPar`: Parameters required by
all of ``PypeIt`` methods.
"""
par = super().default_pypeit_par()
# Wavelengths
# 1D wavelength solution with OH lines
par['calibrations']['wavelengths']['rms_threshold'] = 1.0
par['calibrations']['wavelengths']['sigdetect']=[5,10,10,10,10,20,30,30,30,30,30,10,30,30,60,30,30,10,20,30,10]
par['calibrations']['wavelengths']['n_first']=2
par['calibrations']['wavelengths']['n_final']=[3,3,3,2,4,4,4,3,4,4,4,3,4,4,4,4,4,4,6,6,4]
par['calibrations']['wavelengths']['lamps'] = ['OH_FIRE_Echelle']
#par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']
par['calibrations']['wavelengths']['method'] = 'reidentify'
par['calibrations']['wavelengths']['cc_thresh'] = 0.35
par['calibrations']['wavelengths']['reid_arxiv'] = 'magellan_fire_echelle.fits'
par['calibrations']['wavelengths']['match_toler']=30.0
# Echelle parameters
par['calibrations']['wavelengths']['echelle'] = True
par['calibrations']['wavelengths']['ech_fix_format'] = True
par['calibrations']['wavelengths']['ech_nspec_coeff'] = 4
par['calibrations']['wavelengths']['ech_norder_coeff'] = 6
par['calibrations']['wavelengths']['ech_sigrej'] = 3.0
# Always correct for flexure, starting with default parameters
par['scienceframe']['process']['sigclip'] = 20.0
par['scienceframe']['process']['satpix'] ='nothing'
# Set slits and tilts parameters
par['calibrations']['tilts']['tracethresh'] = 5
par['calibrations']['slitedges']['edge_thresh'] = 10.
par['calibrations']['slitedges']['trace_thresh'] = 10.
par['calibrations']['slitedges']['fit_order'] = 5
par['calibrations']['slitedges']['max_shift_adj'] = 0.5
par['calibrations']['slitedges']['fit_min_spec_length'] = 0.5
par['calibrations']['slitedges']['left_right_pca'] = True
par['calibrations']['slitedges']['pca_order'] = 3
# Model entire slit
par['reduce']['extraction']['model_full_slit'] = True # local sky subtraction operates on entire slit
# Processing steps
turn_off = dict(use_illumflat=False, use_biasimage=False, use_overscan=False, use_darkimage=False)
par.reset_all_processimages_par(**turn_off)
# Do not correct for flexure
par['flexure']['spec_method'] = 'skip'
# Set the default exposure time ranges for the frame typing
par['calibrations']['standardframe']['exprng'] = [None, 60]
par['calibrations']['arcframe']['exprng'] = [20, None]
par['calibrations']['darkframe']['exprng'] = [20, None]
par['scienceframe']['exprng'] = [20, None]
# Sensitivity function parameters
        # Sensitivity function parameters
par['sensfunc']['polyorder'] = 8
# place holder for telgrid file
par['sensfunc']['IR']['telgridfile'] \
= resource_filename('pypeit',
'/data/telluric/TelFit_LasCampanas_3100_26100_R20000.fits')
return par
def check_frame_type(self, ftype, fitstbl, exprng=None):
"""
Check for frames of the provided type.
Args:
ftype (:obj:`str`):
Type of frame to check. Must be a valid frame type; see
frame-type :ref:`frame_type_defs`.
fitstbl (`astropy.table.Table`_):
The table with the metadata for one or more frames to check.
exprng (:obj:`list`, optional):
Range in the allowed exposure time for a frame of type
``ftype``. See
:func:`pypeit.core.framematch.check_frame_exptime`.
Returns:
`numpy.ndarray`_: Boolean array with the flags selecting the
exposures in ``fitstbl`` that are ``ftype`` type frames.
"""
good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
if ftype in ['pinhole', 'bias']:
# No pinhole or bias frames
return np.zeros(len(fitstbl), dtype=bool)
if ftype in ['pixelflat', 'trace']:
return good_exp & (fitstbl['idname'] == 'PixFlat')
if ftype == 'standard':
return good_exp & (fitstbl['idname'] == 'Telluric')
if ftype == 'science':
return good_exp & (fitstbl['idname'] == 'Science')
if ftype in ['arc', 'tilt']:
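            # The echelle wavelength solution uses OH sky lines, so science exposures double as arc/tilt frames.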
return good_exp & (fitstbl['idname'] == 'Science')
msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
return np.zeros(len(fitstbl), dtype=bool)
@property
def norders(self):
"""
        Number of orders for this spectrograph. Should only be defined for
echelle spectrographs, and it is undefined for the base class.
"""
return 21
@property
def order_spat_pos(self):
"""
Return the expected spatial position of each echelle order.
"""
# ToDo: We somehow need to automate this.
## For OLD data, i.e. before 2017
#ord_spat_pos = np.array([0.06054688, 0.14160156, 0.17089844, 0.22753906, 0.27539062,
# 0.32128906, 0.36474609, 0.40673828, 0.45019531, 0.48974609,
# 0.52978516, 0.56054688, 0.59814453, 0.63378906, 0.66503906,
# 0.70019531, 0.7421875 , 0.77978516, 0.82763672, 0.87109375,
# 0.9296875])
## For NEW data
ord_spat_pos = np.array([0.078125, 0.13769531, 0.19189453, 0.24414062, 0.29296875,
0.34179688, 0.38330078, 0.42724609, 0.46582031, 0.50439453,
0.54199219, 0.57763672, 0.61279297, 0.6484375 , 0.68457031,
0.71875 , 0.75439453, 0.79443359, 0.83789062, 0.88671875,
0.94091797])
return ord_spat_pos
@property
def orders(self):
"""
Return the order number for each echelle order.
"""
return np.arange(31, 10, -1, dtype=int)
@property
def spec_min_max(self):
"""
Return the minimum and maximum spectral pixel expected for the
spectral range of each order.
"""
spec_max = np.asarray([2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,
2048,2048,2048,2048,2048])
spec_min = np.asarray([ 500, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0])
return np.vstack((spec_min, spec_max))
def order_platescale(self, order_vec, binning=None):
"""
Return the platescale for each echelle order.
Note that FIRE has no binning.
Args:
order_vec (`numpy.ndarray`_):
The vector providing the order numbers.
binning (:obj:`str`, optional):
The string defining the spectral and spatial binning. **This
is always ignored.**
Returns:
`numpy.ndarray`_: An array with the platescale for each order
provided by ``order``.
"""
return np.full(order_vec.size, 0.15)
@property
def dloglam(self):
"""
Return the logarithmic step in wavelength for output spectra.
"""
# This number was determined using the resolution and sampling quoted on the FIRE website
R = 6000.0 * 2.7
dloglam = 1.0 / R / np.log(10.0)
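        # With R = 6000 * 2.7 = 16200, this gives dloglam ~= 2.7e-5 (step in log10 wavelength per pixel).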
return dloglam
@property
def loglam_minmax(self):
"""
Return the base-10 logarithm of the first and last wavelength for
        output spectra.
"""
return np.log10(8000.0), np.log10(25700)
class MagellanFIRELONGSpectrograph(MagellanFIRESpectrograph):
"""
Child to handle Magellan/FIRE high-throughput data
.. note::
For FIRE longslit, science data are usually taken with SUTR readout
mode with ~600s exposure (at least for quasar hunting people) and the
readout noise is ~6 e-
"""
name = 'magellan_fire_long'
camera = 'FIRE'
supported = True
comment = 'Magellan/FIRE in long-slit/high-throughput mode'
def get_detector_par(self, hdu, det):
"""
Return metadata for the selected detector.
Args:
hdu (`astropy.io.fits.HDUList`_):
The open fits file with the raw image of interest.
det (:obj:`int`):
1-indexed detector number.
Returns:
:class:`~pypeit.images.detector_container.DetectorContainer`:
Object with the detector metadata.
"""
# Detector 1
detector_dict = dict(
binning = '1,1',
det = 1,
dataext = 0,
specaxis = 0,
specflip = False,
spatflip = False,
platescale = 0.15,
darkcurr = 0.01,
saturation = 320000., #32000 for low gain, I set to a higher value to keep data in K-band
nonlinear = 0.875,
mincounts = -1e10,
numamplifiers = 1,
gain = np.atleast_1d(3.8),
ronoise = np.atleast_1d(6.0), # SUTR readout mode with exposure~600s
datasec = np.atleast_1d('[5:2044, 900:1250]'),
oscansec = np.atleast_1d('[:5, 900:1250]')
)
return detector_container.DetectorContainer(**detector_dict)
@classmethod
def default_pypeit_par(cls):
"""
Return the default parameters to use for this instrument.
Returns:
:class:`~pypeit.par.pypeitpar.PypeItPar`: Parameters required by
all of ``PypeIt`` methods.
"""
par = super().default_pypeit_par()
# Wavelengths
# 1D wavelength solution with arc lines
par['calibrations']['wavelengths']['rms_threshold'] = 1.0
par['calibrations']['wavelengths']['sigdetect']=3
par['calibrations']['wavelengths']['fwhm'] = 20
par['calibrations']['wavelengths']['n_first']=2
par['calibrations']['wavelengths']['n_final']=4
par['calibrations']['wavelengths']['lamps'] = ['ArI', 'ArII', 'ThAr', 'NeI']
#par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['reid_arxiv'] = 'magellan_fire_long.fits'
par['calibrations']['wavelengths']['match_toler']=5.0
# Set slits and tilts parameters
par['calibrations']['tilts']['tracethresh'] = 5
par['calibrations']['slitedges']['trace_thresh'] = 10.
par['calibrations']['slitedges']['sync_predict'] = 'nearest'
# Processing steps
turn_off = dict(use_illumflat=False, use_biasimage=False, use_overscan=False,
use_darkimage=False)
par.reset_all_processimages_par(**turn_off)
# Scienceimage parameters
par['reduce']['findobj']['sig_thresh'] = 5
#par['reduce']['maxnumber'] = 2
par['reduce']['findobj']['find_trim_edge'] = [50,50]
par['flexure']['spec_method'] = 'skip'
par['sensfunc']['IR']['telgridfile'] \
= resource_filename('pypeit',
'/data/telluric/TelFit_LasCampanas_3100_26100_R20000.fits')
# Set the default exposure time ranges for the frame typing
par['calibrations']['standardframe']['exprng'] = [None, 60]
par['calibrations']['arcframe']['exprng'] = [1, 50]
par['calibrations']['darkframe']['exprng'] = [20, None]
par['scienceframe']['exprng'] = [20, None]
return par
def check_frame_type(self, ftype, fitstbl, exprng=None):
"""
Check for frames of the provided type.
Args:
ftype (:obj:`str`):
Type of frame to check. Must be a valid frame type; see
frame-type :ref:`frame_type_defs`.
fitstbl (`astropy.table.Table`_):
The table with the metadata for one or more frames to check.
exprng (:obj:`list`, optional):
Range in the allowed exposure time for a frame of type
``ftype``. See
:func:`pypeit.core.framematch.check_frame_exptime`.
Returns:
`numpy.ndarray`_: Boolean array with the flags selecting the
exposures in ``fitstbl`` that are ``ftype`` type frames.
"""
good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
if ftype in ['pinhole', 'bias']:
# No pinhole or bias frames
return np.zeros(len(fitstbl), dtype=bool)
if ftype in ['pixelflat', 'trace']:
return good_exp & (fitstbl['idname'] == 'PixFlat')
if ftype == 'standard':
return good_exp & (fitstbl['idname'] == 'Telluric')
if ftype == 'science':
return good_exp & (fitstbl['idname'] == 'Science')
if ftype in ['arc', 'tilt']:
return good_exp & (fitstbl['idname'] == 'Arc')
msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
return np.zeros(len(fitstbl), dtype=bool)
|
[
"numpy.full",
"numpy.atleast_1d",
"numpy.log",
"pypeit.telescopes.MagellanTelescopePar",
"numpy.asarray",
"pypeit.core.framematch.check_frame_exptime",
"pkg_resources.resource_filename",
"numpy.array",
"numpy.arange",
"numpy.log10",
"pypeit.images.detector_container.DetectorContainer",
"numpy.vstack"
] |
[((988, 1021), 'pypeit.telescopes.MagellanTelescopePar', 'telescopes.MagellanTelescopePar', ([], {}), '()\n', (1019, 1021), False, 'from pypeit import telescopes\n'), ((4078, 4131), 'pypeit.images.detector_container.DetectorContainer', 'detector_container.DetectorContainer', ([], {}), '(**detector_dict)\n', (4114, 4131), False, 'from pypeit.images import detector_container\n'), ((7436, 7527), 'pkg_resources.resource_filename', 'resource_filename', (['"""pypeit"""', '"""/data/telluric/TelFit_LasCampanas_3100_26100_R20000.fits"""'], {}), "('pypeit',\n '/data/telluric/TelFit_LasCampanas_3100_26100_R20000.fits')\n", (7453, 7527), False, 'from pkg_resources import resource_filename\n'), ((8399, 8457), 'pypeit.core.framematch.check_frame_exptime', 'framematch.check_frame_exptime', (["fitstbl['exptime']", 'exprng'], {}), "(fitstbl['exptime'], exprng)\n", (8429, 8457), False, 'from pypeit.core import framematch\n'), ((10039, 10309), 'numpy.array', 'np.array', (['[0.078125, 0.13769531, 0.19189453, 0.24414062, 0.29296875, 0.34179688, \n 0.38330078, 0.42724609, 0.46582031, 0.50439453, 0.54199219, 0.57763672,\n 0.61279297, 0.6484375, 0.68457031, 0.71875, 0.75439453, 0.79443359, \n 0.83789062, 0.88671875, 0.94091797]'], {}), '([0.078125, 0.13769531, 0.19189453, 0.24414062, 0.29296875, \n 0.34179688, 0.38330078, 0.42724609, 0.46582031, 0.50439453, 0.54199219,\n 0.57763672, 0.61279297, 0.6484375, 0.68457031, 0.71875, 0.75439453, \n 0.79443359, 0.83789062, 0.88671875, 0.94091797])\n', (10047, 10309), True, 'import numpy as np\n'), ((10592, 10624), 'numpy.arange', 'np.arange', (['(31)', '(10)', '(-1)'], {'dtype': 'int'}), '(31, 10, -1, dtype=int)\n', (10601, 10624), True, 'import numpy as np\n'), ((10820, 10963), 'numpy.asarray', 'np.asarray', (['[2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, \n 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048]'], {}), '([2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, \n 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048])\n', (10830, 10963), True, 'import numpy as np\n'), ((10990, 11067), 'numpy.asarray', 'np.asarray', (['[500, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([500, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (11000, 11067), True, 'import numpy as np\n'), ((11156, 11187), 'numpy.vstack', 'np.vstack', (['(spec_min, spec_max)'], {}), '((spec_min, spec_max))\n', (11165, 11187), True, 'import numpy as np\n'), ((11777, 11806), 'numpy.full', 'np.full', (['order_vec.size', '(0.15)'], {}), '(order_vec.size, 0.15)\n', (11784, 11806), True, 'import numpy as np\n'), ((14111, 14164), 'pypeit.images.detector_container.DetectorContainer', 'detector_container.DetectorContainer', ([], {}), '(**detector_dict)\n', (14147, 14164), False, 'from pypeit.images import detector_container\n'), ((16027, 16118), 'pkg_resources.resource_filename', 'resource_filename', (['"""pypeit"""', '"""/data/telluric/TelFit_LasCampanas_3100_26100_R20000.fits"""'], {}), "('pypeit',\n '/data/telluric/TelFit_LasCampanas_3100_26100_R20000.fits')\n", (16044, 16118), False, 'from pkg_resources import resource_filename\n'), ((17301, 17359), 'pypeit.core.framematch.check_frame_exptime', 'framematch.check_frame_exptime', (["fitstbl['exptime']", 'exprng'], {}), "(fitstbl['exptime'], exprng)\n", (17331, 17359), False, 'from pypeit.core import framematch\n'), ((12090, 12102), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (12096, 12102), True, 'import numpy as np\n'), ((12306, 12322), 'numpy.log10', 
'np.log10', (['(8000.0)'], {}), '(8000.0)\n', (12314, 12322), True, 'import numpy as np\n'), ((12324, 12339), 'numpy.log10', 'np.log10', (['(25700)'], {}), '(25700)\n', (12332, 12339), True, 'import numpy as np\n'), ((3755, 3773), 'numpy.atleast_1d', 'np.atleast_1d', (['(1.2)'], {}), '(1.2)\n', (3768, 3773), True, 'import numpy as np\n'), ((3845, 3863), 'numpy.atleast_1d', 'np.atleast_1d', (['(5.0)'], {}), '(5.0)\n', (3858, 3863), True, 'import numpy as np\n'), ((3956, 3988), 'numpy.atleast_1d', 'np.atleast_1d', (['"""[5:2044,5:2044]"""'], {}), "('[5:2044,5:2044]')\n", (3969, 3988), True, 'import numpy as np\n'), ((4020, 4048), 'numpy.atleast_1d', 'np.atleast_1d', (['"""[5:2044,:5]"""'], {}), "('[5:2044,:5]')\n", (4033, 4048), True, 'import numpy as np\n'), ((13848, 13866), 'numpy.atleast_1d', 'np.atleast_1d', (['(3.8)'], {}), '(3.8)\n', (13861, 13866), True, 'import numpy as np\n'), ((13898, 13916), 'numpy.atleast_1d', 'np.atleast_1d', (['(6.0)'], {}), '(6.0)\n', (13911, 13916), True, 'import numpy as np\n'), ((13987, 14022), 'numpy.atleast_1d', 'np.atleast_1d', (['"""[5:2044, 900:1250]"""'], {}), "('[5:2044, 900:1250]')\n", (14000, 14022), True, 'import numpy as np\n'), ((14054, 14085), 'numpy.atleast_1d', 'np.atleast_1d', (['"""[:5, 900:1250]"""'], {}), "('[:5, 900:1250]')\n", (14067, 14085), True, 'import numpy as np\n')]
|
#%%
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
#%%
N = 1000
n = np.arange (N)
f = 100
fs = 44100
x = (1.58 * 0.3125) * np.sin (2 * np.pi * n * f / fs)
#%%
e_s_plus = 72
e_s_minus = -72
V_cm = (e_s_plus + e_s_minus) / 2
V_dm = (e_s_plus - e_s_minus) / 2
R_p = 0 # 50000
G = (R_p + 150 + 20000) / (R_p + 150)
print (V_cm)
print (V_dm)
#%%
y = np.zeros (N)
y_1 = 0
for i in range (N):
    # normalise the amplified input relative to the common-mode and differential levels
    inner = (G*x[i] - V_cm) / V_dm
    # hard-clip to [-1, 1] to model saturation at the supply rails
    sat = inner
    if sat < -1:
        sat = -1
    elif sat > 1:
        sat = 1
    # map the clipped value back to the output voltage range
    y[i] = V_cm + V_dm * sat
#%%
plt.figure()
plt.plot (n, x)
plt.plot (n, y)
plt.axhline (66)
#%%
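# Added sketch (not part of the original script): the per-sample loop above can be
# written as one vectorised NumPy expression, reusing x, G, V_cm and V_dm from the
# cells above; the assert checks that it reproduces y exactly.
y_vec = V_cm + V_dm * np.clip((G * x - V_cm) / V_dm, -1.0, 1.0)
assert np.allclose(y_vec, y)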
|
[
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange"
] |
[((103, 115), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (112, 115), True, 'import numpy as np\n'), ((384, 395), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (392, 395), True, 'import numpy as np\n'), ((578, 590), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (588, 590), True, 'import matplotlib.pyplot as plt\n'), ((591, 605), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'x'], {}), '(n, x)\n', (599, 605), True, 'import matplotlib.pyplot as plt\n'), ((607, 621), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'y'], {}), '(n, y)\n', (615, 621), True, 'import matplotlib.pyplot as plt\n'), ((623, 638), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(66)'], {}), '(66)\n', (634, 638), True, 'import matplotlib.pyplot as plt\n'), ((158, 188), 'numpy.sin', 'np.sin', (['(2 * np.pi * n * f / fs)'], {}), '(2 * np.pi * n * f / fs)\n', (164, 188), True, 'import numpy as np\n')]
|
# Copyright (C) 2018-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import numpy
def read_surfaces(res):
inp = res.input
res.surfaces = {}
if 'probes' not in inp:
return
for probe in inp['probes']:
if not (probe.get('enabled', True) and probe.get('type', '') == 'IsoSurface'):
continue
name = probe['name']
field_name = probe['field']
value = probe['value']
file_name_postfix = probe['file_name']
file_name = res.get_file_path(file_name_postfix)
isosurf = IsoSurfaces(name, field_name, value, file_name)
res.surfaces[name] = isosurf
class IsoSurfaces(object):
def __init__(self, name, field_name, value, file_name):
self.name = name
self.field_name = field_name
self.value = value
self.file_name = file_name
self._cache = None
def reload(self):
self._cache = None
def get_surfaces(self, cache=True):
if cache and self._cache is not None:
return self._cache
timesteps = []
data = []
with open(self.file_name, 'rt') as f:
description = f.readline()[1:].strip()
value = float(f.readline().split()[-1])
dim = int(f.readline().split()[-1])
line = f.readline()
while line:
wds = line.split()
try:
time = float(wds[1])
nsurf = int(wds[3])
except Exception:
break
if nsurf == 0:
timesteps.append(time)
data.append([])
line = f.readline()
continue
datalines = [f.readline() for _ in range(nsurf * 3)]
if not datalines[-1]:
break
timesteps.append(time)
data.append([])
for i in range(nsurf):
xvals = [float(v) for v in datalines[i * 3 + 0].split()]
yvals = [float(v) for v in datalines[i * 3 + 1].split()]
zvals = [float(v) for v in datalines[i * 3 + 2].split()]
data[-1].append((xvals, yvals, zvals))
line = f.readline()
res = (description, value, dim, numpy.array(timesteps), data)
if cache:
self._cache = res
return res
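# Added sketch of the plain-text layout that get_surfaces() above expects, inferred
# from the parser (field labels are illustrative -- only the token positions matter):
#
#   # <description>
#   value = <iso value>
#   dim = <2 or 3>
#   time <t0> nsurf <N>
#   <x coordinates of surface 1>
#   <y coordinates of surface 1>
#   <z coordinates of surface 1>
#   ... (repeated N times, then the next "time ... nsurf ..." line)
#
# A hypothetical usage would be:
#   isosurf = IsoSurfaces('free_surface', 'c', 0.5, 'iso_c.txt')
#   description, value, dim, timesteps, data = isosurf.get_surfaces()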
|
[
"numpy.array"
] |
[((2334, 2356), 'numpy.array', 'numpy.array', (['timesteps'], {}), '(timesteps)\n', (2345, 2356), False, 'import numpy\n')]
|
import os
import pickle
import numpy as np
def deviation_from_actual_value(array):
"""
Calculates standard deviation for the parameters
    :param array: either (num_iters, num_points_in_sim, n_params) or (num_iters, num_points_in_sim, n_params, m_params)
    :return: per-point standard deviations over the iteration axis, i.e. the input shape with the first (num_iters) axis removed
"""
if array.ndim == 3:
deviations = np.zeros((array.shape[1],array.shape[2]))
for pt in range(array.shape[1]):
for param in range(array.shape[2]):
dev = np.std(array[:,pt,param])
deviations[pt,param] = dev
return deviations
elif array.ndim == 4:
deviations = np.zeros((array.shape[1], array.shape[2], array.shape[3]))
for pt in range(array.shape[1]):
for param_ind1 in range(array.shape[2]):
for param_ind2 in range(array.shape[3]):
dev = np.std(array[:, pt, param_ind1, param_ind2])
deviations[pt, param_ind1, param_ind2] = dev
return deviations
else:
raise ValueError("Wrong num of dimensions")
def main():
#retrieving pickle data calculated from parameter_deviation_calculator.py
directory_path = os.path.dirname(
os.path.dirname(os.path.join(os.getcwd(), os.listdir(os.getcwd())[0]))) ## directory of directory of file
pickle_dir = directory_path + '/Bound_Estimation/Parameter_Deviation/'
with open(pickle_dir + 'theta.pkl', 'rb') as f:
theta_l_r = pickle.load(f)
with open(pickle_dir + 'rtof_dist.pkl', 'rb') as f:
rtof_dist = pickle.load(f)
with open(pickle_dir + 'tdoa_dist.pkl', 'rb') as f:
tdoa_dist = pickle.load(f)
#calculating deviation for theta, rtof_dist.pkl, tdoa_dist
deviation_theta = deviation_from_actual_value(theta_l_r)
deviation_rtof_dist = deviation_from_actual_value(rtof_dist)
deviation_tdoa_dist = deviation_from_actual_value(tdoa_dist)
#saving calculated deviation parameters.
with open(pickle_dir + 'deviation_theta.pkl', 'wb') as f:
pickle.dump(deviation_theta, f)
with open(pickle_dir + 'deviation_rtof_dist.pkl', 'wb') as f:
pickle.dump(deviation_rtof_dist, f)
with open(pickle_dir + 'deviation_tdoa_dist.pkl', 'wb') as f:
pickle.dump(deviation_tdoa_dist, f)
if __name__ == '__main__':
main()
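# Added note (illustration only): the nested loops in deviation_from_actual_value()
# take np.std over the iteration axis, so for either input shape the function is
# equivalent to the single vectorised call
#   deviations = np.std(array, axis=0)
# which returns the input shape with the first (num_iters) axis removed.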
|
[
"pickle.dump",
"numpy.std",
"os.getcwd",
"numpy.zeros",
"pickle.load"
] |
[((328, 370), 'numpy.zeros', 'np.zeros', (['(array.shape[1], array.shape[2])'], {}), '((array.shape[1], array.shape[2]))\n', (336, 370), True, 'import numpy as np\n'), ((1451, 1465), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1462, 1465), False, 'import pickle\n'), ((1543, 1557), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1554, 1557), False, 'import pickle\n'), ((1635, 1649), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1646, 1649), False, 'import pickle\n'), ((2021, 2052), 'pickle.dump', 'pickle.dump', (['deviation_theta', 'f'], {}), '(deviation_theta, f)\n', (2032, 2052), False, 'import pickle\n'), ((2127, 2162), 'pickle.dump', 'pickle.dump', (['deviation_rtof_dist', 'f'], {}), '(deviation_rtof_dist, f)\n', (2138, 2162), False, 'import pickle\n'), ((2237, 2272), 'pickle.dump', 'pickle.dump', (['deviation_tdoa_dist', 'f'], {}), '(deviation_tdoa_dist, f)\n', (2248, 2272), False, 'import pickle\n'), ((624, 682), 'numpy.zeros', 'np.zeros', (['(array.shape[1], array.shape[2], array.shape[3])'], {}), '((array.shape[1], array.shape[2], array.shape[3]))\n', (632, 682), True, 'import numpy as np\n'), ((481, 508), 'numpy.std', 'np.std', (['array[:, pt, param]'], {}), '(array[:, pt, param])\n', (487, 508), True, 'import numpy as np\n'), ((1225, 1236), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1234, 1236), False, 'import os\n'), ((860, 904), 'numpy.std', 'np.std', (['array[:, pt, param_ind1, param_ind2]'], {}), '(array[:, pt, param_ind1, param_ind2])\n', (866, 904), True, 'import numpy as np\n'), ((1249, 1260), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1258, 1260), False, 'import os\n')]
|
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def plot(gather_count, filename):
gather_count = np.log(gather_count + 1)
sns.color_palette("light:b", as_cmap=True)
ax = sns.heatmap(gather_count, vmax=8, vmin=0, cmap="Purples",
xticklabels=False, yticklabels=False, cbar=False,
square=True)
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
[i.set_linewidth(2) for i in ax.spines.values()]
plt.tight_layout()
# plt.show()
plt.savefig(filename)
def main():
gather_count = [[0, 0, 0, 0, 0], [0, 0, 1, 0, 1], [0, 1, 2, 0, 4], [0, 0, 0, 7, 24], [0, 0, 5, 18, 4549]]
gather_count = np.array(gather_count)
    # plot() already applies np.log(x + 1) internally and expects an output filename
    plot(gather_count, "gather_count.png")  # the output path here is illustrative
if __name__ == "__main__":
main()
|
[
"seaborn.heatmap",
"numpy.log",
"numpy.array",
"seaborn.color_palette",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] |
[((128, 152), 'numpy.log', 'np.log', (['(gather_count + 1)'], {}), '(gather_count + 1)\n', (134, 152), True, 'import numpy as np\n'), ((157, 199), 'seaborn.color_palette', 'sns.color_palette', (['"""light:b"""'], {'as_cmap': '(True)'}), "('light:b', as_cmap=True)\n", (174, 199), True, 'import seaborn as sns\n'), ((209, 333), 'seaborn.heatmap', 'sns.heatmap', (['gather_count'], {'vmax': '(8)', 'vmin': '(0)', 'cmap': '"""Purples"""', 'xticklabels': '(False)', 'yticklabels': '(False)', 'cbar': '(False)', 'square': '(True)'}), "(gather_count, vmax=8, vmin=0, cmap='Purples', xticklabels=False,\n yticklabels=False, cbar=False, square=True)\n", (220, 333), True, 'import seaborn as sns\n'), ((591, 609), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (607, 609), True, 'import matplotlib.pyplot as plt\n'), ((631, 652), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (642, 652), True, 'import matplotlib.pyplot as plt\n'), ((796, 818), 'numpy.array', 'np.array', (['gather_count'], {}), '(gather_count)\n', (804, 818), True, 'import numpy as np\n'), ((838, 862), 'numpy.log', 'np.log', (['(gather_count + 1)'], {}), '(gather_count + 1)\n', (844, 862), True, 'import numpy as np\n')]
|
import random
import math
import numpy
from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan
from misc.numerical import INF
from misc.functions import randomize
def random_policy(current_vertex):
edges = current_vertex.get_successors()
if not edges:
return None # current_vertex
return random.choice(edges)
def greedy_policy(current_vertex, weight=1, shuffle=True):
# TODO: function that returns the policy
# TODO: use evaluators
edges = current_vertex.get_successors()
if not edges:
return None
if shuffle:
edges = randomize(edges)
return min(edges, key=lambda e: e.cost + weight*e.sink.get_h_cost())
##################################################
def random_walk(start, goal, generator, _=None, policy=random_policy, max_steps=INF, debug=None, **kwargs):
space = StateSpace(generator, start, max_extensions=INF, **kwargs)
current_vertex = space.root
edge_path = []
while space.is_active() and len(edge_path) < max_steps:
#current_vertex.generate_all()
space.new_iteration(current_vertex)
if debug is not None:
debug(current_vertex)
if test_goal(current_vertex, goal):
operator_path = [edge.operator for edge in edge_path]
plan = Plan(start, operator_path)
return Solution(plan, space)
#return space.solution(current_vertex)
edge = policy(current_vertex)
if edge is None:
break
edge_path.append(edge)
current_vertex = edge.sink
return space.failure()
##################################################
MAX_ROLLOUT = 100 # 100 | INF
class TreeNode(object):
def __init__(self, vertex, parent_edge=None, parent_node=None):
self.vertex = vertex
self.parent_edge = parent_edge
self.parent_node = parent_node
self.rollouts = [] # TODO: rename to estimates?
self.children = [] # TODO: map from edges to nodes
if self.parent_node is not None:
self.parent_node.children.append(self)
def is_leaf(self):
return not bool(self.children)
# def is_explored(self):
# return set(self.vertex.get_successors()) == {child.vertex for child in self.children}
def num_rollouts(self):
return len(self.rollouts)
def get_estimate(self):
if not self.rollouts:
return INF
return numpy.average(self.rollouts)
def get_uct(self, c=math.sqrt(2)):
# https://en.wikipedia.org/wiki/Monte_Carlo_tree_search
estimate = -self.get_estimate()
if (self.parent_node is None) or (c == 0):
return estimate
diverse = math.sqrt(math.log(self.parent_node.num_rollouts()) / self.num_rollouts())
if c == INF:
return diverse
return estimate + c*diverse
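    # Added note: this is the usual UCT score, -mean(rollouts) + c * sqrt(ln(N_parent) / N_node),
    # with the estimate negated because rollouts store costs rather than rewards;
    # c = 0 gives pure exploitation and c = INF pure exploration.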
def ancestors(self):
if self.parent_node is None:
return []
return self.parent_node.ancestors() + [self.parent_node]
def descendants(self):
nodes = [self]
for child in self.children:
nodes.extend(child.descendants())
return nodes
def random_leaf(self):
if self.is_leaf(): # is_leaf | is_explored
return self
child = random.choice(self.children)
return child.random_leaf()
def uniform_leaf(self):
leaves = list(filter(TreeNode.is_leaf, self.descendants()))
return random.choice(leaves)
def uct_leaf(self, **kwargs):
if self.is_leaf(): # is_leaf | is_explored
return self
best_child = max(self.children, key=lambda n: n.get_uct(**kwargs))
return best_child.uct_leaf()
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.vertex)
##################################################
def goal_rollout(vertex, goal):
if test_goal(vertex, goal):
return 0
return 1 # TODO: min action cost
def deadend_rollout(vertex, goal):
if test_goal(vertex, goal):
return 0
if not vertex.get_successors():
return MAX_ROLLOUT
return 1
def heuristic_rollout(vertex, goal):
return vertex.get_h_cost()
def simulation(start_vertex, goal, policy=random_policy, max_steps=5):
current_vertex = start_vertex
path = []
while len(path) < max_steps:
if test_goal(current_vertex, goal):
# TODO: greedy version
break
edge = policy(current_vertex)
if edge is None:
break
path.append(edge)
current_vertex = edge.sink
return path
def simulated_rollout(vertex, goal, evaluator=deadend_rollout, **kwargs):
path = simulation(vertex, goal, **kwargs)
cost = 0
estimates = [cost + evaluator(vertex, goal)]
for edge in path:
cost += edge.cost
estimates.append(cost + evaluator(vertex, goal))
return estimates[-1]
#return numpy.average(estimates)
def simulated_rollouts(vertex, goal, num=1, **kwargs):
assert num >= 1
return numpy.average([simulated_rollout(vertex, goal, **kwargs) for _ in range(num)])
##################################################
def mcts(start, goal, generator, _=None, debug=None, **kwargs):
# TODO: dynamic programming instead of independent tree
# https://gist.github.com/qpwo/c538c6f73727e254fdc7fab81024f6e1
# https://github.com/pbsinclair42/MCTS/blob/master/mcts.py
# https://github.com/int8/monte-carlo-tree-search/blob/master/mctspy/tree/search.py
space = StateSpace(generator, start, max_extensions=INF, **kwargs)
root = TreeNode(space.root)
while space.is_active():
#leaf = root.uniform_leaf()
#leaf = root.random_leaf()
leaf = root.uct_leaf()
vertex = leaf.vertex
space.new_iteration(vertex)
if debug is not None:
debug(vertex)
if test_goal(vertex, goal):
return space.solution(vertex)
for edge in vertex.get_successors(): # TODO: sample a subset
new_vertex = edge.sink
if test_goal(new_vertex, goal):
return space.solution(new_vertex)
node = TreeNode(new_vertex, parent_edge=edge, parent_node=leaf)
#rollout = goal_rollout(new_vertex, goal)
#rollout = deadend_rollout(new_vertex, goal)
#rollout = heuristic_rollout(new_vertex, goal)
#rollout = simulated_rollout(new_vertex, goal)
rollout = simulated_rollouts(new_vertex, goal, num=3)
for ancestor in reversed(node.ancestors() + [node]):
ancestor.rollouts.append(rollout)
if ancestor.parent_edge is not None:
rollout += ancestor.parent_edge.cost
return space.failure()
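# Added usage sketch: both searches share the (start, goal, generator) calling
# convention from planner.state_space, so a hypothetical driver could do e.g.
#   solution = random_walk(start, goal, generator, policy=greedy_policy, max_steps=100)
#   solution = mcts(start, goal, generator)
# where start, goal and generator come from the surrounding planning framework.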
|
[
"planner.state_space.Plan",
"numpy.average",
"math.sqrt",
"planner.state_space.StateSpace",
"random.choice",
"planner.state_space.test_goal",
"planner.state_space.Solution",
"misc.functions.randomize"
] |
[((346, 366), 'random.choice', 'random.choice', (['edges'], {}), '(edges)\n', (359, 366), False, 'import random\n'), ((876, 934), 'planner.state_space.StateSpace', 'StateSpace', (['generator', 'start'], {'max_extensions': 'INF'}), '(generator, start, max_extensions=INF, **kwargs)\n', (886, 934), False, 'from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan\n'), ((3901, 3924), 'planner.state_space.test_goal', 'test_goal', (['vertex', 'goal'], {}), '(vertex, goal)\n', (3910, 3924), False, 'from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan\n'), ((4023, 4046), 'planner.state_space.test_goal', 'test_goal', (['vertex', 'goal'], {}), '(vertex, goal)\n', (4032, 4046), False, 'from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan\n'), ((5542, 5600), 'planner.state_space.StateSpace', 'StateSpace', (['generator', 'start'], {'max_extensions': 'INF'}), '(generator, start, max_extensions=INF, **kwargs)\n', (5552, 5600), False, 'from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan\n'), ((613, 629), 'misc.functions.randomize', 'randomize', (['edges'], {}), '(edges)\n', (622, 629), False, 'from misc.functions import randomize\n'), ((1204, 1235), 'planner.state_space.test_goal', 'test_goal', (['current_vertex', 'goal'], {}), '(current_vertex, goal)\n', (1213, 1235), False, 'from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan\n'), ((2450, 2478), 'numpy.average', 'numpy.average', (['self.rollouts'], {}), '(self.rollouts)\n', (2463, 2478), False, 'import numpy\n'), ((2503, 2515), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (2512, 2515), False, 'import math\n'), ((3298, 3326), 'random.choice', 'random.choice', (['self.children'], {}), '(self.children)\n', (3311, 3326), False, 'import random\n'), ((3473, 3494), 'random.choice', 'random.choice', (['leaves'], {}), '(leaves)\n', (3486, 3494), False, 'import random\n'), ((4374, 4405), 'planner.state_space.test_goal', 'test_goal', (['current_vertex', 'goal'], {}), '(current_vertex, goal)\n', (4383, 4405), False, 'from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan\n'), ((5896, 5919), 'planner.state_space.test_goal', 'test_goal', (['vertex', 'goal'], {}), '(vertex, goal)\n', (5905, 5919), False, 'from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan\n'), ((1322, 1348), 'planner.state_space.Plan', 'Plan', (['start', 'operator_path'], {}), '(start, operator_path)\n', (1326, 1348), False, 'from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan\n'), ((1368, 1389), 'planner.state_space.Solution', 'Solution', (['plan', 'space'], {}), '(plan, space)\n', (1376, 1389), False, 'from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan\n'), ((6082, 6109), 'planner.state_space.test_goal', 'test_goal', (['new_vertex', 'goal'], {}), '(new_vertex, goal)\n', (6091, 6109), False, 'from planner.state_space import test_goal, test_parent_operator, StateSpace, Solution, Plan\n')]
|
import numpy as np
import math
from scipy.interpolate import interp1d
import scipy.linalg as LA
import os
from skimage.transform import resize
from multiprocessing import Process
import shutil
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor, as_completed
def compute_tf_fig(org_sig):
final_sig = create_extended_sig(org_sig)
wave2000 = final_sig
ps_SampleRate = 2000
s_Len = len(final_sig)
#exts_len = len(final_sig)
s_HalfLen = math.floor(s_Len/2)+1
ps_MinFreqHz = 10
ps_MaxFreqHz = 500
ps_FreqSeg = 512
v_WAxis = np.linspace(0, 2*np.pi, s_Len, endpoint=False)
v_WAxis = v_WAxis* ps_SampleRate
v_WAxisHalf = v_WAxis[:s_HalfLen]
v_FreqAxis = np.linspace(ps_MinFreqHz, ps_MaxFreqHz,num=ps_FreqSeg)#ps_MinFreqHz:s_FreqStep:ps_MaxFreqHz
v_FreqAxis = v_FreqAxis[::-1]
v_InputSignalFFT = np.fft.fft(wave2000)
ps_StDevCycles = 3
m_GaborWT = np.zeros((ps_FreqSeg, s_Len),dtype=complex)
for i, s_FreqCounter in enumerate(v_FreqAxis):
v_WinFFT = np.zeros(s_Len)
s_StDevSec = (1 / s_FreqCounter) * ps_StDevCycles
v_WinFFT[:s_HalfLen] = np.exp(-0.5*np.power( v_WAxisHalf - (2* np.pi* s_FreqCounter) , 2)*
(s_StDevSec**2))
v_WinFFT = v_WinFFT* np.sqrt(s_Len)/ LA.norm(v_WinFFT, 2)
m_GaborWT[i, :] = np.fft.ifft(v_InputSignalFFT* v_WinFFT)/np.sqrt(s_StDevSec)
return s_HalfLen, v_FreqAxis, v_WAxisHalf, v_InputSignalFFT, m_GaborWT
def compute_spectrum(org_sig):
final_sig = create_extended_sig(org_sig)
wave2000 = final_sig
ps_SampleRate = 2000
s_Len = len(final_sig)
#exts_len = len(final_sig)
s_HalfLen = math.floor(s_Len/2)+1
ps_MinFreqHz = 10
ps_MaxFreqHz = 500
ps_FreqSeg = 512
v_WAxis = np.linspace(0, 2*np.pi, s_Len, endpoint=False)
v_WAxis = v_WAxis* ps_SampleRate
v_WAxisHalf = v_WAxis[:s_HalfLen]
v_FreqAxis = np.linspace(ps_MinFreqHz, ps_MaxFreqHz,num=ps_FreqSeg)#ps_MinFreqHz:s_FreqStep:ps_MaxFreqHz
v_FreqAxis = v_FreqAxis[::-1]
v_InputSignalFFT = np.fft.fft(wave2000)
ps_StDevCycles = 3
m_GaborWT = np.zeros((ps_FreqSeg, s_Len),dtype=complex)
for i, s_FreqCounter in enumerate(v_FreqAxis):
v_WinFFT = np.zeros(s_Len)
s_StDevSec = (1 / s_FreqCounter) * ps_StDevCycles
v_WinFFT[:s_HalfLen] = np.exp(-0.5*np.power( v_WAxisHalf - (2* np.pi* s_FreqCounter) , 2)*
(s_StDevSec**2))
v_WinFFT = v_WinFFT* np.sqrt(s_Len)/ LA.norm(v_WinFFT, 2)
m_GaborWT[i, :] = np.fft.ifft(v_InputSignalFFT* v_WinFFT)/np.sqrt(s_StDevSec)
return resize(np.abs(m_GaborWT[:, 3000:5000]), (224,224))
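# Added note: compute_spectrum() is compute_tf_fig() reduced to its end product -- the
# magnitude of the Gabor (Morlet-style) time-frequency transform over 10-500 Hz,
# cropped to samples 3000:5000 of the extended signal and resized to a 224x224 image.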
def create_extended_sig(wave2000):
#wave2000 = bb
s_len = len(wave2000)
s_halflen = int(np.ceil(s_len/2)) + 1
sig = wave2000
start_win = sig[:s_halflen] - sig[0]
end_win = sig[s_len - s_halflen - 1:] - sig[-1]
start_win = -start_win[::-1] + sig[0]
end_win = -end_win[::-1] + sig[-1]
final_sig = np.concatenate((start_win[:-1],sig, end_win[1:]))
#print(s_halflen, start_win.shape, end_win.shape, sig.shape, final_sig.shape)
if len(final_sig)%2 == 0:
final_sig = final_sig[:-1]
return final_sig
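# Added note: create_extended_sig() anti-symmetrically reflects roughly half the signal
# about each endpoint (so value and slope stay continuous), roughly doubling its length,
# and trims the result to an odd number of samples for the FFT-based transforms above.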
def strip_key(key):
key = key.strip()
key = key.replace('EEG', '').strip()
key = key.replace('Ref', '').strip()
key = key.replace('-', '').strip()
key = key.replace('_', ' ').strip()
key = key.split(" ")
if len(key) > 1:
key = key[1]
else:
key = key[0]
return key
def normalized(a, max_ = 2000-11):
c = (max_*(a - np.min(a))/np.ptp(a)).astype(int)
c = c + 5
return c
def construct_features(raw_signal, length=1000):
#HFO with spike
canvas = np.zeros((2*length, 2*length))
hfo_spike = normalized(raw_signal)
index = np.arange(len(hfo_spike))
for ii in range(3):
canvas[index,hfo_spike-ii] = 256
canvas[index,hfo_spike+ii] = 256
spike_image = resize(canvas, (224, 224))
intensity_image = np.zeros_like(canvas)
intensity_image[index, :] = raw_signal
hfo_image = resize(intensity_image, (224, 224))
return spike_image, hfo_image
def clean_folder(saved_fn):
if not os.path.exists(saved_fn):
#os.mkdir(saved_fn)
os.makedirs(saved_fn)
else:
shutil.rmtree(saved_fn)
os.mkdir(saved_fn)
def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=3):
"""
A parallel version of the map function with a progress bar.
Args:
array (array-like): An array to iterate over.
function (function): A python function to apply to the elements of array
n_jobs (int, default=16): The number of cores to use
use_kwargs (boolean, default=False): Whether to consider the elements of array as dictionaries of
keyword arguments to function
front_num (int, default=3): The number of iterations to run serially before kicking off the parallel job.
Useful for catching bugs
Returns:
[function(array[0]), function(array[1]), ...]
"""
#We run the first few iterations serially to catch bugs
if front_num > 0:
front = [function(**a) if use_kwargs else function(a) for a in array[:front_num]]
#If we set n_jobs to 1, just run a list comprehension. This is useful for benchmarking and debugging.
if n_jobs==1:
return front + [function(**a) if use_kwargs else function(a) for a in tqdm(array[front_num:])]
#Assemble the workers
with ProcessPoolExecutor(max_workers=n_jobs) as pool:
#Pass the elements of array into function
if use_kwargs:
futures = [pool.submit(function, **a) for a in array[front_num:]]
else:
futures = [pool.submit(function, a) for a in array[front_num:]]
kwargs = {
'total': len(futures),
'unit': 'it',
'unit_scale': True,
'leave': True
}
#Print out the progress as tasks complete
for f in tqdm(as_completed(futures), **kwargs):
pass
out = []
#Get the results from the futures.
for i, future in tqdm(enumerate(futures)):
try:
out.append(future.result())
except Exception as e:
out.append(e)
return front + out
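# Added usage sketch (illustrative only): 'signals' is a hypothetical list of 1-D arrays.
#   spectra = parallel_process(signals, compute_spectrum, n_jobs=8)
# With use_kwargs=True each element is a dict of keyword arguments instead, e.g.
#   spectra = parallel_process([{'org_sig': s} for s in signals], compute_spectrum,
#                              n_jobs=8, use_kwargs=True)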
|
[
"os.mkdir",
"numpy.abs",
"concurrent.futures.ProcessPoolExecutor",
"skimage.transform.resize",
"shutil.rmtree",
"numpy.zeros_like",
"numpy.fft.fft",
"numpy.power",
"os.path.exists",
"numpy.linspace",
"numpy.fft.ifft",
"tqdm.tqdm",
"numpy.ceil",
"numpy.min",
"concurrent.futures.as_completed",
"numpy.concatenate",
"os.makedirs",
"numpy.ptp",
"numpy.zeros",
"math.floor",
"scipy.linalg.norm",
"numpy.sqrt"
] |
[((604, 652), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 's_Len'], {'endpoint': '(False)'}), '(0, 2 * np.pi, s_Len, endpoint=False)\n', (615, 652), True, 'import numpy as np\n'), ((743, 798), 'numpy.linspace', 'np.linspace', (['ps_MinFreqHz', 'ps_MaxFreqHz'], {'num': 'ps_FreqSeg'}), '(ps_MinFreqHz, ps_MaxFreqHz, num=ps_FreqSeg)\n', (754, 798), True, 'import numpy as np\n'), ((897, 917), 'numpy.fft.fft', 'np.fft.fft', (['wave2000'], {}), '(wave2000)\n', (907, 917), True, 'import numpy as np\n'), ((957, 1001), 'numpy.zeros', 'np.zeros', (['(ps_FreqSeg, s_Len)'], {'dtype': 'complex'}), '((ps_FreqSeg, s_Len), dtype=complex)\n', (965, 1001), True, 'import numpy as np\n'), ((1811, 1859), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 's_Len'], {'endpoint': '(False)'}), '(0, 2 * np.pi, s_Len, endpoint=False)\n', (1822, 1859), True, 'import numpy as np\n'), ((1950, 2005), 'numpy.linspace', 'np.linspace', (['ps_MinFreqHz', 'ps_MaxFreqHz'], {'num': 'ps_FreqSeg'}), '(ps_MinFreqHz, ps_MaxFreqHz, num=ps_FreqSeg)\n', (1961, 2005), True, 'import numpy as np\n'), ((2104, 2124), 'numpy.fft.fft', 'np.fft.fft', (['wave2000'], {}), '(wave2000)\n', (2114, 2124), True, 'import numpy as np\n'), ((2164, 2208), 'numpy.zeros', 'np.zeros', (['(ps_FreqSeg, s_Len)'], {'dtype': 'complex'}), '((ps_FreqSeg, s_Len), dtype=complex)\n', (2172, 2208), True, 'import numpy as np\n'), ((3026, 3076), 'numpy.concatenate', 'np.concatenate', (['(start_win[:-1], sig, end_win[1:])'], {}), '((start_win[:-1], sig, end_win[1:]))\n', (3040, 3076), True, 'import numpy as np\n'), ((3768, 3802), 'numpy.zeros', 'np.zeros', (['(2 * length, 2 * length)'], {}), '((2 * length, 2 * length))\n', (3776, 3802), True, 'import numpy as np\n'), ((4001, 4027), 'skimage.transform.resize', 'resize', (['canvas', '(224, 224)'], {}), '(canvas, (224, 224))\n', (4007, 4027), False, 'from skimage.transform import resize\n'), ((4051, 4072), 'numpy.zeros_like', 'np.zeros_like', (['canvas'], {}), '(canvas)\n', (4064, 4072), True, 'import numpy as np\n'), ((4132, 4167), 'skimage.transform.resize', 'resize', (['intensity_image', '(224, 224)'], {}), '(intensity_image, (224, 224))\n', (4138, 4167), False, 'from skimage.transform import resize\n'), ((501, 522), 'math.floor', 'math.floor', (['(s_Len / 2)'], {}), '(s_Len / 2)\n', (511, 522), False, 'import math\n'), ((1071, 1086), 'numpy.zeros', 'np.zeros', (['s_Len'], {}), '(s_Len)\n', (1079, 1086), True, 'import numpy as np\n'), ((1708, 1729), 'math.floor', 'math.floor', (['(s_Len / 2)'], {}), '(s_Len / 2)\n', (1718, 1729), False, 'import math\n'), ((2278, 2293), 'numpy.zeros', 'np.zeros', (['s_Len'], {}), '(s_Len)\n', (2286, 2293), True, 'import numpy as np\n'), ((2650, 2681), 'numpy.abs', 'np.abs', (['m_GaborWT[:, 3000:5000]'], {}), '(m_GaborWT[:, 3000:5000])\n', (2656, 2681), True, 'import numpy as np\n'), ((4243, 4267), 'os.path.exists', 'os.path.exists', (['saved_fn'], {}), '(saved_fn)\n', (4257, 4267), False, 'import os\n'), ((4305, 4326), 'os.makedirs', 'os.makedirs', (['saved_fn'], {}), '(saved_fn)\n', (4316, 4326), False, 'import os\n'), ((4345, 4368), 'shutil.rmtree', 'shutil.rmtree', (['saved_fn'], {}), '(saved_fn)\n', (4358, 4368), False, 'import shutil\n'), ((4377, 4395), 'os.mkdir', 'os.mkdir', (['saved_fn'], {}), '(saved_fn)\n', (4385, 4395), False, 'import os\n'), ((5613, 5652), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'n_jobs'}), '(max_workers=n_jobs)\n', (5632, 5652), False, 'from concurrent.futures import ProcessPoolExecutor, 
as_completed\n'), ((1318, 1338), 'scipy.linalg.norm', 'LA.norm', (['v_WinFFT', '(2)'], {}), '(v_WinFFT, 2)\n', (1325, 1338), True, 'import scipy.linalg as LA\n'), ((1365, 1405), 'numpy.fft.ifft', 'np.fft.ifft', (['(v_InputSignalFFT * v_WinFFT)'], {}), '(v_InputSignalFFT * v_WinFFT)\n', (1376, 1405), True, 'import numpy as np\n'), ((1405, 1424), 'numpy.sqrt', 'np.sqrt', (['s_StDevSec'], {}), '(s_StDevSec)\n', (1412, 1424), True, 'import numpy as np\n'), ((2525, 2545), 'scipy.linalg.norm', 'LA.norm', (['v_WinFFT', '(2)'], {}), '(v_WinFFT, 2)\n', (2532, 2545), True, 'import scipy.linalg as LA\n'), ((2572, 2612), 'numpy.fft.ifft', 'np.fft.ifft', (['(v_InputSignalFFT * v_WinFFT)'], {}), '(v_InputSignalFFT * v_WinFFT)\n', (2583, 2612), True, 'import numpy as np\n'), ((2612, 2631), 'numpy.sqrt', 'np.sqrt', (['s_StDevSec'], {}), '(s_StDevSec)\n', (2619, 2631), True, 'import numpy as np\n'), ((2795, 2813), 'numpy.ceil', 'np.ceil', (['(s_len / 2)'], {}), '(s_len / 2)\n', (2802, 2813), True, 'import numpy as np\n'), ((6123, 6144), 'concurrent.futures.as_completed', 'as_completed', (['futures'], {}), '(futures)\n', (6135, 6144), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((1302, 1316), 'numpy.sqrt', 'np.sqrt', (['s_Len'], {}), '(s_Len)\n', (1309, 1316), True, 'import numpy as np\n'), ((2509, 2523), 'numpy.sqrt', 'np.sqrt', (['s_Len'], {}), '(s_Len)\n', (2516, 2523), True, 'import numpy as np\n'), ((3633, 3642), 'numpy.ptp', 'np.ptp', (['a'], {}), '(a)\n', (3639, 3642), True, 'import numpy as np\n'), ((1188, 1240), 'numpy.power', 'np.power', (['(v_WAxisHalf - 2 * np.pi * s_FreqCounter)', '(2)'], {}), '(v_WAxisHalf - 2 * np.pi * s_FreqCounter, 2)\n', (1196, 1240), True, 'import numpy as np\n'), ((2395, 2447), 'numpy.power', 'np.power', (['(v_WAxisHalf - 2 * np.pi * s_FreqCounter)', '(2)'], {}), '(v_WAxisHalf - 2 * np.pi * s_FreqCounter, 2)\n', (2403, 2447), True, 'import numpy as np\n'), ((5553, 5576), 'tqdm.tqdm', 'tqdm', (['array[front_num:]'], {}), '(array[front_num:])\n', (5557, 5576), False, 'from tqdm import tqdm\n'), ((3622, 3631), 'numpy.min', 'np.min', (['a'], {}), '(a)\n', (3628, 3631), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import print_function
import os,time,cv2,sys,math
import tensorflow as tf
import numpy as np
import time, datetime
import argparse
import random
import os, sys
import subprocess
from utils import utils, helpers
from builders import fusion_model_builder
import datetime
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def radiance_writer(out_path, image):
with open(out_path, "wb") as f:
f.write(bytes("#?RADIANCE\n# Made with Python & Numpy\nFORMAT=32-bit_rle_rgbe\n\n",'UTF-8'))
f.write(bytes("-Y %d +X %d\n" %(image.shape[0], image.shape[1]),'UTF-8'))
brightest = np.max(image,axis=2)
mantissa = np.zeros_like(brightest)
exponent = np.zeros_like(brightest)
np.frexp(brightest, mantissa, exponent)
scaled_mantissa = mantissa * 255.0 / brightest
rgbe = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
rgbe[...,0:3] = np.around(image[...,0:3] * scaled_mantissa[...,None])
rgbe[...,3] = np.around(exponent + 128)
rgbe.flatten().tofile(f)
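# Added note: radiance_writer() stores each float RGB pixel in the 4-byte RGBE layout of
# Radiance .hdr files -- a shared exponent taken from the brightest channel via np.frexp,
# plus the three channel mantissas rescaled to 0-255 -- so the HDR range survives on disk.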
def compute_psnr(img1, img2):
mse = np.mean((img1-img2)**2)
if mse == 0:
return 100
    PIXEL_MAX = 1.0 # inputs are assumed to be scaled to a peak value of 1.0
return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def log_tonemap(im):
return tf.log(1+5000*im)/tf.log(1+5000.0)
def log_tonemap_output(im):
return np.log(1+5000*im)/np.log(1+5000.0)
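# Added note: both tonemap helpers implement T(x) = log(1 + 5000 x) / log(1 + 5000),
# compressing HDR values in [0, 1] to a perceptually flatter [0, 1] range before the
# loss and PSNR are computed (T(0) = 0, T(1) = 1, T(0.01) ~ 0.46).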
parser = argparse.ArgumentParser()
parser.add_argument('--nTry', type=int, default=None, help='Current try number')
parser.add_argument('--num_epochs', type=int, default=100, help='Number of epochs to train for')
parser.add_argument('--id_str', type=str, default="", help='Unique ID string to identify current try')
parser.add_argument('--status_id', type=int, default=1, help='Status ID to write to status.txt. Can be 1, 2 or 3')
parser.add_argument('--epoch_start_i', type=int, default=0, help='Start counting epochs from this number')
parser.add_argument('--checkpoint_step', type=int, default=1, help='How often to save checkpoints (epochs)')
parser.add_argument('--validation_step', type=int, default=1, help='How often to perform validation (epochs)')
parser.add_argument('--image', type=str, default=None, help='The image you want to predict on. Only valid in "predict" mode.')
parser.add_argument('--continue_training', type=str2bool, default=False, help='Whether to continue training from a checkpoint')
parser.add_argument('--dataset', type=str, default="hdr_ddg_dataset_ulti_13thJuly", help='Dataset you are using.')
parser.add_argument('--crop_height', type=int, default=256, help='Height of cropped input image to network')
parser.add_argument('--crop_width', type=int, default=256, help='Width of cropped input image to network')
parser.add_argument('--batch_size', type=int, default=16, help='Number of images in each batch')
parser.add_argument('--num_val_images', type=int, default=100000, help='The number of images to use for validation')
parser.add_argument('--model', type=str, default="DRIB_4_four_conv", help='The model you are using. See model_builder.py for supported models')
parser.add_argument('--frontend', type=str, default="ResNet101", help='The frontend you are using. See frontend_builder.py for supported models')
parser.add_argument('--save_logs', type=str2bool, default=True, help='Whether to save training info to the corresponding logs txt file')
parser.add_argument('--log_interval', type=int, default=100, help='Log Interval')
parser.add_argument('--init_lr', type=float, default=0.0002, help='Initial learning rate')
parser.add_argument('--lr_decay', type=float, default=0.94, help='Per-epoch multiplicative decay factor applied to the learning rate')
parser.add_argument('--loss', type=str, default='l2', help='Choose between l2 or l1 norm as a loss function')
parser.add_argument('--logdir', type=str, default='/workspace/logs', help='Directory in which to write TensorBoard summaries')
parser.add_argument('--crop_pixels_height',type=int,default=10,help='Number of rows cropped from the top and bottom of each image before computing validation metrics')
args = parser.parse_args()
try_name = "Try%d_%s_%s"%(args.nTry,args.model,args.id_str)
if not os.path.isdir(try_name):
os.makedirs(try_name)
if args.save_logs:
if args.continue_training:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
status = open("status%d.txt"%(args.status_id),'a')
else:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'w')
status = open("status%d.txt"%(args.status_id),'w')
config = tf.ConfigProto()
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
if not os.path.exists(os.path.join(args.logdir,try_name,'train')):
os.makedirs(os.path.join(args.logdir,try_name,'train'),exist_ok=True)
if not os.path.exists(os.path.join(args.logdir,try_name,'test')):
os.makedirs(os.path.join(args.logdir,try_name,'test'),exist_ok=True)
train_writer = tf.summary.FileWriter('{}/{}/train'.format(args.logdir,try_name))
test_writer = tf.summary.FileWriter('{}/{}/test'.format(args.logdir,try_name))
train_loss_pl = tf.placeholder(tf.float32,shape=None)
train_loss_summary =tf.summary.scalar('train_loss',train_loss_pl)
test_loss_pl = tf.placeholder(tf.float32,shape=None)
test_loss_summary =tf.summary.scalar('test_loss',test_loss_pl)
test_psnr_pl = tf.placeholder(tf.float32,shape=None)
test_psnr_summary =tf.summary.scalar('val_psnr',test_psnr_pl)
le_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
me_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
he_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
gt_image_pl = tf.placeholder(tf.float32,shape=[args.batch_size,args.crop_width,args.crop_height,3])
le_image_summ = tf.summary.image('le images',le_image_pl,max_outputs=args.batch_size)
me_image_summ = tf.summary.image('me images',me_image_pl,max_outputs=args.batch_size)
he_image_summ = tf.summary.image('he images',he_image_pl,max_outputs=args.batch_size)
gt_image_summ = tf.summary.image('gt images',gt_image_pl,max_outputs=args.batch_size)
input_exposure_stacks = [tf.placeholder(tf.float32,shape=[None,None,None,6]) for x in range(3)]
gt_exposure_stack = tf.placeholder(tf.float32,shape=[None,None,None,3])
lr = tf.placeholder("float", shape=[])
network, init_fn = fusion_model_builder.build_model(model_name=args.model, frontend=args.frontend, input_exposure_stack=input_exposure_stacks, crop_width=args.crop_width, crop_height=args.crop_height, is_training=True)
str_params = utils.count_params()
print(str_params)
if args.save_logs:
log_file.write(str_params + "\n")
if args.loss == 'l2':
loss = tf.losses.mean_squared_error(log_tonemap(gt_exposure_stack), log_tonemap(network))
elif args.loss == 'l1':
loss = tf.losses.absolute_difference(log_tonemap(gt_exposure_stack), log_tonemap(network))
opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, var_list=[var for var in tf.trainable_variables()])
saver=tf.train.Saver(max_to_keep=1000)
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
# Load a previous checkpoint if desired
model_checkpoint_name = "%s/ckpts/latest_model_"%(try_name) + args.model + "_" + args.dataset + ".ckpt"
if args.continue_training:
print('Loading latest model checkpoint')
saver.restore(sess, model_checkpoint_name)
print('Loaded latest model checkpoint')
print("\n***** Begin training *****")
print("Try -->", args.nTry)
print("Dataset -->", args.dataset)
print("Model -->", args.model)
print("Crop Height -->", args.crop_height)
print("Crop Width -->", args.crop_width)
print("Num Epochs -->", args.num_epochs)
print("Batch Size -->", args.batch_size)
print("Save Logs -->", args.save_logs)
avg_loss_per_epoch = []
avg_val_loss_per_epoch = []
avg_psnr_per_epoch = []
if args.save_logs:
log_file.write("\nDataset --> " + args.dataset)
log_file.write("\nModel --> " + args.model)
log_file.write("\nCrop Height -->" + str(args.crop_height))
log_file.write("\nCrop Width -->" + str(args.crop_width))
log_file.write("\nNum Epochs -->" + str(args.num_epochs))
log_file.write("\nBatch Size -->" + str(args.batch_size))
log_file.close()
status.write("\nDataset --> " + args.dataset)
status.write("\nModel --> " + args.model)
status.write("\nCrop Height -->" + str(args.crop_height))
status.write("\nCrop Width -->" + str(args.crop_width))
status.write("\nNum Epochs -->" + str(args.num_epochs))
status.write("\nBatch Size -->" + str(args.batch_size))
status.close()
# Load the data
print("Loading the data ...")
# ["he_at_me", "le_at_me", "me_at_he", "me_at_le", "he", "le", "me"]
exposure_keys_train = ["he", "le", "me"]
exposure_keys_train_labels = ["hdr"]
exposure_keys_val = ["he", "le", "me"]
exposure_keys_val_labels = ["hdr"]
multiexposure_train_names = utils.prepare_data_multiexposure("%s/train_256"%(args.dataset), exposure_keys_train)
multiexposure_train_label_names = utils.prepare_data_multiexposure("%s/train_labels_256"%(args.dataset), exposure_keys_train_labels)
multiexposure_val_names = utils.prepare_data_multiexposure("%s/val"%(args.dataset), exposure_keys_val)
multiexposure_val_label_names = utils.prepare_data_multiexposure("%s/val_labels"%(args.dataset), exposure_keys_val_labels)
train_input_names_he, train_input_names_le, train_input_names_me = multiexposure_train_names[0], multiexposure_train_names[1], multiexposure_train_names[2]
train_output_names_hdr = multiexposure_train_label_names[0]
val_input_names_he, val_input_names_le, val_input_names_me = multiexposure_val_names[0], multiexposure_val_names[1], multiexposure_val_names[2]
val_output_names_hdr = multiexposure_val_label_names[0]
# Which validation images do we want
val_indices = []
num_vals = min(args.num_val_images, len(val_input_names_he))
# Set random seed to make sure models are validated on the same validation images.
# So you can compare the results of different models more intuitively.
random.seed(16)
val_indices=random.sample(range(0,len(val_input_names_he)),num_vals)
learning_rates = []
lr_decay_step = 1
small_loss_bin = []
train_step =0
val_step = 0
# Do the training here
for epoch in range(args.epoch_start_i, args.num_epochs):
learning_rate = args.init_lr*(float)(args.lr_decay)**(float)(epoch)
learning_rates.append(learning_rate)
print("\nLearning rate for epoch # %04d = %f\n"%(epoch, learning_rate))
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write("\nLearning rate for epoch " + str(epoch) + " = " + str(learning_rate) + "\n")
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write("\nLearning rate for epoch " + str(epoch) + " = " + str(learning_rate) + "\n")
status.close()
current_losses = []
current_losses_val = []
cnt=0
# Equivalent to shuffling
id_list = np.random.permutation(len(train_input_names_he))
num_iters = int(np.floor(len(id_list) / args.batch_size))
st = time.time()
epoch_st=time.time()
for i in range(num_iters):
input_image_le_batch = []
input_image_me_batch = []
input_image_he_batch = []
output_image_batch = []
# Collect a batch of images
for j in range(args.batch_size):
index = i*args.batch_size + j
id = id_list[index]
cv2_image_train_he = cv2.imread(train_input_names_he[id],-1)
input_image_he = np.float32(cv2.cvtColor(cv2_image_train_he,cv2.COLOR_BGR2RGB)) / 65535.0
input_image_he_gamma,_,_ = utils.ldr_to_hdr_train(input_image_he,train_input_names_he[id])
input_image_he_c = np.concatenate([input_image_he,input_image_he_gamma],axis=2)
cv2_image_train_me = cv2.imread(train_input_names_me[id],-1)
input_image_me = np.float32(cv2.cvtColor(cv2_image_train_me,cv2.COLOR_BGR2RGB)) / 65535.0
input_image_me_gamma,_,_ = utils.ldr_to_hdr_train(input_image_me,train_input_names_me[id])
input_image_me_c = np.concatenate([input_image_me,input_image_me_gamma],axis=2)
cv2_image_train_le = cv2.imread(train_input_names_le[id],-1)
input_image_le = np.float32(cv2.cvtColor(cv2_image_train_le,cv2.COLOR_BGR2RGB)) / 65535.0
input_image_le_gamma,_,_ = utils.ldr_to_hdr_train(input_image_le,train_input_names_le[id])
input_image_le_c = np.concatenate([input_image_le,input_image_le_gamma],axis=2)
output_image = cv2.cvtColor(cv2.imread(train_output_names_hdr[id],-1),cv2.COLOR_BGR2RGB)
input_image_le_batch.append(np.expand_dims(input_image_le_c, axis=0))
input_image_me_batch.append(np.expand_dims(input_image_me_c, axis=0))
input_image_he_batch.append(np.expand_dims(input_image_he_c, axis=0))
output_image_batch.append(np.expand_dims(output_image, axis=0))
input_image_le_batch = np.squeeze(np.stack(input_image_le_batch, axis=1))
input_image_me_batch = np.squeeze(np.stack(input_image_me_batch, axis=1))
input_image_he_batch = np.squeeze(np.stack(input_image_he_batch, axis=1))
output_image_batch = np.squeeze(np.stack(output_image_batch, axis=1))
train_writer.add_summary(sess.run(le_image_summ,feed_dict={le_image_pl:input_image_le_batch[...,:3]}),i)
train_writer.add_summary(sess.run(me_image_summ,feed_dict={me_image_pl:input_image_me_batch[...,:3]}),i)
train_writer.add_summary(sess.run(he_image_summ,feed_dict={he_image_pl:input_image_he_batch[...,:3]}),i)
train_writer.add_summary(sess.run(gt_image_summ,feed_dict={gt_image_pl:output_image_batch[...,:3]}),i)
# Do the training here
_,current_loss=sess.run([opt,loss],feed_dict={input_exposure_stacks[0]:input_image_le_batch,input_exposure_stacks[1]:input_image_me_batch,input_exposure_stacks[2]:input_image_he_batch, gt_exposure_stack:output_image_batch, lr:learning_rate})
current_losses.append(current_loss)
small_loss_bin.append(current_loss)
cnt = cnt + args.batch_size
if cnt % args.log_interval == 0:
small_loss_bin_mean = np.mean(small_loss_bin)
string_print = "Epoch = %d Count = %d Current_Loss = %.4f Time = %.2f "%(epoch, cnt, small_loss_bin_mean, time.time()-st)
small_loss_bin = []
train_str = utils.LOG(string_print)
print(train_str)
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write(train_str + "\n")
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write(train_str + "\n")
status.close()
st = time.time()
summ = sess.run(train_loss_summary, feed_dict={train_loss_pl:np.mean(current_losses)})
train_writer.add_summary(summ,train_step)
train_step +=1
mean_loss = np.mean(current_losses)
avg_loss_per_epoch.append(mean_loss)
# Create directories if needed
if not os.path.isdir("%s/%s/%04d"%(try_name, "ckpts", epoch)):
os.makedirs("%s/%s/%04d"%(try_name, "ckpts", epoch))
# Save latest checkpoint to same file name
print("Saving latest checkpoint")
saver.save(sess, model_checkpoint_name)
if val_indices != 0 and epoch % args.checkpoint_step == 0:
print("Saving checkpoint for this epoch")
saver.save(sess, "%s/%s/%04d/model.ckpt"%(try_name, "ckpts", epoch))
print("Average Training loss = ", mean_loss)
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write("\nAverage Training loss = " + str(mean_loss))
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write("\nAverage Training loss = " + str(mean_loss))
status.close()
if epoch % args.validation_step == 0:
print("Performing validation")
if not os.path.isdir("%s/%s/%04d"%(try_name, "val_Imgs", epoch)):
os.makedirs("%s/%s/%04d"%(try_name, "val_Imgs", epoch))
psnr_pre_list = []
psnr_post_list = []
val_idx_count = 0
pred_time_list = []
# Do the validation on a small set of validation images
for ind in val_indices:
print("\rRunning test image %d / %d"%(val_idx_count+1, len(val_input_names_he)))
input_images = []
cv2_img_he = cv2.imread(val_input_names_he[ind],-1)
h,w = cv2_img_he.shape[:2]
input_image_he = np.expand_dims(np.float32(cv2.cvtColor(cv2_img_he,cv2.COLOR_BGR2RGB)),axis=0)/65535.0
input_image_he_gamma,_,_ = utils.ldr_to_hdr_test(input_image_he,val_input_names_he[ind])
input_image_he_c = np.concatenate([input_image_he,input_image_he_gamma],axis=3)
cv2_img_me = cv2.imread(val_input_names_me[ind],-1)
h,w = cv2_img_me.shape[:2]
input_image_me = np.expand_dims(np.float32(cv2.cvtColor(cv2_img_me,cv2.COLOR_BGR2RGB)),axis=0)/65535.0
input_image_me_gamma,_,_ = utils.ldr_to_hdr_test(input_image_me,val_input_names_me[ind])
input_image_me_c = np.concatenate([input_image_me,input_image_me_gamma],axis=3)
cv2_img_le = cv2.imread(val_input_names_le[ind],-1)
h,w = cv2_img_le.shape[:2]
input_image_le = np.expand_dims(np.float32(cv2.cvtColor(cv2_img_le,cv2.COLOR_BGR2RGB)),axis=0)/65535.0
input_image_le_gamma,_,_ = utils.ldr_to_hdr_test(input_image_le,val_input_names_le[ind])
input_image_le_c = np.concatenate([input_image_le,input_image_le_gamma],axis=3)
cv2_img_hdr = cv2.imread(val_output_names_hdr[ind],-1)
h,w = cv2_img_hdr.shape[:2]
gt_hdr = cv2.cvtColor(cv2_img_hdr,cv2.COLOR_BGR2RGB)
gt_hdr = np.expand_dims(np.float32(gt_hdr), axis=0)
pred_st = time.time()
output_image_pred, curr_val_loss = sess.run([network,loss],feed_dict={input_exposure_stacks[0]:input_image_le_c,input_exposure_stacks[1]:input_image_me_c,input_exposure_stacks[2]:input_image_he_c,gt_exposure_stack:gt_hdr})
pred_et = time.time()
pred_time_list.append(pred_et-pred_st)
output_image = np.squeeze(output_image_pred)
gt_hdr = np.squeeze(gt_hdr)
h,w = output_image.shape[:2]
output_image_cropped = output_image[args.crop_pixels_height:h-args.crop_pixels_height,:,:]
gt_hdr_cropped = gt_hdr[args.crop_pixels_height:h-args.crop_pixels_height,:,:]
current_pre_psnr = compute_psnr(output_image_cropped, gt_hdr_cropped)
current_post_psnr = compute_psnr(log_tonemap_output(output_image_cropped), log_tonemap_output(gt_hdr_cropped))
current_losses_val.append(curr_val_loss)
psnr_pre_list.append(current_pre_psnr)
psnr_post_list.append(current_post_psnr)
file_name = utils.filepath_to_name(val_input_names_he[ind])
radiance_writer("%s/%s/%04d/%s_pred.hdr"%(try_name, "val_Imgs", epoch, file_name),output_image)
radiance_writer("%s/%s/%04d/%s_gt.hdr"%(try_name, "val_Imgs", epoch, file_name),gt_hdr)
val_idx_count = val_idx_count+1
mean_val_loss = np.mean(current_losses_val)
merge_summ = tf.summary.merge([test_loss_summary,test_psnr_summary])
merge_summ = sess.run(merge_summ, feed_dict={test_loss_pl:mean_val_loss,test_psnr_pl:np.mean(psnr_post_list)})
test_writer.add_summary(merge_summ,val_step)
val_step+=1
mean_pre_psnr = np.mean(psnr_pre_list)
mean_post_psnr = np.mean(psnr_post_list)
mean_proc_time = np.mean(pred_time_list)
print('val psnr pre list {}\n'.format(psnr_pre_list))
print('val psnr post list {}\n'.format(psnr_post_list))
print("Average Validation loss = %f"%(mean_val_loss))
print("Average PRE-PSNR = %f"%(mean_pre_psnr))
print("Average POST -PSNR = %f"%(mean_post_psnr))
print('Average processing time = %f'%(mean_proc_time))
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write("\nAverage Validation loss = " + str(mean_val_loss)+"\n")
log_file.write("Average PRE-PSNR = %f\n"%(mean_pre_psnr))
log_file.write("Average POST-PSNR = %f\n"%(mean_post_psnr))
log_file.write('Average processing time = %f\n'%(mean_proc_time))
status = open("status%d.txt"%(args.status_id),'a')
status.write("\nAverage Validation loss = " + str(mean_val_loss)+"\n")
status.write("Average PRE-PSNR = %f\n"%(mean_pre_psnr))
status.write("Average POST -PSNR = %f\n"%(mean_post_psnr))
status.write('Average processing time = %f\n'%(mean_proc_time))
status.close()
epoch_time=time.time()-epoch_st
remain_time=epoch_time*(args.num_epochs-1-epoch)
m, s = divmod(remain_time, 60)
h, m = divmod(m, 60)
if s!=0:
train_time="Remaining training time = %d hours %d minutes %d seconds\n"%(h,m,s)
else:
train_time="Remaining training time : Training completed.\n"
str_time = utils.LOG(train_time)
print(str_time)
if args.save_logs:
log_file = open("%s/Logs_try%d.txt"%(try_name, args.nTry), 'a')
log_file.write(str_time + "\n")
log_file.close()
status = open("status%d.txt"%(args.status_id),'a')
status.write(str_time + "\n")
status.close()
sess.close()
|
[
"argparse.ArgumentParser",
"tensorflow.trainable_variables",
"tensorflow.train.AdamOptimizer",
"tensorflow.ConfigProto",
"numpy.around",
"utils.utils.prepare_data_multiexposure",
"numpy.mean",
"tensorflow.summary.merge",
"utils.utils.count_params",
"os.path.join",
"argparse.ArgumentTypeError",
"numpy.zeros_like",
"cv2.cvtColor",
"tensorflow.placeholder",
"numpy.max",
"utils.utils.ldr_to_hdr_train",
"random.seed",
"utils.utils.ldr_to_hdr_test",
"numpy.frexp",
"numpy.stack",
"tensorflow.summary.image",
"tensorflow.summary.scalar",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"math.sqrt",
"tensorflow.Session",
"utils.utils.LOG",
"tensorflow.log",
"numpy.squeeze",
"numpy.concatenate",
"os.makedirs",
"numpy.log",
"os.path.isdir",
"numpy.float32",
"numpy.zeros",
"numpy.expand_dims",
"utils.utils.filepath_to_name",
"time.time",
"cv2.imread",
"builders.fusion_model_builder.build_model"
] |
[((1599, 1624), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1622, 1624), False, 'import argparse\n'), ((4663, 4679), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4677, 4679), True, 'import tensorflow as tf\n'), ((4770, 4795), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4780, 4795), True, 'import tensorflow as tf\n'), ((5269, 5307), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, shape=None)\n', (5283, 5307), True, 'import tensorflow as tf\n'), ((5328, 5374), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train_loss"""', 'train_loss_pl'], {}), "('train_loss', train_loss_pl)\n", (5345, 5374), True, 'import tensorflow as tf\n'), ((5392, 5430), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, shape=None)\n', (5406, 5430), True, 'import tensorflow as tf\n'), ((5450, 5494), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""test_loss"""', 'test_loss_pl'], {}), "('test_loss', test_loss_pl)\n", (5467, 5494), True, 'import tensorflow as tf\n'), ((5512, 5550), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'None'}), '(tf.float32, shape=None)\n', (5526, 5550), True, 'import tensorflow as tf\n'), ((5570, 5613), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""val_psnr"""', 'test_psnr_pl'], {}), "('val_psnr', test_psnr_pl)\n", (5587, 5613), True, 'import tensorflow as tf\n'), ((5632, 5726), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[args.batch_size, args.crop_width, args.crop_height, 3]'}), '(tf.float32, shape=[args.batch_size, args.crop_width, args.\n crop_height, 3])\n', (5646, 5726), True, 'import tensorflow as tf\n'), ((5733, 5827), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[args.batch_size, args.crop_width, args.crop_height, 3]'}), '(tf.float32, shape=[args.batch_size, args.crop_width, args.\n crop_height, 3])\n', (5747, 5827), True, 'import tensorflow as tf\n'), ((5834, 5928), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[args.batch_size, args.crop_width, args.crop_height, 3]'}), '(tf.float32, shape=[args.batch_size, args.crop_width, args.\n crop_height, 3])\n', (5848, 5928), True, 'import tensorflow as tf\n'), ((5935, 6029), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[args.batch_size, args.crop_width, args.crop_height, 3]'}), '(tf.float32, shape=[args.batch_size, args.crop_width, args.\n crop_height, 3])\n', (5949, 6029), True, 'import tensorflow as tf\n'), ((6042, 6113), 'tensorflow.summary.image', 'tf.summary.image', (['"""le images"""', 'le_image_pl'], {'max_outputs': 'args.batch_size'}), "('le images', le_image_pl, max_outputs=args.batch_size)\n", (6058, 6113), True, 'import tensorflow as tf\n'), ((6129, 6200), 'tensorflow.summary.image', 'tf.summary.image', (['"""me images"""', 'me_image_pl'], {'max_outputs': 'args.batch_size'}), "('me images', me_image_pl, max_outputs=args.batch_size)\n", (6145, 6200), True, 'import tensorflow as tf\n'), ((6216, 6287), 'tensorflow.summary.image', 'tf.summary.image', (['"""he images"""', 'he_image_pl'], {'max_outputs': 'args.batch_size'}), "('he images', he_image_pl, max_outputs=args.batch_size)\n", (6232, 6287), True, 'import tensorflow as tf\n'), ((6303, 6374), 'tensorflow.summary.image', 'tf.summary.image', (['"""gt images"""', 'gt_image_pl'], {'max_outputs': 'args.batch_size'}), "('gt images', gt_image_pl, 
max_outputs=args.batch_size)\n", (6319, 6374), True, 'import tensorflow as tf\n'), ((6493, 6548), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, None, 3]'}), '(tf.float32, shape=[None, None, None, 3])\n', (6507, 6548), True, 'import tensorflow as tf\n'), ((6555, 6588), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {'shape': '[]'}), "('float', shape=[])\n", (6569, 6588), True, 'import tensorflow as tf\n'), ((6609, 6818), 'builders.fusion_model_builder.build_model', 'fusion_model_builder.build_model', ([], {'model_name': 'args.model', 'frontend': 'args.frontend', 'input_exposure_stack': 'input_exposure_stacks', 'crop_width': 'args.crop_width', 'crop_height': 'args.crop_height', 'is_training': '(True)'}), '(model_name=args.model, frontend=args.\n frontend, input_exposure_stack=input_exposure_stacks, crop_width=args.\n crop_width, crop_height=args.crop_height, is_training=True)\n', (6641, 6818), False, 'from builders import fusion_model_builder\n'), ((6829, 6849), 'utils.utils.count_params', 'utils.count_params', ([], {}), '()\n', (6847, 6849), False, 'from utils import utils, helpers\n'), ((7288, 7320), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1000)'}), '(max_to_keep=1000)\n', (7302, 7320), True, 'import tensorflow as tf\n'), ((9176, 9264), 'utils.utils.prepare_data_multiexposure', 'utils.prepare_data_multiexposure', (["('%s/train_256' % args.dataset)", 'exposure_keys_train'], {}), "('%s/train_256' % args.dataset,\n exposure_keys_train)\n", (9208, 9264), False, 'from utils import utils, helpers\n'), ((9296, 9398), 'utils.utils.prepare_data_multiexposure', 'utils.prepare_data_multiexposure', (["('%s/train_labels_256' % args.dataset)", 'exposure_keys_train_labels'], {}), "('%s/train_labels_256' % args.dataset,\n exposure_keys_train_labels)\n", (9328, 9398), False, 'from utils import utils, helpers\n'), ((9422, 9498), 'utils.utils.prepare_data_multiexposure', 'utils.prepare_data_multiexposure', (["('%s/val' % args.dataset)", 'exposure_keys_val'], {}), "('%s/val' % args.dataset, exposure_keys_val)\n", (9454, 9498), False, 'from utils import utils, helpers\n'), ((9532, 9626), 'utils.utils.prepare_data_multiexposure', 'utils.prepare_data_multiexposure', (["('%s/val_labels' % args.dataset)", 'exposure_keys_val_labels'], {}), "('%s/val_labels' % args.dataset,\n exposure_keys_val_labels)\n", (9564, 9626), False, 'from utils import utils, helpers\n'), ((10326, 10341), 'random.seed', 'random.seed', (['(16)'], {}), '(16)\n', (10337, 10341), False, 'import random\n'), ((1286, 1313), 'numpy.mean', 'np.mean', (['((img1 - img2) ** 2)'], {}), '((img1 - img2) ** 2)\n', (1293, 1313), True, 'import numpy as np\n'), ((4300, 4323), 'os.path.isdir', 'os.path.isdir', (['try_name'], {}), '(try_name)\n', (4313, 4323), False, 'import os, sys\n'), ((4327, 4348), 'os.makedirs', 'os.makedirs', (['try_name'], {}), '(try_name)\n', (4338, 4348), False, 'import os, sys\n'), ((6401, 6456), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, None, 6]'}), '(tf.float32, shape=[None, None, None, 6])\n', (6415, 6456), True, 'import tensorflow as tf\n'), ((7331, 7364), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7362, 7364), True, 'import tensorflow as tf\n'), ((11414, 11425), 'time.time', 'time.time', ([], {}), '()\n', (11423, 11425), False, 'import time, datetime\n'), ((11437, 11448), 'time.time', 'time.time', ([], {}), '()\n', (11446, 11448), False, 'import time, 
datetime\n'), ((15056, 15079), 'numpy.mean', 'np.mean', (['current_losses'], {}), '(current_losses)\n', (15063, 15079), True, 'import numpy as np\n'), ((20866, 20887), 'utils.utils.LOG', 'utils.LOG', (['train_time'], {}), '(train_time)\n', (20875, 20887), False, 'from utils import utils, helpers\n'), ((830, 851), 'numpy.max', 'np.max', (['image'], {'axis': '(2)'}), '(image, axis=2)\n', (836, 851), True, 'import numpy as np\n'), ((867, 891), 'numpy.zeros_like', 'np.zeros_like', (['brightest'], {}), '(brightest)\n', (880, 891), True, 'import numpy as np\n'), ((906, 930), 'numpy.zeros_like', 'np.zeros_like', (['brightest'], {}), '(brightest)\n', (919, 930), True, 'import numpy as np\n'), ((937, 976), 'numpy.frexp', 'np.frexp', (['brightest', 'mantissa', 'exponent'], {}), '(brightest, mantissa, exponent)\n', (945, 976), True, 'import numpy as np\n'), ((1037, 1098), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image.shape[1], 4)'], {'dtype': 'np.uint8'}), '((image.shape[0], image.shape[1], 4), dtype=np.uint8)\n', (1045, 1098), True, 'import numpy as np\n'), ((1118, 1173), 'numpy.around', 'np.around', (['(image[..., 0:3] * scaled_mantissa[..., None])'], {}), '(image[..., 0:3] * scaled_mantissa[..., None])\n', (1127, 1173), True, 'import numpy as np\n'), ((1189, 1214), 'numpy.around', 'np.around', (['(exponent + 128)'], {}), '(exponent + 128)\n', (1198, 1214), True, 'import numpy as np\n'), ((1469, 1490), 'tensorflow.log', 'tf.log', (['(1 + 5000 * im)'], {}), '(1 + 5000 * im)\n', (1475, 1490), True, 'import tensorflow as tf\n'), ((1487, 1505), 'tensorflow.log', 'tf.log', (['(1 + 5000.0)'], {}), '(1 + 5000.0)\n', (1493, 1505), True, 'import tensorflow as tf\n'), ((1548, 1569), 'numpy.log', 'np.log', (['(1 + 5000 * im)'], {}), '(1 + 5000 * im)\n', (1554, 1569), True, 'import numpy as np\n'), ((1566, 1584), 'numpy.log', 'np.log', (['(1 + 5000.0)'], {}), '(1 + 5000.0)\n', (1572, 1584), True, 'import numpy as np\n'), ((4821, 4865), 'os.path.join', 'os.path.join', (['args.logdir', 'try_name', '"""train"""'], {}), "(args.logdir, try_name, 'train')\n", (4833, 4865), False, 'import os, sys\n'), ((4883, 4927), 'os.path.join', 'os.path.join', (['args.logdir', 'try_name', '"""train"""'], {}), "(args.logdir, try_name, 'train')\n", (4895, 4927), False, 'import os, sys\n'), ((4966, 5009), 'os.path.join', 'os.path.join', (['args.logdir', 'try_name', '"""test"""'], {}), "(args.logdir, try_name, 'test')\n", (4978, 5009), False, 'import os, sys\n'), ((5027, 5070), 'os.path.join', 'os.path.join', (['args.logdir', 'try_name', '"""test"""'], {}), "(args.logdir, try_name, 'test')\n", (5039, 5070), False, 'import os, sys\n'), ((7171, 7211), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (7193, 7211), True, 'import tensorflow as tf\n'), ((15171, 15227), 'os.path.isdir', 'os.path.isdir', (["('%s/%s/%04d' % (try_name, 'ckpts', epoch))"], {}), "('%s/%s/%04d' % (try_name, 'ckpts', epoch))\n", (15184, 15227), False, 'import os, sys\n'), ((15230, 15284), 'os.makedirs', 'os.makedirs', (["('%s/%s/%04d' % (try_name, 'ckpts', epoch))"], {}), "('%s/%s/%04d' % (try_name, 'ckpts', epoch))\n", (15241, 15284), False, 'import os, sys\n'), ((19369, 19391), 'numpy.mean', 'np.mean', (['psnr_pre_list'], {}), '(psnr_pre_list)\n', (19376, 19391), True, 'import numpy as np\n'), ((19412, 19435), 'numpy.mean', 'np.mean', (['psnr_post_list'], {}), '(psnr_post_list)\n', (19419, 19435), True, 'import numpy as np\n'), ((19456, 19479), 'numpy.mean', 'np.mean', (['pred_time_list'], 
{}), '(pred_time_list)\n', (19463, 19479), True, 'import numpy as np\n'), ((20559, 20570), 'time.time', 'time.time', ([], {}), '()\n', (20568, 20570), False, 'import time, datetime\n'), ((513, 566), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (539, 566), False, 'import argparse\n'), ((11755, 11795), 'cv2.imread', 'cv2.imread', (['train_input_names_he[id]', '(-1)'], {}), '(train_input_names_he[id], -1)\n', (11765, 11795), False, 'import os, time, cv2, sys, math\n'), ((11920, 11984), 'utils.utils.ldr_to_hdr_train', 'utils.ldr_to_hdr_train', (['input_image_he', 'train_input_names_he[id]'], {}), '(input_image_he, train_input_names_he[id])\n', (11942, 11984), False, 'from utils import utils, helpers\n'), ((12007, 12069), 'numpy.concatenate', 'np.concatenate', (['[input_image_he, input_image_he_gamma]'], {'axis': '(2)'}), '([input_image_he, input_image_he_gamma], axis=2)\n', (12021, 12069), True, 'import numpy as np\n'), ((12097, 12137), 'cv2.imread', 'cv2.imread', (['train_input_names_me[id]', '(-1)'], {}), '(train_input_names_me[id], -1)\n', (12107, 12137), False, 'import os, time, cv2, sys, math\n'), ((12262, 12326), 'utils.utils.ldr_to_hdr_train', 'utils.ldr_to_hdr_train', (['input_image_me', 'train_input_names_me[id]'], {}), '(input_image_me, train_input_names_me[id])\n', (12284, 12326), False, 'from utils import utils, helpers\n'), ((12349, 12411), 'numpy.concatenate', 'np.concatenate', (['[input_image_me, input_image_me_gamma]'], {'axis': '(2)'}), '([input_image_me, input_image_me_gamma], axis=2)\n', (12363, 12411), True, 'import numpy as np\n'), ((12439, 12479), 'cv2.imread', 'cv2.imread', (['train_input_names_le[id]', '(-1)'], {}), '(train_input_names_le[id], -1)\n', (12449, 12479), False, 'import os, time, cv2, sys, math\n'), ((12604, 12668), 'utils.utils.ldr_to_hdr_train', 'utils.ldr_to_hdr_train', (['input_image_le', 'train_input_names_le[id]'], {}), '(input_image_le, train_input_names_le[id])\n', (12626, 12668), False, 'from utils import utils, helpers\n'), ((12691, 12753), 'numpy.concatenate', 'np.concatenate', (['[input_image_le, input_image_le_gamma]'], {'axis': '(2)'}), '([input_image_le, input_image_le_gamma], axis=2)\n', (12705, 12753), True, 'import numpy as np\n'), ((13190, 13228), 'numpy.stack', 'np.stack', (['input_image_le_batch'], {'axis': '(1)'}), '(input_image_le_batch, axis=1)\n', (13198, 13228), True, 'import numpy as np\n'), ((13267, 13305), 'numpy.stack', 'np.stack', (['input_image_me_batch'], {'axis': '(1)'}), '(input_image_me_batch, axis=1)\n', (13275, 13305), True, 'import numpy as np\n'), ((13344, 13382), 'numpy.stack', 'np.stack', (['input_image_he_batch'], {'axis': '(1)'}), '(input_image_he_batch, axis=1)\n', (13352, 13382), True, 'import numpy as np\n'), ((13421, 13457), 'numpy.stack', 'np.stack', (['output_image_batch'], {'axis': '(1)'}), '(output_image_batch, axis=1)\n', (13429, 13457), True, 'import numpy as np\n'), ((14362, 14385), 'numpy.mean', 'np.mean', (['small_loss_bin'], {}), '(small_loss_bin)\n', (14369, 14385), True, 'import numpy as np\n'), ((14552, 14575), 'utils.utils.LOG', 'utils.LOG', (['string_print'], {}), '(string_print)\n', (14561, 14575), False, 'from utils import utils, helpers\n'), ((14870, 14881), 'time.time', 'time.time', ([], {}), '()\n', (14879, 14881), False, 'import time, datetime\n'), ((16044, 16103), 'os.path.isdir', 'os.path.isdir', (["('%s/%s/%04d' % (try_name, 'val_Imgs', epoch))"], {}), "('%s/%s/%04d' % (try_name, 'val_Imgs', 
epoch))\n", (16057, 16103), False, 'import os, sys\n'), ((16107, 16164), 'os.makedirs', 'os.makedirs', (["('%s/%s/%04d' % (try_name, 'val_Imgs', epoch))"], {}), "('%s/%s/%04d' % (try_name, 'val_Imgs', epoch))\n", (16118, 16164), False, 'import os, sys\n'), ((16469, 16508), 'cv2.imread', 'cv2.imread', (['val_input_names_he[ind]', '(-1)'], {}), '(val_input_names_he[ind], -1)\n', (16479, 16508), False, 'import os, time, cv2, sys, math\n'), ((16680, 16742), 'utils.utils.ldr_to_hdr_test', 'utils.ldr_to_hdr_test', (['input_image_he', 'val_input_names_he[ind]'], {}), '(input_image_he, val_input_names_he[ind])\n', (16701, 16742), False, 'from utils import utils, helpers\n'), ((16765, 16827), 'numpy.concatenate', 'np.concatenate', (['[input_image_he, input_image_he_gamma]'], {'axis': '(3)'}), '([input_image_he, input_image_he_gamma], axis=3)\n', (16779, 16827), True, 'import numpy as np\n'), ((16847, 16886), 'cv2.imread', 'cv2.imread', (['val_input_names_me[ind]', '(-1)'], {}), '(val_input_names_me[ind], -1)\n', (16857, 16886), False, 'import os, time, cv2, sys, math\n'), ((17055, 17117), 'utils.utils.ldr_to_hdr_test', 'utils.ldr_to_hdr_test', (['input_image_me', 'val_input_names_me[ind]'], {}), '(input_image_me, val_input_names_me[ind])\n', (17076, 17117), False, 'from utils import utils, helpers\n'), ((17140, 17202), 'numpy.concatenate', 'np.concatenate', (['[input_image_me, input_image_me_gamma]'], {'axis': '(3)'}), '([input_image_me, input_image_me_gamma], axis=3)\n', (17154, 17202), True, 'import numpy as np\n'), ((17225, 17264), 'cv2.imread', 'cv2.imread', (['val_input_names_le[ind]', '(-1)'], {}), '(val_input_names_le[ind], -1)\n', (17235, 17264), False, 'import os, time, cv2, sys, math\n'), ((17433, 17495), 'utils.utils.ldr_to_hdr_test', 'utils.ldr_to_hdr_test', (['input_image_le', 'val_input_names_le[ind]'], {}), '(input_image_le, val_input_names_le[ind])\n', (17454, 17495), False, 'from utils import utils, helpers\n'), ((17518, 17580), 'numpy.concatenate', 'np.concatenate', (['[input_image_le, input_image_le_gamma]'], {'axis': '(3)'}), '([input_image_le, input_image_le_gamma], axis=3)\n', (17532, 17580), True, 'import numpy as np\n'), ((17604, 17645), 'cv2.imread', 'cv2.imread', (['val_output_names_hdr[ind]', '(-1)'], {}), '(val_output_names_hdr[ind], -1)\n', (17614, 17645), False, 'import os, time, cv2, sys, math\n'), ((17690, 17734), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img_hdr', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_img_hdr, cv2.COLOR_BGR2RGB)\n', (17702, 17734), False, 'import os, time, cv2, sys, math\n'), ((17806, 17817), 'time.time', 'time.time', ([], {}), '()\n', (17815, 17817), False, 'import time, datetime\n'), ((18059, 18070), 'time.time', 'time.time', ([], {}), '()\n', (18068, 18070), False, 'import time, datetime\n'), ((18137, 18166), 'numpy.squeeze', 'np.squeeze', (['output_image_pred'], {}), '(output_image_pred)\n', (18147, 18166), True, 'import numpy as np\n'), ((18180, 18198), 'numpy.squeeze', 'np.squeeze', (['gt_hdr'], {}), '(gt_hdr)\n', (18190, 18198), True, 'import numpy as np\n'), ((18761, 18808), 'utils.utils.filepath_to_name', 'utils.filepath_to_name', (['val_input_names_he[ind]'], {}), '(val_input_names_he[ind])\n', (18783, 18808), False, 'from utils import utils, helpers\n'), ((19059, 19086), 'numpy.mean', 'np.mean', (['current_losses_val'], {}), '(current_losses_val)\n', (19066, 19086), True, 'import numpy as np\n'), ((19106, 19162), 'tensorflow.summary.merge', 'tf.summary.merge', (['[test_loss_summary, test_psnr_summary]'], {}), '([test_loss_summary, 
test_psnr_summary])\n', (19122, 19162), True, 'import tensorflow as tf\n'), ((1418, 1432), 'math.sqrt', 'math.sqrt', (['mse'], {}), '(mse)\n', (1427, 1432), False, 'import os, time, cv2, sys, math\n'), ((7252, 7276), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (7274, 7276), True, 'import tensorflow as tf\n'), ((12788, 12830), 'cv2.imread', 'cv2.imread', (['train_output_names_hdr[id]', '(-1)'], {}), '(train_output_names_hdr[id], -1)\n', (12798, 12830), False, 'import os, time, cv2, sys, math\n'), ((12885, 12925), 'numpy.expand_dims', 'np.expand_dims', (['input_image_le_c'], {'axis': '(0)'}), '(input_image_le_c, axis=0)\n', (12899, 12925), True, 'import numpy as np\n'), ((12959, 12999), 'numpy.expand_dims', 'np.expand_dims', (['input_image_me_c'], {'axis': '(0)'}), '(input_image_me_c, axis=0)\n', (12973, 12999), True, 'import numpy as np\n'), ((13033, 13073), 'numpy.expand_dims', 'np.expand_dims', (['input_image_he_c'], {'axis': '(0)'}), '(input_image_he_c, axis=0)\n', (13047, 13073), True, 'import numpy as np\n'), ((13105, 13141), 'numpy.expand_dims', 'np.expand_dims', (['output_image'], {'axis': '(0)'}), '(output_image, axis=0)\n', (13119, 13141), True, 'import numpy as np\n'), ((17762, 17780), 'numpy.float32', 'np.float32', (['gt_hdr'], {}), '(gt_hdr)\n', (17772, 17780), True, 'import numpy as np\n'), ((11827, 11878), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_image_train_he', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_image_train_he, cv2.COLOR_BGR2RGB)\n', (11839, 11878), False, 'import os, time, cv2, sys, math\n'), ((12169, 12220), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_image_train_me', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_image_train_me, cv2.COLOR_BGR2RGB)\n', (12181, 12220), False, 'import os, time, cv2, sys, math\n'), ((12511, 12562), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_image_train_le', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_image_train_le, cv2.COLOR_BGR2RGB)\n', (12523, 12562), False, 'import os, time, cv2, sys, math\n'), ((14496, 14507), 'time.time', 'time.time', ([], {}), '()\n', (14505, 14507), False, 'import time, datetime\n'), ((14949, 14972), 'numpy.mean', 'np.mean', (['current_losses'], {}), '(current_losses)\n', (14956, 14972), True, 'import numpy as np\n'), ((16589, 16632), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img_he', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_img_he, cv2.COLOR_BGR2RGB)\n', (16601, 16632), False, 'import os, time, cv2, sys, math\n'), ((16964, 17007), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img_me', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_img_me, cv2.COLOR_BGR2RGB)\n', (16976, 17007), False, 'import os, time, cv2, sys, math\n'), ((17342, 17385), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img_le', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_img_le, cv2.COLOR_BGR2RGB)\n', (17354, 17385), False, 'import os, time, cv2, sys, math\n'), ((19251, 19274), 'numpy.mean', 'np.mean', (['psnr_post_list'], {}), '(psnr_post_list)\n', (19258, 19274), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 22:20:52 2018
@author: Srinivas
"""
import numpy as np
X = np.arange(1, 1000)
Y = X[(X % 3 == 0) | (X % 5 == 0)]
Z = sum(Y)
print(Z)
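# Added cross-check (not part of the original script): by inclusion-exclusion on the
# arithmetic series for multiples of 3, 5 and 15 below 1000, the same total is
# 166833 + 99500 - 33165 = 233168.
sum_check = (3 * 333 * 334 + 5 * 199 * 200 - 15 * 66 * 67) // 2
assert Z == sum_check == 233168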
|
[
"numpy.arange"
] |
[((121, 139), 'numpy.arange', 'np.arange', (['(1)', '(1000)'], {}), '(1, 1000)\n', (130, 139), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
email: <EMAIL>
GitHub: phuycke
"""
#%%
import matplotlib.pyplot as plt
import mne
import numpy as np
import os
import pandas as pd
import seaborn as sns
from scipy import ndimage
from matplotlib import ticker, rcParams, gridspec
#%%
TEXT_SIZE = 15
rcParams['font.family'] = 'Times New Roman'
rcParams['axes.titlesize'] = TEXT_SIZE
rcParams['axes.labelsize'] = TEXT_SIZE
rcParams['xtick.labelsize'] = TEXT_SIZE
rcParams['ytick.labelsize'] = TEXT_SIZE
#%%
# create grid for plots
fig = plt.figure(figsize=(10, 9))
gs = gridspec.GridSpec(2, 13)
# TFR plot
fig_3a = plt.subplot(gs[0, :8])
# topoplot
fig_3b = plt.subplot(gs[0, 8:])
# alpha on the fast timescale
fig_3c_l = plt.subplot(gs[1, 0:3]) # novel condition
fig_3c_r = plt.subplot(gs[1, 3:6]) # repeating condition
# alpha on the slow timescale
fig_3d_l = plt.subplot(gs[1, 7:10]) # novel condition
fig_3d_r = plt.subplot(gs[1, 10:13]) # repeating condition
#%%
"""
Figure 3A
"""
# path to the result of the permutation data
PERM_DATA = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\Publish\Data\Stimulus-locked\Repetition 1 vs. repetition 8"
TIME_DATA = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\TF\Group level\data"
# define frequency bands (log spaced for setting the y-ticks later on)
FREQS = np.logspace(np.log10(4),
np.log10(30),
15)
# load the time data, and select everything between 0 and 1s
times = np.load(os.path.join(TIME_DATA, "stimulus_times.npy"))
times = times[np.where((times > 0) & (times <= 1))]
# take half the spacing between consecutive time samples
# (falls back to 0.0005 when there is only a single sample)
time_diff = np.diff(times) / 2. if len(times) > 1 else [0.0005]
# compute the limits of the time window (x-axis)
# start: first value of time (a bit larger than 0) - 0.00048828
# middle: all values except the last + 0.00048828
# final: last value of time (1) + 0.00048828
time_lims = np.concatenate([[times[0] - time_diff[0]], times[:-1] +
time_diff, [times[-1] + time_diff[-1]]])
# get the values that should be on the y-axis
yvals = FREQS
# compute the ratio: x[1] = x[0] * ratio (holds for all values)
ratio = yvals[1:] / yvals[:-1]
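# added note: FREQS comes from np.logspace, so consecutive frequencies share a
# constant ratio; this is why ratio[0] can stand in for every step further down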
# compute the limits of the frequencies (y-axis)
# start: first value of yvals (4) / 1.15479362
# middle: the values of yvals
# last: the last value of yvals (30) * 1.15479362
log_yvals = np.concatenate([[yvals[0] / ratio[0]], yvals,
[yvals[-1] * ratio[0]]])
# get the limits of the y-axis
# note that yval_lims is in this case equal to yvals, since yvals is
# log-spaced; this would not hold if linspace had been used for the frequencies
yval_lims = np.sqrt(log_yvals[:-2] * log_yvals[2:])
time_lims = time_lims[:-1]
# create a meshgrid
# time_mesh: row values are the same, column values differ (time)
# yval_mesh: row values differ (freqs), column values are the same
time_mesh, yval_mesh = np.meshgrid(time_lims, yval_lims)
# load the permutation test result array + check dimensions of the data
f_obs = np.load(os.path.join(PERM_DATA, "f_obs.npy"))
assert f_obs.shape == (64, 15, 1024)
# 64: electrodes, 15: frequencies, 1024: time points
# we average over electrodes to retain the frequency and time information
f_obs_mean = np.mean(f_obs, axis = 0)
# apply a gaussian filter to the data, with SD = 1 for both axes
gauss = ndimage.filters.gaussian_filter(f_obs_mean,
[1, 1],
mode = 'constant')
# create a pseudocolor plot
fig_3a.pcolormesh(time_mesh,
yval_mesh,
gauss,
cmap = "RdBu_r",
shading = "gouraud")
# draw a contour around larger values
# we draw the contour around values that are percentile 97.5 or larger
fig_3a.contour(time_mesh,
yval_mesh,
gauss,
levels = [np.percentile(gauss, 97.5)],
colors = "black",
linewidths = 3,
linestyles = "solid")
# set the y-axis parameters, note that the y-axis needs to be converted to
# log, and that a ticker needs to be called to set the y-axis ticks
fig_3a.set_yscale('log')
fig_3a.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
fig_3a.yaxis.set_minor_formatter(ticker.NullFormatter())
fig_3a.yaxis.set_minor_locator(ticker.NullLocator())
# once the ticks are set, we assign the values of FREQS to the ticks
tick_vals = yvals[np.unique(np.linspace(0, len(yvals) - 1, 15).round().astype('int'))]
fig_3a.set_yticks(tick_vals)
# determine the y-ticks
ticks_str = []
for t in tick_vals:
if round(t) in [4, 8, 13, 19, 30]:
ticks_str.append("{0:.2f}".format(t))
else:
ticks_str.append(" ")
fig_3a.set_yticklabels(ticks_str)
# set the x-axis parameters: a tick label is placed every 250 ms
fig_3a.set_xticks(np.arange(0, 1.1, .25))
fig_3a.set_xticklabels([str(int(label)) for label in np.arange(0, 1001, 250)])
# set the general title, and the titles of the x-axis and the y-axis
fig_3a.set_xlabel('Time after stimulus (ms)')
fig_3a.set_ylabel('Frequency (Hz)')
fig_3a.set_title("Stimulus 1 vs. 8: permutation test TFR\nAlpha on the fast timescale (p = 0.001)")
# load the cluster data, and keep only the significant clusters
clust = np.load(os.path.join(PERM_DATA, "clust.npy"), allow_pickle = True)
clust_p_val = np.load(os.path.join(PERM_DATA, "clust_p_val.npy"))
f_obs_plot = np.zeros_like(f_obs)
for c, p_val in zip(clust, clust_p_val):
if p_val <= 0.05:
f_obs_plot[tuple(c)] = f_obs[tuple(c)]
# take the average (excluding NaNs) of the significant data
f_obs_plot_mean = np.nanmean(f_obs_plot, axis = 0)
# create a 2D raster of the significant data (no plot) to use for the colorbar
im = fig_3a.imshow(f_obs_plot_mean,
extent = [times[0], times[-1],
FREQS[0], FREQS[-1]],
aspect = "auto",
origin = "lower",
interpolation = "hanning",
cmap = "RdBu_r")
# get the colorbar of the above 2D raster, and paste it on the existing TFR plot
# note that this data is used to create the colorbar, and not the filtered data
# since the values become smaller due to the filtering process. The plot reflects
# the actual data, filtering is only done for visual appeal
cbar = fig.colorbar(im, ax = fig_3a)
# set some colorbar parameters, such as the title, ticks and tick labels
cbar.ax.set_title("F-statistic",
fontdict = {"fontsize": TEXT_SIZE})
cbar.ax.get_yaxis().set_ticks(np.arange(0, np.round(np.max(f_obs_plot_mean), 1) + 0.05, 4))
cbar.ax.tick_params(labelsize = TEXT_SIZE - 3)
# bug fix: make sure that the 0 is shown on the x-axis of the final plot
fig_3a.set_xbound(0, 1)
#%%
"""
Figure 3B
"""
# Determines which part of the analysis to run + some plotting parameters
STIM_LOCKED = True
COMPUTE_TFR = False
BAND = [(8, 12, "Alpha")]
TMIN, TMAX = .65, .9
VMIN, VMAX = 0.5, 4.5
rcParams['font.family'] = 'Times New Roman'
rcParams['font.size'] = 8
# these are the subjects that had all 512 epochs recorded and stored safely
full_epochs = ["sub-02", "sub-03", "sub-04", "sub-05", "sub-06", "sub-08",
"sub-10", "sub-12", "sub-13", "sub-15", "sub-16", "sub-17",
"sub-18", "sub-19", "sub-20", "sub-21", "sub-22", "sub-23",
"sub-25", "sub-26", "sub-27", "sub-28", "sub-29", "sub-30"]
# load the TFR data
rep1 = mne.time_frequency.read_tfrs(r"C:\Users\pieter\Downloads\repetition 1 (24 subs)-tfr.h5")[0]
rep8 = mne.time_frequency.read_tfrs(r"C:\Users\pieter\Downloads\repetition 8 (24 subs)-tfr.h5")[0]
# save rep8 in temp, dB transform
temp = rep8
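# added note: this is an alias rather than a copy, so the dB transform and the
# in-place subtraction below also overwrite rep8._data itself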
temp._data = 10 * np.log10(rep8._data)
# save rep1 in temp2, dB transform
temp2 = rep1
temp2._data = 10 * np.log10(rep1._data)
temp._data -= temp2._data
# sanity check: the difference must differ element-wise from the transformed rep1 data;
# the second assert only confirms that temp still aliases rep8 after the in-place update
assert np.all(temp._data != rep1._data)
assert not np.sum(temp._data != rep8._data)
# colorbar with log scaled labels
def fmt_float(x, pos):
return r'${0:.2f}$'.format(x)
# define the data
avg_tfr = temp
# get the frequency bands
FMIN, FMAX, FNAME = BAND[0]
# make topoplot
avg_tfr.plot_topomap(tmin = TMIN,
tmax = TMAX,
fmin = FMIN,
fmax = FMAX,
vmin = VMIN,
vmax = VMAX,
unit = " ",
ch_type = "eeg",
cmap = "RdBu_r",
outlines = "head",
contours = 10,
colorbar = True,
cbar_fmt = fmt_float,
sensors = "ko",
axes = fig_3b,
title = " ")
# set a title which can be altered
fig_3b.set_title(r"$\alpha$ topography", size = TEXT_SIZE)
#%%
"""
Figure 3C
"""
# where to find the data files
ROOT = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\Publish\Data\Stimulus-locked\Theta, alpha, beta + behavioral data"
# seaborn param
sns.set_style("ticks")
sns.set_context("paper")
# read the data
df = pd.read_csv(os.path.join(ROOT, "theta_alpha_beta_behavioural.csv"))
# change the column names to their appropriate label
df.columns = ['Reaction time (ms)', 'RT_log', 'Accuracy', 'Accuracy_int',
'Error_int', 'Theta power', 'Alpha power', 'Beta power',
'Subject nr', 'Repetitions_overall', 'Repetition count',
'Block_overall', 'Block number', 'Condition', 'Trial_overall',
'Trial_block', 'Response', 'Stimulus_ID']
x_title, y_title = "Repetition count", "Alpha power"
# Novel condition
g = sns.regplot(x = x_title,
y = y_title,
data = df.loc[df["Condition"] == "Novel"],
x_estimator = np.mean,
x_ci = "ci",
ci = 95,
n_boot = 5000,
scatter_kws = {"s":15},
line_kws = {'lw': .75},
color = "darkgrey",
ax = fig_3c_l)
# Recurring condition
g = sns.regplot(x = x_title,
y = y_title,
data = df.loc[df["Condition"] == "Recurring"],
x_estimator = np.mean,
x_ci = "ci",
ci = 95,
n_boot = 5000,
scatter_kws = {"s":15},
line_kws = {'lw': .75},
color = "black",
ax = fig_3c_r)
# figure parameters (left figure)
fig_3c_l.set_title(r"Novel condition", size = TEXT_SIZE)
fig_3c_l.set_ylim([-.5, -.1])
fig_3c_l.set_yticks(np.arange(-.5, -.09, .1))
fig_3c_l.set_xticks(np.arange(1, 9))
fig_3c_l.set_xlim(0.5, 8.5)
fig_3c_l.set_xlabel(r"Stimulus number")
fig_3c_l.set_ylabel(r"$\alpha$ power")
# figure parameters (right figure)
fig_3c_r.set_xlim(0.5, 8.5)
fig_3c_r.set_xticks(np.arange(1, 9))
fig_3c_r.set_ylim([-.5, -.1])
fig_3c_r.set_yticks(np.arange(-.5, -.09, .1))
fig_3c_r.set_yticklabels([])
fig_3c_r.set_title(r"Repeating condition", size = TEXT_SIZE)
fig_3c_r.set_xlabel(r"Stimulus number")
fig_3c_r.set_ylabel(" ")
#%%
"""
Figure 3D
"""
# new variables
x_title, y_title = "Block number", "Alpha power"
# Novel condition
g = sns.regplot(x = x_title,
y = y_title,
data = df.loc[df["Condition"] == "Novel"],
x_estimator = np.mean,
x_ci = "ci",
ci = 95,
n_boot = 5000,
scatter_kws = {"s":15},
line_kws = {'lw': .75},
color = "darkgrey",
ax = fig_3d_l)
# Recurring condition
g = sns.regplot(x = x_title,
y = y_title,
data = df.loc[df["Condition"] == "Recurring"],
x_estimator = np.mean,
x_ci = "ci",
ci = 95,
n_boot = 5000,
scatter_kws = {"s":15},
line_kws = {'lw': .75},
color = "black",
ax = fig_3d_r)
# figure parameters (left figure)
fig_3d_l.set_title(r"Novel condition", size = TEXT_SIZE)
fig_3d_l.set_ylim([-.5, -.1])
fig_3d_l.set_yticks(np.arange(-.5, -.09, .1))
fig_3d_l.set_xticks(np.arange(1, 9))
fig_3d_l.set_xlim(0.5, 8.5)
fig_3d_l.set_xlabel(r"Block number")
fig_3d_l.set_ylabel(r"$\alpha$ power")
# figure parameters (right figure)
fig_3d_r.set_xlim(0.5, 8.5)
fig_3d_r.set_xticks(np.arange(1, 9))
fig_3d_r.set_ylim([-.5, -.1])
fig_3d_r.set_yticks(np.arange(-.5, -.09, .1))
fig_3d_r.set_yticklabels([])
fig_3d_r.set_title(r"Repeating condition", size = TEXT_SIZE)
fig_3d_r.set_xlabel(r"Block number")
fig_3d_r.set_ylabel(" ")
#%%
"""
Save figure
"""
# define the Figure dir + set the size of the image
FIG = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\Publish\Correct DPI plots"
# play around until the figure is satisfactory (difficult with high DPI)
plt.subplots_adjust(top=0.932, bottom=0.077, left=0.097, right=0.938,
hspace=0.5, wspace=0.35)
# letters indicating the panels
plt.text(-245, 5, "A", size = TEXT_SIZE+5)
plt.text(-85, 5, "B", size = TEXT_SIZE+5)
plt.text(-245, -1, "C", size = TEXT_SIZE+5)
plt.text(-115, -1, "D", size = TEXT_SIZE+5)
# dB label for panel B
plt.text(-1.5, 4.6, "dB", size = TEXT_SIZE)
# titles for panels C and D
plt.text(-200, -1.15, r"$\alpha$ power ~ fast timescale", size = TEXT_SIZE)
plt.text(-75, -1.15, r"$\alpha$ power ~ slow timescale", size = TEXT_SIZE)
# save as tiff and pdf
plt.savefig(fname = os.path.join(FIG, "Figure 3.tiff"), dpi = 300)
plt.savefig(fname = os.path.join(FIG, "Figure 3.pdf"), dpi = 300)
plt.close("all")
|
[
"numpy.sum",
"seaborn.regplot",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"os.path.join",
"numpy.nanmean",
"matplotlib.ticker.ScalarFormatter",
"numpy.meshgrid",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"numpy.max",
"numpy.log10",
"seaborn.set_context",
"seaborn.set_style",
"mne.time_frequency.read_tfrs",
"matplotlib.pyplot.text",
"numpy.percentile",
"matplotlib.pyplot.subplots_adjust",
"numpy.all",
"numpy.concatenate",
"matplotlib.pyplot.subplot",
"scipy.ndimage.filters.gaussian_filter",
"numpy.where",
"matplotlib.ticker.NullFormatter",
"numpy.diff",
"matplotlib.gridspec.GridSpec",
"matplotlib.ticker.NullLocator",
"numpy.sqrt"
] |
[((607, 634), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 9)'}), '(figsize=(10, 9))\n', (617, 634), True, 'import matplotlib.pyplot as plt\n'), ((642, 666), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(13)'], {}), '(2, 13)\n', (659, 666), False, 'from matplotlib import ticker, rcParams, gridspec\n'), ((690, 712), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, :8]'], {}), '(gs[0, :8])\n', (701, 712), True, 'import matplotlib.pyplot as plt\n'), ((736, 758), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 8:]'], {}), '(gs[0, 8:])\n', (747, 758), True, 'import matplotlib.pyplot as plt\n'), ((801, 824), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0:3]'], {}), '(gs[1, 0:3])\n', (812, 824), True, 'import matplotlib.pyplot as plt\n'), ((860, 883), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 3:6]'], {}), '(gs[1, 3:6])\n', (871, 883), True, 'import matplotlib.pyplot as plt\n'), ((955, 979), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 7:10]'], {}), '(gs[1, 7:10])\n', (966, 979), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1039), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 10:13]'], {}), '(gs[1, 10:13])\n', (1025, 1039), True, 'import matplotlib.pyplot as plt\n'), ((2195, 2296), 'numpy.concatenate', 'np.concatenate', (['[[times[0] - time_diff[0]], times[:-1] + time_diff, [times[-1] + time_diff[-1]]\n ]'], {}), '([[times[0] - time_diff[0]], times[:-1] + time_diff, [times[-\n 1] + time_diff[-1]]])\n', (2209, 2296), True, 'import numpy as np\n'), ((2682, 2752), 'numpy.concatenate', 'np.concatenate', (['[[yvals[0] / ratio[0]], yvals, [yvals[-1] * ratio[0]]]'], {}), '([[yvals[0] / ratio[0]], yvals, [yvals[-1] * ratio[0]]])\n', (2696, 2752), True, 'import numpy as np\n'), ((2980, 3019), 'numpy.sqrt', 'np.sqrt', (['(log_yvals[:-2] * log_yvals[2:])'], {}), '(log_yvals[:-2] * log_yvals[2:])\n', (2987, 3019), True, 'import numpy as np\n'), ((3232, 3265), 'numpy.meshgrid', 'np.meshgrid', (['time_lims', 'yval_lims'], {}), '(time_lims, yval_lims)\n', (3243, 3265), True, 'import numpy as np\n'), ((3571, 3593), 'numpy.mean', 'np.mean', (['f_obs'], {'axis': '(0)'}), '(f_obs, axis=0)\n', (3578, 3593), True, 'import numpy as np\n'), ((3670, 3738), 'scipy.ndimage.filters.gaussian_filter', 'ndimage.filters.gaussian_filter', (['f_obs_mean', '[1, 1]'], {'mode': '"""constant"""'}), "(f_obs_mean, [1, 1], mode='constant')\n", (3701, 3738), False, 'from scipy import ndimage\n'), ((5760, 5780), 'numpy.zeros_like', 'np.zeros_like', (['f_obs'], {}), '(f_obs)\n', (5773, 5780), True, 'import numpy as np\n'), ((5978, 6008), 'numpy.nanmean', 'np.nanmean', (['f_obs_plot'], {'axis': '(0)'}), '(f_obs_plot, axis=0)\n', (5988, 6008), True, 'import numpy as np\n'), ((8330, 8362), 'numpy.all', 'np.all', (['(temp._data != rep1._data)'], {}), '(temp._data != rep1._data)\n', (8336, 8362), True, 'import numpy as np\n'), ((9552, 9574), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (9565, 9574), True, 'import seaborn as sns\n'), ((9575, 9599), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (9590, 9599), True, 'import seaborn as sns\n'), ((10173, 10385), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x_title', 'y': 'y_title', 'data': "df.loc[df['Condition'] == 'Novel']", 'x_estimator': 'np.mean', 'x_ci': '"""ci"""', 'ci': '(95)', 'n_boot': '(5000)', 'scatter_kws': "{'s': 15}", 'line_kws': "{'lw': 0.75}", 'color': '"""darkgrey"""', 'ax': 'fig_3c_l'}), "(x=x_title, y=y_title, 
data=df.loc[df['Condition'] == 'Novel'],\n x_estimator=np.mean, x_ci='ci', ci=95, n_boot=5000, scatter_kws={'s': \n 15}, line_kws={'lw': 0.75}, color='darkgrey', ax=fig_3c_l)\n", (10184, 10385), True, 'import seaborn as sns\n'), ((10656, 10868), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x_title', 'y': 'y_title', 'data': "df.loc[df['Condition'] == 'Recurring']", 'x_estimator': 'np.mean', 'x_ci': '"""ci"""', 'ci': '(95)', 'n_boot': '(5000)', 'scatter_kws': "{'s': 15}", 'line_kws': "{'lw': 0.75}", 'color': '"""black"""', 'ax': 'fig_3c_r'}), "(x=x_title, y=y_title, data=df.loc[df['Condition'] ==\n 'Recurring'], x_estimator=np.mean, x_ci='ci', ci=95, n_boot=5000,\n scatter_kws={'s': 15}, line_kws={'lw': 0.75}, color='black', ax=fig_3c_r)\n", (10667, 10868), True, 'import seaborn as sns\n'), ((11889, 12101), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x_title', 'y': 'y_title', 'data': "df.loc[df['Condition'] == 'Novel']", 'x_estimator': 'np.mean', 'x_ci': '"""ci"""', 'ci': '(95)', 'n_boot': '(5000)', 'scatter_kws': "{'s': 15}", 'line_kws': "{'lw': 0.75}", 'color': '"""darkgrey"""', 'ax': 'fig_3d_l'}), "(x=x_title, y=y_title, data=df.loc[df['Condition'] == 'Novel'],\n x_estimator=np.mean, x_ci='ci', ci=95, n_boot=5000, scatter_kws={'s': \n 15}, line_kws={'lw': 0.75}, color='darkgrey', ax=fig_3d_l)\n", (11900, 12101), True, 'import seaborn as sns\n'), ((12372, 12584), 'seaborn.regplot', 'sns.regplot', ([], {'x': 'x_title', 'y': 'y_title', 'data': "df.loc[df['Condition'] == 'Recurring']", 'x_estimator': 'np.mean', 'x_ci': '"""ci"""', 'ci': '(95)', 'n_boot': '(5000)', 'scatter_kws': "{'s': 15}", 'line_kws': "{'lw': 0.75}", 'color': '"""black"""', 'ax': 'fig_3d_r'}), "(x=x_title, y=y_title, data=df.loc[df['Condition'] ==\n 'Recurring'], x_estimator=np.mean, x_ci='ci', ci=95, n_boot=5000,\n scatter_kws={'s': 15}, line_kws={'lw': 0.75}, color='black', ax=fig_3d_r)\n", (12383, 12584), True, 'import seaborn as sns\n'), ((13751, 13849), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.932)', 'bottom': '(0.077)', 'left': '(0.097)', 'right': '(0.938)', 'hspace': '(0.5)', 'wspace': '(0.35)'}), '(top=0.932, bottom=0.077, left=0.097, right=0.938,\n hspace=0.5, wspace=0.35)\n', (13770, 13849), True, 'import matplotlib.pyplot as plt\n'), ((13900, 13942), 'matplotlib.pyplot.text', 'plt.text', (['(-245)', '(5)', '"""A"""'], {'size': '(TEXT_SIZE + 5)'}), "(-245, 5, 'A', size=TEXT_SIZE + 5)\n", (13908, 13942), True, 'import matplotlib.pyplot as plt\n'), ((13947, 13988), 'matplotlib.pyplot.text', 'plt.text', (['(-85)', '(5)', '"""B"""'], {'size': '(TEXT_SIZE + 5)'}), "(-85, 5, 'B', size=TEXT_SIZE + 5)\n", (13955, 13988), True, 'import matplotlib.pyplot as plt\n'), ((13993, 14036), 'matplotlib.pyplot.text', 'plt.text', (['(-245)', '(-1)', '"""C"""'], {'size': '(TEXT_SIZE + 5)'}), "(-245, -1, 'C', size=TEXT_SIZE + 5)\n", (14001, 14036), True, 'import matplotlib.pyplot as plt\n'), ((14041, 14084), 'matplotlib.pyplot.text', 'plt.text', (['(-115)', '(-1)', '"""D"""'], {'size': '(TEXT_SIZE + 5)'}), "(-115, -1, 'D', size=TEXT_SIZE + 5)\n", (14049, 14084), True, 'import matplotlib.pyplot as plt\n'), ((14113, 14154), 'matplotlib.pyplot.text', 'plt.text', (['(-1.5)', '(4.6)', '"""dB"""'], {'size': 'TEXT_SIZE'}), "(-1.5, 4.6, 'dB', size=TEXT_SIZE)\n", (14121, 14154), True, 'import matplotlib.pyplot as plt\n'), ((14190, 14263), 'matplotlib.pyplot.text', 'plt.text', (['(-200)', '(-1.15)', '"""$\\\\alpha$ power ~ fast timescale"""'], {'size': 'TEXT_SIZE'}), "(-200, -1.15, '$\\\\alpha$ power ~ fast 
timescale', size=TEXT_SIZE)\n", (14198, 14263), True, 'import matplotlib.pyplot as plt\n'), ((14266, 14338), 'matplotlib.pyplot.text', 'plt.text', (['(-75)', '(-1.15)', '"""$\\\\alpha$ power ~ slow timescale"""'], {'size': 'TEXT_SIZE'}), "(-75, -1.15, '$\\\\alpha$ power ~ slow timescale', size=TEXT_SIZE)\n", (14274, 14338), True, 'import matplotlib.pyplot as plt\n'), ((14499, 14515), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (14508, 14515), True, 'import matplotlib.pyplot as plt\n'), ((1493, 1504), 'numpy.log10', 'np.log10', (['(4)'], {}), '(4)\n', (1501, 1504), True, 'import numpy as np\n'), ((1527, 1539), 'numpy.log10', 'np.log10', (['(30)'], {}), '(30)\n', (1535, 1539), True, 'import numpy as np\n'), ((1648, 1693), 'os.path.join', 'os.path.join', (['TIME_DATA', '"""stimulus_times.npy"""'], {}), "(TIME_DATA, 'stimulus_times.npy')\n", (1660, 1693), False, 'import os\n'), ((1713, 1749), 'numpy.where', 'np.where', (['((times > 0) & (times <= 1))'], {}), '((times > 0) & (times <= 1))\n', (1721, 1749), True, 'import numpy as np\n'), ((3355, 3391), 'os.path.join', 'os.path.join', (['PERM_DATA', '"""f_obs.npy"""'], {}), "(PERM_DATA, 'f_obs.npy')\n", (3367, 3391), False, 'import os\n'), ((4559, 4583), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {}), '()\n', (4581, 4583), False, 'from matplotlib import ticker, rcParams, gridspec\n'), ((4618, 4640), 'matplotlib.ticker.NullFormatter', 'ticker.NullFormatter', ([], {}), '()\n', (4638, 4640), False, 'from matplotlib import ticker, rcParams, gridspec\n'), ((4673, 4693), 'matplotlib.ticker.NullLocator', 'ticker.NullLocator', ([], {}), '()\n', (4691, 4693), False, 'from matplotlib import ticker, rcParams, gridspec\n'), ((5179, 5202), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.25)'], {}), '(0, 1.1, 0.25)\n', (5188, 5202), True, 'import numpy as np\n'), ((5621, 5657), 'os.path.join', 'os.path.join', (['PERM_DATA', '"""clust.npy"""'], {}), "(PERM_DATA, 'clust.npy')\n", (5633, 5657), False, 'import os\n'), ((5702, 5744), 'os.path.join', 'os.path.join', (['PERM_DATA', '"""clust_p_val.npy"""'], {}), "(PERM_DATA, 'clust_p_val.npy')\n", (5714, 5744), False, 'import os\n'), ((7860, 7956), 'mne.time_frequency.read_tfrs', 'mne.time_frequency.read_tfrs', (['"""C:\\\\Users\\\\pieter\\\\Downloads\\\\repetition 1 (24 subs)-tfr.h5"""'], {}), "(\n 'C:\\\\Users\\\\pieter\\\\Downloads\\\\repetition 1 (24 subs)-tfr.h5')\n", (7888, 7956), False, 'import mne\n'), ((7959, 8055), 'mne.time_frequency.read_tfrs', 'mne.time_frequency.read_tfrs', (['"""C:\\\\Users\\\\pieter\\\\Downloads\\\\repetition 8 (24 subs)-tfr.h5"""'], {}), "(\n 'C:\\\\Users\\\\pieter\\\\Downloads\\\\repetition 8 (24 subs)-tfr.h5')\n", (7987, 8055), False, 'import mne\n'), ((8120, 8140), 'numpy.log10', 'np.log10', (['rep8._data'], {}), '(rep8._data)\n', (8128, 8140), True, 'import numpy as np\n'), ((8209, 8229), 'numpy.log10', 'np.log10', (['rep1._data'], {}), '(rep1._data)\n', (8217, 8229), True, 'import numpy as np\n'), ((8374, 8406), 'numpy.sum', 'np.sum', (['(temp._data != rep8._data)'], {}), '(temp._data != rep8._data)\n', (8380, 8406), True, 'import numpy as np\n'), ((9634, 9688), 'os.path.join', 'os.path.join', (['ROOT', '"""theta_alpha_beta_behavioural.csv"""'], {}), "(ROOT, 'theta_alpha_beta_behavioural.csv')\n", (9646, 9688), False, 'import os\n'), ((11260, 11287), 'numpy.arange', 'np.arange', (['(-0.5)', '(-0.09)', '(0.1)'], {}), '(-0.5, -0.09, 0.1)\n', (11269, 11287), True, 'import numpy as np\n'), ((11309, 11324), 'numpy.arange', 
'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (11318, 11324), True, 'import numpy as np\n'), ((11517, 11532), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (11526, 11532), True, 'import numpy as np\n'), ((11586, 11613), 'numpy.arange', 'np.arange', (['(-0.5)', '(-0.09)', '(0.1)'], {}), '(-0.5, -0.09, 0.1)\n', (11595, 11613), True, 'import numpy as np\n'), ((12976, 13003), 'numpy.arange', 'np.arange', (['(-0.5)', '(-0.09)', '(0.1)'], {}), '(-0.5, -0.09, 0.1)\n', (12985, 13003), True, 'import numpy as np\n'), ((13025, 13040), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (13034, 13040), True, 'import numpy as np\n'), ((13231, 13246), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (13240, 13246), True, 'import numpy as np\n'), ((13300, 13327), 'numpy.arange', 'np.arange', (['(-0.5)', '(-0.09)', '(0.1)'], {}), '(-0.5, -0.09, 0.1)\n', (13309, 13327), True, 'import numpy as np\n'), ((1908, 1922), 'numpy.diff', 'np.diff', (['times'], {}), '(times)\n', (1915, 1922), True, 'import numpy as np\n'), ((14385, 14419), 'os.path.join', 'os.path.join', (['FIG', '"""Figure 3.tiff"""'], {}), "(FIG, 'Figure 3.tiff')\n", (14397, 14419), False, 'import os\n'), ((14452, 14485), 'os.path.join', 'os.path.join', (['FIG', '"""Figure 3.pdf"""'], {}), "(FIG, 'Figure 3.pdf')\n", (14464, 14485), False, 'import os\n'), ((4214, 4240), 'numpy.percentile', 'np.percentile', (['gauss', '(97.5)'], {}), '(gauss, 97.5)\n', (4227, 4240), True, 'import numpy as np\n'), ((5256, 5279), 'numpy.arange', 'np.arange', (['(0)', '(1001)', '(250)'], {}), '(0, 1001, 250)\n', (5265, 5279), True, 'import numpy as np\n'), ((6978, 7001), 'numpy.max', 'np.max', (['f_obs_plot_mean'], {}), '(f_obs_plot_mean)\n', (6984, 7001), True, 'import numpy as np\n')]
|
"""
Date modification
"""
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(threshold=np.inf)
root_dir = '/media/xiayule/bdcp/other'
def modify_date():
img_path = os.path.join(root_dir, '3.jpg')
img = cv2.imread(img_path)
# _, img1 = cv2.threshold(img, 150, 200, cv2.THRESH_BINARY)
hue_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
l_range = np.array([140, 43, 46])
h_range = np.array([180, 255, 255])
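# added note: OpenCV stores hue in [0, 179], so H in [140, 180] together with
# S >= 43 and V >= 46 keeps saturated magenta/red tones -- presumably the red date stamp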
th = cv2.inRange(hue_img, l_range, h_range)
index1 = th == 255
img1 = np.zeros(img.shape, np.uint8)
img1[:, :] = (255, 255, 255)
img1[index1] = img[index1]
cv2.namedWindow('1', cv2.WINDOW_NORMAL)
cv2.imshow('1', img1)
cv2.waitKey()
cv2.destroyAllWindows()
def get_print():
"""
Extract the reddish stamp pixels from zhuangbei2.jpg onto a transparent background.
:return:
"""
img_path = os.path.join(root_dir, 'zhuangbei2.jpg')
img = cv2.imread(img_path)
w, h, c = img.shape
dst_img = np.ones((w, h, c), np.uint8)*255
dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
for i in range(w):
for j in range(h):
pixel = img[i, j, :]
b, g, r = pixel[0], pixel[1], pixel[2]
if 80 <= b < 160 and 80 <= g < 150 and 140 <= r < 240:
dst_img[i, j, 0] = b
dst_img[i, j, 1] = g
dst_img[i, j, 2] = r
# dst_img[i, j, 3] = [b, g, r]
else:
dst_img[i, j, 3] = 0
cv2.imwrite(os.path.join(root_dir, 'zhuangbei2.png'), dst_img)
cv2.namedWindow('1', cv2.WINDOW_NORMAL)
cv2.imshow('1', dst_img)
cv2.waitKey()
cv2.destroyAllWindows()
def get_print1():
"""
Keep only the coloured (non-grey) pixels of zhuangbei2.jpg and make the grey background transparent.
:return:
"""
img_path = os.path.join(root_dir, 'zhuangbei2.jpg')
img = cv2.imread(img_path)
w, h, c = img.shape
dst_img = np.ones((w, h, c), np.uint8)*255
dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
for i in range(w):
for j in range(h):
pixel = img[i, j, :]
b, g, r = pixel[0], pixel[1], pixel[2]
m = (int(b)+int(g)+int(r))/3
if abs(b-m) < 20 and abs(g-m) < 20 and abs(r-m) < 20:
dst_img[i, j, 3] = 0
else:
dst_img[i, j, 0] = b
dst_img[i, j, 1] = g
dst_img[i, j, 2] = r
cv2.imwrite(os.path.join(root_dir, 'zhuangbei2.png'), dst_img)
cv2.namedWindow('1', cv2.WINDOW_NORMAL)
cv2.imshow('1', dst_img)
cv2.waitKey()
cv2.destroyAllWindows()
def get_touming():
"""
Keep only the dark pixels of 26.jpg and make everything else transparent.
:return:
"""
img_path = os.path.join(root_dir, '26.jpg')
img = cv2.imread(img_path)
w, h, c = img.shape
dst_img = np.ones((w, h, c), np.uint8)*255
dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
for i in range(w):
for j in range(h):
pixel = img[i, j, :]
b, g, r = pixel[0], pixel[1], pixel[2]
if 0 <= b < 50 and 0 <= g < 50 and 0 <= r < 50:
dst_img[i, j, 0] = b
dst_img[i, j, 1] = g
dst_img[i, j, 2] = r
# dst_img[i, j, 3] = [b, g, r]
else:
dst_img[i, j, 3] = 0
cv2.imwrite(os.path.join(root_dir, '26_1.png'), dst_img)
cv2.namedWindow('1', cv2.WINDOW_NORMAL)
cv2.imshow('1', dst_img)
cv2.waitKey()
cv2.destroyAllWindows()
def myfunc1(x):
if x >= 0:
return x
else:
return 2*x/(1+np.exp(-x))
def myfunc1_der1(x):
if x >= 0:
return 1
else:
return 2*(1 + np.exp(-x) + x * np.exp(-x)) / pow(1 + np.exp(-x), 2)
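# Added sketch (not in the original): sanity-check the analytic derivative of the
# x < 0 branch against a central finite difference at a sample point.
_x, _h = -1.0, 1e-5
_numeric = (myfunc1(_x + _h) - myfunc1(_x - _h)) / (2 * _h)
assert abs(_numeric - myfunc1_der1(_x)) < 1e-6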
def plot_swish():
"""
Plot the swish-style activation and its first derivative.
:return:
"""
x = np.linspace(-4, 4, 1001)
y = np.array([myfunc1(i) for i in x])
y_d1 = np.array([myfunc1_der1(i) for i in x])
plt.plot(x, y, x, y_d1)
plt.show()
def modify_pixel():
img_path = os.path.join(root_dir, '51.png')
img = cv2.imread(img_path).astype('int')
w, h, c = img.shape
dst_img = np.ones((w, h, c), np.uint8) * 255
dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
for i in range(w):
for j in range(h):
pixel = img[i, j, :]
b, g, r = pixel[0], pixel[1], pixel[2]
if b < 255 and g < 255 and r < 255:
dst_img[i, j, 0] = b
dst_img[i, j, 1] = g
dst_img[i, j, 2] = min(r + 15, 255)  # clip so the uint8 red channel cannot overflow
# dst_img[i, j, 3] = [b, g, r]
else:
dst_img[i, j, 3] = 0
dst_img[dst_img > 255] = 255
cv2.imwrite(os.path.join(root_dir, '5_1.png'), dst_img)
cv2.namedWindow('1', cv2.WINDOW_NORMAL)
cv2.imshow('1', dst_img)
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == r'__main__':
get_touming()
# plot_swish()
# modify_pixel()
|
[
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"numpy.ones",
"cv2.imread",
"cv2.inRange",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"cv2.imshow",
"os.path.join",
"cv2.namedWindow"
] |
[((85, 122), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (104, 122), True, 'import numpy as np\n'), ((199, 230), 'os.path.join', 'os.path.join', (['root_dir', '"""3.jpg"""'], {}), "(root_dir, '3.jpg')\n", (211, 230), False, 'import os\n'), ((241, 261), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (251, 261), False, 'import cv2\n'), ((340, 376), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (352, 376), False, 'import cv2\n'), ((391, 414), 'numpy.array', 'np.array', (['[140, 43, 46]'], {}), '([140, 43, 46])\n', (399, 414), True, 'import numpy as np\n'), ((429, 454), 'numpy.array', 'np.array', (['[180, 255, 255]'], {}), '([180, 255, 255])\n', (437, 454), True, 'import numpy as np\n'), ((464, 502), 'cv2.inRange', 'cv2.inRange', (['hue_img', 'l_range', 'h_range'], {}), '(hue_img, l_range, h_range)\n', (475, 502), False, 'import cv2\n'), ((537, 566), 'numpy.zeros', 'np.zeros', (['img.shape', 'np.uint8'], {}), '(img.shape, np.uint8)\n', (545, 566), True, 'import numpy as np\n'), ((636, 675), 'cv2.namedWindow', 'cv2.namedWindow', (['"""1"""', 'cv2.WINDOW_NORMAL'], {}), "('1', cv2.WINDOW_NORMAL)\n", (651, 675), False, 'import cv2\n'), ((680, 701), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'img1'], {}), "('1', img1)\n", (690, 701), False, 'import cv2\n'), ((706, 719), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (717, 719), False, 'import cv2\n'), ((724, 747), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (745, 747), False, 'import cv2\n'), ((812, 852), 'os.path.join', 'os.path.join', (['root_dir', '"""zhuangbei2.jpg"""'], {}), "(root_dir, 'zhuangbei2.jpg')\n", (824, 852), False, 'import os\n'), ((863, 883), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (873, 883), False, 'import cv2\n'), ((970, 1011), 'cv2.cvtColor', 'cv2.cvtColor', (['dst_img', 'cv2.COLOR_BGR2BGRA'], {}), '(dst_img, cv2.COLOR_BGR2BGRA)\n', (982, 1011), False, 'import cv2\n'), ((1499, 1538), 'cv2.namedWindow', 'cv2.namedWindow', (['"""1"""', 'cv2.WINDOW_NORMAL'], {}), "('1', cv2.WINDOW_NORMAL)\n", (1514, 1538), False, 'import cv2\n'), ((1543, 1567), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'dst_img'], {}), "('1', dst_img)\n", (1553, 1567), False, 'import cv2\n'), ((1572, 1585), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1583, 1585), False, 'import cv2\n'), ((1590, 1613), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1611, 1613), False, 'import cv2\n'), ((1678, 1718), 'os.path.join', 'os.path.join', (['root_dir', '"""zhuangbei2.jpg"""'], {}), "(root_dir, 'zhuangbei2.jpg')\n", (1690, 1718), False, 'import os\n'), ((1729, 1749), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1739, 1749), False, 'import cv2\n'), ((1836, 1877), 'cv2.cvtColor', 'cv2.cvtColor', (['dst_img', 'cv2.COLOR_BGR2BGRA'], {}), '(dst_img, cv2.COLOR_BGR2BGRA)\n', (1848, 1877), False, 'import cv2\n'), ((2358, 2397), 'cv2.namedWindow', 'cv2.namedWindow', (['"""1"""', 'cv2.WINDOW_NORMAL'], {}), "('1', cv2.WINDOW_NORMAL)\n", (2373, 2397), False, 'import cv2\n'), ((2402, 2426), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'dst_img'], {}), "('1', dst_img)\n", (2412, 2426), False, 'import cv2\n'), ((2431, 2444), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (2442, 2444), False, 'import cv2\n'), ((2449, 2472), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2470, 2472), False, 'import cv2\n'), ((2539, 2571), 'os.path.join', 'os.path.join', 
(['root_dir', '"""26.jpg"""'], {}), "(root_dir, '26.jpg')\n", (2551, 2571), False, 'import os\n'), ((2582, 2602), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (2592, 2602), False, 'import cv2\n'), ((2689, 2730), 'cv2.cvtColor', 'cv2.cvtColor', (['dst_img', 'cv2.COLOR_BGR2BGRA'], {}), '(dst_img, cv2.COLOR_BGR2BGRA)\n', (2701, 2730), False, 'import cv2\n'), ((3205, 3244), 'cv2.namedWindow', 'cv2.namedWindow', (['"""1"""', 'cv2.WINDOW_NORMAL'], {}), "('1', cv2.WINDOW_NORMAL)\n", (3220, 3244), False, 'import cv2\n'), ((3249, 3273), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'dst_img'], {}), "('1', dst_img)\n", (3259, 3273), False, 'import cv2\n'), ((3278, 3291), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (3289, 3291), False, 'import cv2\n'), ((3296, 3319), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3317, 3319), False, 'import cv2\n'), ((3623, 3647), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(1001)'], {}), '(-4, 4, 1001)\n', (3634, 3647), True, 'import numpy as np\n'), ((3744, 3767), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', 'x', 'y_d1'], {}), '(x, y, x, y_d1)\n', (3752, 3767), True, 'import matplotlib.pyplot as plt\n'), ((3772, 3782), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3780, 3782), True, 'import matplotlib.pyplot as plt\n'), ((3820, 3852), 'os.path.join', 'os.path.join', (['root_dir', '"""51.png"""'], {}), "(root_dir, '51.png')\n", (3832, 3852), False, 'import os\n'), ((3986, 4027), 'cv2.cvtColor', 'cv2.cvtColor', (['dst_img', 'cv2.COLOR_BGR2BGRA'], {}), '(dst_img, cv2.COLOR_BGR2BGRA)\n', (3998, 4027), False, 'import cv2\n'), ((4525, 4564), 'cv2.namedWindow', 'cv2.namedWindow', (['"""1"""', 'cv2.WINDOW_NORMAL'], {}), "('1', cv2.WINDOW_NORMAL)\n", (4540, 4564), False, 'import cv2\n'), ((4569, 4593), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'dst_img'], {}), "('1', dst_img)\n", (4579, 4593), False, 'import cv2\n'), ((4598, 4611), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (4609, 4611), False, 'import cv2\n'), ((4616, 4639), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4637, 4639), False, 'import cv2\n'), ((923, 951), 'numpy.ones', 'np.ones', (['(w, h, c)', 'np.uint8'], {}), '((w, h, c), np.uint8)\n', (930, 951), True, 'import numpy as np\n'), ((1444, 1484), 'os.path.join', 'os.path.join', (['root_dir', '"""zhuangbei2.png"""'], {}), "(root_dir, 'zhuangbei2.png')\n", (1456, 1484), False, 'import os\n'), ((1789, 1817), 'numpy.ones', 'np.ones', (['(w, h, c)', 'np.uint8'], {}), '((w, h, c), np.uint8)\n', (1796, 1817), True, 'import numpy as np\n'), ((2303, 2343), 'os.path.join', 'os.path.join', (['root_dir', '"""zhuangbei2.png"""'], {}), "(root_dir, 'zhuangbei2.png')\n", (2315, 2343), False, 'import os\n'), ((2642, 2670), 'numpy.ones', 'np.ones', (['(w, h, c)', 'np.uint8'], {}), '((w, h, c), np.uint8)\n', (2649, 2670), True, 'import numpy as np\n'), ((3156, 3190), 'os.path.join', 'os.path.join', (['root_dir', '"""26_1.png"""'], {}), "(root_dir, '26_1.png')\n", (3168, 3190), False, 'import os\n'), ((3937, 3965), 'numpy.ones', 'np.ones', (['(w, h, c)', 'np.uint8'], {}), '((w, h, c), np.uint8)\n', (3944, 3965), True, 'import numpy as np\n'), ((4477, 4510), 'os.path.join', 'os.path.join', (['root_dir', '"""5_1.png"""'], {}), "(root_dir, '5_1.png')\n", (4489, 4510), False, 'import os\n'), ((3863, 3883), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (3873, 3883), False, 'import cv2\n'), ((3401, 3411), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3407, 3411), True, 
'import numpy as np\n'), ((3539, 3549), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3545, 3549), True, 'import numpy as np\n'), ((3500, 3510), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3506, 3510), True, 'import numpy as np\n'), ((3517, 3527), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3523, 3527), True, 'import numpy as np\n')]
|
from cosymlib.shape import maps
import numpy as np
import sys
def plot_minimum_distortion_path_shape(shape_label1, shape_label2, num_points=20, output=sys.stdout, show_plot=True):
import matplotlib.pyplot as plt
path = get_shape_path(shape_label1, shape_label2, num_points)
shape_map_txt = " {:6} {:6}\n".format(shape_label1, shape_label2)
for idx, value in enumerate(path[0]):
shape_map_txt += '{:6.3f}, {:6.3f}'.format(path[0][idx], path[1][idx])
shape_map_txt += '\n'
print(shape_map_txt)
if show_plot:
plt.plot(path[0], path[1], 'k', linewidth=2.0)
plt.xlabel(shape_label1)
plt.ylabel(shape_label2)
plt.show()
def get_shape_path(shape_label1, shape_label2, num_points):
return maps.get_shape_map(shape_label1, shape_label2, num_points)
def plot_molecular_orbital_diagram(molecule, wfnsym, mo_range=None):
import matplotlib.pyplot as plt
labels = wfnsym.IRLab
if mo_range is not None:
ird_a_max = [np.argmax(ird_a_orb) for ird_a_orb in wfnsym.mo_IRd_a][mo_range[0]:mo_range[1]]
energies = molecule.electronic_structure.alpha_energies[mo_range[0]:mo_range[1]]
else:
ird_a_max = [np.argmax(ird_a_orb) for ird_a_orb in wfnsym.mo_IRd_a]
energies = molecule.electronic_structure.alpha_energies
ax1 = plt.axes()
ax1.axes.get_xaxis().set_visible(False) # Hide x axis
# ax1.axes.get_yaxis().set_visible(True)
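    # group consecutive orbital energies that differ by less than 1e-3 into degenerate sets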
degeneracy = [[energies[0]]]
for energy in energies[1:]:
if abs(energy - degeneracy[-1][-1]) < 1e-3:
degeneracy[-1].append(energy)
else:
degeneracy.append([energy])
max_value = 5e-3
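    # spread the members of each degenerate set horizontally (within +/- max_value) so their markers do not overlap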
x_center = []
for ix in degeneracy:
if len(ix) == 1:
x_center.append([0])
else:
x_center.append(np.linspace(-max_value, max_value, len(ix)))
x_center = [y for x in x_center for y in x]
plt.scatter(x_center, energies, s=500, marker="_", linewidth=3)
for i in range(len(energies)):
plt.text(-max_value * 2, energies[i], labels[ird_a_max[i]])
plt.show()
def swap_vectors(v1, v2, position):
vector1 = v1.get_copy()
vector2 = v2.get_copy()
for i in range(len(v1)):
if i >= position:
vector1[i] = v2[i]
vector2[i] = v1[i]
return vector1, vector2
def plot_symmetry_energy_evolution(molecules, wfnsym, mo_range=None):
import matplotlib.pyplot as plt
energies = []
ird_a_max = []
for idm, molecule in enumerate(molecules):
labels = wfnsym[idm].IRLab
if mo_range is not None:
ird_a_max.append(np.array([np.argmax(ird_a_orb) for ird_a_orb in wfnsym[idm].mo_IRd_a]
[mo_range[0]:mo_range[1]]))
energies.append(molecule.electronic_structure.alpha_energies[mo_range[0]:mo_range[1]])
else:
ird_a_max.append(np.array([np.argmax(ird_a_orb) for ird_a_orb in wfnsym[idm].mo_IRd_a]))
energies.append(molecule.electronic_structure.alpha_energies)
energies_x_orbital = np.array(energies).T
ird_a_x_orbital = np.array(ird_a_max).T
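    # reorder the orbital traces so each row keeps a consistent irreducible-representation label
    # along the scan: when the label changes at some point, the tail of the trace is swapped with
    # a later orbital that continues the old label (the energies are swapped in the same way)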
for i in range(len(ird_a_x_orbital)):
for j in range(len(ird_a_x_orbital[i])):
if j == 0:
old_ird = ird_a_x_orbital[i][0]
else:
if old_ird != ird_a_x_orbital[i][j]:
for k in range(len(ird_a_x_orbital) - i):
if old_ird == ird_a_x_orbital[k + i][j]:
ird_a_x_orbital[i], ird_a_x_orbital[k + i] = swap_vectors(ird_a_x_orbital[i],
ird_a_x_orbital[k + i], j)
energies_x_orbital[i], energies_x_orbital[k + i] = swap_vectors(energies_x_orbital[i],
energies_x_orbital[k + i],
j)
break
old_ird = ird_a_x_orbital[i][j]
for ide, energy in enumerate(energies_x_orbital):
x = np.arange(len(energy))
plt.plot(x, energy, marker='_')
for i in range(len(energy)):
plt.text(x[i], energy[i] + abs(energy[i])*0.001, labels[ird_a_x_orbital[ide][i]])
plt.show()
|
[
"cosymlib.shape.maps.get_shape_map",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.argmax",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.text",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((764, 822), 'cosymlib.shape.maps.get_shape_map', 'maps.get_shape_map', (['shape_label1', 'shape_label2', 'num_points'], {}), '(shape_label1, shape_label2, num_points)\n', (782, 822), False, 'from cosymlib.shape import maps\n'), ((1337, 1347), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1345, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1930, 1993), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_center', 'energies'], {'s': '(500)', 'marker': '"""_"""', 'linewidth': '(3)'}), "(x_center, energies, s=500, marker='_', linewidth=3)\n", (1941, 1993), True, 'import matplotlib.pyplot as plt\n'), ((2102, 2112), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2110, 2112), True, 'import matplotlib.pyplot as plt\n'), ((4410, 4420), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4418, 4420), True, 'import matplotlib.pyplot as plt\n'), ((559, 605), 'matplotlib.pyplot.plot', 'plt.plot', (['path[0]', 'path[1]', '"""k"""'], {'linewidth': '(2.0)'}), "(path[0], path[1], 'k', linewidth=2.0)\n", (567, 605), True, 'import matplotlib.pyplot as plt\n'), ((614, 638), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['shape_label1'], {}), '(shape_label1)\n', (624, 638), True, 'import matplotlib.pyplot as plt\n'), ((647, 671), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['shape_label2'], {}), '(shape_label2)\n', (657, 671), True, 'import matplotlib.pyplot as plt\n'), ((680, 690), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (688, 690), True, 'import matplotlib.pyplot as plt\n'), ((2037, 2096), 'matplotlib.pyplot.text', 'plt.text', (['(-max_value * 2)', 'energies[i]', 'labels[ird_a_max[i]]'], {}), '(-max_value * 2, energies[i], labels[ird_a_max[i]])\n', (2045, 2096), True, 'import matplotlib.pyplot as plt\n'), ((3092, 3110), 'numpy.array', 'np.array', (['energies'], {}), '(energies)\n', (3100, 3110), True, 'import numpy as np\n'), ((3135, 3154), 'numpy.array', 'np.array', (['ird_a_max'], {}), '(ird_a_max)\n', (3143, 3154), True, 'import numpy as np\n'), ((4242, 4273), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'energy'], {'marker': '"""_"""'}), "(x, energy, marker='_')\n", (4250, 4273), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1227), 'numpy.argmax', 'np.argmax', (['ird_a_orb'], {}), '(ird_a_orb)\n', (1216, 1227), True, 'import numpy as np\n'), ((1007, 1027), 'numpy.argmax', 'np.argmax', (['ird_a_orb'], {}), '(ird_a_orb)\n', (1016, 1027), True, 'import numpy as np\n'), ((2930, 2950), 'numpy.argmax', 'np.argmax', (['ird_a_orb'], {}), '(ird_a_orb)\n', (2939, 2950), True, 'import numpy as np\n'), ((2652, 2672), 'numpy.argmax', 'np.argmax', (['ird_a_orb'], {}), '(ird_a_orb)\n', (2661, 2672), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 17:38:37 2018
@author: simao
"""
import numpy as np
from scipy import stats
def onehotencoder(tind, *args):
if len(args) == 0:
maxclasses = max(tind)+1
elif len(args) == 1:
maxclasses = args[0]
else:
raise NotImplementedError
t = np.zeros((tind.shape[0], maxclasses))
t[np.arange(tind.shape[0]),tind.astype(np.int).reshape((-1,))] = 1
return t
def onehotnoise(tind, maxclasses, maxprob=0.5):
tind = tind.astype('int')
t = np.zeros((tind.shape[0], maxclasses))
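    # every class first receives an equal share of the leftover (1 - maxprob) probability mass;
    # the true class is then overwritten with maxprob below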
t = t + (1 - maxprob) / (maxclasses - 1)
t[np.arange(tind.shape[0]), tind.reshape((-1,))] = maxprob
return t
def label_noise(t, pmin=0.8, pmax=1.0):
j = np.argmax(t, 1)
n = t.shape[0]
phigh = np.random.uniform(pmin, pmax, (n,))
plow = (1 - phigh) / (t.shape[1] - 1)
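    # per sample: the original argmax class gets a random peak probability in [pmin, pmax],
    # and the remaining probability is split evenly over the other classes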
for i in range(n):
t[i] = plow[i]
t[i,j[i]] = phigh[i]
return t
def targetmode(tar_sequence):
idx = stats.mode(tar_sequence)[0][0]
return np.tile(idx, len(tar_sequence))
|
[
"numpy.random.uniform",
"scipy.stats.mode",
"numpy.argmax",
"numpy.zeros",
"numpy.arange"
] |
[((346, 383), 'numpy.zeros', 'np.zeros', (['(tind.shape[0], maxclasses)'], {}), '((tind.shape[0], maxclasses))\n', (354, 383), True, 'import numpy as np\n'), ((555, 592), 'numpy.zeros', 'np.zeros', (['(tind.shape[0], maxclasses)'], {}), '((tind.shape[0], maxclasses))\n', (563, 592), True, 'import numpy as np\n'), ((763, 778), 'numpy.argmax', 'np.argmax', (['t', '(1)'], {}), '(t, 1)\n', (772, 778), True, 'import numpy as np\n'), ((810, 845), 'numpy.random.uniform', 'np.random.uniform', (['pmin', 'pmax', '(n,)'], {}), '(pmin, pmax, (n,))\n', (827, 845), True, 'import numpy as np\n'), ((390, 414), 'numpy.arange', 'np.arange', (['tind.shape[0]'], {}), '(tind.shape[0])\n', (399, 414), True, 'import numpy as np\n'), ((644, 668), 'numpy.arange', 'np.arange', (['tind.shape[0]'], {}), '(tind.shape[0])\n', (653, 668), True, 'import numpy as np\n'), ((1018, 1042), 'scipy.stats.mode', 'stats.mode', (['tar_sequence'], {}), '(tar_sequence)\n', (1028, 1042), False, 'from scipy import stats\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy.optimize import curve_fit
import matplotlib.colors as mcolors
#Write with LaTeX
rc('text', usetex=True)
rc('font', family='serif')
def func(x, a, b):
return (a * x) + b
# Data
B1 = np.array([9.38, 12.46, 15.57])
dB1 = np.array([0.04, 0.04, 0.04])
r1 = np.array([0.217, 0.28, 0.38])
dr1 = np.array([0.024, 0.04, 0.07])
B2 = np.array([9.38, 12.46, 15.57])
dB2 = np.array([0.04, 0.04, 0.04])
r2 = np.array([0.2, 0.2500, 0.33])
dr2 = np.array([0.02, 0.03, 0.06])
# Fitting
x = np.linspace(0.15, 0.4, 5)
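# straight-line fits B = a*x + b, where x is 1/r (see the axis labels);
# perr1/perr2 = sqrt(diag(pcov)) give the 1-sigma uncertainties of the fitted parameters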
popt1, pcov1 = curve_fit(func, r1, B1, sigma=1./(dB1*dB1))
perr1 = np.sqrt(np.diag(pcov1))
popt2, pcov2 = curve_fit(func, r2, B2, sigma=1./(dB2*dB2))
perr2 = np.sqrt(np.diag(pcov2))
# Plot
fig, ax = plt.subplots(1, 1)
# B1 = B1(1/r1)
ax.errorbar(r1, B1, xerr = dr1, yerr = dB1, capsize=3, color='black', elinewidth=1, markeredgewidth=1, linestyle='None', marker='o', label='Calculated \n Values of $B_1$')
ax.plot(x, func(x, *popt1), color='orange', label='$B1 = B1(1/r_1)$', linewidth=1.5)
# B2 = B2(1/r2)
ax.errorbar(r2, B2, xerr = dr2, yerr = dB2, capsize=3, color='black', elinewidth=1, markeredgewidth=1, linestyle='None', marker='s', label='Calculated \n Values of $B_2$')
ax.plot(x, func(x, *popt2), color='royalblue', label='$B2 = B2(1/r_2)$', linewidth=1.5)
# Figure Specifications
ax.set_ylabel('$B$ $(\mathrm{10^{-4}\,\mathrm{T}})$')
ax.set_xlabel('$1/r$ $(\mathrm{1/\mathrm{cm}})$')
ax.legend(loc = 'upper left', prop={'size': 11})
# Show the major grid lines with dark grey lines
ax.grid(b=True, which='major', color='#666666', linestyle='--')
# Show the minor grid lines
ax.minorticks_on()
ax.grid(b=True, which='minor', color='#999999', linestyle='--', alpha=0.2)
# fix quality
fig.tight_layout()
plt.show()
# Print lines' slopes and constant coefficients
print(f"\n\n a1 = {'%0.5f'%popt1[0]} ± {'%0.5f'%perr1[0]}", f",b1 = {'%0.5f'%popt1[1]} ± {'%0.5f'%perr1[1]}")
print(f"\n\n a2 = {'%0.5f'%popt2[0]} ± {'%0.5f'%perr2[0]}", f",b2 = {'%0.5f'%popt2[1]} ± {'%0.5f'%perr2[1]}")
|
[
"matplotlib.rc",
"matplotlib.pyplot.show",
"scipy.optimize.curve_fit",
"numpy.array",
"numpy.linspace",
"numpy.diag",
"matplotlib.pyplot.subplots"
] |
[((169, 192), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (171, 192), False, 'from matplotlib import rc\n'), ((193, 219), 'matplotlib.rc', 'rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (195, 219), False, 'from matplotlib import rc\n'), ((276, 306), 'numpy.array', 'np.array', (['[9.38, 12.46, 15.57]'], {}), '([9.38, 12.46, 15.57])\n', (284, 306), True, 'import numpy as np\n'), ((313, 341), 'numpy.array', 'np.array', (['[0.04, 0.04, 0.04]'], {}), '([0.04, 0.04, 0.04])\n', (321, 341), True, 'import numpy as np\n'), ((347, 376), 'numpy.array', 'np.array', (['[0.217, 0.28, 0.38]'], {}), '([0.217, 0.28, 0.38])\n', (355, 376), True, 'import numpy as np\n'), ((383, 412), 'numpy.array', 'np.array', (['[0.024, 0.04, 0.07]'], {}), '([0.024, 0.04, 0.07])\n', (391, 412), True, 'import numpy as np\n'), ((419, 449), 'numpy.array', 'np.array', (['[9.38, 12.46, 15.57]'], {}), '([9.38, 12.46, 15.57])\n', (427, 449), True, 'import numpy as np\n'), ((456, 484), 'numpy.array', 'np.array', (['[0.04, 0.04, 0.04]'], {}), '([0.04, 0.04, 0.04])\n', (464, 484), True, 'import numpy as np\n'), ((490, 517), 'numpy.array', 'np.array', (['[0.2, 0.25, 0.33]'], {}), '([0.2, 0.25, 0.33])\n', (498, 517), True, 'import numpy as np\n'), ((526, 554), 'numpy.array', 'np.array', (['[0.02, 0.03, 0.06]'], {}), '([0.02, 0.03, 0.06])\n', (534, 554), True, 'import numpy as np\n'), ((570, 595), 'numpy.linspace', 'np.linspace', (['(0.15)', '(0.4)', '(5)'], {}), '(0.15, 0.4, 5)\n', (581, 595), True, 'import numpy as np\n'), ((612, 660), 'scipy.optimize.curve_fit', 'curve_fit', (['func', 'r1', 'B1'], {'sigma': '(1.0 / (dB1 * dB1))'}), '(func, r1, B1, sigma=1.0 / (dB1 * dB1))\n', (621, 660), False, 'from scipy.optimize import curve_fit\n'), ((703, 751), 'scipy.optimize.curve_fit', 'curve_fit', (['func', 'r2', 'B2'], {'sigma': '(1.0 / (dB2 * dB2))'}), '(func, r2, B2, sigma=1.0 / (dB2 * dB2))\n', (712, 751), False, 'from scipy.optimize import curve_fit\n'), ((797, 815), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (809, 815), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1828), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1826, 1828), True, 'import matplotlib.pyplot as plt\n'), ((672, 686), 'numpy.diag', 'np.diag', (['pcov1'], {}), '(pcov1)\n', (679, 686), True, 'import numpy as np\n'), ((763, 777), 'numpy.diag', 'np.diag', (['pcov2'], {}), '(pcov2)\n', (770, 777), True, 'import numpy as np\n')]
|
import os
import numpy as np
import logging
from ..base import float_, int_
from .util import dataset_home, download, checksum, archive_extract, checkpoint
log = logging.getLogger(__name__)
_URL = 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz'
_SHA1 = 'b22ebbd7f3c4384ebc9ba3152939186d3750b902'
class STL10(object):
'''
The STL-10 dataset [1]
http://cs.stanford.edu/~acoates/stl10
References:
[1]: An Analysis of Single Layer Networks in Unsupervised Feature Learning,
<NAME>, <NAME>, <NAME>, AISTATS, 2011.
'''
def __init__(self):
self.name = 'stl10'
self.n_classes = 10
self.n_train = 5000
self.n_test = 8000
self.n_unlabeled = 100000
self.img_shape = (3, 96, 96)
self.data_dir = os.path.join(dataset_home, self.name)
self._npz_path = os.path.join(self.data_dir, 'stl10.npz')
self._install()
self._arrays, self.folds = self._load()
def arrays(self, dp_dtypes=False):
x_train, y_train, x_test, y_test, x_unlabeled = self._arrays
if dp_dtypes:
x_train = x_train.astype(float_)
y_train = y_train.astype(int_)
x_test = x_test.astype(float_)
y_test = y_test.astype(int_)
x_unlabeled = x_unlabeled.astype(float_)
return x_train, y_train, x_test, y_test, x_unlabeled
def _install(self):
checkpoint_file = os.path.join(self.data_dir, '__install_check')
with checkpoint(checkpoint_file) as exists:
if exists:
return
log.info('Downloading %s', _URL)
filepath = download(_URL, self.data_dir)
if _SHA1 != checksum(filepath, method='sha1'):
raise RuntimeError('Checksum mismatch for %s.' % _URL)
log.info('Unpacking %s', filepath)
archive_extract(filepath, self.data_dir)
unpack_dir = os.path.join(self.data_dir, 'stl10_binary')
log.info('Converting data to Numpy arrays')
filenames = ['train_X.bin', 'train_y.bin', 'test_X.bin',
'test_y.bin', 'unlabeled_X.bin']
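            # the *_X.bin files hold raw uint8 pixels that are reshaped to (N,) + img_shape below;
            # the *_y.bin files hold one uint8 label per image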
def bin2numpy(filepath):
with open(filepath, 'rb') as f:
arr = np.fromfile(f, dtype=np.uint8)
if '_X' in filepath:
arr = np.reshape(arr, (-1,) + self.img_shape)
return arr
filepaths = [os.path.join(unpack_dir, f) for f in filenames]
x_train, y_train, x_test, y_test, x_unlabeled = map(bin2numpy,
filepaths)
folds = []
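            # each line of fold_indices.txt lists the training-sample indices of one predefined fold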
with open(os.path.join(unpack_dir, 'fold_indices.txt'), 'r') as f:
for line in f:
folds.append([int(s) for s in line.strip().split(' ')])
folds = np.array(folds)
with open(self._npz_path, 'wb') as f:
np.savez(f, x_train=x_train, y_train=y_train, x_test=x_test,
y_test=y_test, x_unlabeled=x_unlabeled, folds=folds)
def _load(self):
with open(self._npz_path, 'rb') as f:
dic = np.load(f)
return ((dic['x_train'], dic['y_train'], dic['x_test'],
dic['y_test'], dic['x_unlabeled']), dic['folds'])
|
[
"numpy.load",
"numpy.fromfile",
"numpy.array",
"numpy.reshape",
"numpy.savez",
"os.path.join",
"logging.getLogger"
] |
[((165, 192), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (182, 192), False, 'import logging\n'), ((796, 833), 'os.path.join', 'os.path.join', (['dataset_home', 'self.name'], {}), '(dataset_home, self.name)\n', (808, 833), False, 'import os\n'), ((859, 899), 'os.path.join', 'os.path.join', (['self.data_dir', '"""stl10.npz"""'], {}), "(self.data_dir, 'stl10.npz')\n", (871, 899), False, 'import os\n'), ((1440, 1486), 'os.path.join', 'os.path.join', (['self.data_dir', '"""__install_check"""'], {}), "(self.data_dir, '__install_check')\n", (1452, 1486), False, 'import os\n'), ((1938, 1981), 'os.path.join', 'os.path.join', (['self.data_dir', '"""stl10_binary"""'], {}), "(self.data_dir, 'stl10_binary')\n", (1950, 1981), False, 'import os\n'), ((2902, 2917), 'numpy.array', 'np.array', (['folds'], {}), '(folds)\n', (2910, 2917), True, 'import numpy as np\n'), ((3209, 3219), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (3216, 3219), True, 'import numpy as np\n'), ((2475, 2502), 'os.path.join', 'os.path.join', (['unpack_dir', 'f'], {}), '(unpack_dir, f)\n', (2487, 2502), False, 'import os\n'), ((2984, 3101), 'numpy.savez', 'np.savez', (['f'], {'x_train': 'x_train', 'y_train': 'y_train', 'x_test': 'x_test', 'y_test': 'y_test', 'x_unlabeled': 'x_unlabeled', 'folds': 'folds'}), '(f, x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test,\n x_unlabeled=x_unlabeled, folds=folds)\n', (2992, 3101), True, 'import numpy as np\n'), ((2277, 2307), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.uint8'}), '(f, dtype=np.uint8)\n', (2288, 2307), True, 'import numpy as np\n'), ((2718, 2762), 'os.path.join', 'os.path.join', (['unpack_dir', '"""fold_indices.txt"""'], {}), "(unpack_dir, 'fold_indices.txt')\n", (2730, 2762), False, 'import os\n'), ((2379, 2418), 'numpy.reshape', 'np.reshape', (['arr', '((-1,) + self.img_shape)'], {}), '(arr, (-1,) + self.img_shape)\n', (2389, 2418), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 21:46:14 2021
@author: Raj
"""
import sidpy as sid
from sidpy.sid import Reader
from sidpy.sid import Dimension
import os
import numpy as np
import h5py
from pyNSID.io.hdf_io import write_nsid_dataset
from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs
class PiFMTranslator(Reader):
"""
Class that writes images, spectrograms, point spectra and associated ancillary data sets to h5 file in pyUSID data
structure.
"""
def read(self ):
"""
Parameters
----------
file_path : String / unicode
            Absolute path of the ANFATEC parameter file (.txt)
verbose : Boolean (Optional)
Whether or not to show print statements for debugging
Returns
-------
sidpy.Dataset : List of sidpy.Dataset objects.
Image layers are saved as separate Dataset objects
"""
self.get_path()
self.read_anfatec_params()
self.read_file_desc()
self.read_spectrograms()
self.read_imgs()
self.read_spectra()
self.datasets = self.make_datasets()
return self.datasets
def create_h5(self, append_path='', overwrite=False):
"""
Writes a new HDF5 file with the translated data
append_path : string (Optional)
h5_file to add these data to, must be a path to the h5_file on disk
overwrite : bool (optional, default=False)
If True, will overwrite an existing .h5 file of the same name
"""
self.create_hdf5_file(append_path, overwrite)
self.write_datasets_hdf5()
return
def get_path(self):
"""writes full path, directory, and file name as attributes to class"""
self.path = self._input_file_path
full_path = os.path.realpath(self.path)
directory = os.path.dirname(full_path)
# file name
basename = os.path.basename(self.path)
self.full_path = full_path
self.directory = directory
self.basename = basename
def read_anfatec_params(self):
"""reads the scan parameters and writes them to a dictionary"""
params_dictionary = {}
params = True
with open(self.path, 'r', encoding="ISO-8859-1") as f:
for line in f:
if params:
sline = [val.strip() for val in line.split(':')]
if len(sline) == 2 and sline[0][0] != ';':
params_dictionary[sline[0]] = sline[1]
#in ANFATEC parameter files, all attributes are written before file references.
if sline[0].startswith('FileDesc'):
params = False
f.close()
self.params_dictionary = params_dictionary
self.x_len, self.y_len = int(params_dictionary['xPixel']), int(params_dictionary['yPixel'])
def read_file_desc(self):
"""reads spectrogram, image, and spectra file descriptions and stores all to dictionary where
the key:value pairs are filename:[all descriptors]"""
spectrogram_desc = {}
img_desc = {}
spectrum_desc = {}
pspectrum_desc = {}
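        # each data file is announced by its own description block in the parameter file; the block
        # type decides whether it is an image (FileDescBegin), a spectrogram (FileDesc2Begin) or a
        # point/power spectrum (AFMSpectrumDescBegin)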
with open(self.path,'r', encoding="ISO-8859-1") as f:
lines = f.readlines()
for index, line in enumerate(lines):
sline = [val.strip() for val in line.split(':')]
#if true, then file describes image.
if sline[0].startswith('FileDescBegin'):
no_descriptors = 5
file_desc = []
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#img_desc['filename'] = caption, scale, physical unit, offset
img_desc[file_desc[0]] = file_desc[1:]
#if true, file describes spectrogram (ie hyperspectral image)
if sline[0].startswith('FileDesc2Begin'):
no_descriptors = 10
file_desc = []
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#caption, bytes perpixel, scale, physical unit, offset, offset, datatype, bytes per reading
#filename wavelengths, phys units wavelengths.
spectrogram_desc[file_desc[0]] = file_desc[1:]
if sline[0].startswith('AFMSpectrumDescBegin'):
file_desc = []
line_desc = [val.strip() for val in lines[index+1].split(':')][1]
if 'powerspectrum' in line_desc:
no_descriptors = 2
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#file name, position x, position y
pspectrum_desc[file_desc[0]] = file_desc[1:]
else:
no_descriptors = 7
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#file name, position x, position y
spectrum_desc[file_desc[0]] = file_desc[1:]
f.close()
self.img_desc = img_desc
self.spectrogram_desc = spectrogram_desc
self.spectrum_desc = spectrum_desc
self.pspectrum_desc = pspectrum_desc
def read_spectrograms(self):
"""reads spectrograms, associated spectral values, and saves them in two dictionaries"""
spectrograms = {}
spectrogram_spec_vals = {}
for file_name, descriptors in self.spectrogram_desc.items():
spec_vals_i = np.loadtxt(os.path.join(self.directory, file_name.strip('.int') + 'Wavelengths.txt'))
#if true, data is acquired with polarizer, with an attenuation data column
if np.array(spec_vals_i).ndim == 2:
spectrogram_spec_vals[file_name] = spec_vals_i[:, 0]
attenuation = {}
attenuation[file_name] = spec_vals_i[:, 1]
self.attenuation = attenuation
else:
spectrogram_spec_vals[file_name] = spec_vals_i
#load and save spectrograms
spectrogram_i = np.fromfile(os.path.join(self.directory, file_name), dtype='i4')
spectrograms[file_name] = np.zeros((self.x_len, self.y_len, len(spec_vals_i)))
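            # the flat binary stream is split into y scan lines, then into x point spectra,
            # each scaled by the channel's scale factor (descriptors[2])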
for y, line in enumerate(np.split(spectrogram_i, self.y_len)):
for x, pt_spectrum in enumerate(np.split(line, self.x_len)):
spectrograms[file_name][x, y, :] = pt_spectrum * float(descriptors[2])
self.spectrograms = spectrograms
self.spectrogram_spec_vals = spectrogram_spec_vals
def read_imgs(self):
"""reads images and saves to dictionary"""
imgs = {}
for file_name, descriptors in self.img_desc.items():
img_i = np.fromfile(os.path.join(self.directory, file_name), dtype='i4')
imgs[file_name] = np.zeros((self.x_len, self.y_len))
for y, line in enumerate(np.split(img_i, self.y_len)):
for x, pixel in enumerate(np.split(line, self.x_len)):
imgs[file_name][x, y] = pixel * float(descriptors[1])
self.imgs = imgs
def read_spectra(self):
"""reads all point spectra and saves to dictionary"""
spectra = {}
spectra_spec_vals = {}
spectra_x_y_dim_name = {}
for file_name, descriptors in self.spectrum_desc.items():
spectrum_f = np.loadtxt(os.path.join(self.directory, file_name), skiprows=1)
spectra_spec_vals[file_name] = spectrum_f[:, 0]
spectra[file_name] = spectrum_f[:,1]
with open(os.path.join(self.directory, file_name)) as f:
spectra_x_y_dim_name[file_name] = f.readline().strip('\n').split('\t')
for file_name, descriptors in self.pspectrum_desc.items():
spectrum_f = np.loadtxt(os.path.join(self.directory, file_name), skiprows=1)
spectra_spec_vals[file_name] = spectrum_f[:, 0]
spectra[file_name] = spectrum_f[:,1]
with open(os.path.join(self.directory, file_name)) as f:
spectra_x_y_dim_name[file_name] = f.readline().strip('\n').split('\t')
self.spectra = spectra
self.spectra_spec_vals = spectra_spec_vals
self.spectra_x_y_dim_name = spectra_x_y_dim_name
def make_datasets(self):
datasets = []
self.make_dimensions()
# Spectrograms
if bool(self.spectrogram_desc):
for spectrogram_f, descriptors in self.spectrogram_desc.items():
# channel_i = create_indexed_group(self.h5_meas_grp, 'Channel_')
spec_vals_i = self.spectrogram_spec_vals[spectrogram_f]
spectrogram_data = self.spectrograms[spectrogram_f]
dset = sid.Dataset.from_array(spectrogram_data, name=descriptors[0])
dset.data_type = 'Spectrogram'
dset.set_dimension(0, self.dim0)
                dset.set_dimension(1, self.dim1)
# spectrogram_spec_dims = Dimension('Wavelength', descriptors[8], spec_vals_i)
spectrogram_dims = Dimension(values=spec_vals_i, name='Spectrogram',
units=descriptors[3], quantity='Wavelength', type='spectral' )
dset.set_dimension(2, spectrogram_dims)
dset.metadata = {'Caption': descriptors[0],
'Bytes_Per_Pixel': descriptors[1],
'Scale': descriptors[2],
'Physical_Units': descriptors[3],
'Offset': descriptors[4],
'Datatype': descriptors[5],
'Bytes_Per_Reading': descriptors[6],
'Wavelength_File': descriptors[7],
'Wavelength_Units': descriptors[8]}
datasets.append(dset)
# Images
if bool(self.img_desc):
for img_f, descriptors in self.img_desc.items():
img_data = self.imgs[img_f]
dset = sid.Dataset.from_array(img_data, name = descriptors[0])
dset.data_type = 'Image'
dset.set_dimension(0, self.dim0)
dset.set_dimension(1, self.dim1)
dset.units = descriptors[2]
dset.quantity = descriptors[0]
dset.metadata = {'Caption': descriptors[0],
'Scale': descriptors[1],
'Physical_Units': descriptors[2],
'Offset': descriptors[3]}
datasets.append(dset)
# Spectra
if bool(self.spectrum_desc):
for spec_f, descriptors in self.spectrum_desc.items():
#create new measurement group for each spectrum
x_name = self.spectra_x_y_dim_name[spec_f][0].split(' ')[0]
x_unit = self.spectra_x_y_dim_name[spec_f][0].split(' ')[1]
y_name = self.spectra_x_y_dim_name[spec_f][1].split(' ')[0]
y_unit = self.spectra_x_y_dim_name[spec_f][1].split(' ')[1]
dset = sid.Dataset.from_array(self.spectra[spec_f], name = 'Raw_Spectrum')
dset.set_dimension(0, Dimension(np.array([float(descriptors[1])]),
name='X',units=self.params_dictionary['XPhysUnit'].replace('\xb5','u'),
quantity = 'X_position'))
dset.set_dimension(1, Dimension(np.array([float(descriptors[2])]),
name='Y',units=self.params_dictionary['YPhysUnit'].replace('\xb5','u'),
quantity = 'Y_position'))
dset.data_type = 'Spectrum'
dset.units = y_unit
dset.quantity = y_name
spectra_dims = Dimension(values=self.spectra_spec_vals[spec_f], name='Wavelength',
units=x_unit, quantity=x_name, type='spectral' )
dset.set_dimension(2, spectra_dims)
dset.metadata = {'XLoc': descriptors[1], 'YLoc': descriptors[2]}
datasets.append(dset)
# Power Spectra
if bool(self.pspectrum_desc):
for spec_f, descriptors in self.pspectrum_desc.items():
#create new measurement group for each spectrum
x_name = self.spectra_x_y_dim_name[spec_f][0].split(' ')[0]
x_unit = self.spectra_x_y_dim_name[spec_f][0].split(' ')[1]
y_name = self.spectra_x_y_dim_name[spec_f][1].split(' ')[0]
y_unit = self.spectra_x_y_dim_name[spec_f][1].split(' ')[1]
dset = sid.Dataset.from_array(self.spectra[spec_f], name = 'Power_Spectrum')
dset.set_dimension(0, Dimension(np.array([0]),
name='X',units=self.params_dictionary['XPhysUnit'].replace('\xb5','u'),
quantity = 'X_position'))
dset.set_dimension(1, Dimension(np.array([0]),
name='Y',units=self.params_dictionary['YPhysUnit'].replace('\xb5','u'),
quantity = 'Y_position'))
dset.data_type = 'Spectrum'
dset.units = y_unit
dset.quantity = y_name
spectra_dims = Dimension(values=self.spectra_spec_vals[spec_f], name='Wavelength',
units=x_unit, quantity=x_name, type='spectral' )
dset.set_dimension(2, spectra_dims)
dset.metadata = {'XLoc': 0, 'YLoc': 0}
datasets.append(dset)
return datasets
def make_dimensions(self):
x_range = float(self.params_dictionary['XScanRange'])
y_range = float(self.params_dictionary['YScanRange'])
x_center = float(self.params_dictionary['xCenter'])
y_center = float(self.params_dictionary['yCenter'])
x_start = x_center-(x_range/2); x_end = x_center+(x_range/2)
y_start = y_center-(y_range/2); y_end = y_center+(y_range/2)
dx = x_range/self.x_len
dy = y_range/self.y_len
#assumes y scan direction:down; scan angle: 0 deg
y_linspace = -np.arange(y_start, y_end, step=dy)
x_linspace = np.arange(x_start, x_end, step=dx)
qtyx = self.params_dictionary['XPhysUnit'].replace('\xb5', 'u')
qtyy = self.params_dictionary['YPhysUnit'].replace('\xb5', 'u')
self.dim0 = Dimension(x_linspace, name = 'x', units = qtyx,
dimension_type = 'spatial', quantity='Length')
self.dim1 = Dimension(y_linspace, name = 'y', units = qtyy,
dimension_type = 'spatial', quantity='Length')
# self.pos_ind, self.pos_val, self.pos_dims = pos_ind, pos_val, pos_dims
return
# HDF5 creation
def create_hdf5_file(self, append_path='', overwrite=False):
""" Sets up the HDF5 file for writing
append_path : string (Optional)
h5_file to add these data to, must be a path to the h5_file on disk
overwrite : bool (optional, default=False)
If True, will overwrite an existing .h5 file of the same name
"""
if not append_path:
h5_path = os.path.join(self.directory, self.basename.replace('.txt', '.h5'))
if os.path.exists(h5_path):
if not overwrite:
                    raise FileExistsError('This file already exists. Set attribute overwrite to True')
else:
print('Overwriting file', h5_path)
#os.remove(h5_path)
self.h5_f = h5py.File(h5_path, mode='w')
else:
if not os.path.exists(append_path):
raise Exception('File does not exist. Check pathname.')
self.h5_f = h5py.File(append_path, mode='r+')
self.h5_img_grp = create_indexed_group(self.h5_f, "Images")
self.h5_spectra_grp = create_indexed_group(self.h5_f, "Spectra")
self.h5_spectrogram_grp = create_indexed_group(self.h5_f, "Spectrogram")
write_simple_attrs(self.h5_img_grp, self.params_dictionary)
write_simple_attrs(self.h5_spectra_grp, self.params_dictionary)
write_simple_attrs(self.h5_spectrogram_grp, self.params_dictionary)
return
def write_datasets_hdf5(self):
""" Writes the datasets as pyNSID datasets to the HDF5 file"""
for dset in self.datasets:
if 'IMAGE' in dset.data_type.name:
write_nsid_dataset(dset, self.h5_img_grp)
elif 'SPECTRUM' in dset.data_type.name:
write_nsid_dataset(dset, self.h5_spectra_grp)
else:
write_nsid_dataset(dset, self.h5_spectrogram_grp)
self.h5_f.file.close()
return
|
[
"h5py.File",
"pyNSID.io.hdf_io.write_simple_attrs",
"os.path.basename",
"pyNSID.io.hdf_io.write_nsid_dataset",
"os.path.realpath",
"os.path.dirname",
"numpy.zeros",
"pyNSID.io.hdf_io.create_indexed_group",
"os.path.exists",
"numpy.split",
"sidpy.Dataset.from_array",
"numpy.arange",
"numpy.array",
"os.path.join",
"sidpy.sid.Dimension"
] |
[((1882, 1909), 'os.path.realpath', 'os.path.realpath', (['self.path'], {}), '(self.path)\n', (1898, 1909), False, 'import os\n'), ((1930, 1956), 'os.path.dirname', 'os.path.dirname', (['full_path'], {}), '(full_path)\n', (1945, 1956), False, 'import os\n'), ((1996, 2023), 'os.path.basename', 'os.path.basename', (['self.path'], {}), '(self.path)\n', (2012, 2023), False, 'import os\n'), ((15564, 15598), 'numpy.arange', 'np.arange', (['x_start', 'x_end'], {'step': 'dx'}), '(x_start, x_end, step=dx)\n', (15573, 15598), True, 'import numpy as np\n'), ((15781, 15873), 'sidpy.sid.Dimension', 'Dimension', (['x_linspace'], {'name': '"""x"""', 'units': 'qtyx', 'dimension_type': '"""spatial"""', 'quantity': '"""Length"""'}), "(x_linspace, name='x', units=qtyx, dimension_type='spatial',\n quantity='Length')\n", (15790, 15873), False, 'from sidpy.sid import Dimension\n'), ((15927, 16019), 'sidpy.sid.Dimension', 'Dimension', (['y_linspace'], {'name': '"""y"""', 'units': 'qtyy', 'dimension_type': '"""spatial"""', 'quantity': '"""Length"""'}), "(y_linspace, name='y', units=qtyy, dimension_type='spatial',\n quantity='Length')\n", (15936, 16019), False, 'from sidpy.sid import Dimension\n'), ((17306, 17347), 'pyNSID.io.hdf_io.create_indexed_group', 'create_indexed_group', (['self.h5_f', '"""Images"""'], {}), "(self.h5_f, 'Images')\n", (17326, 17347), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((17378, 17420), 'pyNSID.io.hdf_io.create_indexed_group', 'create_indexed_group', (['self.h5_f', '"""Spectra"""'], {}), "(self.h5_f, 'Spectra')\n", (17398, 17420), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((17455, 17501), 'pyNSID.io.hdf_io.create_indexed_group', 'create_indexed_group', (['self.h5_f', '"""Spectrogram"""'], {}), "(self.h5_f, 'Spectrogram')\n", (17475, 17501), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((17519, 17578), 'pyNSID.io.hdf_io.write_simple_attrs', 'write_simple_attrs', (['self.h5_img_grp', 'self.params_dictionary'], {}), '(self.h5_img_grp, self.params_dictionary)\n', (17537, 17578), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((17587, 17650), 'pyNSID.io.hdf_io.write_simple_attrs', 'write_simple_attrs', (['self.h5_spectra_grp', 'self.params_dictionary'], {}), '(self.h5_spectra_grp, self.params_dictionary)\n', (17605, 17650), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((17659, 17726), 'pyNSID.io.hdf_io.write_simple_attrs', 'write_simple_attrs', (['self.h5_spectrogram_grp', 'self.params_dictionary'], {}), '(self.h5_spectrogram_grp, self.params_dictionary)\n', (17677, 17726), False, 'from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs\n'), ((7650, 7684), 'numpy.zeros', 'np.zeros', (['(self.x_len, self.y_len)'], {}), '((self.x_len, self.y_len))\n', (7658, 7684), True, 'import numpy as np\n'), ((15508, 15542), 'numpy.arange', 'np.arange', (['y_start', 'y_end'], {'step': 'dy'}), '(y_start, y_end, step=dy)\n', (15517, 15542), True, 'import numpy as np\n'), ((16732, 16755), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (16746, 16755), False, 'import os\n'), ((17057, 17085), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""w"""'}), "(h5_path, mode='w')\n", (17066, 17085), False, 'import h5py\n'), ((17245, 17278), 'h5py.File', 'h5py.File', (['append_path'], {'mode': '"""r+"""'}), "(append_path, mode='r+')\n", (17254, 17278), False, 'import h5py\n'), ((6867, 6906), 
'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (6879, 6906), False, 'import os\n'), ((7061, 7096), 'numpy.split', 'np.split', (['spectrogram_i', 'self.y_len'], {}), '(spectrogram_i, self.y_len)\n', (7069, 7096), True, 'import numpy as np\n'), ((7567, 7606), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (7579, 7606), False, 'import os\n'), ((7731, 7758), 'numpy.split', 'np.split', (['img_i', 'self.y_len'], {}), '(img_i, self.y_len)\n', (7739, 7758), True, 'import numpy as np\n'), ((8241, 8280), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (8253, 8280), False, 'import os\n'), ((8681, 8720), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (8693, 8720), False, 'import os\n'), ((9659, 9720), 'sidpy.Dataset.from_array', 'sid.Dataset.from_array', (['spectrogram_data'], {'name': 'descriptors[0]'}), '(spectrogram_data, name=descriptors[0])\n', (9681, 9720), True, 'import sidpy as sid\n'), ((9992, 10107), 'sidpy.sid.Dimension', 'Dimension', ([], {'values': 'spec_vals_i', 'name': '"""Spectrogram"""', 'units': 'descriptors[3]', 'quantity': '"""Wavelength"""', 'type': '"""spectral"""'}), "(values=spec_vals_i, name='Spectrogram', units=descriptors[3],\n quantity='Wavelength', type='spectral')\n", (10001, 10107), False, 'from sidpy.sid import Dimension\n'), ((11052, 11105), 'sidpy.Dataset.from_array', 'sid.Dataset.from_array', (['img_data'], {'name': 'descriptors[0]'}), '(img_data, name=descriptors[0])\n', (11074, 11105), True, 'import sidpy as sid\n'), ((12188, 12253), 'sidpy.Dataset.from_array', 'sid.Dataset.from_array', (['self.spectra[spec_f]'], {'name': '"""Raw_Spectrum"""'}), "(self.spectra[spec_f], name='Raw_Spectrum')\n", (12210, 12253), True, 'import sidpy as sid\n'), ((12980, 13100), 'sidpy.sid.Dimension', 'Dimension', ([], {'values': 'self.spectra_spec_vals[spec_f]', 'name': '"""Wavelength"""', 'units': 'x_unit', 'quantity': 'x_name', 'type': '"""spectral"""'}), "(values=self.spectra_spec_vals[spec_f], name='Wavelength', units=\n x_unit, quantity=x_name, type='spectral')\n", (12989, 13100), False, 'from sidpy.sid import Dimension\n'), ((13841, 13908), 'sidpy.Dataset.from_array', 'sid.Dataset.from_array', (['self.spectra[spec_f]'], {'name': '"""Power_Spectrum"""'}), "(self.spectra[spec_f], name='Power_Spectrum')\n", (13863, 13908), True, 'import sidpy as sid\n'), ((14595, 14715), 'sidpy.sid.Dimension', 'Dimension', ([], {'values': 'self.spectra_spec_vals[spec_f]', 'name': '"""Wavelength"""', 'units': 'x_unit', 'quantity': 'x_name', 'type': '"""spectral"""'}), "(values=self.spectra_spec_vals[spec_f], name='Wavelength', units=\n x_unit, quantity=x_name, type='spectral')\n", (14604, 14715), False, 'from sidpy.sid import Dimension\n'), ((17120, 17147), 'os.path.exists', 'os.path.exists', (['append_path'], {}), '(append_path)\n', (17134, 17147), False, 'import os\n'), ((17982, 18023), 'pyNSID.io.hdf_io.write_nsid_dataset', 'write_nsid_dataset', (['dset', 'self.h5_img_grp'], {}), '(dset, self.h5_img_grp)\n', (18000, 18023), False, 'from pyNSID.io.hdf_io import write_nsid_dataset\n'), ((6435, 6456), 'numpy.array', 'np.array', (['spec_vals_i'], {}), '(spec_vals_i)\n', (6443, 6456), True, 'import numpy as np\n'), ((7148, 7174), 'numpy.split', 'np.split', (['line', 'self.x_len'], {}), '(line, self.x_len)\n', (7156, 7174), True, 'import numpy as np\n'), ((7816, 7842), 
'numpy.split', 'np.split', (['line', 'self.x_len'], {}), '(line, self.x_len)\n', (7824, 7842), True, 'import numpy as np\n'), ((8434, 8473), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (8446, 8473), False, 'import os\n'), ((8874, 8913), 'os.path.join', 'os.path.join', (['self.directory', 'file_name'], {}), '(self.directory, file_name)\n', (8886, 8913), False, 'import os\n'), ((18110, 18155), 'pyNSID.io.hdf_io.write_nsid_dataset', 'write_nsid_dataset', (['dset', 'self.h5_spectra_grp'], {}), '(dset, self.h5_spectra_grp)\n', (18128, 18155), False, 'from pyNSID.io.hdf_io import write_nsid_dataset\n'), ((18225, 18274), 'pyNSID.io.hdf_io.write_nsid_dataset', 'write_nsid_dataset', (['dset', 'self.h5_spectrogram_grp'], {}), '(dset, self.h5_spectrogram_grp)\n', (18243, 18274), False, 'from pyNSID.io.hdf_io import write_nsid_dataset\n'), ((13960, 13973), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (13968, 13973), True, 'import numpy as np\n'), ((14235, 14248), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (14243, 14248), True, 'import numpy as np\n')]
|
import sys,math
import numpy as np
import scipy.sparse.linalg as slin
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
from svddenseblock import *
from mytools.ioutil import myreadfile
from os.path import expanduser
home = expanduser("~")
def loadtensor2matricization(tensorfile, sumout=[], mtype=coo_matrix,
weighted=True, dtype=int):
'sumout: marginized (sumout) the given ways'
matcols={}
rindexcols={}
xs, ys, data = [], [], []
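    # matricization: the first field indexes the rows; every distinct combination of the remaining
    # (non-summed-out) modes becomes one matrix column, with matcols mapping the joined key to a
    # column index and rindexcols holding the inverse map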
with myreadfile(tensorfile, 'rb') as f:
for line in f:
elems = line.strip().split(',')
elems = np.array(elems)
u = int(elems[0])
colidx = range(1,len(elems)-1) #remove sumout
colidx = set(colidx) - set(list(sumout))
colidx = sorted(list(colidx))
col=' '.join(elems[colidx])
if col not in matcols:
idx = len(matcols)
matcols[col] = idx
rindexcols[idx]=col
cid = matcols[col]
w = dtype(elems[-1])
xs.append(u)
ys.append(cid)
data.append(w)
nrow, ncol = max(xs)+1, max(ys)+1
sm = mtype( (data, (xs, ys)), shape=(nrow, ncol), dtype=dtype )
if weighted is False:
sm.data[0:] = dtype(1)
f.close()
return sm, rindexcols
def matricizeSVDdenseblock(sm, rindexcols, rbd='avg'):
A, tmpB = svddenseblock(sm, rbd=rbd)
rows = A.nonzero()[0]
cols = tmpB.nonzero()[0]
bcols = set()
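    # translate each dense-block column index back to the first remaining tensor mode, taken from
    # the leading field of the joined column name stored in rindexcols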
for col in cols:
'col name'
cnm = rindexcols[col]
cnm = cnm.strip().split(' ')
b = int(cnm[0])
bcols.add(b)
return set(rows), set(bcols)
if __name__=="__main__":
path = home+'/Data/BeerAdvocate/'
respath= path+'results/'
tsfile = path+'userbeerts.dict'
ratefile = path+'userbeerrate.dict'
tensorfile =respath+'userbeer.tensor'
sm, rindexcols = loadtensor2matricization(tensorfile,
sumout=[3],mtype=csr_matrix,
dtype=float,weighted=True)
A, B = matricizeSVDdenseblock(sm, rindexcols, rbd='avg')
|
[
"numpy.array",
"mytools.ioutil.myreadfile",
"os.path.expanduser"
] |
[((234, 249), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (244, 249), False, 'from os.path import expanduser\n'), ((499, 527), 'mytools.ioutil.myreadfile', 'myreadfile', (['tensorfile', '"""rb"""'], {}), "(tensorfile, 'rb')\n", (509, 527), False, 'from mytools.ioutil import myreadfile\n'), ((621, 636), 'numpy.array', 'np.array', (['elems'], {}), '(elems)\n', (629, 636), True, 'import numpy as np\n')]
|
import sys
import os
import numpy as np
from sklearn import metrics
from .model import SmileGAN
from .utils import highest_matching_clustering, consensus_clustering, parse_validation_data
from .clustering import Smile_GAN_train
__author__ = "<NAME>"
__copyright__ = "Copyright 2019-2020 The CBICA & SBIA Lab"
__credits__ = ["<NAME>"]
__license__ = "See LICENSE file"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def model_filtering(model_dirs, ncluster, data, covariate=None):
"""
	Function used to filter out models whose clustering results differ significantly from the others.
	This function deals with rare failing cases of Smile-GAN.
Args:
model_dirs: list, list of dirs of all saved models
ncluster: int, number of defined clusters
data, data_frame, dataframe with same format as training data. CN data must be exactly same as CN data in training dataframe while
PT data can be any samples in or out of the training set.
covariate, data_frame, dataframe with same format as training covariate. CN data must be exactly same as CN data in training covariate while
PT data can be any samples in or out of the training set.
Returns: list of index indicating outlier models
"""
_, validation_data = parse_validation_data(data, covariate)
all_prediction_labels = []
for models in model_dirs:
model = SmileGAN()
model.load(models)
all_prediction_labels.append(np.argmax(model.predict_cluster(validation_data), axis=1))
model_aris = [[] for _ in range(len(model_dirs))]
filtered_models = []
for i in range(len(model_dirs)):
for j in range(len(model_dirs)):
if i!=j:
model_aris[i].append(metrics.adjusted_rand_score(all_prediction_labels[i], all_prediction_labels[j]))
median_aris = np.median(model_aris, axis=1)
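	# a model is flagged as an outlier when its median ARI lies more than two standard
	# deviations below the mean of the other models' median ARIs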
for j in range(median_aris.shape[0]):
rest_aris = np.delete(median_aris,j)
if (median_aris[j]-np.mean(rest_aris))/np.std(rest_aris)<-2:
filtered_models.append(j)
return filtered_models
def calculate_ari(prediction_labels):
model_aris = []
for i in range(len(prediction_labels)):
for j in range(i+1,len(prediction_labels)):
model_aris.append(metrics.adjusted_rand_score(prediction_labels[i], prediction_labels[j]))
return np.mean(model_aris), np.std(model_aris)
def clustering_result(model_dirs, ncluster, consensus_type, data, covariate=None):
"""
Function used for derive clustering results from several saved models
Args:
model_dirs: list, list of dirs of all saved models
ncluster: int, number of defined clusters
consensus_type: string, the method used for deriving final clustering results with all models derived through CV
choose between 'highest_matching_clustering' and 'consensus_clustering'
data, data_frame, dataframe with same format as training data. CN data must be exactly same as CN data in training dataframe while
PT data can be any samples in or out of the training set.
covariate, data_frame, dataframe with same format as training covariate. CN data must be exactly same as CN data in training covariate while
PT data can be any samples in or out of the training set.
Returns: clustering outputs.
"""
_, validation_data = parse_validation_data(data, covariate)
all_prediction_labels = []
all_prediction_probabilities = []
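	# evaluate every saved model on the same validation data; the labels and probabilities
	# collected here are combined into a single clustering result below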
for models in model_dirs:
model = SmileGAN()
model.load(models)
all_prediction_labels.append(np.argmax(model.predict_cluster(validation_data), axis=1))
all_prediction_probabilities.append(model.predict_cluster(validation_data))
if len(model_dirs) > 1:
mean_ari, std_ari = calculate_ari(all_prediction_labels)
print("Results have Adjuested_random_index (ARI) = %.2f+- %.2f" %(mean_ari, std_ari))
if mean_ari<0.3 and consensus_type == 'highest_matching_clustering':
print('mean ARI < 0.3, consensus_clustering is recommended')
if len(all_prediction_labels) == 1:
return np.array(all_prediction_labels[0]), np.array(all_prediction_probabilities[0]), 1, 0
elif consensus_type == 'highest_matching_clustering':
cluster_label, cluster_prob = highest_matching_clustering(all_prediction_labels, all_prediction_probabilities, ncluster)
return cluster_label, cluster_prob, mean_ari, std_ari
elif consensus_type == 'consensus_clustering':
return consensus_clustering(all_prediction_labels, ncluster), None, mean_ari, std_ari
else:
raise Exception("Please choose between 'highest_matching_clustering' and 'consensus_clustering'")
def single_model_clustering(data, ncluster, start_saving_epoch, max_epoch, output_dir, WD_threshold, AQ_threshold, \
cluster_loss_threshold, covariate=None, saved_model_name='converged_model', lam=9, mu=5, batchSize=25, lipschitz_k = 0.5, verbose = False, \
beta1 = 0.5, lr = 0.0002, max_gnorm = 100, eval_freq = 5, save_epoch_freq = 5):
"""
	one of the Smile-GAN core functions for clustering. Only one model will be trained (not recommended, since the result may not be reproducible).
Args:
data: dataframe, dataframe file with all ROI (input features) The dataframe contains
the following headers: "
"i) the first column is the participant_id;"
"iii) the second column should be the diagnosis;"
"The following column should be the extracted features. e.g., the ROI features"
covariate: dataframe, not required; dataframe file with all confounding covariates to be corrected. The dataframe contains
the following headers: "
"i) the first column is the participant_id;"
"iii) the second column should be the diagnosis;"
"The following column should be all confounding covariates. e.g., age, sex"
ncluster: int, number of defined clusters
		start_saving_epoch: int, epoch number from which the model will be saved and training stopped once the stopping criteria are satisfied
		max_epoch: int, maximum training epoch: training will stop even if the criteria are not satisfied.
		output_dir: str, the directory under which the model and results will be saved
		WD_threshold: int, chosen WD threshold for the stopping criteria
		AQ_threshold: int, chosen AQ threshold for the stopping criteria
		cluster_loss_threshold: int, chosen cluster_loss threshold for the stopping criteria
		load_model: bool, whether to load a pre-saved checkpoint
saved_model_name: str, the name of the saved model
lam: int, hyperparameter for cluster loss
mu: int, hyperparameter for change loss
		batchsize: int, batch size for the training procedure
		lipschitz_k: float, hyperparameter for weight clipping of the mapping and clustering functions
verbose: bool, choose whether to print out training procedure
beta1: float, parameter of ADAM optimization method
lr: float, learning rate
max_gnorm: float, maximum gradient norm for gradient clipping
eval_freq: int, the frequency at which the model is evaluated during training procedure
save_epoch_freq: int, the frequency at which the model is saved during training procedure
Returns: clustering outputs.
"""
print('Start Smile-GAN for semi-supervised clustering')
Smile_GAN_model = Smile_GAN_train(ncluster, start_saving_epoch, max_epoch, WD_threshold, AQ_threshold, \
cluster_loss_threshold, lam=lam, mu=mu, batchSize=batchSize, lipschitz_k = lipschitz_k,
beta1 = beta1, lr = lr, max_gnorm = max_gnorm, eval_freq = eval_freq, save_epoch_freq = save_epoch_freq)
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, verbose = verbose)
while not converge:
print("****** Model not converging or not converged at max interation, Start retraining ******")
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, verbose = verbose)
cluster_label, cluster_prob, mean_ari, std_ari = clustering_result([os.path.join(output_dir,saved_model_name)], ncluster, 'highest_matching_clustering', data, covariate)
pt_data = data.loc[data['diagnosis'] == 1][['participant_id','diagnosis']]
pt_data['cluster_label'] = cluster_label + 1
for i in range(ncluster):
pt_data['p'+str(i+1)] = cluster_prob[:,i]
pt_data.to_csv(os.path.join(output_dir,'clustering_result.csv'), index = False)
return pt_data
def cross_validated_clustering(data, ncluster, fold_number, fraction, start_saving_epoch, max_epoch, output_dir, WD_threshold, AQ_threshold, \
cluster_loss_threshold, consensus_type, covariate=None, lam=9, mu=5, batchSize=25, lipschitz_k = 0.5, verbose = False, \
beta1 = 0.5, lr = 0.0002, max_gnorm = 100, eval_freq = 5, save_epoch_freq = 5, start_fold = 0, stop_fold = None, check_outlier = True):
"""
cross_validated clustering function using Smile-GAN (recommended)
Args:
data: dataframe, dataframe file with all ROI (input features) The dataframe contains
the following headers: "
"i) the first column is the participant_id;"
"iii) the second column should be the diagnosis;"
"The following column should be the extracted features. e.g., the ROI features"
covariate: dataframe, not required; dataframe file with all confounding covariates to be corrected. The dataframe contains
the following headers: "
"i) the first column is the participant_id;"
"iii) the second column should be the diagnosis;"
"The following column should be all confounding covariates. e.g., age, sex"
ncluster: int, number of defined clusters
fold_number: int, number of folds for leave-out cross validation
fraction: float, fraction of data used for training in each fold
		start_saving_epoch: int, epoch number from which the model will be saved and training stopped once the stopping criteria are satisfied
		max_epoch: int, maximum training epoch: training will stop even if the criteria are not satisfied.
		output_dir: str, the directory under which the model and results will be saved
		WD_threshold: int, chosen WD threshold for the stopping criteria
		AQ_threshold: int, chosen AQ threshold for the stopping criteria
		cluster_loss_threshold: int, chosen cluster_loss threshold for the stopping criteria
###load_model: bool, whether load one pre-saved checkpoint
consensus_type: string, the method used for deriving final clustering results with all models saved during CV
choose between 'highest_matching_clustering' and 'consensus_clustering'
saved_model_name: str, the name of the saved model
lam: int, hyperparameter for cluster loss
mu: int, hyperparameter for change loss
		batchsize: int, batch size for the training procedure
		lipschitz_k: float, hyperparameter for weight clipping of the mapping and clustering functions
verbose: bool, choose whether to print out training procedure
beta1: float, parameter of ADAM optimization method
lr: float, learning rate
max_gnorm: float, maximum gradient norm for gradient clipping
eval_freq: int, the frequency at which the model is evaluated during training procedure
save_epoch_freq: int, the frequency at which the model is saved during training procedure
		start_fold: int, index of the last saved fold,
				used to restart a previously half-finished cross-validation; defaults to 0, indicating a new CV process
		stop_fold: int, index of the fold at which the CV should stop early,
				used to stop the CV process early and resume it later; defaults to None, in which case CV runs to the end
		check_outlier: bool, whether to check for outlier models (potentially unsuccessful models) after the CV process and retrain those folds
Returns: clustering outputs.
"""
print('Start Smile-GAN for semi-supervised clustering')
Smile_GAN_model = Smile_GAN_train(ncluster, start_saving_epoch, max_epoch, WD_threshold, AQ_threshold, \
cluster_loss_threshold, lam=lam, mu=mu, batchSize=batchSize, \
lipschitz_k = lipschitz_k, beta1 = beta1, lr = lr, max_gnorm = max_gnorm, eval_freq = eval_freq, save_epoch_freq = save_epoch_freq)
saved_models = [os.path.join(output_dir, 'coverged_model_fold'+str(i)) for i in range(fold_number)]
if stop_fold == None:
stop_fold = fold_number
for i in range(start_fold, stop_fold):
print('****** Starting training of Fold '+str(i)+" ******")
saved_model_name = 'coverged_model_fold'+str(i)
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
while not converge:
print("****** Model not converging or not converged at max interation, Start retraining ******")
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
if check_outlier:
print('****** Start Checking outlier models ******')
outlier_models = model_filtering(saved_models, ncluster, data, covariate)
if len(outlier_models) > 0:
print('Model', end=' ')
for model in outlier_models:
print(str(model),end=' ')
print('have low agreement with other models')
else:
print('****** There are no outlier models ******')
for i in outlier_models:
print('****** Starting training of Fold '+str(i)+" ******")
saved_model_name = 'coverged_model_fold'+str(i)
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
while not converge:
print("****** Model not converged at max interation, Start retraining ******")
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
cluster_label, cluster_prob, mean_ari, std_ari = clustering_result(saved_models, ncluster, consensus_type, data, covariate)
pt_data = data.loc[data['diagnosis'] == 1][['participant_id','diagnosis']]
pt_data['cluster_label'] = cluster_label + 1
if consensus_type == "highest_matching_clustering":
for i in range(ncluster):
pt_data['p'+str(i+1)] = cluster_prob[:,i]
pt_data["ARI = %.2f+- %.2f" %(mean_ari, std_ari)] = ''
pt_data.to_csv(os.path.join(output_dir,'clustering_result.csv'), index = False)
print('****** Smile-GAN clustering finished ******')
|
[
"numpy.median",
"numpy.std",
"numpy.mean",
"numpy.array",
"sklearn.metrics.adjusted_rand_score",
"os.path.join",
"numpy.delete"
] |
[((1793, 1822), 'numpy.median', 'np.median', (['model_aris'], {'axis': '(1)'}), '(model_aris, axis=1)\n', (1802, 1822), True, 'import numpy as np\n'), ((1876, 1901), 'numpy.delete', 'np.delete', (['median_aris', 'j'], {}), '(median_aris, j)\n', (1885, 1901), True, 'import numpy as np\n'), ((2263, 2282), 'numpy.mean', 'np.mean', (['model_aris'], {}), '(model_aris)\n', (2270, 2282), True, 'import numpy as np\n'), ((2284, 2302), 'numpy.std', 'np.std', (['model_aris'], {}), '(model_aris)\n', (2290, 2302), True, 'import numpy as np\n'), ((8038, 8087), 'os.path.join', 'os.path.join', (['output_dir', '"""clustering_result.csv"""'], {}), "(output_dir, 'clustering_result.csv')\n", (8050, 8087), False, 'import os\n'), ((13855, 13904), 'os.path.join', 'os.path.join', (['output_dir', '"""clustering_result.csv"""'], {}), "(output_dir, 'clustering_result.csv')\n", (13867, 13904), False, 'import os\n'), ((3930, 3964), 'numpy.array', 'np.array', (['all_prediction_labels[0]'], {}), '(all_prediction_labels[0])\n', (3938, 3964), True, 'import numpy as np\n'), ((3966, 4007), 'numpy.array', 'np.array', (['all_prediction_probabilities[0]'], {}), '(all_prediction_probabilities[0])\n', (3974, 4007), True, 'import numpy as np\n'), ((7724, 7766), 'os.path.join', 'os.path.join', (['output_dir', 'saved_model_name'], {}), '(output_dir, saved_model_name)\n', (7736, 7766), False, 'import os\n'), ((1942, 1959), 'numpy.std', 'np.std', (['rest_aris'], {}), '(rest_aris)\n', (1948, 1959), True, 'import numpy as np\n'), ((2182, 2253), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['prediction_labels[i]', 'prediction_labels[j]'], {}), '(prediction_labels[i], prediction_labels[j])\n', (2209, 2253), False, 'from sklearn import metrics\n'), ((1697, 1776), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['all_prediction_labels[i]', 'all_prediction_labels[j]'], {}), '(all_prediction_labels[i], all_prediction_labels[j])\n', (1724, 1776), False, 'from sklearn import metrics\n'), ((1922, 1940), 'numpy.mean', 'np.mean', (['rest_aris'], {}), '(rest_aris)\n', (1929, 1940), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import io
import re
import sys
import warnings
from scipy.stats import skew, skewtest
from scipy.stats import rankdata
from .plot_1var import *
# from plot_1var import * # for local testing only
from IPython.display import HTML
def print_list(l, br=', '):
o = ''
for e in l:
o += str(e) + br
return o[:-len(br)]
def summary(s, max_lev=10, br_way=', ', sum_num_like_cat_if_nunique_small=5):
'''
a function that takes a series and returns a summary string
'''
if s.nunique(dropna=False) == 1:
return(f'all the same: {s.unique()[0]}')
elif s.notnull().sum() == 0:
return(f'all are NaNs')
if s.dtype.name in ['object', 'bool', 'category'] or \
(('float' in s.dtype.name or 'int' in s.dtype.name) \
and s.nunique() <= sum_num_like_cat_if_nunique_small):
if len(s.unique()) <= max_lev:
# consider drop na?
vc = s.value_counts(dropna=False, normalize=True)
# vc = s.value_counts(dropna=True, normalize=True)
s = ''
for name, v in zip(vc.index, vc.values):
s += f'{name} {v*100:>2.0f}%' + br_way
return s[:-len(br_way)]
else:
vc = s.value_counts(dropna=False, normalize=True)
# vc = s.value_counts(dropna=True, normalize=True)
s = ''
i = 0
cur_sum_perc = 0
for name, v in zip(vc.index, vc.values):
                if i == max_lev or \
                   (i >= 5 and cur_sum_perc >= 0.8) or \
                   (i == 0 and v < 0.05):
                    # stop once max_lev is hit, >=80% of the data has been described
                    # (after at least 5 levels), or the most common level covers < 5%
                    break
s += f'{name} {v*100:>2.0f}%' + br_way
i += 1
cur_sum_perc += v
s += f'other {(1-cur_sum_perc)*100:>2.0f}%'
# return s[:-len(br_way)]
return s
elif 'float' in s.dtype.name or 'int' in s.dtype.name:
qs = s.quantile(q=[0, 0.25, 0.5, 0.75, 1]).values.tolist()
cv = round(s.std()/s.mean(), 2) if s.mean() != 0 else 'nan'
sk = round(skew(s[s.notnull()]), 2) if len(s[s.notnull()]) > 0 else 'nan'
o = f'{qs}{br_way}\
mean: {s.mean():.2f} std: {s.std():.2f}{br_way}\
cv: {cv} skew: {sk}'
if sum(s.notnull()) > 8: # requirement of skewtest
p = skewtest(s[s.notnull()]).pvalue
o += f'*' if p <= 0.05 else ''
if min(s[s!=0]) > 0 and len(s[s!=0]) > 8: # take log
o += f'{br_way}log skew: {skew(np.log(s[s>0])):.2f}'
p = skewtest(np.log(s[s!=0])).pvalue
            o += f'*' if p == p and p <= 0.05 else ''  # p == p guards against NaN p-values
return o
elif 'datetime' in s.dtype.name:
qs = s.quantile(q=[0, 0.25, 0.5, 0.75, 1]).values
dt_range = (qs[-1]-qs[0]).astype('timedelta64[D]')
if dt_range > np.timedelta64(1, 'D'):
to_print = [np.datetime_as_string(q, unit='D') for q in qs]
else:
to_print = [np.datetime_as_string(q, unit='s') for q in qs]
return print_list(to_print, br=br_way)
else:
return ''
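# Usage sketch for summary() (series below are illustrative only, not part of the module):
# a categorical series yields level percentages, a numeric one yields quantiles,
# mean/std, cv and skewness.
#
#   summary(pd.Series(['a', 'a', 'b', None]))      # -> roughly 'a 50%, b 25%, nan 25%'
#   summary(pd.Series(np.arange(100.0)))           # -> quantiles, mean/std, cv, skew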
def possible_dup_lev(series, threshold=0.9, truncate=False):
try:
from fuzzywuzzy import fuzz
except ImportError:
sys.exit("""Please install fuzzywuzzy first
install it using: pip install fuzzywuzzy
if installing the dependency python-levenshtein is failed and you are using Anaconda, try
conda install -c conda-forge python-levenshtein""")
if series.dtype.name not in ['category', 'object']:
return ''
if series.nunique() > 100 and series.dtype.name == 'object' and truncate: # maybe should adjust
# warnings.warn('Checking duplicates on a long list will take a long time', RuntimeWarning)
# simplified = series.str.lower().replace(r'\W', '')
# if simplified.nunique() < series.nunique():
# return f"too many levls, didn't check, but didn't pass a quick check"
# else:
# return ''
return ''
threshold *= 100
l = series.unique().tolist()
l = [y for y in l if type(y) == str] # remove nan, True, False
candidate = []
for i in range(len(l)):
for j in range(i+1, len(l)):
if l[i].isdigit() or l[j].isdigit():
continue
if any([fuzz.ratio(l[i], l[j]) > threshold,
fuzz.partial_ratio(l[i], l[j]) > threshold,
fuzz.token_sort_ratio(l[i], l[j]) > threshold,
fuzz.token_set_ratio(l[i], l[j]) > threshold]):
candidate.append((l[i], l[j]))
o = '; '.join(['('+', '.join(can)+')' for can in candidate])
if truncate and len(o) > 1000:
o = o[:1000] + f'...truncated, call TEF.possible_dup_lev({series.name}) for a full result'
return o
def dfmeta(df, description=None, max_lev=10, transpose=True, sample=True,
style=True, color_bg_by_type=True, highlight_nan=0.5, in_cell_next_line=True,
drop=None,
check_possible_error=True, dup_lev_prop=0.9,
fitted_feat_imp=None,
plot=True,
standard=False):
# validation
    assert max_lev > 2, 'max_lev should be > 2'
    assert sample == 'head' or sample < df.shape[0], 'sample should be < nrows'
if sample == True and df.shape[0] < 3:
sample = df.shape[0]
assert drop is None or 'NaNs' not in drop, 'Cannot drop NaNs for now'
assert drop is None or 'dtype' not in drop, 'Cannot drop dtype for now'
    warnings.simplefilter('ignore', RuntimeWarning)  # caused by skewtest; exact origin unknown
    if standard:  # overwrite these args
check_possible_error = False
sample = False
# drop=['unique levs']
# the first line, shape, dtypes, memory
buffer = io.StringIO()
df.info(verbose=False, buf=buffer)
s = buffer.getvalue()
if style == False:
print(f'shape: {df.shape}')
print(s.split('\n')[-3])
print(s.split('\n')[-2])
color_bg_by_type, highlight_nan, in_cell_next_line = False, False, False
br_way = "<br/> " if in_cell_next_line else ", " # notice a space here
o = pd.DataFrame(columns=df.columns)
o.loc['idx'] = list(range(df.shape[1]))
o.loc['dtype'] = df.dtypes
if description is not None:
o.loc['description'] = ''
for col, des in description.items():
if col in df.columns.tolist():
o.loc['description', col] = des
o.loc['NaNs'] = df.apply(lambda x: f'{sum(x.isnull())}{br_way}{sum(x.isnull())/df.shape[0]*100:.0f}%')
o.loc['unique counts'] = df.apply(lambda x: f'{len(x.unique())}{br_way}{len(x.unique())/df.shape[0]*100:.0f}%')
# def unique_index(s):
# if len(s.unique()) <= max_lev:
# o = ''
# for i in s.value_counts(dropna=False).index.tolist():
# o += str(i) + br_way
# return o[:-len(br_way)]
# else:
# return ''
# o.loc['unique levs'] = df.apply(unique_index, result_type='expand')
    o.loc['summary'] = df.apply(summary, result_type='expand', max_lev=max_lev, br_way=br_way)  # need result_type='expand' or everything will be converted to object dtype
    # could also pass extra arguments via args=(arg1, )
if plot and style:
o.loc['summary plot'] = ['__TO_PLOT_TO_FILL__'] * df.shape[1]
if fitted_feat_imp is not None:
def print_fitted_feat_imp(fitted_feat_imp, indices):
fitted_feat_imp = fitted_feat_imp[fitted_feat_imp.notnull()]
o = pd.Series(index=indices)
rank = len(fitted_feat_imp) - rankdata(fitted_feat_imp).astype(int) + 1
for i in range(len(fitted_feat_imp)):
o[fitted_feat_imp.index[i]] = f'{rank[i]:.0f}/{len(fitted_feat_imp)} {fitted_feat_imp[i]:.2f} {fitted_feat_imp[i]/sum(fitted_feat_imp)*100:.0f}%'
o.loc[o.isnull()] = ''
return o
o.loc['fitted feature importance'] = print_fitted_feat_imp(fitted_feat_imp, df.columns)
if check_possible_error:
def possible_nan(x):
if x.dtype.name not in ['category', 'object']:
return ''
check_list = ['NEED', 'nan', 'Nan', 'nAn', 'naN', 'NAn', 'nAN', 'NaN', 'NAN']
check_list_re = [r'^ +$', '^null$', r'^[^a-zA-Z0-9]*$']
o = ''
if sum(x==0) > 0:
o += f' "0": {sum(x==0)}, {sum(x==0)/df.shape[0]*100:.2f}%{br_way}'
for to_check in check_list:
if to_check in x.unique().tolist():
o += f' "{to_check}": {sum(x==to_check)}, {sum(x==to_check)/df.shape[0]*100:.2f}%{br_way}'
for to_check in check_list_re:
is_match = [re.match(to_check, str(lev), flags=re.IGNORECASE) is not None for lev in x]
if any(is_match):
to_print = ', '.join(x[is_match].unique())
o += f' "{to_print}": {sum(is_match)}, {sum(is_match)/df.shape[0]*100:.2f}%{br_way}'
            if len(o) > 1000:
                o = o[:1000] + '...truncated'
return o
o.loc['possible NaNs'] = df.apply(possible_nan)
o.loc['possible dup lev'] = df.apply(possible_dup_lev, args=(dup_lev_prop, True))
if sample != False:
if sample == True and type(sample) is not int:
sample_df = df.sample(3).sort_index()
elif sample == 'head':
sample_df = df.head(3)
elif type(sample) is int:
sample_df = df.sample(sample)
sample_df.index = ['row ' + str(x) for x in sample_df.index.tolist()]
o = o.append(sample_df)
if drop:
o = o.drop(labels=drop)
if transpose:
o = o.transpose()
o = o.rename_axis('col name').reset_index()
if color_bg_by_type or highlight_nan != False:
def style_rule(data, color='yellow'):
if color_bg_by_type:
cell_rule = 'border: 1px solid white;'
# https://www.w3schools.com/colors/colors_picker.asp
# saturation 92%, lightness 95%
cmap = {'object': '#f2f2f2',
'datetime64[ns]': '#e7feee',
'int8': '#fefee7',
'int16': '#fefee7',
'int32': '#fefee7',
'int64': '#fefee7',
'uint8': '#fefee7',
'uint16': '#fefee7',
'uint32': '#fefee7',
'uint64': '#fefee7',
'float16': '#fef2e7',
'float32': '#fef2e7',
'float64': '#fef2e7',
'bool': '#e7fefe',
'category': '#e7ecfe'}
# if data.iloc[2] not in cmap: # idx 2 is dtype
if data.loc['dtype'].name not in cmap:
cell_rule += "background-color: grey"
else:
cell_rule += "background-color: {}".format(cmap[data.loc['dtype'].name])
rule = [cell_rule] * len(data)
if transpose:
rule[0] = 'background-color: white;'
else:
rule = [''] * len(data)
# if float(data.iloc[3][-3:-1])/100 > highlight_nan or data.iloc[3][-4:] == '100%': # idx 3 is NaNs
if float(data.loc['NaNs'][-3:-1])/100 > highlight_nan or data.loc['NaNs'][-4:] == '100%':
rule[np.where(data.index=='NaNs')[0][0]] += '; color: red'
if data.loc['unique counts'][:(3+len(br_way))] == f'{df.shape[0]}{br_way}': # all unique
rule[np.where(data.index=='unique counts')[0][0]] += '; color: blue'
elif data.loc['unique counts'][:(1+len(br_way))] == f'1{br_way}': # all the same
rule[np.where(data.index=='unique counts')[0][0]] += '; color: red'
if fitted_feat_imp is not None:
if data.loc['fitted feature importance'][:2] in ['1/', '2/', '3/']:
rule[np.where(data.index=='fitted feature importance')[0][0]] += '; font-weight: bold'
return rule
o = o.style.apply(style_rule, axis=int(transpose)) # axis=1 for row-wise, for transpose=True
if transpose:
o = o.hide_index()
if style: # caption
s = print_list(s.split('\n')[-3:-1], br='; ')
o = o.set_caption(f"shape: {df.shape}; {s}")
o = o.render() # convert from pandas.io.formats.style.Styler to html code
if plot and style:
for c in range(df.shape[1]):
html_1var = plot_1var_series(df, c, max_lev, log_numeric=False, save_plt=None, return_html=True)
o = o.replace('__TO_PLOT_TO_FILL__', html_1var, 1)
o = HTML(o) # convert from html to IPython.core.display.HTML
return o
def dfmeta_to_htmlfile(styled_df, filename, head=''):
'''
styled_df should be <class 'IPython.core.display.HTML'>
'''
r = f'<h1>{head}</h1>\n' + '<body>\n' + styled_df.data + '\n</body>'
with open(filename, 'w') as f:
f.write(r)
return f'{filename} saved'
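# Usage sketch (the file name and the variables `df` and `desc` are hypothetical):
# build the styled metadata table for a DataFrame, then write it to an HTML report.
#
#   meta = dfmeta(df, description=desc)                          # IPython HTML object when style=True
#   dfmeta_to_htmlfile(meta, 'df_meta.html', head='my dataset')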
# def print_html_standard(df, description):
# meta = dfmeta(df,
# description=description,
# check_possible_error=False, sample=False, drop=['unique levs'])
# dfmeta_verbose_html = ''
# buffer = io.StringIO()
# df.info(verbose=False, buf=buffer)
# s = buffer.getvalue().split('\n')
# dfmeta_verbose = f"shape: {df.shape}<br/>{s[-3]}<br/>{s[-2]}"
# dfmeta_verbose_html = '<p>' + dfmeta_verbose + '</p>'
# r = dfmeta_verbose_html + '<body>\n' + meta.data + '\n</body>'
# for e in r.split('\n'):
# print(e)
# def dfmeta_to_htmlfile_standard(df, description, filename, head):
# '''
# a function that call dfmeta and then dfmeta_to_htmlfile using a standard configuration
# '''
# meta = dfmeta(df,
# description=description,
# check_possible_error=False, sample=False, drop=['unique levs'])
# return dfmeta_to_htmlfile(meta, filename, head)
def get_desc_template(df, var_name='desc', suffix_idx=False):
print(var_name, '= {')
max_cn = max([len(x) for x in df.columns.tolist()]) + 1
len_cn = 25 if max_cn > 25 else max_cn
for i in range(df.shape[1]):
c = df.columns[i]
c += '"'
if c[:-1] != df.columns.tolist()[-1]:
if suffix_idx == False:
print(f' "{c:{len_cn}}: "",')
else:
print(f' "{c:{len_cn}}: "", # {i}')
else:
if suffix_idx == False:
print(f' "{c:{len_cn}}: ""')
else:
print(f' "{c:{len_cn}}: "" # {i}')
print('}')
def get_desc_template_file(df, filename='desc.py', var_name='desc', suffix_idx=False):
'''%run filename.py'''
max_cn = max([len(x) for x in df.columns.tolist()]) + 1
len_cn = 25 if max_cn > 25 else max_cn
o = var_name + ' = {' + '\n'
for i in range(df.shape[1]):
c = df.columns[i]
c += '"'
if c[:-1] != df.columns.tolist()[-1]:
o += f' "{c:{len_cn}}: "", # {i}' + '\n'
else:
o += f' "{c:{len_cn}}: "" # {i}' + '\n'
o += '}'
with open(filename, 'w') as f:
f.write(o)
return f'{filename} saved'
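# Usage sketch: generate a description-dict skeleton to fill in by hand and feed back
# to dfmeta(description=...); `df` and the file name below are placeholders.
#
#   get_desc_template(df)                  # prints 'desc = {...}' for copy-pasting
#   get_desc_template_file(df, 'desc.py')  # or write it to a file, then %run desc.py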
|
[
"pandas.DataFrame",
"fuzzywuzzy.fuzz.ratio",
"io.StringIO",
"fuzzywuzzy.fuzz.partial_ratio",
"fuzzywuzzy.fuzz.token_sort_ratio",
"warnings.simplefilter",
"numpy.log",
"numpy.datetime_as_string",
"scipy.stats.rankdata",
"numpy.timedelta64",
"numpy.where",
"pandas.Series",
"fuzzywuzzy.fuzz.token_set_ratio",
"IPython.display.HTML"
] |
[((5613, 5660), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'RuntimeWarning'], {}), "('ignore', RuntimeWarning)\n", (5634, 5660), False, 'import warnings\n'), ((5883, 5896), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5894, 5896), False, 'import io\n'), ((6257, 6289), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (6269, 6289), True, 'import pandas as pd\n'), ((12880, 12887), 'IPython.display.HTML', 'HTML', (['o'], {}), '(o)\n', (12884, 12887), False, 'from IPython.display import HTML\n'), ((7642, 7666), 'pandas.Series', 'pd.Series', ([], {'index': 'indices'}), '(index=indices)\n', (7651, 7666), True, 'import pandas as pd\n'), ((2942, 2964), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (2956, 2964), True, 'import numpy as np\n'), ((2990, 3024), 'numpy.datetime_as_string', 'np.datetime_as_string', (['q'], {'unit': '"""D"""'}), "(q, unit='D')\n", (3011, 3024), True, 'import numpy as np\n'), ((3076, 3110), 'numpy.datetime_as_string', 'np.datetime_as_string', (['q'], {'unit': '"""s"""'}), "(q, unit='s')\n", (3097, 3110), True, 'import numpy as np\n'), ((4457, 4479), 'fuzzywuzzy.fuzz.ratio', 'fuzz.ratio', (['l[i]', 'l[j]'], {}), '(l[i], l[j])\n', (4467, 4479), False, 'from fuzzywuzzy import fuzz\n'), ((4510, 4540), 'fuzzywuzzy.fuzz.partial_ratio', 'fuzz.partial_ratio', (['l[i]', 'l[j]'], {}), '(l[i], l[j])\n', (4528, 4540), False, 'from fuzzywuzzy import fuzz\n'), ((4570, 4603), 'fuzzywuzzy.fuzz.token_sort_ratio', 'fuzz.token_sort_ratio', (['l[i]', 'l[j]'], {}), '(l[i], l[j])\n', (4591, 4603), False, 'from fuzzywuzzy import fuzz\n'), ((4634, 4666), 'fuzzywuzzy.fuzz.token_set_ratio', 'fuzz.token_set_ratio', (['l[i]', 'l[j]'], {}), '(l[i], l[j])\n', (4654, 4666), False, 'from fuzzywuzzy import fuzz\n'), ((2667, 2684), 'numpy.log', 'np.log', (['s[s != 0]'], {}), '(s[s != 0])\n', (2673, 2684), True, 'import numpy as np\n'), ((7709, 7734), 'scipy.stats.rankdata', 'rankdata', (['fitted_feat_imp'], {}), '(fitted_feat_imp)\n', (7717, 7734), False, 'from scipy.stats import rankdata\n'), ((11596, 11626), 'numpy.where', 'np.where', (["(data.index == 'NaNs')"], {}), "(data.index == 'NaNs')\n", (11604, 11626), True, 'import numpy as np\n'), ((11773, 11812), 'numpy.where', 'np.where', (["(data.index == 'unique counts')"], {}), "(data.index == 'unique counts')\n", (11781, 11812), True, 'import numpy as np\n'), ((2616, 2632), 'numpy.log', 'np.log', (['s[s > 0]'], {}), '(s[s > 0])\n', (2622, 2632), True, 'import numpy as np\n'), ((11951, 11990), 'numpy.where', 'np.where', (["(data.index == 'unique counts')"], {}), "(data.index == 'unique counts')\n", (11959, 11990), True, 'import numpy as np\n'), ((12169, 12220), 'numpy.where', 'np.where', (["(data.index == 'fitted feature importance')"], {}), "(data.index == 'fitted feature importance')\n", (12177, 12220), True, 'import numpy as np\n')]
|
import os
import allel
import h5py
import numpy as np
import sys
import time
from fvTools import *
if not len(sys.argv) in [13,15]:
sys.exit("usage:\npython makeFeatureVecsForChrArmFromVcf_ogSHIC.py chrArmFileName chrArm chrLen targetPop winSize numSubWins maskFileName sampleToPopFileName ancestralArmFaFileName statFileName outFileName [segmentStart segmentEnd]\n")
if len(sys.argv) == 15:
chrArmFileName, chrArm, chrLen, targetPop, winSize, numSubWins, maskFileName, unmaskedFracCutoff, sampleToPopFileName, ancestralArmFaFileName, statFileName, outfn, segmentStart, segmentEnd = sys.argv[1:]
segmentStart, segmentEnd = int(segmentStart), int(segmentEnd)
else:
chrArmFileName, chrArm, chrLen, targetPop, winSize, numSubWins, maskFileName, unmaskedFracCutoff, sampleToPopFileName, ancestralArmFaFileName, statFileName, outfn = sys.argv[1:]
segmentStart = None
unmaskedFracCutoff = float(unmaskedFracCutoff)
chrLen, winSize, numSubWins = int(chrLen), int(winSize), int(numSubWins)
assert winSize % numSubWins == 0 and numSubWins > 1
subWinSize = int(winSize/numSubWins)
def getSubWinBounds(chrLen, subWinSize):
lastSubWinEnd = chrLen - chrLen % subWinSize
lastSubWinStart = lastSubWinEnd - subWinSize + 1
subWinBounds = []
for subWinStart in range(1, lastSubWinStart+1, subWinSize):
subWinEnd = subWinStart + subWinSize - 1
subWinBounds.append((subWinStart, subWinEnd))
return subWinBounds
def getSnpIndicesInSubWins(subWinSize, lastSubWinEnd, snpLocs):
subWinStart = 1
subWinEnd = subWinStart + subWinSize - 1
snpIndicesInSubWins = [[]]
for i in range(len(snpLocs)):
while snpLocs[i] <= lastSubWinEnd and not (snpLocs[i] >= subWinStart and snpLocs[i] <= subWinEnd):
subWinStart += subWinSize
subWinEnd += subWinSize
snpIndicesInSubWins.append([])
if snpLocs[i] <= lastSubWinEnd:
snpIndicesInSubWins[-1].append(i)
while subWinEnd < lastSubWinEnd:
snpIndicesInSubWins.append([])
subWinStart += subWinSize
subWinEnd += subWinSize
return snpIndicesInSubWins
chrArmFile = allel.read_vcf(chrArmFileName)
chroms = chrArmFile["variants/CHROM"]
positions = np.extract(chroms == chrArm, chrArmFile["variants/POS"])
if maskFileName.lower() in ["none", "false"]:
sys.stderr.write("Warning: a mask.fa file for the chr arm with all masked sites N'ed out is strongly recommended" +
" (pass in the reference to remove Ns at the very least)!\n")
unmasked = [True] * chrLen
else:
unmasked = readMaskDataForScan(maskFileName, chrArm)
assert len(unmasked) == chrLen
if statFileName.lower() in ["none", "false"]:
statFileName = None
samples = chrArmFile["samples"]
if not sampleToPopFileName.lower() in ["none", "false"]:
sampleToPop = readSampleToPopFile(sampleToPopFileName)
sampleIndicesToKeep = [i for i in range(len(samples)) if sampleToPop.get(samples[i], "popNotFound!") == targetPop]
else:
sampleIndicesToKeep = [i for i in range(len(samples))]
rawgenos = np.take(chrArmFile["calldata/GT"], [i for i in range(len(chroms)) if chroms[i] == chrArm], axis=0)
genos = allel.GenotypeArray(rawgenos)
refAlleles = np.extract(chroms == chrArm, chrArmFile['variants/REF'])
altAlleles = np.extract(chroms == chrArm, chrArmFile['variants/ALT'])
if segmentStart != None:
snpIndicesToKeep = [i for i in range(len(positions)) if segmentStart <= positions[i] <= segmentEnd]
positions = [positions[i] for i in snpIndicesToKeep]
refAlleles = [refAlleles[i] for i in snpIndicesToKeep]
altAlleles = [altAlleles[i] for i in snpIndicesToKeep]
genos = allel.GenotypeArray(genos.subset(sel0=snpIndicesToKeep))
genos = allel.GenotypeArray(genos.subset(sel1=sampleIndicesToKeep))
alleleCounts = genos.count_alleles()
#remove all but mono/biallelic unmasked sites
isBiallelic = alleleCounts.is_biallelic()
for i in range(len(isBiallelic)):
if not isBiallelic[i]:
unmasked[positions[i]-1] = False
#polarize
if not ancestralArmFaFileName.lower() in ["none", "false"]:
sys.stderr.write("polarizing snps\n")
ancArm = readFaArm(ancestralArmFaFileName, chrArm).upper()
startTime = time.clock()
#NOTE: mapping specifies which alleles to swap counts for based on polarization; leaves unpolarized snps alone
#NOTE: those snps need to be filtered later on (as done below)!
# this will also remove sites that could not be polarized
mapping, unmasked = polarizeSnps(unmasked, positions, refAlleles, altAlleles, ancArm)
sys.stderr.write("took %s seconds\n" %(time.clock()-startTime))
statNames = ["pi", "thetaW", "tajD", "thetaH", "fayWuH", "maxFDA", "HapCount", "H1", "H12", "H2/H1", "ZnS", "Omega", "distVar", "distSkew", "distKurt"]
else:
statNames = ["pi", "thetaW", "tajD", "HapCount", "H1", "H12", "H2/H1", "ZnS", "Omega", "distVar", "distSkew", "distKurt"]
snpIndicesToKeep = [i for i in range(len(positions)) if unmasked[positions[i]-1]]
genos = allel.GenotypeArray(genos.subset(sel0=snpIndicesToKeep))
positions = [positions[i] for i in snpIndicesToKeep]
alleleCounts = allel.AlleleCountsArray([[alleleCounts[i][0], max(alleleCounts[i][1:])] for i in snpIndicesToKeep])
if not ancestralArmFaFileName.lower() in ["none", "false"]:
mapping = [mapping[i] for i in snpIndicesToKeep]
alleleCounts = alleleCounts.map_alleles(mapping)
haps = genos.to_haplotypes()
subWinBounds = getSubWinBounds(chrLen, subWinSize)
precomputedStats = {} #not using this
header = "chrom classifiedWinStart classifiedWinEnd bigWinRange".split()
statHeader = "chrom start end".split()
for statName in statNames:
statHeader.append(statName)
for i in range(numSubWins):
header.append("%s_win%d" %(statName, i))
statHeader = "\t".join(statHeader)
header = "\t".join(header)
outFile=open(outfn,'w')
outFile.write(header+"\n")
statVals = {}
for statName in statNames:
statVals[statName] = []
startTime = time.clock()
goodSubWins = []
lastSubWinEnd = chrLen - chrLen % subWinSize
snpIndicesInSubWins = getSnpIndicesInSubWins(subWinSize, lastSubWinEnd, positions)
subWinIndex = 0
lastSubWinStart = lastSubWinEnd - subWinSize + 1
if statFileName:
statFile = open(statFileName, "w")
statFile.write(statHeader + "\n")
for subWinStart in range(1, lastSubWinStart+1, subWinSize):
subWinEnd = subWinStart + subWinSize - 1
unmaskedFrac = unmasked[subWinStart-1:subWinEnd].count(True)/float(subWinEnd-subWinStart+1)
if segmentStart == None or subWinStart >= segmentStart and subWinEnd <= segmentEnd:
sys.stderr.write("%d-%d num unmasked snps: %d; unmasked frac: %f\n" %(subWinStart, subWinEnd, len(snpIndicesInSubWins[subWinIndex]), unmaskedFrac))
if len(snpIndicesInSubWins[subWinIndex]) > 0 and unmaskedFrac >= unmaskedFracCutoff:
hapsInSubWin = allel.HaplotypeArray(haps.subset(sel0=snpIndicesInSubWins[subWinIndex]))
statValStr = []
for statName in statNames:
calcAndAppendStatValForScan(alleleCounts, positions, statName, subWinStart, \
subWinEnd, statVals, subWinIndex, hapsInSubWin, unmasked, precomputedStats)
statValStr.append("%s: %s" %(statName, statVals[statName][-1]))
sys.stderr.write("\t".join(statValStr) + "\n")
goodSubWins.append(True)
if statFileName:
statFile.write("\t".join([chrArm, str(subWinStart), str(subWinEnd)] + [str(statVals[statName][-1]) for statName in statNames]) + "\n")
else:
for statName in statNames:
appendStatValsForMonomorphicForScan(statName, statVals, subWinIndex)
goodSubWins.append(False)
if goodSubWins[-numSubWins:].count(True) == numSubWins:
outVec = []
for statName in statNames:
outVec += normalizeFeatureVec(statVals[statName][-numSubWins:])
midSubWinEnd = subWinEnd - subWinSize*(numSubWins/2)
midSubWinStart = midSubWinEnd-subWinSize+1
outFile.write("%s\t%d\t%d\t%d-%d\t" %(chrArm, midSubWinStart, midSubWinEnd, subWinEnd-winSize+1, subWinEnd) + "\t".join([str(x) for x in outVec]))
outFile.write('\n')
subWinIndex += 1
if statFileName:
statFile.close()
outFile.close()
sys.stderr.write("completed in %g seconds\n" %(time.clock()-startTime))
|
[
"numpy.extract",
"allel.read_vcf",
"time.clock",
"sys.stderr.write",
"allel.GenotypeArray",
"sys.exit"
] |
[((2142, 2172), 'allel.read_vcf', 'allel.read_vcf', (['chrArmFileName'], {}), '(chrArmFileName)\n', (2156, 2172), False, 'import allel\n'), ((2223, 2279), 'numpy.extract', 'np.extract', (['(chroms == chrArm)', "chrArmFile['variants/POS']"], {}), "(chroms == chrArm, chrArmFile['variants/POS'])\n", (2233, 2279), True, 'import numpy as np\n'), ((3169, 3198), 'allel.GenotypeArray', 'allel.GenotypeArray', (['rawgenos'], {}), '(rawgenos)\n', (3188, 3198), False, 'import allel\n'), ((3212, 3268), 'numpy.extract', 'np.extract', (['(chroms == chrArm)', "chrArmFile['variants/REF']"], {}), "(chroms == chrArm, chrArmFile['variants/REF'])\n", (3222, 3268), True, 'import numpy as np\n'), ((3282, 3338), 'numpy.extract', 'np.extract', (['(chroms == chrArm)', "chrArmFile['variants/ALT']"], {}), "(chroms == chrArm, chrArmFile['variants/ALT'])\n", (3292, 3338), True, 'import numpy as np\n'), ((5953, 5965), 'time.clock', 'time.clock', ([], {}), '()\n', (5963, 5965), False, 'import time\n'), ((137, 384), 'sys.exit', 'sys.exit', (['"""usage:\npython makeFeatureVecsForChrArmFromVcf_ogSHIC.py chrArmFileName chrArm chrLen targetPop winSize numSubWins maskFileName sampleToPopFileName ancestralArmFaFileName statFileName outFileName [segmentStart segmentEnd]\n"""'], {}), '(\n """usage:\npython makeFeatureVecsForChrArmFromVcf_ogSHIC.py chrArmFileName chrArm chrLen targetPop winSize numSubWins maskFileName sampleToPopFileName ancestralArmFaFileName statFileName outFileName [segmentStart segmentEnd]\n"""\n )\n', (145, 384), False, 'import sys\n'), ((2331, 2521), 'sys.stderr.write', 'sys.stderr.write', (['("Warning: a mask.fa file for the chr arm with all masked sites N\'ed out is strongly recommended"\n + """ (pass in the reference to remove Ns at the very least)!\n""")'], {}), '(\n "Warning: a mask.fa file for the chr arm with all masked sites N\'ed out is strongly recommended"\n + """ (pass in the reference to remove Ns at the very least)!\n""")\n', (2347, 2521), False, 'import sys\n'), ((4083, 4120), 'sys.stderr.write', 'sys.stderr.write', (['"""polarizing snps\n"""'], {}), "('polarizing snps\\n')\n", (4099, 4120), False, 'import sys\n'), ((4200, 4212), 'time.clock', 'time.clock', ([], {}), '()\n', (4210, 4212), False, 'import time\n'), ((8246, 8258), 'time.clock', 'time.clock', ([], {}), '()\n', (8256, 8258), False, 'import time\n'), ((4591, 4603), 'time.clock', 'time.clock', ([], {}), '()\n', (4601, 4603), False, 'import time\n')]
|
import os
from datasets.types.data_split import DataSplit
from datasets.SOT.constructor.base_interface import SingleObjectTrackingDatasetConstructor
import numpy as np
def construct_TrackingNet(constructor: SingleObjectTrackingDatasetConstructor, seed):
root_path = seed.root_path
data_type = seed.data_split
enable_set_ids = seed.enable_set_ids
sequence_name_class_map_file_path = seed.sequence_name_class_map_file_path
if data_type != DataSplit.Training and enable_set_ids is not None:
raise Exception("unsupported configuration")
sequence_name_class_map = {}
if sequence_name_class_map_file_path is None:
sequence_name_class_map_file_path = os.path.join(os.path.dirname(__file__), 'data_specs', 'trackingnet_sequence_classes_map.txt')
for line in open(sequence_name_class_map_file_path, 'r', encoding='utf-8'):
line = line.strip()
name, category = line.split('\t')
sequence_name_class_map[name] = category
categories = set(sequence_name_class_map.values())
category_id_name_map = {i: v for i, v in enumerate(categories)}
category_name_id_map = {v: i for i, v in enumerate(categories)}
if enable_set_ids is not None:
trackingNetSubsets = ['TRAIN_{}'.format(v) for v in enable_set_ids]
else:
trackingNetSubsets = []
if data_type & DataSplit.Training:
trackingNetSubsets = ['TRAIN_{}'.format(v) for v in range(12)]
if data_type & DataSplit.Testing:
trackingNetSubsets.append('TEST')
sequence_list = []
for subset in trackingNetSubsets:
subset_path = os.path.join(root_path, subset)
frames_path = os.path.join(subset_path, 'frames')
anno_path = os.path.join(subset_path, 'anno')
bounding_box_annotation_files = os.listdir(anno_path)
bounding_box_annotation_files = [bounding_box_annotation_file for bounding_box_annotation_file in
bounding_box_annotation_files if bounding_box_annotation_file.endswith('.txt')]
bounding_box_annotation_files.sort()
sequences = [sequence[:-4] for sequence in bounding_box_annotation_files]
for sequence, bounding_box_annotation_file in zip(sequences, bounding_box_annotation_files):
sequence_image_path = os.path.join(frames_path, sequence)
bounding_box_annotation_file_path = os.path.join(anno_path, bounding_box_annotation_file)
sequence_list.append((sequence, sequence_image_path, bounding_box_annotation_file_path))
constructor.set_category_id_name_map(category_id_name_map)
constructor.set_total_number_of_sequences(len(sequence_list))
for sequence, sequence_image_path, sequence_bounding_box_annotation_file_path in sequence_list:
with constructor.new_sequence(category_name_id_map[sequence_name_class_map[sequence]]) as sequence_constructor:
sequence_constructor.set_name(sequence)
bounding_boxes = np.loadtxt(sequence_bounding_box_annotation_file_path, dtype=np.float, delimiter=',')
images = os.listdir(sequence_image_path)
images = [image for image in images if image.endswith('.jpg')]
if bounding_boxes.ndim == 2:
is_testing_sequence = False
assert len(images) == len(bounding_boxes)
else:
is_testing_sequence = True
assert bounding_boxes.ndim == 1 and bounding_boxes.shape[0] == 4
for i in range(len(images)):
image_file_name = '{}.jpg'.format(i)
image_file_path = os.path.join(sequence_image_path, image_file_name)
with sequence_constructor.new_frame() as frame_constructor:
frame_constructor.set_path(image_file_path)
if is_testing_sequence:
if i == 0:
frame_constructor.set_bounding_box(bounding_boxes.tolist())
else:
frame_constructor.set_bounding_box(bounding_boxes[i].tolist())
|
[
"os.path.dirname",
"numpy.loadtxt",
"os.path.join",
"os.listdir"
] |
[((1623, 1654), 'os.path.join', 'os.path.join', (['root_path', 'subset'], {}), '(root_path, subset)\n', (1635, 1654), False, 'import os\n'), ((1677, 1712), 'os.path.join', 'os.path.join', (['subset_path', '"""frames"""'], {}), "(subset_path, 'frames')\n", (1689, 1712), False, 'import os\n'), ((1733, 1766), 'os.path.join', 'os.path.join', (['subset_path', '"""anno"""'], {}), "(subset_path, 'anno')\n", (1745, 1766), False, 'import os\n'), ((1808, 1829), 'os.listdir', 'os.listdir', (['anno_path'], {}), '(anno_path)\n', (1818, 1829), False, 'import os\n'), ((705, 730), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (720, 730), False, 'import os\n'), ((2320, 2355), 'os.path.join', 'os.path.join', (['frames_path', 'sequence'], {}), '(frames_path, sequence)\n', (2332, 2355), False, 'import os\n'), ((2404, 2457), 'os.path.join', 'os.path.join', (['anno_path', 'bounding_box_annotation_file'], {}), '(anno_path, bounding_box_annotation_file)\n', (2416, 2457), False, 'import os\n'), ((2991, 3080), 'numpy.loadtxt', 'np.loadtxt', (['sequence_bounding_box_annotation_file_path'], {'dtype': 'np.float', 'delimiter': '""","""'}), "(sequence_bounding_box_annotation_file_path, dtype=np.float,\n delimiter=',')\n", (3001, 3080), True, 'import numpy as np\n'), ((3098, 3129), 'os.listdir', 'os.listdir', (['sequence_image_path'], {}), '(sequence_image_path)\n', (3108, 3129), False, 'import os\n'), ((3619, 3669), 'os.path.join', 'os.path.join', (['sequence_image_path', 'image_file_name'], {}), '(sequence_image_path, image_file_name)\n', (3631, 3669), False, 'import os\n')]
|
import pytest
from ..width import nonparam_width, gauss_model, radial_profile
from .testing_utils import generate_filament_model
import numpy as np
import numpy.testing as npt
from scipy import ndimage as nd
def generate_gaussian_profile(pts, width=3.0, amplitude=2.0, background=0.5):
return amplitude * np.exp(- pts ** 2 / (2 * width ** 2)) + background
def test_nonparam():
pts = np.linspace(0, 10, 100)
profile = generate_gaussian_profile(pts)
params, errors, fail = \
nonparam_width(pts, profile, pts, profile, 1.0, 5, 99)
# This shouldn't be failing
assert fail is False
# Check the amplitude
npt.assert_allclose(params[0], 2.5, atol=0.01)
# Width
npt.assert_allclose(params[1], 3.0, atol=0.01)
# Background
npt.assert_allclose(params[2], 0.5, atol=0.02)
def test_gaussian():
pts = np.linspace(0, 10, 100)
profile = generate_gaussian_profile(pts)
params, errors, _, _, fail = \
gauss_model(pts, profile, np.ones_like(pts), 1.0)
# Check the amplitude
npt.assert_allclose(params[0], 2.5, atol=0.01)
# Width
npt.assert_allclose(params[1], 3.0, atol=0.01)
# Background
npt.assert_allclose(params[2], 0.5, atol=0.02)
@pytest.mark.parametrize(('theta'), [(0.0)])
def test_radial_profile_output(theta):
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False, max_distance=20)
params, errors, _, _, fail = \
gauss_model(dist, radprof, np.ones_like(dist), 1.0)
npt.assert_allclose(params[:-1], [1.0, 10.0, 0.0], atol=1e-1)
@pytest.mark.parametrize(('cutoff'), [(10.0), (20.0), (30.0)])
def test_radial_profile_cutoff(cutoff):
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False, max_distance=cutoff)
assert unbin_dist.max() == cutoff
assert dist.max() < cutoff
@pytest.mark.parametrize(('padding'), [(5.0), (10.0), (20.0)])
def test_radial_profile_padding(padding, max_distance=20.0):
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False,
max_distance=max_distance, pad_to_distance=padding)
if padding <= max_distance:
assert unbin_dist.max() == max_distance
assert dist.max() < max_distance
else:
assert unbin_dist.max() == padding
assert dist.max() < padding
@pytest.mark.xfail(raises=ValueError)
def test_radial_profile_fail_pad(padding=30.0, max_distance=20.0):
'''
Cannot pad greater than max_distance
'''
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False,
max_distance=max_distance, pad_to_distance=padding)
def test_radial_profile_autocut():
'''
Test auto-cutting with a secondary offset peak.
'''
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
model += np.roll(model, -30, axis=0).copy()
model += np.roll(model, +30, axis=0).copy()
# all_skeleton += np.roll(skeleton, -30, axis=0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=True,
max_distance=50.0, auto_cut_kwargs={'smooth_size': 3.0,
'pad_cut': 0})
npt.assert_equal(dist.max(), 19.25)
def test_radial_profile_autocut_plateau():
'''
Test auto-cutting with a plateau and a second fall.
'''
model, skeleton = generate_filament_model(shape=160, width=10.0,
amplitude=10.0, background=5.0)
# Create a second drop-off profile 40 pixels from the center on each side.
for i, row in enumerate(model[120:].T):
model[120:, i] = generate_gaussian_profile(np.arange(row.size),
width=5.0,
amplitude=5.0,
background=0.0)
for i, row in enumerate(model[:40].T):
model[:40, i] = generate_gaussian_profile(np.arange(row.size),
width=5.0,
amplitude=5.0,
background=0.0)[::-1]
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=True,
max_distance=60.0, auto_cut_kwargs={'smooth_size': 3.0,
'pad_cut': 0,
'interp_factor': 1})
    # By-eye, the cut should land near the second drop-off (~38)
npt.assert_almost_equal(dist.max(), 38.201, decimal=3)
|
[
"numpy.ones_like",
"numpy.roll",
"numpy.testing.assert_allclose",
"numpy.arange",
"numpy.exp",
"numpy.linspace",
"pytest.mark.parametrize",
"pytest.mark.xfail"
] |
[((1239, 1278), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""theta"""', '[0.0]'], {}), "('theta', [0.0])\n", (1262, 1278), False, 'import pytest\n'), ((1969, 2022), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cutoff"""', '[10.0, 20.0, 30.0]'], {}), "('cutoff', [10.0, 20.0, 30.0])\n", (1992, 2022), False, 'import pytest\n'), ((2629, 2682), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""padding"""', '[5.0, 10.0, 20.0]'], {}), "('padding', [5.0, 10.0, 20.0])\n", (2652, 2682), False, 'import pytest\n'), ((3505, 3541), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'ValueError'}), '(raises=ValueError)\n', (3522, 3541), False, 'import pytest\n'), ((399, 422), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (410, 422), True, 'import numpy as np\n'), ((651, 697), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[0]', '(2.5)'], {'atol': '(0.01)'}), '(params[0], 2.5, atol=0.01)\n', (670, 697), True, 'import numpy.testing as npt\n'), ((714, 760), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[1]', '(3.0)'], {'atol': '(0.01)'}), '(params[1], 3.0, atol=0.01)\n', (733, 760), True, 'import numpy.testing as npt\n'), ((782, 828), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[2]', '(0.5)'], {'atol': '(0.02)'}), '(params[2], 0.5, atol=0.02)\n', (801, 828), True, 'import numpy.testing as npt\n'), ((863, 886), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (874, 886), True, 'import numpy as np\n'), ((1058, 1104), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[0]', '(2.5)'], {'atol': '(0.01)'}), '(params[0], 2.5, atol=0.01)\n', (1077, 1104), True, 'import numpy.testing as npt\n'), ((1121, 1167), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[1]', '(3.0)'], {'atol': '(0.01)'}), '(params[1], 3.0, atol=0.01)\n', (1140, 1167), True, 'import numpy.testing as npt\n'), ((1189, 1235), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[2]', '(0.5)'], {'atol': '(0.02)'}), '(params[2], 0.5, atol=0.02)\n', (1208, 1235), True, 'import numpy.testing as npt\n'), ((1904, 1964), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['params[:-1]', '[1.0, 10.0, 0.0]'], {'atol': '(0.1)'}), '(params[:-1], [1.0, 10.0, 0.0], atol=0.1)\n', (1923, 1964), True, 'import numpy.testing as npt\n'), ((1003, 1020), 'numpy.ones_like', 'np.ones_like', (['pts'], {}), '(pts)\n', (1015, 1020), True, 'import numpy as np\n'), ((1874, 1892), 'numpy.ones_like', 'np.ones_like', (['dist'], {}), '(dist)\n', (1886, 1892), True, 'import numpy as np\n'), ((314, 350), 'numpy.exp', 'np.exp', (['(-pts ** 2 / (2 * width ** 2))'], {}), '(-pts ** 2 / (2 * width ** 2))\n', (320, 350), True, 'import numpy as np\n'), ((4459, 4486), 'numpy.roll', 'np.roll', (['model', '(-30)'], {'axis': '(0)'}), '(model, -30, axis=0)\n', (4466, 4486), True, 'import numpy as np\n'), ((4507, 4534), 'numpy.roll', 'np.roll', (['model', '(+30)'], {'axis': '(0)'}), '(model, +30, axis=0)\n', (4514, 4534), True, 'import numpy as np\n'), ((5559, 5578), 'numpy.arange', 'np.arange', (['row.size'], {}), '(row.size)\n', (5568, 5578), True, 'import numpy as np\n'), ((5870, 5889), 'numpy.arange', 'np.arange', (['row.size'], {}), '(row.size)\n', (5879, 5889), True, 'import numpy as np\n')]
|
# CASA Next Generation Infrastructure
# Copyright (C) 2021 AUI, Inc. Washington DC, USA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#################################
# Helper File
#
# Not exposed in API
#
#################################
import warnings, time, os, psutil, multiprocessing, logging, re
import numpy as np
# from casatools import table as tb
from casatools import ms
from casatools import image as ia
from casatools import quanta as qa
try:
import pandas as pd
import xarray, dask, dask.array, dask.delayed, dask.distributed
except:
print('#### ERROR - dask and/or xarray dependencies are missing ####')
try:
from casacore import tables
except:
print('#### ERROR - python-casacore not found, must be manually installed by user ####')
warnings.filterwarnings('ignore', category=FutureWarning)
# TODO: python-casacore dependency is needed here
# Problems with the table tool:
# - inflates data sizes by reading everything as 64-bit float / 128-bit complex,
# - segfaults when used in dask delayed objects with non-locking reads
# - row access not available, segfaults on column access for some test data
########################################################
# helper function to initialize the processing environment
def initialize_processing(cores=None, memory_limit=None):
# setup dask.distributed based multiprocessing environment
if cores is None: cores = multiprocessing.cpu_count()
if memory_limit is None: memory_limit = str(round(((psutil.virtual_memory().available / (1024 ** 2)) * 0.75) / cores)) + 'MB'
dask.config.set({"distributed.scheduler.allowed-failures": 10})
dask.config.set({"distributed.scheduler.work-stealing": False})
dask.config.set({"distributed.scheduler.unknown-task-duration": '99m'})
dask.config.set({"distributed.worker.memory.pause": False})
dask.config.set({"distributed.worker.memory.terminate": False})
dask.config.set({"distributed.worker.memory.recent-to-old-time": '999s'})
dask.config.set({"distributed.comm.timeouts.connect": '360s'})
dask.config.set({"distributed.comm.timeouts.tcp": '360s'})
dask.config.set({"distributed.nanny.environ.OMP_NUM_THREADS": 1})
dask.config.set({"distributed.nanny.environ.MKL_NUM_THREADS": 1})
cluster = dask.distributed.LocalCluster(n_workers=cores, threads_per_worker=1, processes=True, memory_limit=memory_limit, silence_logs=logging.ERROR)
client = dask.distributed.Client(cluster)
return client
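# Usage sketch (the core count and memory limit below are illustrative, not defaults):
#
#   client = initialize_processing(cores=4, memory_limit='4GB')
#   print(client.dashboard_link)   # dask dashboard for monitoring the workers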
########################################################
# helper for reading time columns to datetime format
# pandas datetimes are referenced against a 0 of 1970-01-01
# CASA's modified julian day reference time is (of course) 1858-11-17
# this requires a correction of 3506716800 seconds which is hardcoded to save time
def convert_time(rawtimes):
correction = 3506716800.0
return pd.to_datetime(np.array(rawtimes) - correction, unit='s').values
# dt = pd.to_datetime(np.atleast_1d(rawtimes) - correction, unit='s').values
# if len(np.array(rawtimes).shape) == 0: dt = dt[0]
# return dt
def revert_time(datetimes):
return (datetimes.astype(float) / 10 ** 9) + 3506716800.0
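# Quick sanity check of the hardcoded correction above (an illustrative helper, not part
# of the original API): the gap between CASA's MJD epoch (1858-11-17) and the unix epoch
# (1970-01-01) is 40587 days * 86400 s/day = 3506716800 s.
def _check_mjd_offset():
    offset = (pd.Timestamp('1970-01-01') - pd.Timestamp('1858-11-17')).total_seconds()
    assert offset == 3506716800.0
    return offset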
#######################################################################################
# return a dictionary of table attributes created from keywords and column descriptions
def extract_table_attributes(infile):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
kwd = tb_tool.getkeywords()
attrs = dict([(kk, kwd[kk]) for kk in kwd if kk not in os.listdir(infile)])
cols = tb_tool.colnames()
column_descriptions = {}
for col in cols:
column_descriptions[col] = tb_tool.getcoldesc(col)
attrs['column_descriptions'] = column_descriptions
attrs['info'] = tb_tool.info()
tb_tool.close()
return attrs
#################################################
# translate numpy dtypes to casacore type strings
def type_converter(npdtype):
cctype = 'bad'
if (npdtype == 'int64') or (npdtype == 'int32'):
cctype = 'int'
elif npdtype == 'bool':
cctype = 'bool'
elif npdtype == 'float32':
cctype = 'float'
elif (npdtype == 'float64') or (npdtype == 'datetime64[ns]'):
cctype = 'double'
elif npdtype == 'complex64':
cctype = 'complex'
elif npdtype == 'complex128':
cctype = 'dcomplex'
elif str(npdtype).startswith('<U'):
cctype = 'string'
return cctype
###############################################################################
# create and initialize new output table
def create_table(outfile, xds, max_rows, infile=None, cols=None, generic=False):
if os.path.isdir(outfile):
os.system('rm -fr %s' % outfile)
# create column descriptions for table description
if cols is None: cols = list(set(list(xds.data_vars) + list(xds.attrs['column_descriptions'].keys())) if 'column_descriptions' in xds.attrs else list(xds.data_vars))
tabledesc = {}
for col in cols:
if ('column_descriptions' in xds.attrs) and (col in xds.attrs['column_descriptions']):
coldesc = xds.attrs['column_descriptions'][col]
else:
coldesc = {'valueType': type_converter(xds[col].dtype)}
if generic or (col == 'UVW'): # will be statically shaped even if not originally
coldesc = {'shape': tuple(np.clip(xds[col].shape[1:], 1, None))}
elif xds[col].ndim > 1: # make variably shaped
coldesc = {'ndim': xds[col].ndim - 1}
coldesc['name'] = col
coldesc['desc'] = col
tabledesc[col] = coldesc
if generic:
tb_tool = tables.table(outfile, tabledesc=tabledesc, nrow=max_rows, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
else:
tb_tool = tables.default_ms(outfile, tabledesc)
tb_tool.addrows(max_rows)
if 'DATA_DESC_ID' in cols: tb_tool.putcol('DATA_DESC_ID', np.zeros((max_rows), dtype='int32') - 1, 0, max_rows)
# write xds attributes to table keywords, skipping certain reserved attributes
existing_keywords = tb_tool.getkeywords()
for attr in xds.attrs:
if attr in ['bad_cols', 'bad_types', 'column_descriptions', 'history', 'subtables', 'info'] + list(existing_keywords.keys()): continue
tb_tool.putkeyword(attr, xds.attrs[attr])
if 'info' in xds.attrs: tb_tool.putinfo(xds.attrs['info'])
# copy subtables and add to main table
if infile:
subtables = [ss.path for ss in os.scandir(infile) if ss.is_dir() and ('SORTED_TABLE' not in ss.path)]
os.system('cp -r %s %s' % (' '.join(subtables), outfile))
for subtable in subtables:
sub_tbl = tables.table(os.path.join(outfile, subtable[subtable.rindex('/') + 1:]), readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
tb_tool.putkeyword(subtable[subtable.rindex('/') + 1:], sub_tbl, makesubrecord=True)
sub_tbl.close()
tb_tool.close()
##################################################################################################
##
## MeasurementSets
##
##################################################################################################
##################################################################
# takes a list of visibility xarray datasets and packages them as a dataset of datasets
# xds_list is a list of tuples (name, xds)
def vis_xds_packager(xds_list):
mxds = xarray.Dataset(attrs=dict(xds_list))
coords = {}
if 'ANTENNA' in mxds.attrs:
coords['antenna_ids'] = mxds.ANTENNA.row.values
coords['antennas'] = xarray.DataArray(mxds.ANTENNA.NAME.values, dims=['antenna_ids'])
if 'FIELD' in mxds.attrs:
coords['field_ids'] = mxds.FIELD.row.values
coords['fields'] = xarray.DataArray(mxds.FIELD.NAME.values, dims=['field_ids'])
if 'FEED' in mxds.attrs:
coords['feed_ids'] = mxds.FEED.FEED_ID.values
if 'OBSERVATION' in mxds.attrs:
coords['observation_ids'] = mxds.OBSERVATION.row.values
coords['observations'] = xarray.DataArray(mxds.OBSERVATION.PROJECT.values, dims=['observation_ids'])
if 'POLARIZATION' in mxds.attrs:
coords['polarization_ids'] = mxds.POLARIZATION.row.values
if 'SOURCE' in mxds.attrs:
coords['source_ids'] = mxds.SOURCE.SOURCE_ID.values
coords['sources'] = xarray.DataArray(mxds.SOURCE.NAME.values, dims=['source_ids'])
if 'SPECTRAL_WINDOW' in mxds.attrs:
coords['spw_ids'] = mxds.SPECTRAL_WINDOW.row.values
if 'STATE' in mxds.attrs:
coords['state_ids'] = mxds.STATE.row.values
mxds = mxds.assign_coords(coords)
return mxds
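# Usage sketch (dataset names are hypothetical): package per-partition visibility datasets
# together with their subtables; each piece becomes an attribute of the master xds and
# global coordinates (antennas, fields, ...) are derived from the subtables when present.
#
#   mxds = vis_xds_packager([('xds0', vis_xds), ('ANTENNA', ant_xds), ('FIELD', field_xds)])
#   mxds.antennas    # coordinate built from ANTENNA.NAME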
########################################################################################
# translates MS selection parameters into corresponding row indices and channel indices
def ms_selection(infile, outfile=None, verbose=False, spw=None, field=None, times=None, baseline=None, scan=None, scanintent=None, array=None, uvdist=None, observation=None, polarization=None):
"""
"""
infile = os.path.expanduser(infile)
mstool = ms()
mstool.open(infile)
# build the selection structure
selection = {}
if (spw is not None) and (len(spw) > 0): selection['spw'] = spw
if (field is not None) and (len(field) > 0): selection['field'] = field
if (scan is not None) and (len(scan) > 0): selection['scan'] = scan
if (baseline is not None) and (len(baseline) > 0): selection['baseline'] = baseline
if (times is not None) and (len(times) > 0): selection['time'] = times
if (scanintent is not None) and (len(scanintent) > 0): selection['scanintent'] = scanintent
if (uvdist is not None) and (len(uvdist) > 0): selection['uvdist'] = uvdist
if (polarization is not None) and (len(polarization) > 0): selection['polarization'] = polarization
if (array is not None) and (len(array) > 0): selection['array'] = array
if (observation is not None) and (len(observation) > 0): selection['observation'] = observation
# build structure of indices per DDI, intersected with selection criteria
ddis, total_rows = [], None
chanmap = {} # dict of ddis to channels
if len(selection) > 0:
if verbose: print('selecting data...')
mstool.msselect(selection)
total_rows = mstool.range('rows')['rows']
selectedindices = mstool.msselectedindices()
ddis, chanranges = selectedindices['dd'], selectedindices['channel']
for ci, cr in enumerate(chanranges):
if ddis[ci] not in chanmap: chanmap[ddis[ci]] = []
chanmap[ddis[ci]] = np.concatenate((chanmap[ddis[ci]], list(range(cr[1], cr[2] + 1, cr[3]))), axis=0).astype(int)
# copy the selected table to the outfile destination if given
if outfile is not None:
outfile = os.path.expanduser(outfile)
if verbose: print('copying selection to output...')
if len(selection) > 0:
mstool.split(outfile, whichcol='all')
else:
os.system('rm -fr %s' % outfile)
os.system('cp -r %s %s' % (infile, outfile))
mstool.reset()
if len(ddis) == 0: # selection didn't reduce ddi count, so get them all
ddis = list(mstool.range('data_desc_id')['data_desc_id'])
# figure out which selected rows are in which ddis
if verbose: print('intersecting DDI row ids...')
rowmap = {} # dict of ddis to (rows, channels)
for ddi in ddis:
mstool.selectinit(datadescid=ddi)
ddirowidxs = mstool.range('rows')['rows']
if total_rows is None:
rowmap[ddi] = (ddirowidxs, chanmap[ddi] if ddi in chanmap else None)
else:
rowmap[ddi] = (np.intersect1d(ddirowidxs, total_rows, assume_unique=True), chanmap[ddi] if ddi in chanmap else None)
mstool.reset()
mstool.close()
if verbose: print('selection complete')
return rowmap
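# Usage sketch (MS path and selection strings are illustrative): returns a dict of
# {ddi: (row indices, channel indices or None)} honoring the selection, optionally
# copying the selected rows to a new MS when outfile is given.
#
#   rowmap = ms_selection('uid___A002.ms', spw='0:0~63', field='2', verbose=True)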
##################################################################
## expand row dimension of xds to (time, baseline)
def expand_xds(xds):
txds = xds.copy()
unique_baselines, baselines = np.unique([txds.ANTENNA1.values, txds.ANTENNA2.values], axis=1, return_inverse=True)
txds['baseline'] = xarray.DataArray(baselines.astype('int32'), dims=['row'])
txds['time'] = txds['TIME'].copy()
try:
txds = txds.set_index(row=['time', 'baseline']).unstack('row').transpose('time', 'baseline', ...)
# unstack makes everything a float, so we need to reset to the proper type
for dv in txds.data_vars:
txds[dv] = txds[dv].astype(xds[dv].dtype)
except:
print("WARNING: Cannot expand rows to (time, baseline), possibly duplicate values in (time, baseline)")
txds = xds.copy()
return txds
##################################################################
## flatten (time, baseline) dimensions of xds back to single row
def flatten_xds(xds):
nan_int = np.array([np.nan]).astype('int32')[0]
txds = xds.copy()
# flatten the time x baseline dimensions of main table
if ('time' in xds.dims) and ('baseline' in xds.dims):
txds = xds.stack({'row': ('time', 'baseline')}).transpose('row', ...)
txds = txds.where((txds.STATE_ID != nan_int) & (txds.FIELD_ID != nan_int), drop=True) #.unify_chunks()
for dv in list(xds.data_vars):
txds[dv] = txds[dv].astype(xds[dv].dtype)
return txds
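# Round-trip sketch: expand_xds pivots the flat row axis to (time, baseline) and
# flatten_xds stacks it back to rows; when there are no duplicate (time, baseline)
# pairs the two calls should recover the original rows (up to ordering).
#
#   txds = expand_xds(xds)      # dims: (time, baseline, ...)
#   rxds = flatten_xds(txds)    # dims: (row, ...)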
##################################################################
# read casacore table format in to memory
##################################################################
def read_generic_table(infile, subtables=False, timecols=None, ignore=None):
"""
read generic casacore table format to xarray dataset loaded in memory
Parameters
----------
infile : str
Input table filename. To read a subtable simply append the subtable folder name under the main table (ie infile = '/path/mytable.tbl/mysubtable')
subtables : bool
Whether or not to include subtables underneath the specified table. If true, an attribute called subtables will be added to the returned xds.
Default False
timecols : list
list of column names to convert to numpy datetime format. Default None leaves times as their original casacore format.
ignore : list
list of column names to ignore and not try to read. Default None reads all columns
Returns
-------
xarray.core.dataset.Dataset
"""
if timecols is None: timecols = []
if ignore is None: ignore = []
infile = os.path.expanduser(infile)
assert os.path.isdir(infile), "invalid input filename to read_generic_table"
attrs = extract_table_attributes(infile)
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
if tb_tool.nrows() == 0:
tb_tool.close()
return xarray.Dataset(attrs=attrs)
dims = ['row'] + ['d%i' % ii for ii in range(1, 20)]
cols = tb_tool.colnames()
ctype = dict([(col, tb_tool.getcell(col, 0)) for col in cols if (col not in ignore) and (tb_tool.iscelldefined(col, 0))])
mvars, mcoords, xds = {}, {}, xarray.Dataset()
tr = tb_tool.row(ignore, exclude=True)[:]
# extract data for each col
for col in ctype.keys():
if tb_tool.coldatatype(col) == 'record': continue # not supported
try:
data = np.stack([rr[col] for rr in tr]) # .astype(ctype[col].dtype)
if isinstance(tr[0][col], dict):
data = np.stack([rr[col]['array'].reshape(rr[col]['shape']) if len(rr[col]['array']) > 0 else np.array(['']) for rr in tr])
except:
# sometimes the columns are variable, so we need to standardize to the largest sizes
if len(np.unique([isinstance(rr[col], dict) for rr in tr])) > 1: continue # can't deal with this case
mshape = np.array(max([np.array(rr[col]).shape for rr in tr]))
try:
data = np.stack([np.pad(rr[col] if len(rr[col]) > 0 else np.array(rr[col]).reshape(np.arange(len(mshape)) * 0),
[(0, ss) for ss in mshape - np.array(rr[col]).shape], 'constant', constant_values=np.array([np.nan]).astype(np.array(ctype[col]).dtype)[0]) for rr in tr])
except:
data = []
if len(data) == 0: continue
if col in timecols: convert_time(data)
if col.endswith('_ID'):
mcoords[col] = xarray.DataArray(data, dims=['d%i_%i' % (di, ds) for di, ds in enumerate(np.array(data).shape)])
else:
mvars[col] = xarray.DataArray(data, dims=['d%i_%i' % (di, ds) for di, ds in enumerate(np.array(data).shape)])
xds = xarray.Dataset(mvars, coords=mcoords)
xds = xds.rename(dict([(dv, dims[di]) for di, dv in enumerate(xds.dims)]))
attrs['bad_cols'] = list(np.setdiff1d([dv for dv in tb_tool.colnames()], [dv for dv in list(xds.data_vars) + list(xds.coords)]))
# if this table has subtables, use a recursive call to store them in subtables attribute
if subtables:
stbl_list = sorted([tt for tt in os.listdir(infile) if os.path.isdir(os.path.join(infile, tt))])
attrs['subtables'] = []
for ii, subtable in enumerate(stbl_list):
sxds = read_generic_table(os.path.join(infile, subtable), subtables=subtables, timecols=timecols, ignore=ignore)
if len(sxds.dims) != 0: attrs['subtables'] += [(subtable, sxds)]
xds = xds.assign_attrs(attrs)
tb_tool.close()
return xds
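# Usage sketch (paths and column names are hypothetical): read subtables fully into
# memory, optionally converting time columns and skipping problematic columns.
#
#   ant_xds = read_generic_table('uid___A002.ms/ANTENNA')
#   src_xds = read_generic_table('uid___A002.ms/SOURCE', timecols=['TIME'], ignore=['SOURCE_MODEL'])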
##################################################################
# Summarize the contents of an MS directory in casacore table format
def describe_ms(infile):
infile = os.path.expanduser(infile) # does nothing if $HOME is unknown
assert os.path.isdir(infile), "invalid input filename to describe_ms"
# figure out characteristics of main table from select subtables (must all be present)
spw_xds = read_generic_table(os.path.join(infile, 'SPECTRAL_WINDOW'))
pol_xds = read_generic_table(os.path.join(infile, 'POLARIZATION'))
ddi_xds = read_generic_table(os.path.join(infile, 'DATA_DESCRIPTION'))
ddis = list(ddi_xds.row.values)
summary = pd.DataFrame([])
spw_ids = ddi_xds.SPECTRAL_WINDOW_ID.values
pol_ids = ddi_xds.POLARIZATION_ID.values
chans = spw_xds.NUM_CHAN.values
pols = pol_xds.NUM_CORR.values
for ddi in ddis:
print('processing ddi %i of %i' % (ddi + 1, len(ddis)), end='\r')
sorted_table = tables.taql('select * from %s where DATA_DESC_ID = %i' % (infile, ddi))
sdf = {'ddi': ddi, 'spw_id': spw_ids[ddi], 'pol_id': pol_ids[ddi], 'rows': sorted_table.nrows(),
'times': len(np.unique(sorted_table.getcol('TIME'))),
'baselines': len(np.unique(np.hstack([sorted_table.getcol(rr)[:, None] for rr in ['ANTENNA1', 'ANTENNA2']]), axis=0)),
'chans': chans[spw_ids[ddi]],
'pols': pols[pol_ids[ddi]]}
sdf['size_MB'] = np.ceil((sdf['times'] * sdf['baselines'] * sdf['chans'] * sdf['pols'] * 9) / 1024 ** 2).astype(int)
summary = pd.concat([summary, pd.DataFrame(sdf, index=[str(ddi)])], axis=0, sort=False)
sorted_table.close()
print(' ' * 50, end='\r')
return summary.set_index('ddi').sort_index()
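# Illustrative usage sketch for describe_ms (hypothetical MS path):
#   summary = describe_ms('~/data/example.ms')
#   print(summary[['spw_id', 'times', 'baselines', 'chans', 'pols', 'size_MB']])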
#######################################################
# helper function extract data chunk for each col
# this is fed to dask.delayed
def read_flat_col_chunk(infile, col, cshape, ridxs, cstart, pstart):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
rgrps = [(rr[0], rr[-1]) for rr in np.split(ridxs, np.where(np.diff(ridxs) > 1)[0] + 1)]
try:
if (len(cshape) == 1) or (col == 'UVW'): # all the scalars and UVW
data = np.concatenate([tb_tool.getcol(col, rr[0], rr[1] - rr[0] + 1) for rr in rgrps], axis=0)
elif len(cshape) == 2: # WEIGHT, SIGMA
data = np.concatenate([tb_tool.getcolslice(col, pstart, pstart + cshape[1] - 1, [], rr[0], rr[1] - rr[0] + 1) for rr in rgrps], axis=0)
elif len(cshape) == 3: # DATA and FLAG
data = np.concatenate([tb_tool.getcolslice(col, (cstart, pstart), (cstart + cshape[1] - 1, pstart + cshape[2] - 1), [], rr[0], rr[1] - rr[0] + 1) for rr in rgrps], axis=0)
except:
print('ERROR reading chunk: ', col, cshape, cstart, pstart)
tb_tool.close()
return data
##############################################################
def read_flat_main_table(infile, ddi, rowidxs=None, chunks=(22000, 512, 2)):
# get row indices relative to full main table
if rowidxs is None:
tb_tool = tables.taql('select rowid() as ROWS from %s where DATA_DESC_ID = %i' % (infile, ddi))
rowidxs = tb_tool.getcol('ROWS')
tb_tool.close()
nrows = len(rowidxs)
if nrows == 0:
return xarray.Dataset()
tb_tool = tables.taql('select * from %s where DATA_DESC_ID = %i' % (infile, ddi))
cols = tb_tool.colnames()
ignore = [col for col in cols if (not tb_tool.iscelldefined(col, 0)) or (tb_tool.coldatatype(col) == 'record')]
cdata = dict([(col, tb_tool.getcol(col, 0, 1)) for col in cols if col not in ignore])
chan_cnt, pol_cnt = [(cdata[cc].shape[1], cdata[cc].shape[2]) for cc in cdata if len(cdata[cc].shape) == 3][0]
mvars, mcoords, bvars, xds = {}, {}, {}, xarray.Dataset()
tb_tool.close()
# loop over row chunks
for rc in range(0, nrows, chunks[0]):
crlen = min(chunks[0], nrows - rc) # chunk row length
rcidxs = rowidxs[rc:rc + chunks[0]]
# loop over each column and create delayed dask arrays
for col in cdata.keys():
if col not in bvars: bvars[col] = []
if len(cdata[col].shape) == 1:
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen,), rcidxs, None, None)
bvars[col] += [dask.array.from_delayed(delayed_array, (crlen,), cdata[col].dtype)]
elif col == 'UVW':
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen, 3), rcidxs, None, None)
bvars[col] += [dask.array.from_delayed(delayed_array, (crlen, 3), cdata[col].dtype)]
elif len(cdata[col].shape) == 2:
pol_list = []
dd = 1 if cdata[col].shape[1] == chan_cnt else 2
for pc in range(0, cdata[col].shape[1], chunks[dd]):
plen = min(chunks[dd], cdata[col].shape[1] - pc)
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen, plen), rcidxs, None, pc)
pol_list += [dask.array.from_delayed(delayed_array, (crlen, plen), cdata[col].dtype)]
bvars[col] += [dask.array.concatenate(pol_list, axis=1)]
elif len(cdata[col].shape) == 3:
chan_list = []
for cc in range(0, chan_cnt, chunks[1]):
clen = min(chunks[1], chan_cnt - cc)
pol_list = []
for pc in range(0, cdata[col].shape[2], chunks[2]):
plen = min(chunks[2], cdata[col].shape[2] - pc)
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen, clen, plen), rcidxs, cc, pc)
pol_list += [dask.array.from_delayed(delayed_array, (crlen, clen, plen), cdata[col].dtype)]
chan_list += [dask.array.concatenate(pol_list, axis=2)]
bvars[col] += [dask.array.concatenate(chan_list, axis=1)]
# now concat all the dask chunks from each time to make the xds
mvars = {}
for kk in bvars.keys():
if kk == 'UVW':
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'uvw_index'])
elif len(bvars[kk][0].shape) == 2 and (bvars[kk][0].shape[-1] == pol_cnt):
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'pol'])
elif len(bvars[kk][0].shape) == 2 and (bvars[kk][0].shape[-1] == chan_cnt):
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'chan'])
else:
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'chan', 'pol'][:len(bvars[kk][0].shape)])
mvars['TIME'] = xarray.DataArray(convert_time(mvars['TIME'].values), dims=['row']).chunk({'row': chunks[0]})
attrs = extract_table_attributes(infile)
attrs['bad_cols'] = ignore
xds = xarray.Dataset(mvars, coords=mcoords).assign_attrs(attrs)
return xds
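# Illustrative usage sketch for read_flat_main_table (hypothetical MS path; assumes the MS has a DATA column):
#   xds0 = read_flat_main_table('~/data/example.ms', ddi=0, chunks=(22000, 512, 2))
#   print(xds0.DATA)   # dask-backed DataArray with dims ('row', 'chan', 'pol')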
#####################################################################
def read_ms(infile, rowmap=None, subtables=False, expand=False, chunks=(22000, 512, 2)):
"""
Read legacy format MS to xarray Visibility Dataset
The MS is partitioned by DDI, which guarantees a fixed data shape per partition. This results in separate xarray
dataset (xds) partitions contained within a main xds (mxds).
Parameters
----------
infile : str
Input MS filename
rowmap : dict
Dictionary of DDI to tuple of (row indices, channel indices). Returned by ms_selection function. Default None ignores selections
subtables : bool
Also read and include subtables along with main table selection. Default False will omit subtables (faster)
expand : bool
Whether or not to return the original flat row structure of the MS (False) or expand the rows to time x baseline dimensions (True).
Expanding the rows allows for easier indexing and parallelization across time and baseline dimensions, at the cost of some conversion
time. Default False
chunks : 3-D tuple of ints
Shape of desired chunking in the form of (row, channel, polarization). Larger values reduce the number of chunks and
speed up the reads at the cost of more memory. Chunk size is the product of the three numbers. Default is (22000, 512, 2)
Returns
-------
xarray.core.dataset.Dataset
Main xarray dataset of datasets for this visibility set
"""
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
# parse filename to use
infile = os.path.expanduser(infile)
assert os.path.isdir(infile), "invalid input filename to read_ms"
# we need the spectral window, polarization, and data description tables for processing the main table
spw_xds = read_generic_table(os.path.join(infile, 'SPECTRAL_WINDOW'))
pol_xds = read_generic_table(os.path.join(infile, 'POLARIZATION'))
ddi_xds = read_generic_table(os.path.join(infile, 'DATA_DESCRIPTION'))
# each DATA_DESC_ID (ddi) is a fixed shape that may differ from others
# form a list of ddis to process, each will be placed it in its own xarray dataset and partition
ddis = np.arange(ddi_xds.row.shape[0]) if rowmap is None else list(rowmap.keys())
xds_list = []
####################################################################
# process each selected DDI from the input MS, assume a fixed shape within the ddi (should always be true)
for ddi in ddis:
rowidxs = None if rowmap is None else rowmap[ddi][0]
chanidxs = None if rowmap is None else rowmap[ddi][1]
if ((rowidxs is not None) and (len(rowidxs) == 0)) or ((chanidxs is not None) and (len(chanidxs) == 0)): continue
xds = read_flat_main_table(infile, ddi, rowidxs=rowidxs, chunks=chunks)
if len(xds.dims) == 0: continue
# grab the channel frequency values from the spw table data and pol idxs from the polarization table, add spw and pol ids
chan = spw_xds.CHAN_FREQ.values[ddi_xds.SPECTRAL_WINDOW_ID.values[ddi], :xds.chan.shape[0]]
pol = pol_xds.CORR_TYPE.values[ddi_xds.POLARIZATION_ID.values[ddi], :xds.pol.shape[0]]
coords = {'chan': chan, 'pol': pol, 'spw_id': [ddi_xds['SPECTRAL_WINDOW_ID'].values[ddi]], 'pol_id': [ddi_xds['POLARIZATION_ID'].values[ddi]]}
xds = xds.assign_coords(coords) # .assign_attrs(attrs)
# filter by channel selection
if (chanidxs is not None) and (len(chanidxs) < len(xds.chan)):
xds = xds.isel(chan=chanidxs)
spw_xds['CHAN_FREQ'][ddi_xds.SPECTRAL_WINDOW_ID.values[ddi], :len(chanidxs)] = spw_xds.CHAN_FREQ[ddi_xds.SPECTRAL_WINDOW_ID.values[ddi], chanidxs]
# expand the row dimension out to (time, baseline)
if expand:
xds = expand_xds(xds)
xds_list += [('xds' + str(ddi), xds)]
# read other subtables
xds_list += [('SPECTRAL_WINDOW', spw_xds), ('POLARIZATION', pol_xds), ('DATA_DESCRIPTION', ddi_xds)]
if subtables:
skip_tables = ['SORTED_TABLE', 'SPECTRAL_WINDOW', 'POLARIZATION', 'DATA_DESCRIPTION']
stbl_list = sorted([tt for tt in os.listdir(infile) if os.path.isdir(os.path.join(infile, tt)) and tt not in skip_tables])
for ii, subtable in enumerate(stbl_list):
sxds = read_generic_table(os.path.join(infile, subtable), subtables=True, timecols=['TIME'], ignore=[])
if len(sxds.dims) != 0: xds_list += [(subtable, sxds)]
# build the master xds to return
mxds = vis_xds_packager(xds_list)
return mxds
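# Illustrative usage sketch for read_ms (hypothetical MS path); DDI partitions are stored in
# mxds.attrs under the 'xds<ddi>' keys built above:
#   mxds = read_ms('~/data/example.ms', subtables=False, expand=False)
#   xds0 = mxds.attrs['xds0']
#   print(xds0.chan.values[:5], xds0.pol.values)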
############################################################################################
## write functions
############################################################################################
###################################
def write_generic_table(xds, outfile, subtable='', cols=None, verbose=False):
"""
Write generic xds contents back to casacore table format on disk
Parameters
----------
xds : xarray.Dataset
Source xarray dataset data
outfile : str
Destination filename (or parent main table if writing subtable)
subtable : str
Name of the subtable being written, triggers special logic to add subtable to parent table. Default '' for normal generic writes
cols : str or list
List of cols to write. Default None writes all columns
verbose : bool
Whether or not to print output progress. Default False
"""
outfile = os.path.expanduser(outfile)
if verbose: print('writing %s...' % os.path.join(outfile, subtable))
if cols is None: cols = list(set(list(xds.data_vars) + [cc for cc in xds.coords if cc not in xds.dims] + (list(xds.attrs['column_descriptions'].keys() if 'column_descriptions' in xds.attrs else []))))
cols = list(np.atleast_1d(cols))
max_rows = xds.row.shape[0] if 'row' in xds.dims else 0
create_table(os.path.join(outfile, subtable), xds, max_rows, infile=None, cols=cols, generic=True)
tb_tool = tables.table(os.path.join(outfile, subtable), readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
try:
for dv in cols:
if (dv not in xds) or (np.prod(xds[dv].shape) == 0): continue
values = xds[dv].values if xds[dv].dtype != 'datetime64[ns]' else revert_time(xds[dv].values)
tb_tool.putcol(dv, values, 0, values.shape[0], 1)
except:
print("ERROR: exception in write generic table - %s, %s, %s, %s" % (os.path.join(outfile,subtable), dv, str(values.shape), tb_tool.nrows()))
# now we have to add this subtable to the main table keywords (assuming a main table already exists)
if len(subtable) > 0:
main_tbl = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
main_tbl.putkeyword(subtable, tb_tool, makesubrecord=True)
main_tbl.done()
tb_tool.close()
# if this table has its own subtables, they need to be written out recursively
if 'subtables' in xds.attrs:
for st in list(xds.attrs['subtables']):
write_generic_table(st[1], os.path.join(outfile, subtable, st[0]), subtable='', verbose=verbose)
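# Illustrative usage sketch for write_generic_table (hypothetical paths; assumes a main table
# already exists at the destination when a subtable name is given):
#   ant_xds = read_generic_table('~/data/example.ms/ANTENNA')
#   write_generic_table(ant_xds, '~/data/copy.ms', subtable='ANTENNA')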
###################################
def write_main_table_slice(xda, outfile, ddi, col, full_shape, starts):
"""
Write an xds row chunk to the corresponding main table slice
"""
# trigger the DAG for this chunk and return values while the table is unlocked
values = xda.compute().values
if xda.dtype == 'datetime64[ns]':
values = revert_time(values)
tb_tool = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
tbs = tables.taql('select * from $tb_tool where DATA_DESC_ID = %i' % ddi)
if tbs.nrows() == 0: # this DDI has not been started yet
tbs = tables.taql('select * from $tb_tool where DATA_DESC_ID = -1')
#try:
if (values.ndim == 1) or (col == 'UVW'): # scalar columns
tbs.putcol(col, values, starts[0], len(values))
else:
if not tbs.iscelldefined(col, starts[0]): tbs.putcell(col, starts[0]+np.arange(len(values)), np.zeros((full_shape)))
tbs.putcolslice(col, values, starts[1:values.ndim], tuple(np.array(starts[1:values.ndim]) + np.array(values.shape[1:])-1), [], starts[0], len(values), 1)
#except:
# print("ERROR: write exception - %s, %s, %s" % (col, str(values.shape), str(starts)))
tbs.close()
tb_tool.close()
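# Illustrative sketch of how write_ms (below) calls this helper: a chunk starting at row 1000,
# channel 0, polarization 0 of DDI 3 would be written roughly as
#   write_main_table_slice(chunk_xda, 'out.ms', ddi=3, col='DATA',
#                          full_shape=(n_chan, n_pol), starts=(1000, 0, 0))
# where chunk_xda, n_chan and n_pol are placeholders for the actual chunk and its trailing shape.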
###################################
def write_ms(mxds, outfile, infile=None, subtables=False, modcols=None, verbose=False, execute=True):
"""
Write ms format xds contents back to casacore table format on disk
Parameters
----------
mxds : xarray.Dataset
Source multi-xarray dataset (originally created by read_ms)
outfile : str
Destination filename
infile : str
Source filename to copy subtables from. Generally faster than reading/writing through mxds via the subtables parameter. Default None
does not copy subtables to output.
subtables : bool
Also write subtables from mxds. Default of False only writes mxds attributes that begin with xdsN to the MS main table.
Setting to True will write all other mxds attributes to subtables of the main table. This is probably going to be SLOW!
Use infile instead whenever possible.
modcols : list
List of strings indicating what column(s) were modified (aka xds data_vars). Different logic can be applied to speed up processing when
a data_var has not been modified from the input. Default None assumes everything has been modified (SLOW)
verbose : bool
Whether or not to print output progress. Since writes will typically execute the DAG, if something is
going to go wrong, it will be here. Default False
execute : bool
Whether or not to actually execute the DAG, or just return it with write steps appended. Default True will execute it
"""
outfile = os.path.expanduser(outfile)
if verbose: print('initializing output...')
start = time.time()
xds_list = [flatten_xds(mxds.attrs[kk]) for kk in mxds.attrs if kk.startswith('xds')]
cols = list(set([dv for dx in xds_list for dv in dx.data_vars]))
if modcols is None: modcols = cols
modcols = list(np.atleast_1d(modcols))
# create an empty main table with enough space for all desired xds partitions
# the first selected xds partition will be passed to create_table to provide a definition of columns and table keywords
# we first need to add in additional keywords for the selected subtables that will be written as well
max_rows = np.sum([dx.row.shape[0] for dx in xds_list])
create_table(outfile, xds_list[0], max_rows=max_rows, infile=infile, cols=cols, generic=False)
# start a list of dask delayed writes to disk (to be executed later)
# the SPECTRAL_WINDOW table is assumed to always be present and will always be written since it is needed for channel frequencies
delayed_writes = [dask.delayed(write_generic_table)(mxds.SPECTRAL_WINDOW, outfile, 'SPECTRAL_WINDOW', cols=None)]
if subtables: # also write the rest of the subtables
for subtable in list(mxds.attrs.keys()):
if subtable.startswith('xds') or (subtable == 'SPECTRAL_WINDOW'): continue
if verbose: print('writing subtable %s...' % subtable)
delayed_writes += [dask.delayed(write_generic_table)(mxds.attrs[subtable], outfile, subtable, cols=None, verbose=verbose)]
for xds in xds_list:
txds = xds.copy().unify_chunks()
ddi = txds.DATA_DESC_ID[:1].values[0]
# serial write entire DDI column first so subsequent delayed writes can find their spot
if verbose: print('setting up DDI %i...' % ddi)
write_main_table_slice(txds['DATA_DESC_ID'], outfile, ddi=-1, col='DATA_DESC_ID', full_shape=None, starts=(0,))
# write each chunk of each modified data_var, triggering the DAG along the way
for col in modcols:
chunks = txds[col].chunks
dims = txds[col].dims
for d0 in range(len(chunks[0])):
d0start = ([0] + list(np.cumsum(chunks[0][:-1])))[d0]
for d1 in range(len(chunks[1]) if len(chunks) > 1 else 1):
d1start = ([0] + list(np.cumsum(chunks[1][:-1])))[d1] if len(chunks) > 1 else 0
for d2 in range(len(chunks[2]) if len(chunks) > 2 else 1):
d2start = ([0] + list(np.cumsum(chunks[2][:-1])))[d2] if len(chunks) > 2 else 0
starts = [d0start, d1start, d2start]
lengths = [chunks[0][d0], (chunks[1][d1] if len(chunks) > 1 else 0), (chunks[2][d2] if len(chunks) > 2 else 0)]
slices = [slice(starts[0], starts[0]+lengths[0]), slice(starts[1], starts[1]+lengths[1]), slice(starts[2], starts[2]+lengths[2])]
txda = txds[col].isel(dict(zip(dims, slices)), missing_dims='ignore')
delayed_writes += [dask.delayed(write_main_table_slice)(txda, outfile, ddi=ddi, col=col, full_shape=txds[col].shape[1:], starts=starts)]
# now write remaining data_vars from the xds that weren't modified
# this can be done faster by collapsing the chunking to maximum size (minimum #) possible
max_chunk_size = np.prod([txds.chunks[kk][0] for kk in txds.chunks if kk in ['row', 'chan', 'pol']])
for col in list(np.setdiff1d(cols, modcols)):
col_chunk_size = np.prod([kk[0] for kk in txds[col].chunks])
col_rows = int(np.ceil(max_chunk_size / col_chunk_size)) * txds[col].chunks[0][0]
for rr in range(0, txds[col].row.shape[0], col_rows):
txda = txds[col].isel(row=slice(rr, rr + col_rows))
delayed_writes += [dask.delayed(write_main_table_slice)(txda, outfile, ddi=ddi, col=col, full_shape=txda.shape[1:], starts=(rr,)+(0,)*(len(txda.shape)-1))]
if execute:
if verbose: print('triggering DAG...')
zs = dask.compute(delayed_writes)
if verbose: print('execution time %0.2f sec' % (time.time() - start))
else:
if verbose: print('returning delayed task list')
return delayed_writes
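# Illustrative round-trip sketch (hypothetical paths); copying subtables from the input MS via
# infile is generally faster than rewriting them from the mxds:
#   mxds = read_ms('~/data/example.ms')
#   write_ms(mxds, '~/data/copy.ms', infile='~/data/example.ms', modcols=['DATA'])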
###########################################################################################################
def visplot(xda, axis=None, overplot=False, drawplot=True, tsize=250):
"""
Plot a preview of Visibility xarray DataArray contents
Parameters
----------
xda : xarray.core.dataarray.DataArray
input DataArray to plot
axis : str or list or xarray.core.dataarray.DataArray
Coordinate(s) within the xarray DataArray, or a second xarray DataArray to plot against. Default None uses range.
All other coordinates will be maxed across dims
overplot : bool
Overlay new plot on to existing window. Default of False makes a new window for each plot
drawplot : bool
Display plot window. Should pretty much always be True unless you want to overlay things
in a Jupyter notebook.
tsize : int
target size of the preview plot (might be smaller). Default is 250 points per axis
Returns
-------
Open matplotlib window
"""
import matplotlib.pyplot as plt
import xarray
import numpy as np
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning) # suppress warnings about nan-slices
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
if overplot:
axes = None
else:
fig, axes = plt.subplots(1, 1)
# fast decimate to roughly the desired size
thinf = np.ceil(np.array(xda.shape) / tsize)
txda = xda.thin(dict([(xda.dims[ii], int(thinf[ii])) for ii in range(len(thinf))]))
# can't plot complex numbers, bools (sometimes), or strings
if (txda.dtype == 'complex128') or (txda.dtype == 'complex64'):
txda = (txda.real ** 2 + txda.imag ** 2) ** 0.5
elif txda.dtype == 'bool':
txda = txda.astype(int)
elif txda.dtype.type is np.int32:
txda = txda.where(txda > np.full((1), np.nan, dtype=np.int32)[0])
elif txda.dtype.type is np.str_:
txda = xarray.DataArray(np.unique(txda, return_inverse=True)[1], dims=txda.dims, coords=txda.coords, name=txda.name)
######################
# decisions based on supplied axis to plot against
# no axis - plot against range of data
# collapse all but first dimension
if axis is None:
collapse = [ii for ii in range(1, txda.ndim)]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda[txda.dims[0]] = np.arange(txda.shape[0])
txda.plot.line(ax=axes, marker='.', linewidth=0.0)
# another xarray DataArray as axis
elif type(axis) == xarray.core.dataarray.DataArray:
txda2 = axis.thin(dict([(xda.dims[ii], int(thinf[ii])) for ii in range(len(thinf))]))
if txda2.dtype.type is np.int32: txda2 = txda2.where(txda2 > np.full((1), np.nan, dtype=np.int32)[0])
xarray.Dataset({txda.name: txda, txda2.name: txda2}).plot.scatter(txda.name, txda2.name)
# single axis
elif len(np.atleast_1d(axis)) == 1:
axis = np.atleast_1d(axis)[0]
# coord ndim is 1
if txda[axis].ndim == 1:
collapse = [ii for ii in range(txda.ndim) if txda.dims[ii] not in txda[axis].dims]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda.plot.line(ax=axes, x=axis, marker='.', linewidth=0.0)
# coord ndim is 2
elif txda[axis].ndim == 2:
collapse = [ii for ii in range(txda.ndim) if txda.dims[ii] not in txda[axis].dims]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda.plot.pcolormesh(ax=axes, x=axis, y=txda.dims[0])
# two axes
elif len(axis) == 2:
collapse = [ii for ii in range(txda.ndim) if txda.dims[ii] not in (txda[axis[0]].dims + txda[axis[1]].dims)]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda.plot.pcolormesh(ax=axes, x=axis[0], y=axis[1])
plt.title(txda.name)
if drawplot:
plt.show()
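# Illustrative usage sketch for visplot (assumes mxds was produced by read_ms above):
#   visplot(mxds.attrs['xds0'].DATA)                # amplitude vs flat row index
#   visplot(mxds.attrs['xds0'].DATA, axis='chan')   # max over other dims, plotted vs channel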
##################################################################################################
##
## Images
##
##################################################################################################
############################################
def read_image_chunk(infile, shapes, starts):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
data = tb_tool.getcellslice(tb_tool.colnames()[0], 0, starts, tuple(np.array(starts) + np.array(shapes) - 1))
tb_tool.close()
return data
############################################
def read_image_array(infile, dimorder, chunks):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
cshape = eval(tb_tool.getcolshapestring(tb_tool.colnames()[0])[0])
cdata = tb_tool.getcellslice(tb_tool.colnames()[0], 0, tuple(np.repeat(0, len(cshape))), tuple(np.repeat(0, len(cshape))))
tb_tool.close()
# expand the actual data shape to the full 5 possible dims
full_shape = cshape + [1 for rr in range(5) if rr >= len(cshape)]
full_chunks = chunks[::-1] + [1 for rr in range(5) if rr >= len(chunks)]
d0slices = []
for d0 in range(0, full_shape[0], full_chunks[0]):
d0len = min(full_chunks[0], full_shape[0] - d0)
d1slices = []
for d1 in range(0, full_shape[1], full_chunks[1]):
d1len = min(full_chunks[1], full_shape[1] - d1)
d2slices = []
for d2 in range(0, full_shape[2], full_chunks[2]):
d2len = min(full_chunks[2], full_shape[2] - d2)
d3slices = []
for d3 in range(0, full_shape[3], full_chunks[3]):
d3len = min(full_chunks[3], full_shape[3] - d3)
d4slices = []
for d4 in range(0, full_shape[4], full_chunks[4]):
d4len = min(full_chunks[4], full_shape[4] - d4)
shapes = tuple([d0len, d1len, d2len, d3len, d4len][:len(cshape)])
starts = tuple([d0, d1, d2, d3, d4][:len(cshape)])
delayed_array = dask.delayed(read_image_chunk)(infile, shapes, starts)
d4slices += [dask.array.from_delayed(delayed_array, shapes, cdata.dtype)]
d3slices += [dask.array.concatenate(d4slices, axis=4)] if len(cshape) > 4 else d4slices
d2slices += [dask.array.concatenate(d3slices, axis=3)] if len(cshape) > 3 else d3slices
d1slices += [dask.array.concatenate(d2slices, axis=2)] if len(cshape) > 2 else d2slices
d0slices += [dask.array.concatenate(d1slices, axis=1)] if len(cshape) > 1 else d1slices
xda = xarray.DataArray(dask.array.concatenate(d0slices, axis=0), dims=dimorder[::-1]).transpose()
return xda
############################################
def read_image(infile, masks=True, history=True, chunks=(1000, 1000, 1, 4), verbose=False):
"""
Read casacore format Image to xarray Image Dataset format
Parameters
----------
infile : str
Input image filename (.image or .fits format)
masks : bool
Also read image masks as additional image data_vars. Default is True
history : bool
Also read history log table. Default is True
chunks: 4-D tuple of ints
Shape of desired chunking in the form of (l, m, chan, pol). Default is (1000, 1000, 1, 4)
Note: chunk size is the product of the four numbers (up to the actual size of the dimension)
Returns
-------
xarray.core.dataset.Dataset
new xarray Datasets of Image data contents
"""
infile = os.path.expanduser(infile)
IA = ia()
QA = qa()
rc = IA.open(infile)
csys = IA.coordsys()
ims = IA.shape() # image shape
attrs = extract_table_attributes(infile)
if verbose: print('opening %s with shape %s' % (infile, str(ims)))
# construct a mapping of dimension names to image indices
dimmap = [(coord[:-1], attrs['coords']['pixelmap%s' % coord[-1]][0]) for coord in attrs['coords'] if coord[:-1] in ['direction', 'stokes', 'spectral', 'linear']]
dimmap = dict([(rr[0].replace('stokes','pol').replace('spectral','chan').replace('linear','component'), rr[1]) for rr in dimmap])
if 'direction' in dimmap: dimmap['l'] = dimmap.pop('direction')
if 'l' in dimmap: dimmap['m'] = dimmap['l'] + 1
# compute world coordinates for spherical dimensions
sphr_dims = [dimmap['l'], dimmap['m']] if 'l' in dimmap else []
coord_idxs = np.mgrid[[range(ims[dd]) if dd in sphr_dims else range(1) for dd in range(len(ims))]].reshape(len(ims), -1)
coord_world = csys.toworldmany(coord_idxs.astype(float))['numeric'][sphr_dims].reshape((-1,) + tuple(ims[sphr_dims]))
coords = dict([(['right_ascension','declination'][dd], (['l', 'm'], coord_world[di])) for di, dd in enumerate(sphr_dims)])
# compute world coordinates for cartesian dimensions
cart_names, cart_dims = list(zip(*[(kk, dimmap[kk]) for kk in dimmap if kk != 'direction']))
for cd in range(len(cart_dims)):
coord_idxs = np.mgrid[[range(ims[dd]) if dd == cart_dims[cd] else range(1) for dd in range(len(ims))]].reshape(len(ims), -1)
coord_world = csys.toworldmany(coord_idxs.astype(float))['numeric'][cart_dims[cd]].reshape(-1,)
coords.update({cart_names[cd]: coord_world})
# assign values to l, m coords based on incr and refpix in metadata
if len(sphr_dims) > 0:
sphr_coord = [coord for coord in attrs['coords'] if coord.startswith('direction')][0]
coords['l'] = np.arange(-attrs['coords'][sphr_coord]['crpix'][0], ims[0]-attrs['coords'][sphr_coord]['crpix'][0]) * attrs['coords'][sphr_coord]['cdelt'][0]
coords['m'] = np.arange(-attrs['coords'][sphr_coord]['crpix'][1], ims[1]-attrs['coords'][sphr_coord]['crpix'][1]) * attrs['coords'][sphr_coord]['cdelt'][1]
rc = csys.done()
rc = IA.close()
# chunks are in (l, m, chan, pol) order, rearrange to match the actual data order
dimorder = [dd for rr in range(5) for dd in dimmap if (dimmap[dd] is not None) and (dimmap[dd] == rr)]
chunks = list(np.array(chunks + (9999999,))[[['l', 'm', 'chan', 'pol', 'component'].index(rr) for rr in dimorder]])
# wrap the actual image data reads in dask delayed calls returned as an xarray dataarray
xds = xarray.Dataset(coords=coords)
xda = read_image_array(infile, dimorder, chunks)
xda = xda.rename('IMAGE')
xds[xda.name] = xda
# add mask(s) alongside image data
if masks and 'masks' in attrs:
for ii, mask in enumerate(list(attrs['masks'].keys())):
if not os.path.isdir(os.path.join(infile, mask)): continue
xda = read_image_array(os.path.join(infile, mask), dimorder, chunks)
xda = xda.rename('IMAGE_%s' % mask)
xds[xda.name] = xda
attrs[mask+'_column_descriptions'] = extract_table_attributes(os.path.join(infile, mask))['column_descriptions']
# if also loading history, put it as another xds in the attrs
if history and os.path.isdir(os.path.join(infile, 'logtable')):
attrs['history'] = read_generic_table(os.path.join(infile, 'logtable'))
if 'coords' in attrs: attrs['icoords'] = attrs.pop('coords') # rename coord table keyword to avoid confusion with xds coords
xds = xds.assign_attrs(attrs)
return xds
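# Illustrative usage sketch for read_image (hypothetical image path):
#   img_xds = read_image('~/data/example.image', chunks=(500, 500, 1, 1))
#   print(img_xds.IMAGE.shape, list(img_xds.coords))   # includes l, m and world coordinates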
############################################
def write_image_slice(xda, outfile, col, starts):
"""
Write image xda chunk to the corresponding image table slice
"""
# trigger the DAG for this chunk and return values while the table is unlocked
values = xda.compute().values
tb_tool = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
tb_tool.putcellslice(col, 0, values, starts, tuple(np.array(starts) + np.array(values.shape) - 1))
tb_tool.close()
############################################
def write_image(xds, outfile, portion='IMAGE', masks=True, history=True, verbose=False, execute=True):
"""
Write xarray Image Dataset contents back to casacore image format on disk
Parameters
----------
xds : xarray.Dataset
Image xarray dataset to write
outfile : str
Output image filename (.image format)
portion : str
Name of the data_var in the xds that corresponds to the image data. Default 'IMAGE'
masks : bool
Also write the masks to the output. Default True
history : bool
Also write the history log file to the output. Default True
verbose : bool
Whether or not to print output progress. Since writes will typically execute the DAG, if something is
going to go wrong, it will be here. Default False
execute : bool
Whether or not to actually execute the DAG, or just return it with write steps appended. Default True will execute it
"""
outfile = os.path.expanduser(outfile)
start = time.time()
xds = xds.copy()
# initialize list of column names and xda's to be written. The column names are not the same as the data_var names
cols = [list(xds.attrs['column_descriptions'].keys())[0] if 'column_descriptions' in xds.attrs else list(xds.data_vars.keys())[0]]
xda_list = [xds[portion]]
subtable_list = ['']
if 'icoords' in xds.attrs: xds.attrs['coords'] = xds.attrs.pop('icoords') # rename back for proper table keyword creation
# initialize output table (must do it this way since create_table mysteriously throws image tool errors when subsequently opened)
IA = ia()
imtype = 'd' if xds[portion].dtype == 'float64' else 'c' if xds[portion].dtype == 'complex64' else 'cd' if xds[portion].dtype == 'complex128' else 'f'
IA.fromshape(outfile, list(xds[portion].shape), csys=xds.attrs['coords'], overwrite=True, log=False, type=imtype)
IA.close()
# write image history to logfile subtable (not delayed)
if history and ('history' in xds.attrs):
if verbose: print('writing history log...')
write_generic_table(xds.history, outfile, subtable='logtable')
# add masks to the list of xda's to be written
if masks and ('masks' in xds.attrs):
for mask in xds.masks:
if verbose: print('writing %s...' % mask)
mask_var = '%s_%s' % (portion, mask)
if (mask + '_column_descriptions' not in xds.attrs) or (mask_var not in xds): continue
cols += [list(xds.attrs[mask+'_column_descriptions'].keys())[0]]
xda_list += [xds[mask_var]]
subtable_list += [mask]
xds.attrs['masks'][mask]['mask'] = 'Table: %s' % os.path.abspath(os.path.join(outfile, mask))
xds.attrs[mask+'_column_descriptions'][cols[-1]]['shape'] = list(xds[mask_var].transpose().shape)
txds = xarray.Dataset({mask_var: xds[mask_var]}).assign_attrs({'column_descriptions': xds.attrs[mask+'_column_descriptions']})
create_table(os.path.join(outfile, mask), txds, max_rows=1, infile=None, cols=[cols[-1]], generic=True)
# write xds attribute to output table keywords
tb_tool = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
for attr in xds.attrs:
if (attr in ['bad_cols', 'bad_types', 'column_descriptions', 'history', 'subtables', 'info']) or attr.endswith('column_descriptions'): continue
tb_tool.putkeyword(attr, xds.attrs[attr])
if 'info' in xds.attrs: tb_tool.putinfo(xds.attrs['info'])
tb_tool.close()
# write each xda transposed to disk
chunks = [rr[0] for rr in xds[portion].chunks][::-1]
cshapes = xds[portion].shape[::-1]
dims = xds[portion].dims[::-1]
delayed_writes = []
for ii, xda in enumerate(xda_list):
for d0 in range(0, cshapes[0], chunks[0]):
d0len = min(chunks[0], cshapes[0] - d0)
for d1 in range(0, cshapes[1] if len(cshapes) > 1 else 1, chunks[1] if len(chunks) > 1 else 1):
d1len = min(chunks[1], cshapes[1] - d1) if len(cshapes) > 1 else 0
for d2 in range(0, cshapes[2] if len(cshapes) > 2 else 1, chunks[2] if len(chunks) > 2 else 1):
d2len = min(chunks[2], cshapes[2] - d2) if len(cshapes) > 2 else 0
for d3 in range(0, cshapes[3] if len(cshapes) > 3 else 1, chunks[3] if len(chunks) > 3 else 1):
d3len = min(chunks[3], cshapes[3] - d3) if len(cshapes) > 3 else 0
for d4 in range(0, cshapes[4] if len(cshapes) > 4 else 1, chunks[4] if len(chunks) > 4 else 1):
d4len = min(chunks[4], cshapes[4] - d4) if len(cshapes) > 4 else 0
starts = [d0, d1, d2, d3, d4][:len(cshapes)]
slices = [slice(d0, d0+d0len), slice(d1, d1+d1len), slice(d2, d2+d2len), slice(d3, d3+d3len), slice(d4, d4+d4len)]
txda = xda.transpose().isel(dict(zip(dims, slices)), missing_dims='ignore')
delayed_writes += [dask.delayed(write_image_slice)(txda, os.path.join(outfile, subtable_list[ii]), col=cols[ii], starts=starts)]
if execute:
if verbose: print('triggering DAG...')
zs = dask.compute(delayed_writes)
if verbose: print('execution time %0.2f sec' % (time.time() - start))
else:
if verbose: print('returning delayed task list')
return delayed_writes
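# Illustrative round-trip sketch (hypothetical paths):
#   img_xds = read_image('~/data/example.image')
#   write_image(img_xds, '~/data/copy.image', portion='IMAGE', masks=True)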
|
[
"matplotlib.pyplot.title",
"psutil.virtual_memory",
"numpy.sum",
"casatools.quanta",
"numpy.clip",
"numpy.arange",
"os.path.join",
"numpy.unique",
"multiprocessing.cpu_count",
"pandas.DataFrame",
"os.path.expanduser",
"dask.distributed.Client",
"numpy.prod",
"casacore.tables.default_ms",
"warnings.simplefilter",
"numpy.full",
"casacore.tables.table",
"dask.distributed.LocalCluster",
"dask.config.set",
"numpy.cumsum",
"dask.compute",
"numpy.intersect1d",
"matplotlib.pyplot.subplots",
"dask.array.from_delayed",
"numpy.stack",
"matplotlib.pyplot.show",
"numpy.ceil",
"os.system",
"xarray.Dataset",
"os.listdir",
"os.scandir",
"casatools.image",
"dask.delayed",
"warnings.filterwarnings",
"os.path.isdir",
"numpy.setdiff1d",
"pandas.plotting.register_matplotlib_converters",
"numpy.zeros",
"time.time",
"casacore.tables.taql",
"numpy.diff",
"numpy.array",
"xarray.DataArray",
"dask.array.concatenate",
"numpy.atleast_1d",
"casatools.ms"
] |
[((1379, 1436), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (1402, 1436), False, 'import warnings\n'), ((2192, 2255), 'dask.config.set', 'dask.config.set', (["{'distributed.scheduler.allowed-failures': 10}"], {}), "({'distributed.scheduler.allowed-failures': 10})\n", (2207, 2255), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2260, 2323), 'dask.config.set', 'dask.config.set', (["{'distributed.scheduler.work-stealing': False}"], {}), "({'distributed.scheduler.work-stealing': False})\n", (2275, 2323), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2328, 2399), 'dask.config.set', 'dask.config.set', (["{'distributed.scheduler.unknown-task-duration': '99m'}"], {}), "({'distributed.scheduler.unknown-task-duration': '99m'})\n", (2343, 2399), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2404, 2463), 'dask.config.set', 'dask.config.set', (["{'distributed.worker.memory.pause': False}"], {}), "({'distributed.worker.memory.pause': False})\n", (2419, 2463), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2468, 2531), 'dask.config.set', 'dask.config.set', (["{'distributed.worker.memory.terminate': False}"], {}), "({'distributed.worker.memory.terminate': False})\n", (2483, 2531), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2536, 2609), 'dask.config.set', 'dask.config.set', (["{'distributed.worker.memory.recent-to-old-time': '999s'}"], {}), "({'distributed.worker.memory.recent-to-old-time': '999s'})\n", (2551, 2609), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2614, 2676), 'dask.config.set', 'dask.config.set', (["{'distributed.comm.timeouts.connect': '360s'}"], {}), "({'distributed.comm.timeouts.connect': '360s'})\n", (2629, 2676), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2681, 2739), 'dask.config.set', 'dask.config.set', (["{'distributed.comm.timeouts.tcp': '360s'}"], {}), "({'distributed.comm.timeouts.tcp': '360s'})\n", (2696, 2739), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2744, 2809), 'dask.config.set', 'dask.config.set', (["{'distributed.nanny.environ.OMP_NUM_THREADS': 1}"], {}), "({'distributed.nanny.environ.OMP_NUM_THREADS': 1})\n", (2759, 2809), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2814, 2879), 'dask.config.set', 'dask.config.set', (["{'distributed.nanny.environ.MKL_NUM_THREADS': 1}"], {}), "({'distributed.nanny.environ.MKL_NUM_THREADS': 1})\n", (2829, 2879), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2894, 3037), 'dask.distributed.LocalCluster', 'dask.distributed.LocalCluster', ([], {'n_workers': 'cores', 'threads_per_worker': '(1)', 'processes': '(True)', 'memory_limit': 'memory_limit', 'silence_logs': 'logging.ERROR'}), '(n_workers=cores, threads_per_worker=1,\n processes=True, memory_limit=memory_limit, silence_logs=logging.ERROR)\n', (2923, 3037), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((3047, 3079), 'dask.distributed.Client', 'dask.distributed.Client', (['cluster'], {}), '(cluster)\n', (3070, 3079), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((4031, 4119), 'casacore.tables.table', 'tables.table', (['infile'], {'readonly': '(True)', 'lockoptions': "{'option': 'usernoread'}", 
'ack': '(False)'}), "(infile, readonly=True, lockoptions={'option': 'usernoread'},\n ack=False)\n", (4043, 4119), False, 'from casacore import tables\n'), ((5338, 5360), 'os.path.isdir', 'os.path.isdir', (['outfile'], {}), '(outfile)\n', (5351, 5360), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((9760, 9786), 'os.path.expanduser', 'os.path.expanduser', (['infile'], {}), '(infile)\n', (9778, 9786), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((9800, 9804), 'casatools.ms', 'ms', ([], {}), '()\n', (9802, 9804), False, 'from casatools import ms\n'), ((12791, 12879), 'numpy.unique', 'np.unique', (['[txds.ANTENNA1.values, txds.ANTENNA2.values]'], {'axis': '(1)', 'return_inverse': '(True)'}), '([txds.ANTENNA1.values, txds.ANTENNA2.values], axis=1,\n return_inverse=True)\n', (12800, 12879), True, 'import numpy as np\n'), ((15237, 15263), 'os.path.expanduser', 'os.path.expanduser', (['infile'], {}), '(infile)\n', (15255, 15263), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((15275, 15296), 'os.path.isdir', 'os.path.isdir', (['infile'], {}), '(infile)\n', (15288, 15296), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((15405, 15493), 'casacore.tables.table', 'tables.table', (['infile'], {'readonly': '(True)', 'lockoptions': "{'option': 'usernoread'}", 'ack': '(False)'}), "(infile, readonly=True, lockoptions={'option': 'usernoread'},\n ack=False)\n", (15417, 15493), False, 'from casacore import tables\n'), ((17391, 17428), 'xarray.Dataset', 'xarray.Dataset', (['mvars'], {'coords': 'mcoords'}), '(mvars, coords=mcoords)\n', (17405, 17428), False, 'import xarray\n'), ((18389, 18415), 'os.path.expanduser', 'os.path.expanduser', (['infile'], {}), '(infile)\n', (18407, 18415), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((18463, 18484), 'os.path.isdir', 'os.path.isdir', (['infile'], {}), '(infile)\n', (18476, 18484), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((18889, 18905), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (18901, 18905), True, 'import pandas as pd\n'), ((20208, 20296), 'casacore.tables.table', 'tables.table', (['infile'], {'readonly': '(True)', 'lockoptions': "{'option': 'usernoread'}", 'ack': '(False)'}), "(infile, readonly=True, lockoptions={'option': 'usernoread'},\n ack=False)\n", (20220, 20296), False, 'from casacore import tables\n'), ((21600, 21671), 'casacore.tables.taql', 'tables.taql', (["('select * from %s where DATA_DESC_ID = %i' % (infile, ddi))"], {}), "('select * from %s where DATA_DESC_ID = %i' % (infile, ddi))\n", (21611, 21671), False, 'from casacore import tables\n'), ((26943, 27000), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (26966, 27000), False, 'import warnings\n'), ((27043, 27069), 'os.path.expanduser', 'os.path.expanduser', (['infile'], {}), '(infile)\n', (27061, 27069), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((27081, 27102), 'os.path.isdir', 'os.path.isdir', (['infile'], {}), '(infile)\n', (27094, 27102), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((30880, 30907), 'os.path.expanduser', 'os.path.expanduser', (['outfile'], {}), '(outfile)\n', (30898, 30907), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((32977, 33070), 'casacore.tables.table', 'tables.table', 
(['outfile'], {'readonly': '(False)', 'lockoptions': "{'option': 'permanentwait'}", 'ack': '(False)'}), "(outfile, readonly=False, lockoptions={'option':\n 'permanentwait'}, ack=False)\n", (32989, 33070), False, 'from casacore import tables\n'), ((33077, 33144), 'casacore.tables.taql', 'tables.taql', (["('select * from $tb_tool where DATA_DESC_ID = %i' % ddi)"], {}), "('select * from $tb_tool where DATA_DESC_ID = %i' % ddi)\n", (33088, 33144), False, 'from casacore import tables\n'), ((35402, 35429), 'os.path.expanduser', 'os.path.expanduser', (['outfile'], {}), '(outfile)\n', (35420, 35429), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((35490, 35501), 'time.time', 'time.time', ([], {}), '()\n', (35499, 35501), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((36072, 36116), 'numpy.sum', 'np.sum', (['[dx.row.shape[0] for dx in xds_list]'], {}), '([dx.row.shape[0] for dx in xds_list])\n', (36078, 36116), True, 'import numpy as np\n'), ((40811, 40867), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (40832, 40867), False, 'import warnings\n'), ((40973, 41005), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (41003, 41005), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((43575, 43595), 'matplotlib.pyplot.title', 'plt.title', (['txda.name'], {}), '(txda.name)\n', (43584, 43595), True, 'import matplotlib.pyplot as plt\n'), ((43958, 44046), 'casacore.tables.table', 'tables.table', (['infile'], {'readonly': '(True)', 'lockoptions': "{'option': 'usernoread'}", 'ack': '(False)'}), "(infile, readonly=True, lockoptions={'option': 'usernoread'},\n ack=False)\n", (43970, 44046), False, 'from casacore import tables\n'), ((44302, 44390), 'casacore.tables.table', 'tables.table', (['infile'], {'readonly': '(True)', 'lockoptions': "{'option': 'usernoread'}", 'ack': '(False)'}), "(infile, readonly=True, lockoptions={'option': 'usernoread'},\n ack=False)\n", (44314, 44390), False, 'from casacore import tables\n'), ((47305, 47331), 'os.path.expanduser', 'os.path.expanduser', (['infile'], {}), '(infile)\n', (47323, 47331), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((47342, 47346), 'casatools.image', 'ia', ([], {}), '()\n', (47344, 47346), True, 'from casatools import image as ia\n'), ((47356, 47360), 'casatools.quanta', 'qa', ([], {}), '()\n', (47358, 47360), True, 'from casatools import quanta as qa\n'), ((50010, 50039), 'xarray.Dataset', 'xarray.Dataset', ([], {'coords': 'coords'}), '(coords=coords)\n', (50024, 50039), False, 'import xarray\n'), ((51350, 51443), 'casacore.tables.table', 'tables.table', (['outfile'], {'readonly': '(False)', 'lockoptions': "{'option': 'permanentwait'}", 'ack': '(False)'}), "(outfile, readonly=False, lockoptions={'option':\n 'permanentwait'}, ack=False)\n", (51362, 51443), False, 'from casacore import tables\n'), ((52648, 52675), 'os.path.expanduser', 'os.path.expanduser', (['outfile'], {}), '(outfile)\n', (52666, 52675), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((52688, 52699), 'time.time', 'time.time', ([], {}), '()\n', (52697, 52699), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((53302, 53306), 'casatools.image', 'ia', ([], {}), '()\n', (53304, 53306), True, 'from casatools import image as ia\n'), ((54840, 54933), 'casacore.tables.table', 
'tables.table', (['outfile'], {'readonly': '(False)', 'lockoptions': "{'option': 'permanentwait'}", 'ack': '(False)'}), "(outfile, readonly=False, lockoptions={'option':\n 'permanentwait'}, ack=False)\n", (54852, 54933), False, 'from casacore import tables\n'), ((2030, 2057), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2055, 2057), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((5370, 5402), 'os.system', 'os.system', (["('rm -fr %s' % outfile)"], {}), "('rm -fr %s' % outfile)\n", (5379, 5402), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((6331, 6460), 'casacore.tables.table', 'tables.table', (['outfile'], {'tabledesc': 'tabledesc', 'nrow': 'max_rows', 'readonly': '(False)', 'lockoptions': "{'option': 'permanentwait'}", 'ack': '(False)'}), "(outfile, tabledesc=tabledesc, nrow=max_rows, readonly=False,\n lockoptions={'option': 'permanentwait'}, ack=False)\n", (6343, 6460), False, 'from casacore import tables\n'), ((6485, 6522), 'casacore.tables.default_ms', 'tables.default_ms', (['outfile', 'tabledesc'], {}), '(outfile, tabledesc)\n', (6502, 6522), False, 'from casacore import tables\n'), ((8308, 8372), 'xarray.DataArray', 'xarray.DataArray', (['mxds.ANTENNA.NAME.values'], {'dims': "['antenna_ids']"}), "(mxds.ANTENNA.NAME.values, dims=['antenna_ids'])\n", (8324, 8372), False, 'import xarray\n'), ((8482, 8542), 'xarray.DataArray', 'xarray.DataArray', (['mxds.FIELD.NAME.values'], {'dims': "['field_ids']"}), "(mxds.FIELD.NAME.values, dims=['field_ids'])\n", (8498, 8542), False, 'import xarray\n'), ((8759, 8834), 'xarray.DataArray', 'xarray.DataArray', (['mxds.OBSERVATION.PROJECT.values'], {'dims': "['observation_ids']"}), "(mxds.OBSERVATION.PROJECT.values, dims=['observation_ids'])\n", (8775, 8834), False, 'import xarray\n'), ((9057, 9119), 'xarray.DataArray', 'xarray.DataArray', (['mxds.SOURCE.NAME.values'], {'dims': "['source_ids']"}), "(mxds.SOURCE.NAME.values, dims=['source_ids'])\n", (9073, 9119), False, 'import xarray\n'), ((11512, 11539), 'os.path.expanduser', 'os.path.expanduser', (['outfile'], {}), '(outfile)\n', (11530, 11539), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((15558, 15585), 'xarray.Dataset', 'xarray.Dataset', ([], {'attrs': 'attrs'}), '(attrs=attrs)\n', (15572, 15585), False, 'import xarray\n'), ((15834, 15850), 'xarray.Dataset', 'xarray.Dataset', ([], {}), '()\n', (15848, 15850), False, 'import xarray\n'), ((18651, 18690), 'os.path.join', 'os.path.join', (['infile', '"""SPECTRAL_WINDOW"""'], {}), "(infile, 'SPECTRAL_WINDOW')\n", (18663, 18690), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((18725, 18761), 'os.path.join', 'os.path.join', (['infile', '"""POLARIZATION"""'], {}), "(infile, 'POLARIZATION')\n", (18737, 18761), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((18796, 18836), 'os.path.join', 'os.path.join', (['infile', '"""DATA_DESCRIPTION"""'], {}), "(infile, 'DATA_DESCRIPTION')\n", (18808, 18836), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((19189, 19260), 'casacore.tables.taql', 'tables.taql', (["('select * from %s where DATA_DESC_ID = %i' % (infile, ddi))"], {}), "('select * from %s where DATA_DESC_ID = %i' % (infile, ddi))\n", (19200, 19260), False, 'from casacore import tables\n'), ((21357, 21447), 'casacore.tables.taql', 'tables.taql', (["('select rowid() as ROWS from %s where DATA_DESC_ID = %i' % (infile, ddi))"], {}), "('select 
rowid() as ROWS from %s where DATA_DESC_ID = %i' % (\n infile, ddi))\n", (21368, 21447), False, 'from casacore import tables\n'), ((21568, 21584), 'xarray.Dataset', 'xarray.Dataset', ([], {}), '()\n', (21582, 21584), False, 'import xarray\n'), ((22068, 22084), 'xarray.Dataset', 'xarray.Dataset', ([], {}), '()\n', (22082, 22084), False, 'import xarray\n'), ((27281, 27320), 'os.path.join', 'os.path.join', (['infile', '"""SPECTRAL_WINDOW"""'], {}), "(infile, 'SPECTRAL_WINDOW')\n", (27293, 27320), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((27355, 27391), 'os.path.join', 'os.path.join', (['infile', '"""POLARIZATION"""'], {}), "(infile, 'POLARIZATION')\n", (27367, 27391), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((27426, 27466), 'os.path.join', 'os.path.join', (['infile', '"""DATA_DESCRIPTION"""'], {}), "(infile, 'DATA_DESCRIPTION')\n", (27438, 27466), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((27656, 27687), 'numpy.arange', 'np.arange', (['ddi_xds.row.shape[0]'], {}), '(ddi_xds.row.shape[0])\n', (27665, 27687), True, 'import numpy as np\n'), ((31202, 31221), 'numpy.atleast_1d', 'np.atleast_1d', (['cols'], {}), '(cols)\n', (31215, 31221), True, 'import numpy as np\n'), ((31301, 31332), 'os.path.join', 'os.path.join', (['outfile', 'subtable'], {}), '(outfile, subtable)\n', (31313, 31332), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((31415, 31446), 'os.path.join', 'os.path.join', (['outfile', 'subtable'], {}), '(outfile, subtable)\n', (31427, 31446), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((32103, 32196), 'casacore.tables.table', 'tables.table', (['outfile'], {'readonly': '(False)', 'lockoptions': "{'option': 'permanentwait'}", 'ack': '(False)'}), "(outfile, readonly=False, lockoptions={'option':\n 'permanentwait'}, ack=False)\n", (32115, 32196), False, 'from casacore import tables\n'), ((33221, 33282), 'casacore.tables.taql', 'tables.taql', (['"""select * from $tb_tool where DATA_DESC_ID = -1"""'], {}), "('select * from $tb_tool where DATA_DESC_ID = -1')\n", (33232, 33282), False, 'from casacore import tables\n'), ((35720, 35742), 'numpy.atleast_1d', 'np.atleast_1d', (['modcols'], {}), '(modcols)\n', (35733, 35742), True, 'import numpy as np\n'), ((38793, 38880), 'numpy.prod', 'np.prod', (["[txds.chunks[kk][0] for kk in txds.chunks if kk in ['row', 'chan', 'pol']]"], {}), "([txds.chunks[kk][0] for kk in txds.chunks if kk in ['row', 'chan',\n 'pol']])\n", (38800, 38880), True, 'import numpy as np\n'), ((39481, 39509), 'dask.compute', 'dask.compute', (['delayed_writes'], {}), '(delayed_writes)\n', (39493, 39509), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((41074, 41092), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (41086, 41092), True, 'import matplotlib.pyplot as plt\n'), ((42135, 42159), 'numpy.arange', 'np.arange', (['txda.shape[0]'], {}), '(txda.shape[0])\n', (42144, 42159), True, 'import numpy as np\n'), ((43621, 43631), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (43629, 43631), True, 'import matplotlib.pyplot as plt\n'), ((56952, 56980), 'dask.compute', 'dask.compute', (['delayed_writes'], {}), '(delayed_writes)\n', (56964, 56980), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((11707, 11739), 'os.system', 'os.system', (["('rm -fr %s' % outfile)"], {}), "('rm -fr %s' % outfile)\n", (11716, 11739), 
False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((11752, 11796), 'os.system', 'os.system', (["('cp -r %s %s' % (infile, outfile))"], {}), "('cp -r %s %s' % (infile, outfile))\n", (11761, 11796), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((16068, 16100), 'numpy.stack', 'np.stack', (['[rr[col] for rr in tr]'], {}), '([rr[col] for rr in tr])\n', (16076, 16100), True, 'import numpy as np\n'), ((25235, 25272), 'xarray.Dataset', 'xarray.Dataset', (['mvars'], {'coords': 'mcoords'}), '(mvars, coords=mcoords)\n', (25249, 25272), False, 'import xarray\n'), ((36446, 36479), 'dask.delayed', 'dask.delayed', (['write_generic_table'], {}), '(write_generic_table)\n', (36458, 36479), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((38901, 38928), 'numpy.setdiff1d', 'np.setdiff1d', (['cols', 'modcols'], {}), '(cols, modcols)\n', (38913, 38928), True, 'import numpy as np\n'), ((38960, 39003), 'numpy.prod', 'np.prod', (['[kk[0] for kk in txds[col].chunks]'], {}), '([kk[0] for kk in txds[col].chunks])\n', (38967, 39003), True, 'import numpy as np\n'), ((41162, 41181), 'numpy.array', 'np.array', (['xda.shape'], {}), '(xda.shape)\n', (41170, 41181), True, 'import numpy as np\n'), ((49244, 49350), 'numpy.arange', 'np.arange', (["(-attrs['coords'][sphr_coord]['crpix'][0])", "(ims[0] - attrs['coords'][sphr_coord]['crpix'][0])"], {}), "(-attrs['coords'][sphr_coord]['crpix'][0], ims[0] - attrs['coords'\n ][sphr_coord]['crpix'][0])\n", (49253, 49350), True, 'import numpy as np\n'), ((49408, 49514), 'numpy.arange', 'np.arange', (["(-attrs['coords'][sphr_coord]['crpix'][1])", "(ims[1] - attrs['coords'][sphr_coord]['crpix'][1])"], {}), "(-attrs['coords'][sphr_coord]['crpix'][1], ims[1] - attrs['coords'\n ][sphr_coord]['crpix'][1])\n", (49417, 49514), True, 'import numpy as np\n'), ((49804, 49833), 'numpy.array', 'np.array', (['(chunks + (9999999,))'], {}), '(chunks + (9999999,))\n', (49812, 49833), True, 'import numpy as np\n'), ((50743, 50775), 'os.path.join', 'os.path.join', (['infile', '"""logtable"""'], {}), "(infile, 'logtable')\n", (50755, 50775), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((50824, 50856), 'os.path.join', 'os.path.join', (['infile', '"""logtable"""'], {}), "(infile, 'logtable')\n", (50836, 50856), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((3507, 3525), 'numpy.array', 'np.array', (['rawtimes'], {}), '(rawtimes)\n', (3515, 3525), True, 'import numpy as np\n'), ((7188, 7206), 'os.scandir', 'os.scandir', (['infile'], {}), '(infile)\n', (7198, 7206), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((12387, 12445), 'numpy.intersect1d', 'np.intersect1d', (['ddirowidxs', 'total_rows'], {'assume_unique': '(True)'}), '(ddirowidxs, total_rows, assume_unique=True)\n', (12401, 12445), True, 'import numpy as np\n'), ((13619, 13637), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (13627, 13637), True, 'import numpy as np\n'), ((17978, 18008), 'os.path.join', 'os.path.join', (['infile', 'subtable'], {}), '(infile, subtable)\n', (17990, 18008), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((19682, 19772), 'numpy.ceil', 'np.ceil', (["(sdf['times'] * sdf['baselines'] * sdf['chans'] * sdf['pols'] * 9 / 1024 ** 2)"], {}), "(sdf['times'] * sdf['baselines'] * sdf['chans'] * sdf['pols'] * 9 / \n 1024 ** 2)\n", (19689, 19772), True, 'import numpy as np\n'), ((24434, 24475), 
'dask.array.concatenate', 'dask.array.concatenate', (['bvars[kk]'], {'axis': '(0)'}), '(bvars[kk], axis=0)\n', (24456, 24475), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((29798, 29828), 'os.path.join', 'os.path.join', (['infile', 'subtable'], {}), '(infile, subtable)\n', (29810, 29828), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((30948, 30979), 'os.path.join', 'os.path.join', (['outfile', 'subtable'], {}), '(outfile, subtable)\n', (30960, 30979), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((32508, 32546), 'os.path.join', 'os.path.join', (['outfile', 'subtable', 'st[0]'], {}), '(outfile, subtable, st[0])\n', (32520, 32546), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((33524, 33544), 'numpy.zeros', 'np.zeros', (['full_shape'], {}), '(full_shape)\n', (33532, 33544), True, 'import numpy as np\n'), ((46277, 46317), 'dask.array.concatenate', 'dask.array.concatenate', (['d1slices'], {'axis': '(1)'}), '(d1slices, axis=1)\n', (46299, 46317), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((46380, 46420), 'dask.array.concatenate', 'dask.array.concatenate', (['d0slices'], {'axis': '(0)'}), '(d0slices, axis=0)\n', (46402, 46420), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((50392, 50418), 'os.path.join', 'os.path.join', (['infile', 'mask'], {}), '(infile, mask)\n', (50404, 50418), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((54683, 54710), 'os.path.join', 'os.path.join', (['outfile', 'mask'], {}), '(outfile, mask)\n', (54695, 54710), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((4207, 4225), 'os.listdir', 'os.listdir', (['infile'], {}), '(infile)\n', (4217, 4225), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((6623, 6656), 'numpy.zeros', 'np.zeros', (['max_rows'], {'dtype': '"""int32"""'}), "(max_rows, dtype='int32')\n", (6631, 6656), True, 'import numpy as np\n'), ((17794, 17812), 'os.listdir', 'os.listdir', (['infile'], {}), '(infile)\n', (17804, 17812), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((22504, 22537), 'dask.delayed', 'dask.delayed', (['read_flat_col_chunk'], {}), '(read_flat_col_chunk)\n', (22516, 22537), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((22612, 22678), 'dask.array.from_delayed', 'dask.array.from_delayed', (['delayed_array', '(crlen,)', 'cdata[col].dtype'], {}), '(delayed_array, (crlen,), cdata[col].dtype)\n', (22635, 22678), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((24628, 24669), 'dask.array.concatenate', 'dask.array.concatenate', (['bvars[kk]'], {'axis': '(0)'}), '(bvars[kk], axis=0)\n', (24650, 24669), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((29620, 29638), 'os.listdir', 'os.listdir', (['infile'], {}), '(infile)\n', (29630, 29638), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((31584, 31606), 'numpy.prod', 'np.prod', (['xds[dv].shape'], {}), '(xds[dv].shape)\n', (31591, 31606), True, 'import numpy as np\n'), ((36834, 36867), 'dask.delayed', 'dask.delayed', (['write_generic_table'], {}), '(write_generic_table)\n', (36846, 36867), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((39031, 39071), 'numpy.ceil', 'np.ceil', (['(max_chunk_size / col_chunk_size)'], {}), '(max_chunk_size 
/ col_chunk_size)\n', (39038, 39071), True, 'import numpy as np\n'), ((42648, 42667), 'numpy.atleast_1d', 'np.atleast_1d', (['axis'], {}), '(axis)\n', (42661, 42667), True, 'import numpy as np\n'), ((42690, 42709), 'numpy.atleast_1d', 'np.atleast_1d', (['axis'], {}), '(axis)\n', (42703, 42709), True, 'import numpy as np\n'), ((44115, 44131), 'numpy.array', 'np.array', (['starts'], {}), '(starts)\n', (44123, 44131), True, 'import numpy as np\n'), ((44134, 44150), 'numpy.array', 'np.array', (['shapes'], {}), '(shapes)\n', (44142, 44150), True, 'import numpy as np\n'), ((46181, 46221), 'dask.array.concatenate', 'dask.array.concatenate', (['d2slices'], {'axis': '(2)'}), '(d2slices, axis=2)\n', (46203, 46221), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((50319, 50345), 'os.path.join', 'os.path.join', (['infile', 'mask'], {}), '(infile, mask)\n', (50331, 50345), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((50592, 50618), 'os.path.join', 'os.path.join', (['infile', 'mask'], {}), '(infile, mask)\n', (50604, 50618), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((51495, 51511), 'numpy.array', 'np.array', (['starts'], {}), '(starts)\n', (51503, 51511), True, 'import numpy as np\n'), ((51514, 51536), 'numpy.array', 'np.array', (['values.shape'], {}), '(values.shape)\n', (51522, 51536), True, 'import numpy as np\n'), ((54380, 54407), 'os.path.join', 'os.path.join', (['outfile', 'mask'], {}), '(outfile, mask)\n', (54392, 54407), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((54538, 54579), 'xarray.Dataset', 'xarray.Dataset', (['{mask_var: xds[mask_var]}'], {}), '({mask_var: xds[mask_var]})\n', (54552, 54579), False, 'import xarray\n'), ((6042, 6078), 'numpy.clip', 'np.clip', (['xds[col].shape[1:]', '(1)', 'None'], {}), '(xds[col].shape[1:], 1, None)\n', (6049, 6078), True, 'import numpy as np\n'), ((17830, 17854), 'os.path.join', 'os.path.join', (['infile', 'tt'], {}), '(infile, tt)\n', (17842, 17854), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((22744, 22777), 'dask.delayed', 'dask.delayed', (['read_flat_col_chunk'], {}), '(read_flat_col_chunk)\n', (22756, 22777), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((22854, 22922), 'dask.array.from_delayed', 'dask.array.from_delayed', (['delayed_array', '(crlen, 3)', 'cdata[col].dtype'], {}), '(delayed_array, (crlen, 3), cdata[col].dtype)\n', (22877, 22922), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((24817, 24858), 'dask.array.concatenate', 'dask.array.concatenate', (['bvars[kk]'], {'axis': '(0)'}), '(bvars[kk], axis=0)\n', (24839, 24858), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((24937, 24978), 'dask.array.concatenate', 'dask.array.concatenate', (['bvars[kk]'], {'axis': '(0)'}), '(bvars[kk], axis=0)\n', (24959, 24978), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((31879, 31910), 'os.path.join', 'os.path.join', (['outfile', 'subtable'], {}), '(outfile, subtable)\n', (31891, 31910), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((33614, 33645), 'numpy.array', 'np.array', (['starts[1:values.ndim]'], {}), '(starts[1:values.ndim])\n', (33622, 33645), True, 'import numpy as np\n'), ((33648, 33674), 'numpy.array', 'np.array', (['values.shape[1:]'], {}), '(values.shape[1:])\n', (33656, 33674), True, 'import numpy as np\n'), ((39267, 
39303), 'dask.delayed', 'dask.delayed', (['write_main_table_slice'], {}), '(write_main_table_slice)\n', (39279, 39303), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((39566, 39577), 'time.time', 'time.time', ([], {}), '()\n', (39575, 39577), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((42527, 42579), 'xarray.Dataset', 'xarray.Dataset', (['{txda.name: txda, txda2.name: txda2}'], {}), '({txda.name: txda, txda2.name: txda2})\n', (42541, 42579), False, 'import xarray\n'), ((46081, 46121), 'dask.array.concatenate', 'dask.array.concatenate', (['d3slices'], {'axis': '(3)'}), '(d3slices, axis=3)\n', (46103, 46121), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((57037, 57048), 'time.time', 'time.time', ([], {}), '()\n', (57046, 57048), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((16285, 16299), 'numpy.array', 'np.array', (["['']"], {}), "([''])\n", (16293, 16299), True, 'import numpy as np\n'), ((23456, 23496), 'dask.array.concatenate', 'dask.array.concatenate', (['pol_list'], {'axis': '(1)'}), '(pol_list, axis=1)\n', (23478, 23496), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((29656, 29680), 'os.path.join', 'os.path.join', (['infile', 'tt'], {}), '(infile, tt)\n', (29668, 29680), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((37595, 37620), 'numpy.cumsum', 'np.cumsum', (['chunks[0][:-1]'], {}), '(chunks[0][:-1])\n', (37604, 37620), True, 'import numpy as np\n'), ((41602, 41636), 'numpy.full', 'np.full', (['(1)', 'np.nan'], {'dtype': 'np.int32'}), '(1, np.nan, dtype=np.int32)\n', (41609, 41636), True, 'import numpy as np\n'), ((41712, 41748), 'numpy.unique', 'np.unique', (['txda'], {'return_inverse': '(True)'}), '(txda, return_inverse=True)\n', (41721, 41748), True, 'import numpy as np\n'), ((42478, 42512), 'numpy.full', 'np.full', (['(1)', 'np.nan'], {'dtype': 'np.int32'}), '(1, np.nan, dtype=np.int32)\n', (42485, 42512), True, 'import numpy as np\n'), ((45791, 45821), 'dask.delayed', 'dask.delayed', (['read_image_chunk'], {}), '(read_image_chunk)\n', (45803, 45821), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((45883, 45942), 'dask.array.from_delayed', 'dask.array.from_delayed', (['delayed_array', 'shapes', 'cdata.dtype'], {}), '(delayed_array, shapes, cdata.dtype)\n', (45906, 45942), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((45977, 46017), 'dask.array.concatenate', 'dask.array.concatenate', (['d4slices'], {'axis': '(4)'}), '(d4slices, axis=4)\n', (45999, 46017), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((16578, 16595), 'numpy.array', 'np.array', (['rr[col]'], {}), '(rr[col])\n', (16586, 16595), True, 'import numpy as np\n'), ((20357, 20371), 'numpy.diff', 'np.diff', (['ridxs'], {}), '(ridxs)\n', (20364, 20371), True, 'import numpy as np\n'), ((23239, 23272), 'dask.delayed', 'dask.delayed', (['read_flat_col_chunk'], {}), '(read_flat_col_chunk)\n', (23251, 23272), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((23352, 23423), 'dask.array.from_delayed', 'dask.array.from_delayed', (['delayed_array', '(crlen, plen)', 'cdata[col].dtype'], {}), '(delayed_array, (crlen, plen), cdata[col].dtype)\n', (23375, 23423), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((24214, 24255), 'dask.array.concatenate', 'dask.array.concatenate', (['chan_list'], 
{'axis': '(1)'}), '(chan_list, axis=1)\n', (24236, 24255), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((38476, 38512), 'dask.delayed', 'dask.delayed', (['write_main_table_slice'], {}), '(write_main_table_slice)\n', (38488, 38512), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((2114, 2137), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (2135, 2137), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((17220, 17234), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (17228, 17234), True, 'import numpy as np\n'), ((17356, 17370), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (17364, 17370), True, 'import numpy as np\n'), ((24141, 24181), 'dask.array.concatenate', 'dask.array.concatenate', (['pol_list'], {'axis': '(2)'}), '(pol_list, axis=2)\n', (24163, 24181), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((37745, 37770), 'numpy.cumsum', 'np.cumsum', (['chunks[1][:-1]'], {}), '(chunks[1][:-1])\n', (37754, 37770), True, 'import numpy as np\n'), ((56765, 56796), 'dask.delayed', 'dask.delayed', (['write_image_slice'], {}), '(write_image_slice)\n', (56777, 56796), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((56803, 56843), 'os.path.join', 'os.path.join', (['outfile', 'subtable_list[ii]'], {}), '(outfile, subtable_list[ii])\n', (56815, 56843), False, 'import warnings, time, os, psutil, multiprocessing, logging, re\n'), ((23907, 23940), 'dask.delayed', 'dask.delayed', (['read_flat_col_chunk'], {}), '(read_flat_col_chunk)\n', (23919, 23940), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((24028, 24105), 'dask.array.from_delayed', 'dask.array.from_delayed', (['delayed_array', '(crlen, clen, plen)', 'cdata[col].dtype'], {}), '(delayed_array, (crlen, clen, plen), cdata[col].dtype)\n', (24051, 24105), False, 'import xarray, dask, dask.array, dask.delayed, dask.distributed\n'), ((37929, 37954), 'numpy.cumsum', 'np.cumsum', (['chunks[2][:-1]'], {}), '(chunks[2][:-1])\n', (37938, 37954), True, 'import numpy as np\n'), ((16708, 16725), 'numpy.array', 'np.array', (['rr[col]'], {}), '(rr[col])\n', (16716, 16725), True, 'import numpy as np\n'), ((16831, 16848), 'numpy.array', 'np.array', (['rr[col]'], {}), '(rr[col])\n', (16839, 16848), True, 'import numpy as np\n'), ((16885, 16903), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (16893, 16903), True, 'import numpy as np\n'), ((16911, 16931), 'numpy.array', 'np.array', (['ctype[col]'], {}), '(ctype[col])\n', (16919, 16931), True, 'import numpy as np\n')]
|
import math
import os
import xml.etree.ElementTree
import numpy as np
import paddle
import six
from PIL import Image
from utils import image_util
class Settings(object):
def __init__(self,
label_file_path=None,
resize_h=300,
resize_w=300,
mean_value=127.5,
std_value=0.007843,
apply_distort=True,
apply_expand=True,
ap_version='11point'):
self._ap_version = ap_version
self._label_list = []
if label_file_path is not None:
with open(label_file_path, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
self._label_list.append(line.strip().replace('\n', ''))
self._apply_distort = apply_distort
self._apply_expand = apply_expand
self._resize_height = resize_h
self._resize_width = resize_w
self._img_mean = mean_value
self._img_std = std_value
self._expand_prob = 0.5
self._expand_max_ratio = 4
self._hue_prob = 0.5
self._hue_delta = 18
self._contrast_prob = 0.5
self._contrast_delta = 0.5
self._saturation_prob = 0.5
self._saturation_delta = 0.5
self._brightness_prob = 0.5
self._brightness_delta = 0.125
@property
def ap_version(self):
return self._ap_version
@property
def apply_expand(self):
return self._apply_expand
@property
def apply_distort(self):
return self._apply_distort
@property
def label_list(self):
return self._label_list
@property
def resize_h(self):
return self._resize_height
@property
def resize_w(self):
return self._resize_width
@property
def img_mean(self):
return self._img_mean
@property
def img_std(self):
return self._img_std
def preprocess(img, bbox_labels, mode, settings):
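    # Applies (in train mode) photometric distortion, expansion, patch sampling
    # and random horizontal flipping, then resizes, normalizes and converts HWC -> CHW.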
img_width, img_height = img.size
sampled_labels = bbox_labels
if mode == 'train':
if settings._apply_distort:
img = image_util.distort_image(img, settings)
if settings._apply_expand:
img, bbox_labels, img_width, img_height = image_util.expand_image(
img, bbox_labels, img_width, img_height, settings)
# sampling, hard-code here
batch_sampler = [image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0),
image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0),
image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0),
image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0),
image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0),
image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0),
image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0)]
sampled_bbox = image_util.generate_batch_samples(batch_sampler, bbox_labels)
img = np.array(img)
if len(sampled_bbox) > 0:
idx = int(np.random.uniform(0, len(sampled_bbox)))
img, sampled_labels = image_util.crop_image(img, bbox_labels, sampled_bbox[idx], img_width, img_height)
img = Image.fromarray(img)
img = img.resize((settings.resize_w, settings.resize_h), Image.ANTIALIAS)
img = np.array(img)
if mode == 'train':
mirror = int(np.random.uniform(0, 2))
if mirror == 1:
img = img[:, ::-1, :]
for i in range(len(sampled_labels)):
tmp = sampled_labels[i][1]
sampled_labels[i][1] = 1 - sampled_labels[i][3]
sampled_labels[i][3] = 1 - tmp
# HWC to CHW
if len(img.shape) == 3:
img = np.swapaxes(img, 1, 2)
img = np.swapaxes(img, 1, 0)
img = img.astype('float32')
img -= settings.img_mean
img = img * settings.img_std
return img, sampled_labels
def pascalvoc(settings, file_list, mode, batch_size, shuffle):
def reader():
if mode == 'train' and shuffle:
np.random.shuffle(file_list)
batch_out = []
cnt = 0
for image in file_list:
image_path, label_path = image.split('\t')
if not os.path.exists(image_path):
                raise ValueError("%s does not exist, you should specify the data path correctly." % image_path)
im = Image.open(image_path)
if im.mode == 'L':
im = im.convert('RGB')
im_width, im_height = im.size
# layout: label | xmin | ymin | xmax | ymax | difficult
bbox_labels = []
root = xml.etree.ElementTree.parse(label_path).getroot()
for object in root.findall('object'):
# start from 1
bbox_sample = [float(settings.label_list.index(object.find('name').text))]
bbox = object.find('bndbox')
difficult = float(object.find('difficult').text)
bbox_sample.append(float(bbox.find('xmin').text) / im_width)
bbox_sample.append(float(bbox.find('ymin').text) / im_height)
bbox_sample.append(float(bbox.find('xmax').text) / im_width)
bbox_sample.append(float(bbox.find('ymax').text) / im_height)
bbox_sample.append(difficult)
bbox_labels.append(bbox_sample)
im, sample_labels = preprocess(im, bbox_labels, mode, settings)
sample_labels = np.array(sample_labels)
if len(sample_labels) == 0: continue
im = im.astype('float32')
boxes = sample_labels[:, 1:5]
lbls = sample_labels[:, 0].astype('int32')
difficults = sample_labels[:, -1].astype('int32')
batch_out.append((im, boxes, lbls, difficults))
if len(batch_out) == batch_size:
yield batch_out
cnt += len(batch_out)
batch_out = []
if mode == 'test' and len(batch_out) > 1:
yield batch_out
cnt += len(batch_out)
return reader
def train(settings, file_list_path, batch_size, shuffle=True, use_multiprocess=True, num_workers=4):
readers = []
images = [line.strip() for line in open(file_list_path)]
np.random.shuffle(images)
    # ceil division so the images are split into at most num_workers chunks
    n = int(math.ceil(len(images) / num_workers)) if use_multiprocess else len(images)
image_lists = [images[i:i + n] for i in range(0, len(images), n)]
for l in image_lists:
readers.append(pascalvoc(settings, l, 'train', batch_size, shuffle))
if use_multiprocess:
return paddle.reader.multiprocess_reader(readers, False)
else:
return readers[0]
def test(settings, file_list_path, batch_size):
image_list = [line.strip() for line in open(file_list_path)]
return pascalvoc(settings, image_list, 'test', batch_size, False)
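# --- Hedged usage sketch (added; not part of the original reader module) ---
# The two paths below are placeholders: a label file with one class name per
# line, and a file list whose lines look like "image_path\tannotation_xml_path".
if __name__ == '__main__':
    demo_settings = Settings(label_file_path='label_list.txt')
    demo_reader = train(demo_settings, 'train_file_list.txt', batch_size=32,
                        shuffle=True, use_multiprocess=False)
    for demo_batch in demo_reader():
        demo_im, demo_boxes, demo_lbls, demo_difficults = demo_batch[0]
        print(demo_im.shape, demo_boxes.shape)
        break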
|
[
"numpy.random.uniform",
"utils.image_util.sampler",
"utils.image_util.crop_image",
"utils.image_util.generate_batch_samples",
"PIL.Image.open",
"os.path.exists",
"numpy.array",
"numpy.swapaxes",
"PIL.Image.fromarray",
"utils.image_util.distort_image",
"paddle.reader.multiprocess_reader",
"numpy.random.shuffle",
"utils.image_util.expand_image"
] |
[((3440, 3453), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3448, 3453), True, 'import numpy as np\n'), ((6376, 6401), 'numpy.random.shuffle', 'np.random.shuffle', (['images'], {}), '(images)\n', (6393, 6401), True, 'import numpy as np\n'), ((3012, 3073), 'utils.image_util.generate_batch_samples', 'image_util.generate_batch_samples', (['batch_sampler', 'bbox_labels'], {}), '(batch_sampler, bbox_labels)\n', (3045, 3073), False, 'from utils import image_util\n'), ((3089, 3102), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3097, 3102), True, 'import numpy as np\n'), ((3331, 3351), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3346, 3351), False, 'from PIL import Image\n'), ((3845, 3867), 'numpy.swapaxes', 'np.swapaxes', (['img', '(1)', '(2)'], {}), '(img, 1, 2)\n', (3856, 3867), True, 'import numpy as np\n'), ((3882, 3904), 'numpy.swapaxes', 'np.swapaxes', (['img', '(1)', '(0)'], {}), '(img, 1, 0)\n', (3893, 3904), True, 'import numpy as np\n'), ((6704, 6753), 'paddle.reader.multiprocess_reader', 'paddle.reader.multiprocess_reader', (['readers', '(False)'], {}), '(readers, False)\n', (6737, 6753), False, 'import paddle\n'), ((2160, 2199), 'utils.image_util.distort_image', 'image_util.distort_image', (['img', 'settings'], {}), '(img, settings)\n', (2184, 2199), False, 'from utils import image_util\n'), ((2289, 2363), 'utils.image_util.expand_image', 'image_util.expand_image', (['img', 'bbox_labels', 'img_width', 'img_height', 'settings'], {}), '(img, bbox_labels, img_width, img_height, settings)\n', (2312, 2363), False, 'from utils import image_util\n'), ((2441, 2495), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(1)', '(1.0)', '(1.0)', '(1.0)', '(1.0)', '(0.0)', '(0.0)'], {}), '(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0)\n', (2459, 2495), False, 'from utils import image_util\n'), ((2522, 2577), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.1)', '(0.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0)\n', (2540, 2577), False, 'from utils import image_util\n'), ((2604, 2659), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.3)', '(0.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0)\n', (2622, 2659), False, 'from utils import image_util\n'), ((2686, 2741), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.5)', '(0.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0)\n', (2704, 2741), False, 'from utils import image_util\n'), ((2768, 2823), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.7)', '(0.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0)\n', (2786, 2823), False, 'from utils import image_util\n'), ((2850, 2905), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.9)', '(0.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0)\n', (2868, 2905), False, 'from utils import image_util\n'), ((2932, 2987), 'utils.image_util.sampler', 'image_util.sampler', (['(1)', '(50)', '(0.3)', '(1.0)', '(0.5)', '(2.0)', '(0.0)', '(1.0)'], {}), '(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0)\n', (2950, 2987), False, 'from utils import image_util\n'), ((3234, 3319), 'utils.image_util.crop_image', 'image_util.crop_image', (['img', 'bbox_labels', 'sampled_bbox[idx]', 'img_width', 'img_height'], {}), '(img, bbox_labels, sampled_bbox[idx], img_width,\n img_height)\n', (3255, 3319), False, 'from utils 
import image_util\n'), ((3500, 3523), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (3517, 3523), True, 'import numpy as np\n'), ((4165, 4193), 'numpy.random.shuffle', 'np.random.shuffle', (['file_list'], {}), '(file_list)\n', (4182, 4193), True, 'import numpy as np\n'), ((4490, 4512), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4500, 4512), False, 'from PIL import Image\n'), ((5582, 5605), 'numpy.array', 'np.array', (['sample_labels'], {}), '(sample_labels)\n', (5590, 5605), True, 'import numpy as np\n'), ((4339, 4365), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (4353, 4365), False, 'import os\n')]
|
import numpy as np
from napari.utils import nbscreenshot
def test_nbscreenshot(viewer_factory):
"""Test taking a screenshot."""
view, viewer = viewer_factory()
np.random.seed(0)
data = np.random.random((10, 15))
viewer.add_image(data)
rich_display_object = nbscreenshot(viewer)
assert hasattr(rich_display_object, '_repr_png_')
# Trigger method that would run in jupyter notebook cell automatically
rich_display_object._repr_png_()
assert rich_display_object.image is not None
|
[
"numpy.random.random",
"numpy.random.seed",
"napari.utils.nbscreenshot"
] |
[((176, 193), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (190, 193), True, 'import numpy as np\n'), ((205, 231), 'numpy.random.random', 'np.random.random', (['(10, 15)'], {}), '((10, 15))\n', (221, 231), True, 'import numpy as np\n'), ((286, 306), 'napari.utils.nbscreenshot', 'nbscreenshot', (['viewer'], {}), '(viewer)\n', (298, 306), False, 'from napari.utils import nbscreenshot\n')]
|
import os
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from medpy.metric import binary
#use gpu if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class AE(nn.Module):
def __init__(self, latent_size=100):
super().__init__()
self.init_layers(latent_size)
self.apply(self.weight_init)
self.loss_function=self.Loss()
self.metrics=self.Metrics()
self.optimizer=torch.optim.Adam(self.parameters(),lr=2e-4,weight_decay=1e-5)
def init_layers(self,latent_size):
self.encoder = nn.Sequential(
nn.Conv2d(in_channels=4,out_channels=32,kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(num_features=32),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.Conv2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(num_features=32),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.Conv2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(num_features=32),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.Conv2d(in_channels=32,out_channels=32,kernel_size=3,stride=1,padding=1),
nn.BatchNorm2d(num_features=32),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.Conv2d(in_channels=32,out_channels=64,kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(num_features=64),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1),
nn.BatchNorm2d(num_features=64),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.Conv2d(in_channels=64,out_channels=128,kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(num_features=128),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.Conv2d(in_channels=128,out_channels=64,kernel_size=3,stride=1,padding=1),
nn.BatchNorm2d(num_features=64),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.Conv2d(in_channels=64,out_channels=32,kernel_size=3,stride=1,padding=1),
nn.BatchNorm2d(num_features=32),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.Conv2d(in_channels=32,out_channels=latent_size,kernel_size=4,stride=2,padding=1)
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(in_channels=latent_size,out_channels=32,kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(num_features=32),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.ConvTranspose2d(in_channels=32,out_channels=64,kernel_size=3,stride=1,padding=1),
nn.BatchNorm2d(num_features=64),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.ConvTranspose2d(in_channels=64,out_channels=128,kernel_size=3,stride=1,padding=1),
nn.BatchNorm2d(num_features=128),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.ConvTranspose2d(in_channels=128,out_channels=64,kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(num_features=64),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.ConvTranspose2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1),
nn.BatchNorm2d(num_features=64),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.ConvTranspose2d(in_channels=64,out_channels=32,kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(num_features=32),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.ConvTranspose2d(in_channels=32,out_channels=32,kernel_size=3,stride=1,padding=1),
nn.BatchNorm2d(num_features=32),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.ConvTranspose2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(num_features=32),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.ConvTranspose2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1),
nn.BatchNorm2d(num_features=32),
nn.LeakyReLU(.2),
nn.Dropout(0.5),
nn.ConvTranspose2d(in_channels=32,out_channels=4,kernel_size=4,stride=2,padding=1),
nn.Softmax(dim=1)
)
def weight_init(self,m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
nn.init.kaiming_uniform_(m.weight)
def forward(self, x):
latent = self.encoder(x)
reconstruction = self.decoder(latent)
return reconstruction
class Loss():
def __init__(self,call_id=0):
self.MSELoss=nn.MSELoss()
self.GDLoss=self.GDLoss()
class GDLoss:
def __call__(self,x,y):
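                # Soft generalized Dice over the class channels: channel 0 is the
                # background class (cf. the "BK_" key in Metrics) and is excluded by
                # the [1:] slice; the score is negated so minimizing it maximizes overlap.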
tp=torch.sum(x*y,dim=(0,2,3))
fp=torch.sum(x*(1-y),dim=(0,2,3))
fn=torch.sum((1-x)*y,dim=(0,2,3))
nominator=2*tp+1e-05
denominator=2*tp+fp+fn+1e-05
dice_score=-(nominator/(denominator+1e-8))[1:].mean()
return dice_score
def __call__(self,prediction,target,epoch=None,validation=False):
contributes={}
contributes["MSELoss"]=self.MSELoss(prediction,target)
contributes["GDLoss"]=self.GDLoss(prediction,target)
contributes["Total"]=contributes["MSELoss"]+contributes["GDLoss"]
if validation:
return {k:v.item() for k,v in contributes.items()}
return contributes["Total"]
class Metrics():
def __init__(self):
self.DC=self.DC()
self.HD=self.HD()
class DC:
def __call__(self,prediction,target):
try:
return binary.dc(prediction,target)
except Exception:
return 0
class HD:
def __call__(self,prediction,target):
try:
return binary.hd(prediction,target)
except Exception:
return np.nan
def __call__(self,prediction,target,validation=False):
metrics={}
for c,key in enumerate(["BK_","RV_","MYO_","LV_"]):
ref=np.copy(target)
pred=np.copy(prediction)
ref=np.where(ref!=c,0,1)
pred=np.where(pred!=c,0,1)
metrics[key+"dc"]=self.DC(pred,ref)
metrics[key+"hd"]=self.HD(pred,ref)
return metrics
def training_routine(self,epochs,train_loader,val_loader,ckpt_folder):
if not os.path.isdir(ckpt_folder):
os.mkdir(ckpt_folder)
history = []
best_acc = None
for epoch in epochs:
#training
self.train()
for patient in train_loader:
for batch in patient:
batch=batch.to(device)
self.optimizer.zero_grad()
reconstruction=self.forward(batch)
loss=self.loss_function(reconstruction,batch,epoch)
loss.backward()
self.optimizer.step()
#validation
self.eval()
with torch.no_grad():
result = self.evaluation_routine(val_loader)
#checkpoint
if(best_acc==None or result['Total']<best_acc or epoch%10==0):
ckpt=os.path.join(ckpt_folder,"{:03d}.pth".format(epoch))
if(best_acc==None or result['Total']<best_acc): best_acc=result['Total']; ckpt=ckpt.split(".pth")[0]+"_best.pth"
torch.save({"AE": self.state_dict(),"AE_optim": self.optimizer.state_dict(),"epoch": epoch},ckpt)
#report
self.epoch_end(epoch, result)
history.append(result)
return history
def evaluation_routine(self,val_loader):
epoch_summary={}
for patient in val_loader:
gt=[];reconstruction=[]
#loss terms
for batch in patient:
batch={"gt":batch.to(device)}
batch["reconstruction"]=self.forward(batch["gt"])
gt=torch.cat([gt,batch["gt"]],dim=0) if len(gt)>0 else batch["gt"]
reconstruction=torch.cat([reconstruction,batch["reconstruction"]],dim=0) if len(reconstruction)>0 else batch["reconstruction"]
for k,v in self.loss_function(batch["reconstruction"],batch["gt"],validation=True).items():
if k not in epoch_summary.keys(): epoch_summary[k]=[]
epoch_summary[k].append(v)
#validation metrics
gt=np.argmax(gt.cpu().numpy(),axis=1)
gt={"ED":gt[:len(gt)//2],"ES":gt[len(gt)//2:]}
reconstruction=np.argmax(reconstruction.cpu().numpy(),axis=1)
reconstruction={"ED":reconstruction[:len(reconstruction)//2],"ES":reconstruction[len(reconstruction)//2:]}
for phase in ["ED","ES"]:
for k,v in self.metrics(reconstruction[phase],gt[phase]).items():
if k not in epoch_summary.keys(): epoch_summary[k]=[]
epoch_summary[k].append(v)
epoch_summary={k:np.mean(v) for k,v in epoch_summary.items()}
return epoch_summary
def epoch_end(self,epoch,result):
print("\033[1mEpoch [{}]\033[0m".format(epoch))
header,row="",""
for k,v in result.items():
header+="{:.6}\t".format(k);row+="{:.6}\t".format("{:.4f}".format(v))
print(header);print(row)
def plot_history(history):
losses = [x['Total'] for x in history]
plt.plot(losses, '-x', label="loss")
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.title('Losses vs. No. of epochs')
plt.grid()
plt.show()
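# --- Hedged usage sketch (added; not part of the original file) ---
# Exercises the autoencoder on random data. The spatial size 256x256 is an
# assumption chosen so that the six stride-2 encoder stages reduce it to a
# 4x4 latent map; it is not a value taken from the original training code.
if __name__ == "__main__":
    model = AE(latent_size=100).to(device)
    model.eval()
    dummy = torch.rand(2, 4, 256, 256, device=device)  # (batch, one-hot classes, H, W)
    with torch.no_grad():
        recon = model(dummy)
    print(recon.shape)  # expected: torch.Size([2, 4, 256, 256])
    print(model.loss_function(recon, dummy, validation=True))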
|
[
"matplotlib.pyplot.title",
"torch.nn.Dropout",
"os.mkdir",
"torch.cat",
"numpy.mean",
"torch.nn.Softmax",
"torch.no_grad",
"torch.nn.MSELoss",
"numpy.copy",
"medpy.metric.binary.dc",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_uniform_",
"matplotlib.pyplot.grid",
"torch.sum",
"torch.nn.ConvTranspose2d",
"matplotlib.pyplot.plot",
"medpy.metric.binary.hd",
"os.path.isdir",
"numpy.where",
"matplotlib.pyplot.xlabel"
] |
[((8627, 8663), 'matplotlib.pyplot.plot', 'plt.plot', (['losses', '"""-x"""'], {'label': '"""loss"""'}), "(losses, '-x', label='loss')\n", (8635, 8663), True, 'import matplotlib.pyplot as plt\n'), ((8666, 8685), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (8676, 8685), True, 'import matplotlib.pyplot as plt\n'), ((8688, 8706), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (8698, 8706), True, 'import matplotlib.pyplot as plt\n'), ((8709, 8721), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8719, 8721), True, 'import matplotlib.pyplot as plt\n'), ((8724, 8761), 'matplotlib.pyplot.title', 'plt.title', (['"""Losses vs. No. of epochs"""'], {}), "('Losses vs. No. of epochs')\n", (8733, 8761), True, 'import matplotlib.pyplot as plt\n'), ((8764, 8774), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8772, 8774), True, 'import matplotlib.pyplot as plt\n'), ((8777, 8787), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8785, 8787), True, 'import matplotlib.pyplot as plt\n'), ((184, 209), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (207, 209), False, 'import torch\n'), ((599, 676), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(4)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=4, out_channels=32, kernel_size=4, stride=2, padding=1)\n', (608, 676), True, 'import torch.nn as nn\n'), ((680, 711), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (694, 711), True, 'import torch.nn as nn\n'), ((719, 736), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (731, 736), True, 'import torch.nn as nn\n'), ((743, 758), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (753, 758), True, 'import torch.nn as nn\n'), ((767, 845), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=4, stride=2, padding=1)\n', (776, 845), True, 'import torch.nn as nn\n'), ((849, 880), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (863, 880), True, 'import torch.nn as nn\n'), ((888, 905), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (900, 905), True, 'import torch.nn as nn\n'), ((912, 927), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (922, 927), True, 'import torch.nn as nn\n'), ((936, 1014), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=4, stride=2, padding=1)\n', (945, 1014), True, 'import torch.nn as nn\n'), ((1018, 1049), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (1032, 1049), True, 'import torch.nn as nn\n'), ((1057, 1074), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1069, 1074), True, 'import torch.nn as nn\n'), ((1081, 1096), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1091, 1096), True, 'import torch.nn as nn\n'), ((1105, 1183), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1)\n', (1114, 1183), True, 'import torch.nn as nn\n'), ((1187, 1218), 
'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (1201, 1218), True, 'import torch.nn as nn\n'), ((1226, 1243), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1238, 1243), True, 'import torch.nn as nn\n'), ((1250, 1265), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1260, 1265), True, 'import torch.nn as nn\n'), ((1274, 1352), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=64, kernel_size=4, stride=2, padding=1)\n', (1283, 1352), True, 'import torch.nn as nn\n'), ((1356, 1387), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (1370, 1387), True, 'import torch.nn as nn\n'), ((1395, 1412), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1407, 1412), True, 'import torch.nn as nn\n'), ((1419, 1434), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1429, 1434), True, 'import torch.nn as nn\n'), ((1443, 1521), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(64)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)\n', (1452, 1521), True, 'import torch.nn as nn\n'), ((1525, 1556), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (1539, 1556), True, 'import torch.nn as nn\n'), ((1564, 1581), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1576, 1581), True, 'import torch.nn as nn\n'), ((1588, 1603), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1598, 1603), True, 'import torch.nn as nn\n'), ((1612, 1691), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(128)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1)\n', (1621, 1691), True, 'import torch.nn as nn\n'), ((1695, 1727), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(128)'}), '(num_features=128)\n', (1709, 1727), True, 'import torch.nn as nn\n'), ((1735, 1752), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1747, 1752), True, 'import torch.nn as nn\n'), ((1759, 1774), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1769, 1774), True, 'import torch.nn as nn\n'), ((1783, 1862), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(64)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1)\n', (1792, 1862), True, 'import torch.nn as nn\n'), ((1866, 1897), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (1880, 1897), True, 'import torch.nn as nn\n'), ((1905, 1922), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1917, 1922), True, 'import torch.nn as nn\n'), ((1929, 1944), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1939, 1944), True, 'import torch.nn as nn\n'), ((1953, 2031), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(32)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1)\n', (1962, 2031), True, 'import torch.nn as nn\n'), ((2035, 2066), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 
'(32)'}), '(num_features=32)\n', (2049, 2066), True, 'import torch.nn as nn\n'), ((2074, 2091), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2086, 2091), True, 'import torch.nn as nn\n'), ((2098, 2113), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2108, 2113), True, 'import torch.nn as nn\n'), ((2122, 2213), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': 'latent_size', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=latent_size, kernel_size=4, stride=2,\n padding=1)\n', (2131, 2213), True, 'import torch.nn as nn\n'), ((2253, 2353), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'latent_size', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=latent_size, out_channels=32, kernel_size=4,\n stride=2, padding=1)\n', (2271, 2353), True, 'import torch.nn as nn\n'), ((2353, 2384), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (2367, 2384), True, 'import torch.nn as nn\n'), ((2392, 2409), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2404, 2409), True, 'import torch.nn as nn\n'), ((2416, 2431), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2426, 2431), True, 'import torch.nn as nn\n'), ((2440, 2531), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=32, out_channels=64, kernel_size=3, stride=1,\n padding=1)\n', (2458, 2531), True, 'import torch.nn as nn\n'), ((2531, 2562), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (2545, 2562), True, 'import torch.nn as nn\n'), ((2570, 2587), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2582, 2587), True, 'import torch.nn as nn\n'), ((2594, 2609), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2604, 2609), True, 'import torch.nn as nn\n'), ((2618, 2711), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(64)', 'out_channels': '(128)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=64, out_channels=128, kernel_size=3, stride=\n 1, padding=1)\n', (2636, 2711), True, 'import torch.nn as nn\n'), ((2710, 2742), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(128)'}), '(num_features=128)\n', (2724, 2742), True, 'import torch.nn as nn\n'), ((2750, 2767), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2762, 2767), True, 'import torch.nn as nn\n'), ((2774, 2789), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2784, 2789), True, 'import torch.nn as nn\n'), ((2798, 2891), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(128)', 'out_channels': '(64)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=128, out_channels=64, kernel_size=4, stride=\n 2, padding=1)\n', (2816, 2891), True, 'import torch.nn as nn\n'), ((2890, 2921), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (2904, 2921), True, 'import torch.nn as nn\n'), ((2929, 2946), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2941, 2946), True, 'import torch.nn as nn\n'), ((2953, 2968), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2963, 2968), True, 'import torch.nn as nn\n'), ((2977, 3068), 
'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(64)', 'out_channels': '(64)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=64, out_channels=64, kernel_size=3, stride=1,\n padding=1)\n', (2995, 3068), True, 'import torch.nn as nn\n'), ((3068, 3099), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(64)'}), '(num_features=64)\n', (3082, 3099), True, 'import torch.nn as nn\n'), ((3107, 3124), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3119, 3124), True, 'import torch.nn as nn\n'), ((3131, 3146), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3141, 3146), True, 'import torch.nn as nn\n'), ((3155, 3246), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(64)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=64, out_channels=32, kernel_size=4, stride=2,\n padding=1)\n', (3173, 3246), True, 'import torch.nn as nn\n'), ((3246, 3277), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (3260, 3277), True, 'import torch.nn as nn\n'), ((3285, 3302), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3297, 3302), True, 'import torch.nn as nn\n'), ((3309, 3324), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3319, 3324), True, 'import torch.nn as nn\n'), ((3333, 3424), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=3, stride=1,\n padding=1)\n', (3351, 3424), True, 'import torch.nn as nn\n'), ((3424, 3455), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (3438, 3455), True, 'import torch.nn as nn\n'), ((3463, 3480), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3475, 3480), True, 'import torch.nn as nn\n'), ((3487, 3502), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3497, 3502), True, 'import torch.nn as nn\n'), ((3511, 3602), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=4, stride=2,\n padding=1)\n', (3529, 3602), True, 'import torch.nn as nn\n'), ((3602, 3633), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (3616, 3633), True, 'import torch.nn as nn\n'), ((3641, 3658), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3653, 3658), True, 'import torch.nn as nn\n'), ((3665, 3680), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3675, 3680), True, 'import torch.nn as nn\n'), ((3689, 3780), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=32, kernel_size=4, stride=2,\n padding=1)\n', (3707, 3780), True, 'import torch.nn as nn\n'), ((3780, 3811), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': '(32)'}), '(num_features=32)\n', (3794, 3811), True, 'import torch.nn as nn\n'), ((3819, 3836), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3831, 3836), True, 'import torch.nn as nn\n'), ((3843, 3858), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3853, 3858), True, 'import torch.nn 
as nn\n'), ((3867, 3957), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(4)', 'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=32, out_channels=4, kernel_size=4, stride=2,\n padding=1)\n', (3885, 3957), True, 'import torch.nn as nn\n'), ((3957, 3974), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (3967, 3974), True, 'import torch.nn as nn\n'), ((4085, 4119), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['m.weight'], {}), '(m.weight)\n', (4109, 4119), True, 'import torch.nn as nn\n'), ((4312, 4324), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4322, 4324), True, 'import torch.nn as nn\n'), ((5977, 6003), 'os.path.isdir', 'os.path.isdir', (['ckpt_folder'], {}), '(ckpt_folder)\n', (5990, 6003), False, 'import os\n'), ((6011, 6032), 'os.mkdir', 'os.mkdir', (['ckpt_folder'], {}), '(ckpt_folder)\n', (6019, 6032), False, 'import os\n'), ((8236, 8246), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (8243, 8246), True, 'import numpy as np\n'), ((4423, 4454), 'torch.sum', 'torch.sum', (['(x * y)'], {'dim': '(0, 2, 3)'}), '(x * y, dim=(0, 2, 3))\n', (4432, 4454), False, 'import torch\n'), ((4461, 4498), 'torch.sum', 'torch.sum', (['(x * (1 - y))'], {'dim': '(0, 2, 3)'}), '(x * (1 - y), dim=(0, 2, 3))\n', (4470, 4498), False, 'import torch\n'), ((4503, 4540), 'torch.sum', 'torch.sum', (['((1 - x) * y)'], {'dim': '(0, 2, 3)'}), '((1 - x) * y, dim=(0, 2, 3))\n', (4512, 4540), False, 'import torch\n'), ((5656, 5671), 'numpy.copy', 'np.copy', (['target'], {}), '(target)\n', (5663, 5671), True, 'import numpy as np\n'), ((5685, 5704), 'numpy.copy', 'np.copy', (['prediction'], {}), '(prediction)\n', (5692, 5704), True, 'import numpy as np\n'), ((5718, 5742), 'numpy.where', 'np.where', (['(ref != c)', '(0)', '(1)'], {}), '(ref != c, 0, 1)\n', (5726, 5742), True, 'import numpy as np\n'), ((5752, 5777), 'numpy.where', 'np.where', (['(pred != c)', '(0)', '(1)'], {}), '(pred != c, 0, 1)\n', (5760, 5777), True, 'import numpy as np\n'), ((6477, 6492), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6490, 6492), False, 'import torch\n'), ((5267, 5296), 'medpy.metric.binary.dc', 'binary.dc', (['prediction', 'target'], {}), '(prediction, target)\n', (5276, 5296), False, 'from medpy.metric import binary\n'), ((5430, 5459), 'medpy.metric.binary.hd', 'binary.hd', (['prediction', 'target'], {}), '(prediction, target)\n', (5439, 5459), False, 'from medpy.metric import binary\n'), ((7304, 7339), 'torch.cat', 'torch.cat', (["[gt, batch['gt']]"], {'dim': '(0)'}), "([gt, batch['gt']], dim=0)\n", (7313, 7339), False, 'import torch\n'), ((7391, 7450), 'torch.cat', 'torch.cat', (["[reconstruction, batch['reconstruction']]"], {'dim': '(0)'}), "([reconstruction, batch['reconstruction']], dim=0)\n", (7400, 7450), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 30 20:15:18 2019
@author: autol
"""
#%%
from plotxy import plot_gd_xy,iters_gd_plot,plot_gd_contour
from initdata import init_data,init_data1,data_b,init_data_house
from func import gradient_descent_f
from varclass import VarSetX
from sklearn.model_selection import ParameterGrid
import matplotlib.pyplot as plt
import numpy as np
#%% Example
n=20
w = np.ones(2);w
X,y=init_data1(n,45,w,b=0);X # eta = 1e-2
#X,y=init_data_house(n,45,w);X # 1e-7
X_b = data_b(X);X_b
y
#%%
B_b = np.linalg.inv(X_b.T.dot(X_b)) @ (X_b.T.dot(y));B_b
B = np.linalg.inv(X.T.dot(X)) @ (X.T.dot(y));B
#%%
#w = np.array([-2.5,-2.5]);w
#w = np.array([0.,0.]);w
A = 2./len(y)*X.T.dot(X) # ŋ=1 # Hessian matrix
J = lambda w: np.mean((X.dot(w)-y)**2) # objective function (mean squared error)
gJ = lambda w: 2./len(y)*X.T.dot(X.dot(w)-y) # gradient function
#A = X.T@X # ŋ=1/n
#J = lambda w: w.dot(A).dot(w)
#gJ = lambda w: A.dot(w)
pgrid =list(ParameterGrid(dict(sgd=[0,1],
isStep=[0],
# ρ=[.5,5,10],
# n_b=[2,5],
#                          ŋ_a=[1], # ŋ_a must be greater than 1
method=['mm21','mm22','mm23','mm24','mm25'],
#method=['mm31','mm32','mm33','mm34','mm30'],
#method=['mm40','mm41','mm42','mm43','mm44','mm45','mm46'],
#method=['mm51','mm52','mm53','mm54','mm55'],
#method=['mm10'],
#method=['mm90','mm91','mm92','mm93','mm94',],
)))
skwargs = dict(A=A,ŋ=.1,ŋ_a=1,tol=0.05,
ε=.001,λ=.1,α=.5,γ=0.5,β1=.9,β2=.999)
wws=[];ess=[];rets=[]
for pg in pgrid:
    w0 = w.copy()-np.random.uniform(1,3,2) # arbitrary starting point
kwargs=dict(X=X.copy(),y=y.copy(),
gJ=gJ,J=J,w=w0,)
kwargs.update(skwargs) ; kwargs.update(pg) ; var = VarSetX(kwargs)
ret = gradient_descent_f(var,n_iters=20,skipConv=0,
**kwargs)
ww = np.stack(ret['wh'][:,1])
es = ret['wh'][:,2]
wws.append(ww); ess.append(es); rets.append(ret)
print(ww,es)
#%%
x = np.zeros(len(w));x
x = np.vstack([x, np.amax(X,axis=0)]);x
x_b = data_b(x)
yh = x.dot(B); yh
fig, ax = plt.subplots(figsize = (8,8))
ax.plot(X[:,0],y,'o')
ax.plot(x[:,0],yh,color='b',linewidth=5)
ws = [ww[int(i)] for i in np.linspace(0,len(ww)-1,10)]
for wx in ws:
    yh = x.dot(wx);yh # draw the gradually converging reference lines
ax.plot(x[:,0],yh,color='r')
ax.set_xlabel('x')
ax.set_ylabel('y')
#%%
plot_gd_contour(J,wws,ess,pgrid,skwargs,B)
#%%
paras = skwargs.copy()
paras.pop('A')
iters_gd_plot(rets,var,pgrid,paras=paras,
**kwargs)
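#%% Hedged illustration (added; not from the original helper modules): a plain
# fixed-step gradient-descent loop on the same objective, i.e. the update
# w <- w - ŋ*gJ(w), showing what gradient_descent_f is assumed to iterate internally.
def plain_gd(w_init, grad, ŋ=.1, n_iters=20):
    w_cur = w_init.copy()
    history = [w_cur.copy()]
    for _ in range(n_iters):
        w_cur = w_cur - ŋ*grad(w_cur)
        history.append(w_cur.copy())
    return np.array(history)
#plain_gd(w - np.random.uniform(1,3,2), gJ) # converges towards B for a small enough ŋ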
|
[
"numpy.stack",
"numpy.random.uniform",
"varclass.VarSetX",
"plotxy.plot_gd_contour",
"numpy.ones",
"initdata.data_b",
"plotxy.iters_gd_plot",
"numpy.amax",
"initdata.init_data1",
"func.gradient_descent_f",
"matplotlib.pyplot.subplots"
] |
[((402, 412), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (409, 412), True, 'import numpy as np\n'), ((419, 444), 'initdata.init_data1', 'init_data1', (['n', '(45)', 'w'], {'b': '(0)'}), '(n, 45, w, b=0)\n', (429, 444), False, 'from initdata import init_data, init_data1, data_b, init_data_house\n'), ((502, 511), 'initdata.data_b', 'data_b', (['X'], {}), '(X)\n', (508, 511), False, 'from initdata import init_data, init_data1, data_b, init_data_house\n'), ((2015, 2024), 'initdata.data_b', 'data_b', (['x'], {}), '(x)\n', (2021, 2024), False, 'from initdata import init_data, init_data1, data_b, init_data_house\n'), ((2054, 2082), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2066, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2328, 2375), 'plotxy.plot_gd_contour', 'plot_gd_contour', (['J', 'wws', 'ess', 'pgrid', 'skwargs', 'B'], {}), '(J, wws, ess, pgrid, skwargs, B)\n', (2343, 2375), False, 'from plotxy import plot_gd_xy, iters_gd_plot, plot_gd_contour\n'), ((2414, 2468), 'plotxy.iters_gd_plot', 'iters_gd_plot', (['rets', 'var', 'pgrid'], {'paras': 'paras'}), '(rets, var, pgrid, paras=paras, **kwargs)\n', (2427, 2468), False, 'from plotxy import plot_gd_xy, iters_gd_plot, plot_gd_contour\n'), ((1700, 1715), 'varclass.VarSetX', 'VarSetX', (['kwargs'], {}), '(kwargs)\n', (1707, 1715), False, 'from varclass import VarSetX\n'), ((1726, 1783), 'func.gradient_descent_f', 'gradient_descent_f', (['var'], {'n_iters': '(20)', 'skipConv': '(0)'}), '(var, n_iters=20, skipConv=0, **kwargs)\n', (1744, 1783), False, 'from func import gradient_descent_f\n'), ((1821, 1846), 'numpy.stack', 'np.stack', (["ret['wh'][:, 1]"], {}), "(ret['wh'][:, 1])\n", (1829, 1846), True, 'import numpy as np\n'), ((1541, 1567), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1558, 1567), True, 'import numpy as np\n'), ((1987, 2005), 'numpy.amax', 'np.amax', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1994, 2005), True, 'import numpy as np\n')]
|
##########################################################
# pytorch-kaldi v.0.1
# <NAME>, <NAME>
# Mila, University of Montreal
# October 2018
#
# Description: This script generates kaldi ark files containing raw features.
# The file list must be a file containing "snt_id file.wav".
#               Note that only wav files are supported here (sphere or other formats are not supported)
##########################################################
import scipy.io.wavfile
import math
import numpy as np
import os
from data_io import read_vec_int_ark, write_mat
# Run it for all the data chunks (e.g., train, dev, test): uncomment and adapt the path block for the chunk you want to process
lab_folder = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/exp/dnn4_pretrain-dbn_dnn_ali_test"
lab_opts = "ali-to-pdf"
out_folder = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test"
wav_lst = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/test/wav.lst"
scp_file_out = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test/feats_raw.scp"
# lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_dev'
# lab_opts='ali-to-pdf'
# out_folder='raw_TIMIT_200ms/dev'
# wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/dev/wav_lst.scp'
# scp_file_out='quick_test/data/dev/feats_raw.scp'
# lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_test'
# lab_opts='ali-to-pdf'
# out_folder='raw_TIMIT_200ms/test'
# wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/test/wav_lst.scp'
# scp_file_out='quick_test/data/test/feats_raw.scp'
sig_fs = 16000 # Hz
sig_wlen = 200 # ms
lab_fs = 16000 # Hz
lab_wlen = 25 # ms
lab_wshift = 10 # ms
sig_wlen_samp = int((sig_fs * sig_wlen) / 1000)
lab_wlen_samp = int((lab_fs * lab_wlen) / 1000)
lab_wshift_samp = int((lab_fs * lab_wshift) / 1000)
# Create the output folder
try:
os.stat(out_folder)
except:
os.makedirs(out_folder)
# Create the scp file
scp_file = open(scp_file_out, "w")
# reading the labels
lab = {
k: v
for k, v in read_vec_int_ark(
"gunzip -c " + lab_folder + "/ali*.gz | " + lab_opts + " " + lab_folder + "/final.mdl ark:- ark:-|", out_folder
)
}
# reading the list file
with open(wav_lst) as f:
sig_lst = f.readlines()
sig_lst = [x.strip() for x in sig_lst]
for sig_file in sig_lst:
sig_id = sig_file.split(" ")[0]
sig_path = sig_file.split(" ")[1]
[fs, signal] = scipy.io.wavfile.read(sig_path)
signal = signal.astype(float) / 32768
signal = signal / np.max(np.abs(signal))
cnt_fr = 0
beg_samp = 0
frame_all = []
while beg_samp + lab_wlen_samp < signal.shape[0]:
sample_fr = np.zeros(sig_wlen_samp)
central_sample_lab = int(((beg_samp + lab_wlen_samp / 2) - 1))
central_fr_index = int(((sig_wlen_samp / 2) - 1))
beg_signal_fr = int(central_sample_lab - (sig_wlen_samp / 2))
end_signal_fr = int(central_sample_lab + (sig_wlen_samp / 2))
if beg_signal_fr >= 0 and end_signal_fr <= signal.shape[0]:
sample_fr = signal[beg_signal_fr:end_signal_fr]
else:
if beg_signal_fr < 0:
n_left_samples = central_sample_lab
sample_fr[central_fr_index - n_left_samples + 1 :] = signal[0:end_signal_fr]
if end_signal_fr > signal.shape[0]:
n_right_samples = signal.shape[0] - central_sample_lab
sample_fr[0 : central_fr_index + n_right_samples + 1] = signal[beg_signal_fr:]
frame_all.append(sample_fr)
cnt_fr = cnt_fr + 1
beg_samp = beg_samp + lab_wshift_samp
frame_all = np.asarray(frame_all)
# Save the matrix into a kaldi ark
out_file = out_folder + "/" + sig_id + ".ark"
write_mat(out_folder, out_file, frame_all, key=sig_id)
print(sig_id)
scp_file.write(sig_id + " " + out_folder + "/" + sig_id + ".ark:" + str(len(sig_id) + 1) + "\n")
N_fr_comp = 1 + math.floor((signal.shape[0] - 400) / 160)
# print("%s %i %i "%(lab[sig_id].shape[0],N_fr_comp,cnt_fr))
scp_file.close()
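# --- Hedged worked example (added; not part of the original script) ---
# With the constants above (16 kHz audio, 25 ms label window, 10 ms shift,
# 200 ms raw window) each raw frame is centred on the centre of its label window.
def raw_window_bounds(t):
    beg = t * lab_wshift_samp                   # 160 samples per label shift
    central = int(beg + lab_wlen_samp / 2 - 1)  # centre of the 400-sample label window
    return central - sig_wlen_samp // 2, central + sig_wlen_samp // 2
# raw_window_bounds(0)   -> (-1401, 1799): negative start, so the frame gets zero-padded on the left
# raw_window_bounds(100) -> (14599, 17799)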
|
[
"data_io.write_mat",
"numpy.abs",
"os.makedirs",
"os.stat",
"data_io.read_vec_int_ark",
"numpy.asarray",
"numpy.zeros",
"math.floor"
] |
[((1797, 1816), 'os.stat', 'os.stat', (['out_folder'], {}), '(out_folder)\n', (1804, 1816), False, 'import os\n'), ((3554, 3575), 'numpy.asarray', 'np.asarray', (['frame_all'], {}), '(frame_all)\n', (3564, 3575), True, 'import numpy as np\n'), ((3670, 3724), 'data_io.write_mat', 'write_mat', (['out_folder', 'out_file', 'frame_all'], {'key': 'sig_id'}), '(out_folder, out_file, frame_all, key=sig_id)\n', (3679, 3724), False, 'from data_io import read_vec_int_ark, write_mat\n'), ((1829, 1852), 'os.makedirs', 'os.makedirs', (['out_folder'], {}), '(out_folder)\n', (1840, 1852), False, 'import os\n'), ((1967, 2100), 'data_io.read_vec_int_ark', 'read_vec_int_ark', (["('gunzip -c ' + lab_folder + '/ali*.gz | ' + lab_opts + ' ' + lab_folder +\n '/final.mdl ark:- ark:-|')", 'out_folder'], {}), "('gunzip -c ' + lab_folder + '/ali*.gz | ' + lab_opts + ' ' +\n lab_folder + '/final.mdl ark:- ark:-|', out_folder)\n", (1983, 2100), False, 'from data_io import read_vec_int_ark, write_mat\n'), ((2596, 2619), 'numpy.zeros', 'np.zeros', (['sig_wlen_samp'], {}), '(sig_wlen_samp)\n', (2604, 2619), True, 'import numpy as np\n'), ((3865, 3906), 'math.floor', 'math.floor', (['((signal.shape[0] - 400) / 160)'], {}), '((signal.shape[0] - 400) / 160)\n', (3875, 3906), False, 'import math\n'), ((2453, 2467), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (2459, 2467), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
from scipy.io import wavfile
from tqdm import trange
from ar_model import ARmodel
def correctSignal(signal, model, window_size, pred_size, step, treshold=3):
"""Correct signal using AR model
Args:
signal (np.array): signal to correct
model (ARmodel): autoregresive model
window_size (int): length of the window for updating AR model coefs
pred_size (int): number of samples to generate from AR model
step (int): step interval
        treshold (float): how many times the prediction error has to exceed the
            standard deviation for a sample to be classified as disturbed
Returns:
        np.array: corrected signal
"""
out = np.copy(signal)
    for i in trange(0, signal.shape[0]-window_size-pred_size, step):
paramsEnd = i+window_size
predEnd = paramsEnd+pred_size
model.updateParams(out[i:paramsEnd])
estimated = model.estimateSignal(pred_size, out[paramsEnd-model.r:paramsEnd])
err = np.abs(out[paramsEnd:predEnd] - estimated)
std = np.std(err)
disturbed = np.abs(err) > std*treshold
disturbanceLength = 0
for j in range(pred_size):
if disturbed[j]:
disturbanceLength += 1
elif disturbanceLength > 0:
k = j + paramsEnd
before = signal[k-disturbanceLength-1]
after = signal[k]
out[k-disturbanceLength:k] = np.linspace(before,after,disturbanceLength+2)[1:-1]
disturbanceLength = 0
return out
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Removing impulse interference from music recordings")
parser.add_argument('filename', metavar='filename', type=str, help='path to wave file')
parser.add_argument('-r', '--order', type=int, default=4, help='order of AR model')
parser.add_argument('-o', '--out_file', type=str, default='out.wav', help='name of the output file')
parser.add_argument('-u', '--param_window', type=int, default=256, help='length of the window for updating AR model coefs')
parser.add_argument('-e', '--pred_widnow', type=int, default=8, help='number of samples to generate from AR model')
parser.add_argument('-s', '--step', type=int, default=4, help='step interval')
parser.add_argument('-d', '--decay', type=float, default=1.0, help='decay rate for exponential window')
    parser.add_argument('-m', '--max_std', type=float, default=3.0, help='how many times the error has to exceed the standard deviation for a sample to be classified as disturbed')
args = parser.parse_args()
fs, input = wavfile.read(args.filename)
input = input / 2**15
model = ARmodel(args.order, args.decay)
output = correctSignal(input, model, args.param_window, args.pred_widnow, args.step, args.max_std)
wavfile.write(args.out_file, fs, output)
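# --- Editor's illustrative sketch (not part of the original script) ---
# Minimal synthetic use of correctSignal; the tone and impulse positions below are
# hypothetical, the ARmodel(order, decay) constructor signature is taken from the
# __main__ block above, and ARmodel's methods (updateParams, estimateSignal, r)
# are assumed to behave as the function body above expects.
def _example_synthetic_run():
    fs = 8000
    t = np.arange(fs) / fs
    clean = 0.5 * np.sin(2 * np.pi * 440 * t)
    noisy = np.copy(clean)
    hits = np.random.choice(fs, size=20, replace=False)
    noisy[hits] += np.random.uniform(-1.0, 1.0, size=20)  # artificial impulse disturbances
    model = ARmodel(4, 1.0)
    return correctSignal(noisy, model, window_size=256, pred_size=8, step=4)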
|
[
"numpy.abs",
"argparse.ArgumentParser",
"numpy.copy",
"tqdm.trange",
"numpy.std",
"scipy.io.wavfile.read",
"ar_model.ARmodel",
"scipy.io.wavfile.write",
"numpy.linspace"
] |
[((711, 726), 'numpy.copy', 'np.copy', (['signal'], {}), '(signal)\n', (718, 726), True, 'import numpy as np\n'), ((741, 798), 'tqdm.trange', 'trange', (['(0)', '(input.shape[0] - window_size - pred_size)', 'step'], {}), '(0, input.shape[0] - window_size - pred_size, step)\n', (747, 798), False, 'from tqdm import trange\n'), ((1621, 1716), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Removing impulse interference from music recordings"""'}), "(description=\n 'Removing impulse interference from music recordings')\n", (1644, 1716), False, 'import argparse\n'), ((2655, 2682), 'scipy.io.wavfile.read', 'wavfile.read', (['args.filename'], {}), '(args.filename)\n', (2667, 2682), False, 'from scipy.io import wavfile\n'), ((2722, 2753), 'ar_model.ARmodel', 'ARmodel', (['args.order', 'args.decay'], {}), '(args.order, args.decay)\n', (2729, 2753), False, 'from ar_model import ARmodel\n'), ((2862, 2902), 'scipy.io.wavfile.write', 'wavfile.write', (['args.out_file', 'fs', 'output'], {}), '(args.out_file, fs, output)\n', (2875, 2902), False, 'from scipy.io import wavfile\n'), ((1015, 1057), 'numpy.abs', 'np.abs', (['(out[paramsEnd:predEnd] - estimated)'], {}), '(out[paramsEnd:predEnd] - estimated)\n', (1021, 1057), True, 'import numpy as np\n'), ((1072, 1083), 'numpy.std', 'np.std', (['err'], {}), '(err)\n', (1078, 1083), True, 'import numpy as np\n'), ((1105, 1116), 'numpy.abs', 'np.abs', (['err'], {}), '(err)\n', (1111, 1116), True, 'import numpy as np\n'), ((1474, 1523), 'numpy.linspace', 'np.linspace', (['before', 'after', '(disturbanceLength + 2)'], {}), '(before, after, disturbanceLength + 2)\n', (1485, 1523), True, 'import numpy as np\n')]
|
import unittest
from numpy import hstack, max, abs, sqrt
from cantera import Solution, gas_constant
import numpy as np
from spitfire import ChemicalMechanismSpec
from os.path import join, abspath
from subprocess import getoutput
test_mech_directory = abspath(join('tests', 'test_mechanisms', 'old_xmls'))
mechs = [x.replace('.xml', '') for x in getoutput('ls ' + test_mech_directory + ' | grep .xml').split('\n')]
def rhs_cantera(p_arg, T_arg, y_arg, rhoin, Tin_arg, yin_arg, tau_arg, gas, rhs_chem_in):
gas.TPY = T_arg, p_arg, y_arg
rho = gas.density_mass
cv = gas.cv_mass
e = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights
gas.TDY = Tin_arg, rhoin, yin_arg
ein = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights
rhs = np.copy(rhs_chem_in)
rhsMass = np.zeros(gas.n_species + 1)
rhsMass[0] += (rhoin - rho)
rhsMass[1] += 1. / (rho * cv) * (rhoin * np.sum(yin_arg * (ein - e)))
rhsMass[2:] += rhoin / rho * (yin_arg[:-1] - y_arg[:-1])
rhs += rhsMass / tau_arg
return rhs
def validate_on_mechanism(mech, temperature, pressure, tau, do_rhs, do_jac):
xml = join(test_mech_directory, mech + '.xml')
T = temperature
Tin = T + 1000.
p = pressure
r = ChemicalMechanismSpec(xml, 'gas').griffon
gas = Solution(xml)
ns = gas.n_species
y = np.ones(ns) # equal masses in the reactor
gas.TPY = T, p, y
y = np.copy(gas.Y)
rho = gas.density_mass
xin = np.ones(ns) # equal moles in the feed
gas.TPX = Tin, p, xin
yin = np.copy(gas.Y)
rhoin = gas.density_mass
state = hstack((rho, T, y[:-1]))
rhsGRChemOnly = np.zeros(ns + 1)
r.reactor_rhs_isochoric(state, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, rhsGRChemOnly)
rhsCN = rhs_cantera(p, T, y, rhoin, Tin, yin, tau, gas, rhsGRChemOnly)
rhsGR = np.empty(ns + 1)
r.reactor_rhs_isochoric(state, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR)
if do_rhs:
return max(abs(rhsGR - rhsCN) / (abs(rhsCN) + 1.)) < 100. * sqrt(np.finfo(float).eps)
if do_jac:
jacGR = np.empty((ns + 1) * (ns + 1))
r.reactor_jac_isochoric(state, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, 0, rhsGR, jacGR)
jacGR = jacGR.reshape((ns + 1, ns + 1), order='F')
drho = 1.e-6
dT = 1.e-6
dY = 1.e-6
jacFD = np.empty((ns + 1, ns + 1))
rhsGR1, rhsGR2 = np.empty(ns + 1), np.empty(ns + 1)
state_m = hstack((rho - drho, T, y[:-1]))
state_p = hstack((rho + drho, T, y[:-1]))
r.reactor_rhs_isochoric(state_m, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR1)
r.reactor_rhs_isochoric(state_p, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR2)
jacFD[:, 0] = (- rhsGR1 + rhsGR2) / (2. * drho)
state_m = hstack((rho, T - dT, y[:-1]))
state_p = hstack((rho, T + dT, y[:-1]))
r.reactor_rhs_isochoric(state_m, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR1)
r.reactor_rhs_isochoric(state_p, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR2)
jacFD[:, 1] = (- rhsGR1 + rhsGR2) / (2. * dT)
for i in range(ns - 1):
y_m1, y_p1 = np.copy(y), np.copy(y)
y_m1[i] += - dY
y_m1[-1] -= - dY
y_p1[i] += dY
y_p1[-1] -= dY
state_m = hstack((rho, T, y_m1[:-1]))
state_p = hstack((rho, T, y_p1[:-1]))
r.reactor_rhs_isochoric(state_m, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR1)
r.reactor_rhs_isochoric(state_p, rhoin, Tin, yin, tau, 0, 0, 0, 0, 0, 0, True, rhsGR2)
jacFD[:, 2 + i] = (- rhsGR1 + rhsGR2) / (2. * dY)
return max(abs(jacGR - jacFD) / (abs(jacGR) + 1.)) < 4.e-3
def create_test(m, T, p, tau, do_rhs, do_jac):
def test(self):
self.assertTrue(validate_on_mechanism(m, T, p, tau, do_rhs, do_jac))
return test
class Accuracy(unittest.TestCase):
pass
tau_list = [1.e-6, 1.e-3]
for mech in mechs:
for tau in tau_list:
rhsname = 'test_rhs_' + mech + '_' + 'tau=' + str(tau)
jacname = 'test_jac_' + mech + '_' + 'tau=' + str(tau)
setattr(Accuracy, rhsname, create_test(mech, 600., 101325, tau, True, False))
if 'methane' not in mech: # skip methane in the finite difference Jacobian tests
setattr(Accuracy, jacname, create_test(mech, 600., 101325, tau, False, True))
if __name__ == '__main__':
unittest.main()
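# --- Editor's illustrative sketch (not part of the test suite) ---
# Running one right-hand-side validation directly, assuming the mechanism XMLs
# under tests/test_mechanisms/old_xmls are present so that `mechs` is non-empty.
def _example_single_validation():
    return validate_on_mechanism(mechs[0], 600., 101325., 1.e-3, do_rhs=True, do_jac=False)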
|
[
"unittest.main",
"spitfire.ChemicalMechanismSpec",
"numpy.sum",
"numpy.abs",
"numpy.copy",
"numpy.empty",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.finfo",
"cantera.Solution",
"subprocess.getoutput",
"os.path.join"
] |
[((260, 304), 'os.path.join', 'join', (['"""tests"""', '"""test_mechanisms"""', '"""old_xmls"""'], {}), "('tests', 'test_mechanisms', 'old_xmls')\n", (264, 304), False, 'from os.path import join, abspath\n'), ((809, 829), 'numpy.copy', 'np.copy', (['rhs_chem_in'], {}), '(rhs_chem_in)\n', (816, 829), True, 'import numpy as np\n'), ((844, 871), 'numpy.zeros', 'np.zeros', (['(gas.n_species + 1)'], {}), '(gas.n_species + 1)\n', (852, 871), True, 'import numpy as np\n'), ((1172, 1212), 'os.path.join', 'join', (['test_mech_directory', "(mech + '.xml')"], {}), "(test_mech_directory, mech + '.xml')\n", (1176, 1212), False, 'from os.path import join, abspath\n'), ((1332, 1345), 'cantera.Solution', 'Solution', (['xml'], {}), '(xml)\n', (1340, 1345), False, 'from cantera import Solution, gas_constant\n'), ((1378, 1389), 'numpy.ones', 'np.ones', (['ns'], {}), '(ns)\n', (1385, 1389), True, 'import numpy as np\n'), ((1451, 1465), 'numpy.copy', 'np.copy', (['gas.Y'], {}), '(gas.Y)\n', (1458, 1465), True, 'import numpy as np\n'), ((1504, 1515), 'numpy.ones', 'np.ones', (['ns'], {}), '(ns)\n', (1511, 1515), True, 'import numpy as np\n'), ((1579, 1593), 'numpy.copy', 'np.copy', (['gas.Y'], {}), '(gas.Y)\n', (1586, 1593), True, 'import numpy as np\n'), ((1636, 1660), 'numpy.hstack', 'hstack', (['(rho, T, y[:-1])'], {}), '((rho, T, y[:-1]))\n', (1642, 1660), False, 'from numpy import hstack, max, abs, sqrt\n'), ((1682, 1698), 'numpy.zeros', 'np.zeros', (['(ns + 1)'], {}), '(ns + 1)\n', (1690, 1698), True, 'import numpy as np\n'), ((1883, 1899), 'numpy.empty', 'np.empty', (['(ns + 1)'], {}), '(ns + 1)\n', (1891, 1899), True, 'import numpy as np\n'), ((4496, 4511), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4509, 4511), False, 'import unittest\n'), ((1279, 1312), 'spitfire.ChemicalMechanismSpec', 'ChemicalMechanismSpec', (['xml', '"""gas"""'], {}), "(xml, 'gas')\n", (1300, 1312), False, 'from spitfire import ChemicalMechanismSpec\n'), ((2130, 2159), 'numpy.empty', 'np.empty', (['((ns + 1) * (ns + 1))'], {}), '((ns + 1) * (ns + 1))\n', (2138, 2159), True, 'import numpy as np\n'), ((2397, 2423), 'numpy.empty', 'np.empty', (['(ns + 1, ns + 1)'], {}), '((ns + 1, ns + 1))\n', (2405, 2423), True, 'import numpy as np\n'), ((2503, 2534), 'numpy.hstack', 'hstack', (['(rho - drho, T, y[:-1])'], {}), '((rho - drho, T, y[:-1]))\n', (2509, 2534), False, 'from numpy import hstack, max, abs, sqrt\n'), ((2553, 2584), 'numpy.hstack', 'hstack', (['(rho + drho, T, y[:-1])'], {}), '((rho + drho, T, y[:-1]))\n', (2559, 2584), False, 'from numpy import hstack, max, abs, sqrt\n'), ((2850, 2879), 'numpy.hstack', 'hstack', (['(rho, T - dT, y[:-1])'], {}), '((rho, T - dT, y[:-1]))\n', (2856, 2879), False, 'from numpy import hstack, max, abs, sqrt\n'), ((2898, 2927), 'numpy.hstack', 'hstack', (['(rho, T + dT, y[:-1])'], {}), '((rho, T + dT, y[:-1]))\n', (2904, 2927), False, 'from numpy import hstack, max, abs, sqrt\n'), ((949, 976), 'numpy.sum', 'np.sum', (['(yin_arg * (ein - e))'], {}), '(yin_arg * (ein - e))\n', (955, 976), True, 'import numpy as np\n'), ((2449, 2465), 'numpy.empty', 'np.empty', (['(ns + 1)'], {}), '(ns + 1)\n', (2457, 2465), True, 'import numpy as np\n'), ((2467, 2483), 'numpy.empty', 'np.empty', (['(ns + 1)'], {}), '(ns + 1)\n', (2475, 2483), True, 'import numpy as np\n'), ((3385, 3412), 'numpy.hstack', 'hstack', (['(rho, T, y_m1[:-1])'], {}), '((rho, T, y_m1[:-1]))\n', (3391, 3412), False, 'from numpy import hstack, max, abs, sqrt\n'), ((3435, 3462), 'numpy.hstack', 'hstack', (['(rho, T, y_p1[:-1])'], {}), 
'((rho, T, y_p1[:-1]))\n', (3441, 3462), False, 'from numpy import hstack, max, abs, sqrt\n'), ((346, 401), 'subprocess.getoutput', 'getoutput', (["('ls ' + test_mech_directory + ' | grep .xml')"], {}), "('ls ' + test_mech_directory + ' | grep .xml')\n", (355, 401), False, 'from subprocess import getoutput\n'), ((3230, 3240), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (3237, 3240), True, 'import numpy as np\n'), ((3242, 3252), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (3249, 3252), True, 'import numpy as np\n'), ((2023, 2041), 'numpy.abs', 'abs', (['(rhsGR - rhsCN)'], {}), '(rhsGR - rhsCN)\n', (2026, 2041), False, 'from numpy import hstack, max, abs, sqrt\n'), ((3743, 3761), 'numpy.abs', 'abs', (['(jacGR - jacFD)'], {}), '(jacGR - jacFD)\n', (3746, 3761), False, 'from numpy import hstack, max, abs, sqrt\n'), ((2045, 2055), 'numpy.abs', 'abs', (['rhsCN'], {}), '(rhsCN)\n', (2048, 2055), False, 'from numpy import hstack, max, abs, sqrt\n'), ((2077, 2092), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2085, 2092), True, 'import numpy as np\n'), ((3765, 3775), 'numpy.abs', 'abs', (['jacGR'], {}), '(jacGR)\n', (3768, 3775), False, 'from numpy import hstack, max, abs, sqrt\n')]
|
import matplotlib, numpy, pprint
# matplotlib.rcParams['pdf.fonttype'] = 42
# matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.use('Agg')
import matplotlib.pyplot as plot
import gzip, csv, pylab
from collections import namedtuple
from rvs import *
from patch import *
"""
task events table contains the following fields:
1. timestamp
2. missing info
3. job ID
4. task index - within the job
5. machine ID
6. event type
7. user name
8. scheduling class
9. priority
10. resource request for CPU cores
11. resource request for RAM
12. resource request for local disk space
13. different-machine constraint
"""
jobevents_f_to_i = {
'timestamp': 0,
'job id': 2,
'event': 3,
'job name': 6,
'logical job name': 7
}
taskevents_f_to_i = {
'timestamp': 0,
'job id': 2,
'task index': 3,
'event': 5
}
e_to_i = {
'schedule': 1,
'finish': 4
}
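# --- Editor's illustrative sketch (not part of the original script) ---
# How one task_events CSV row maps onto the field indices above; the row values
# are hypothetical, only the positions of timestamp / job id / task index / event matter.
def _example_parse_task_row():
  row = ['600000000', '', '515042969', '3', '42', '1', 'user', '2', '9', '0.05', '0.03', '0.001', '0']
  t = float(row[taskevents_f_to_i['timestamp']])/10**6   # 600.0 seconds
  ji = int(row[taskevents_f_to_i['job id']])              # 515042969
  ti = int(row[taskevents_f_to_i['task index']])          # 3
  scheduled = int(row[taskevents_f_to_i['event']]) == e_to_i['schedule']
  return t, ji, ti, scheduled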
def counter_to_furl(counter, obj="task"):
part = str(counter)
part = (5 - len(part) )*'0' + part
return "/home/mfa51/google-clusterdata-2011/{}_events/part-{}-of-00500.csv.gz".format(obj, part)
def deneme():
job_task_i__sch_finish_time_m = {}
counter = 0
while 1:
furl = counter_to_furl(counter)
try:
with gzip.open(furl, mode="rt") as f:
reader = csv.reader(f)
for line in reader:
i = line[taskevents_f_to_i['job id'] ] + '_' + line[taskevents_f_to_i['task index'] ]
e = int(line[taskevents_f_to_i['event'] ] )
if e == e_to_i['schedule'] or e == e_to_i['finish']:
t = float(line[taskevents_f_to_i['timestamp'] ] )/10**6
if i not in job_task_i__sch_finish_time_m:
job_task_i__sch_finish_time_m[i] = [t]
else:
job_task_i__sch_finish_time_m[i].append(t)
except (OSError, IOError) as e:
log(WARNING, "done with the files.")
break
counter += 1
if counter > 10:
break
with open("task_lifetime.dat", 'wt') as f:
writer = csv.writer(f, delimiter=',')
for job_task_i,sch_finish_time in job_task_i__sch_finish_time_m.items():
if len(sch_finish_time) >= 2:
sch_finish_time = [t for t in sch_finish_time if t]
if len(sch_finish_time) == 1:
sch_finish_time.append(0)
# elif len(sch_finish_time) > 2:
# log(WARNING, "More than 2 scheduling or finish events for single task; sch_finish_time= {}".format(sch_finish_time) )
lifetime = abs(sch_finish_time[1] - sch_finish_time[0] )
writer.writerow([job_task_i, lifetime] )
def write_num_tasks_per_job():
wf = open("num_tasks.dat", 'wt')
writer = csv.writer(wf, delimiter=',')
counter = 0
while 1:
print("counter= {}".format(counter) )
ji__ti_l_m = {}
furl = counter_to_furl(counter)
try:
with gzip.open(furl, mode="rt") as f:
reader = csv.reader(f)
for line in reader:
ji = int(line[taskevents_f_to_i['job id'] ] )
ti = int(line[taskevents_f_to_i['task index'] ] )
e = int(line[taskevents_f_to_i['event'] ] )
if e == e_to_i['schedule']:
if ji not in ji__ti_l_m:
ji__ti_l_m[ji] = set()
ji__ti_l_m[ji].add(ti)
print("counter= {}, writing now...".format(counter) )
for ji, ti_l in ji__ti_l_m.items():
writer.writerow([ji, len(ti_l) ] )
except (OSError, IOError) as e:
log(WARNING, "done with the files.")
break
counter += 1
if counter > 510:
break
wf.close()
def do_possible_merges_in_num_tasks():
ji__num_task_m = {}
with open("num_tasks.dat", mode="rt") as f:
reader = csv.reader(f)
for line in reader:
ji = int(line[0] )
num_task = int(line[1] )
if ji not in ji__num_task_m:
ji__num_task_m[ji] = 0
ji__num_task_m[ji] += num_task
with open("num_tasks_merged.dat", mode="wt") as f:
writer = csv.writer(f, delimiter=',')
for ji, num_tasks in ji__num_task_m.items():
writer.writerow([ji, num_tasks] )
log(WARNING, "done.")
def write_jobs_w_num_task(num_task):
ji_l = []
with open("num_tasks_merged.dat", mode="rt") as f:
reader = csv.reader(f)
for line in reader:
num_task_ = int(line[1] )
if num_task_ == num_task:
ji_l.append(int(line[0] ) )
print("writing, len(ji_l)= {}".format(len(ji_l) ) )
with open("jobs_w_num_task_{}.dat".format(num_task), mode="wt") as f:
writer = csv.writer(f, delimiter=',')
for ji in ji_l:
writer.writerow([ji] )
log(WARNING, "done.")
def write_task_lifetimes(num_task):
log(WARNING, "started; num_task= {}".format(num_task) )
ji_l = []
with open("jobs_w_num_task_{}.dat".format(num_task), mode="rt") as f:
reader = csv.reader(f)
for line in reader:
ji_l.append(int(line[0] ) )
#
Entry = namedtuple('Entry', 'ji ti')
entry__sch_fin_l_m = {}
counter = 0
while 1:
print("counter= {}".format(counter) )
furl = counter_to_furl(counter)
try:
with gzip.open(furl, mode="rt") as f:
reader = csv.reader(f)
for line in reader:
ji = int(line[taskevents_f_to_i['job id'] ] )
if ji in ji_l:
e = int(line[taskevents_f_to_i['event'] ] )
if e == e_to_i['schedule'] or e == e_to_i['finish']:
ti = int(line[taskevents_f_to_i['task index'] ] )
entry = Entry(ji=ji, ti=ti)
t = float(line[taskevents_f_to_i['timestamp'] ] )/10**6
if entry not in entry__sch_fin_l_m:
entry__sch_fin_l_m[entry] = [0,0]
if e == e_to_i['schedule']:
entry__sch_fin_l_m[entry][0] = t
elif e == e_to_i['finish']:
entry__sch_fin_l_m[entry][1] = t
except (OSError, IOError) as e:
log(WARNING, "done with the files.")
break
counter += 1
if counter > 510:
break
print("writing now...")
with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(num_task), mode="wt") as f:
writer = csv.writer(f, delimiter=',')
for entry, sch_fin_tuple in entry__sch_fin_l_m.items():
if sch_fin_tuple[0] < sch_fin_tuple[1]:
lt = sch_fin_tuple[1] - sch_fin_tuple[0]
writer.writerow([lt] )
log(WARNING, "done.")
def filter_task_lifetimes(num_task):
lifetime_l = []
with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(num_task), mode="rt") as f:
reader = csv.reader(f)
for line in reader:
lt = float(line[0] )
if lt < 5000:
lifetime_l.append(lt)
with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(num_task), mode="wt") as f:
writer = csv.writer(f, delimiter=',')
for lt in lifetime_l:
writer.writerow([lt] )
log(WARNING, "done.")
# ****************************** PLOT ***************************** #
def plot_num_tasks_hist():
num_tasks_l = []
with open("num_tasks_merged.dat", mode="rt") as f:
reader = csv.reader(f)
for line in reader:
num_task = int(line[1] )
# if num_task > 1000:
# print("num_task= {}".format(num_task) )
# if num_task > 1 and num_task < 2000:
num_tasks_l.append(num_task)
# num_task__num_job_m = {}
# for n in num_tasks_l:
# if n not in num_task__num_job_m:
# num_task__num_job_m[n] = 0
# num_task__num_job_m[n] += 1
# print("num_task__num_job_m= {}".format(pprint.pformat(num_task__num_job_m) ) )
# plot.hist(num_tasks_l, bins=1000, histtype='step')
plot.hist(num_tasks_l, bins=100, histtype='step', normed=True, lw=2)
plot.xlabel("Number of tasks")
plot.ylabel("Frequency")
plot.savefig("plot_num_tasks_hist.png", bbox_inches='tight')
plot.gcf().clear()
log(WARNING, "done.")
def plot_task_lifetime_hist(k):
lifetime_l = []
with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f:
reader = csv.reader(f)
for line in reader:
lifetime_l.append(float(line[0] ) )
# rv = Pareto(a=2, loc=2)
# for i in range(1000000):
# lifetime_l.append(rv.gen_sample() )
lifetime_l = numpy.sort(lifetime_l)
print("len(lifetime_l)= {}".format(len(lifetime_l) ) )
fig = plot.figure(1)
# def_size = fig.get_size_inches()
# fig.set_size_inches(def_size[0]*1.5, def_size[1] )
plot.subplot(211)
# plot.step(x_l, y_l, 'bo', label='log-linear', lw=2)
plot.hist(lifetime_l, bins=100, histtype='step', normed=True, lw=2)
plot.xlabel("X (s)")
plot.ylabel("Frequency")
plot.title(r'$k= {}$'.format(k) )
x_l = lifetime_l[::-1]
y_l = numpy.arange(lifetime_l.size)/lifetime_l.size
plot.subplot(223)
plot.yscale('log')
plot.step(x_l, y_l, 'bo', label='log(tail) vs. X', lw=2)
plot.xlabel("X (s)")
plot.ylabel("Tail")
plot.legend()
plot.subplot(224)
plot.xscale('log')
plot.yscale('log')
plot.step(x_l, y_l, 'bo', label='log(tail) vs. log(X)', lw=2)
plot.xlabel("X (s)")
plot.legend()
# plot.xlabel("X")
# plot.xlabel("Task lifetime X (s)")
# plot.ylabel(r'$Pr\{X > x\}$')
plot.savefig("plot_task_lifetime_hist_k_{}.png".format(k) )
plot.gcf().clear()
log(WARNING, "done; k= {}".format(k) )
def pplot_task_lifetime_hist(k):
log(INFO, "started; k= {}".format(k) )
lifetime_l = []
# with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f:
with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f:
reader = csv.reader(f)
for line in reader:
lifetime_l.append(float(line[0] ) )
lifetime_l = numpy.sort(lifetime_l)
print("len(lifetime_l)= {}".format(len(lifetime_l) ) )
#
# plot.hist(lifetime_l, bins=100, histtype='step', normed=True, lw=2)
x_l = lifetime_l[::-1]
y_l = numpy.arange(lifetime_l.size)/lifetime_l.size
# y_l = [math.log(y + 0.000001) for y in y_l]
# m, b = numpy.polyfit(x_l, y_l, 1)
# plot.plot(x_l, m*x_l+b, 'r', lw=1, ls=':')
# step_size = 10
# num_rank = math.ceil(x_l[0]/step_size)
# # rank__avg_lifetime_l = []
# rank__num_lifetime_l = []
# i = 0
# for r in range(1, num_rank+1):
# sum_ = 0
# counter = 0
# while i < len(x_l) and x_l[i] > x_l[0]-r*step_size:
# counter += 1
# sum_ += x_l[i]
# i += 1
# rank__num_lifetime_l.append(counter)
# # avg = 0
# # if counter:
# # avg = sum_/counter
# # rank__avg_lifetime_l.append(avg)
# # print("i= {}, rank__avg_lifetime_l=\n{}".format(i, rank__avg_lifetime_l) )
# rank__num_lifetime_l = list(reversed(rank__num_lifetime_l) )
# rank_freq_l = [n/sum(rank__num_lifetime_l) for n in rank__num_lifetime_l]
# rank_tailprob_l = [sum(rank_freq_l[r-1:]) for r in range(1, num_rank+1) ]
# # plot.plot(range(1, num_rank+1), rank__avg_lifetime_l, 'bo', ls=':')
# # plot.xlabel(r'Rank', fontsize=13)
# # plot.ylabel(r'Tail distribution', fontsize=13)
# # plot.step(range(1, num_rank+1), rank_tailprob_l, 'bo', ls=':')
# # plot.yscale('log')
# # plot.xscale('log')
if k == 15:
plot.xlim(([10, 2*10**5] ) )
plot.ylim(([1/2*10**(-5), 1.3] ) )
elif k == 400:
plot.xlim(([10, 2*10**4] ) )
plot.ylim(([10**(-6), 1.3] ) )
elif k == 1050:
plot.xlim(([10, 2*10**4] ) )
plot.ylim(([10**(-6), 1.3] ) )
# plot.step(x_l, y_l, 'bo', lw=1, ls=':')
plot.step(x_l, y_l, 'bo', ms=10, mew=0, ls=':')
plot.xscale('log')
plot.yscale('log')
plot.xlabel(r'Task lifetime', fontsize=18)
plot.ylabel(r'Tail distribution', fontsize=18)
# plot.ylabel(r'Fraction of tasks completed in x')
# plot.title(r'Jobs with {} tasks'.format(k), fontsize=13)
# plot.title('k= {}, Mean= {}, Stdev= {}'.format(k, round(numpy.mean(x_l), 1), round(numpy.std(x_l), 1) ), fontsize=13)
plot.title('k= {}, Mean= {}'.format(k, round(numpy.mean(x_l), 1) ), fontsize=18)
plot.gcf().set_size_inches(4, 3)
prettify(plot.gca() )
# plot.savefig("pplot_task_lifetime_hist_k_{}.pdf".format(k) )
plot.savefig("pplot_task_lifetime_hist_k_{}.png".format(k), bbox_inches='tight')
plot.gcf().clear()
log(WARNING, "done; k= {}".format(k) )
def plot_qq_task_lifetimes(k):
lifetime_l = []
# with open("filtered_task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f:
with open("task_lifetimes_for_jobs_w_num_task_{}.dat".format(k), mode="rt") as f:
reader = csv.reader(f)
for line in reader:
lifetime_l.append(float(line[0] ) )
lifetime_l = numpy.sort(lifetime_l)
print("len(lifetime_l)= {}".format(len(lifetime_l) ) )
# For different dists: https://docs.scipy.org/doc/scipy/reference/stats.html
# scipy.stats.probplot(lifetime_l, dist="expon", plot=plot)
# scipy.stats.probplot(lifetime_l, dist="pareto", sparams=(1.2,), plot=plot)
plot.savefig("plot_qq_task_lifetimes_k_{}.png".format(k) )
log(WARNING, "done; k= {}".format(k) )
if __name__ == "__main__":
## Uncomment with caution!
# write_num_tasks_per_job()
# do_possible_merges_in_num_tasks()
# write_jobs_w_num_task(num_task=15)
# write_jobs_w_num_task(num_task=400)
# write_jobs_w_num_task(num_task=1000)
# write_jobs_w_num_task(num_task=1050)
# write_task_lifetimes(num_task=15)
# filter_task_lifetimes(num_task=15)
# write_task_lifetimes(num_task=400)
# filter_task_lifetimes(num_task=400)
# write_task_lifetimes(num_task=1000)
# filter_task_lifetimes(num_task=1000)
# write_task_lifetimes(num_task=1050)
# filter_task_lifetimes(num_task=1050)
# plot_num_tasks_hist()
# plot_task_lifetime_hist(k=15)
# plot_task_lifetime_hist(k=400)
# plot_task_lifetime_hist(k=1000)
# plot_task_lifetime_hist(k=1050)
# pplot_task_lifetime_hist(k=15)
# pplot_task_lifetime_hist(k=400)
# pplot_task_lifetime_hist(k=1000)
pplot_task_lifetime_hist(k=1050)
# plot_qq_task_lifetimes(k=400)
pass
|
[
"matplotlib.pyplot.yscale",
"csv.reader",
"matplotlib.pyplot.step",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.gca",
"csv.writer",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.sort",
"matplotlib.use",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"gzip.open",
"matplotlib.pyplot.hist",
"collections.namedtuple",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((118, 139), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (132, 139), False, 'import matplotlib, numpy, pprint\n'), ((2584, 2613), 'csv.writer', 'csv.writer', (['wf'], {'delimiter': '""","""'}), "(wf, delimiter=',')\n", (2594, 2613), False, 'import gzip, csv, pylab\n'), ((4766, 4794), 'collections.namedtuple', 'namedtuple', (['"""Entry"""', '"""ji ti"""'], {}), "('Entry', 'ji ti')\n", (4776, 4794), False, 'from collections import namedtuple\n'), ((7427, 7495), 'matplotlib.pyplot.hist', 'plot.hist', (['num_tasks_l'], {'bins': '(100)', 'histtype': '"""step"""', 'normed': '(True)', 'lw': '(2)'}), "(num_tasks_l, bins=100, histtype='step', normed=True, lw=2)\n", (7436, 7495), True, 'import matplotlib.pyplot as plot\n'), ((7501, 7531), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Number of tasks"""'], {}), "('Number of tasks')\n", (7512, 7531), True, 'import matplotlib.pyplot as plot\n'), ((7534, 7558), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (7545, 7558), True, 'import matplotlib.pyplot as plot\n'), ((7561, 7621), 'matplotlib.pyplot.savefig', 'plot.savefig', (['"""plot_num_tasks_hist.png"""'], {'bbox_inches': '"""tight"""'}), "('plot_num_tasks_hist.png', bbox_inches='tight')\n", (7573, 7621), True, 'import matplotlib.pyplot as plot\n'), ((8021, 8043), 'numpy.sort', 'numpy.sort', (['lifetime_l'], {}), '(lifetime_l)\n', (8031, 8043), False, 'import matplotlib, numpy, pprint\n'), ((8112, 8126), 'matplotlib.pyplot.figure', 'plot.figure', (['(1)'], {}), '(1)\n', (8123, 8126), True, 'import matplotlib.pyplot as plot\n'), ((8221, 8238), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(211)'], {}), '(211)\n', (8233, 8238), True, 'import matplotlib.pyplot as plot\n'), ((8297, 8364), 'matplotlib.pyplot.hist', 'plot.hist', (['lifetime_l'], {'bins': '(100)', 'histtype': '"""step"""', 'normed': '(True)', 'lw': '(2)'}), "(lifetime_l, bins=100, histtype='step', normed=True, lw=2)\n", (8306, 8364), True, 'import matplotlib.pyplot as plot\n'), ((8367, 8387), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""X (s)"""'], {}), "('X (s)')\n", (8378, 8387), True, 'import matplotlib.pyplot as plot\n'), ((8390, 8414), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (8401, 8414), True, 'import matplotlib.pyplot as plot\n'), ((8535, 8552), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(223)'], {}), '(223)\n', (8547, 8552), True, 'import matplotlib.pyplot as plot\n'), ((8555, 8573), 'matplotlib.pyplot.yscale', 'plot.yscale', (['"""log"""'], {}), "('log')\n", (8566, 8573), True, 'import matplotlib.pyplot as plot\n'), ((8576, 8632), 'matplotlib.pyplot.step', 'plot.step', (['x_l', 'y_l', '"""bo"""'], {'label': '"""log(tail) vs. X"""', 'lw': '(2)'}), "(x_l, y_l, 'bo', label='log(tail) vs. 
X', lw=2)\n", (8585, 8632), True, 'import matplotlib.pyplot as plot\n'), ((8635, 8655), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""X (s)"""'], {}), "('X (s)')\n", (8646, 8655), True, 'import matplotlib.pyplot as plot\n'), ((8658, 8677), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Tail"""'], {}), "('Tail')\n", (8669, 8677), True, 'import matplotlib.pyplot as plot\n'), ((8680, 8693), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (8691, 8693), True, 'import matplotlib.pyplot as plot\n'), ((8696, 8713), 'matplotlib.pyplot.subplot', 'plot.subplot', (['(224)'], {}), '(224)\n', (8708, 8713), True, 'import matplotlib.pyplot as plot\n'), ((8716, 8734), 'matplotlib.pyplot.xscale', 'plot.xscale', (['"""log"""'], {}), "('log')\n", (8727, 8734), True, 'import matplotlib.pyplot as plot\n'), ((8737, 8755), 'matplotlib.pyplot.yscale', 'plot.yscale', (['"""log"""'], {}), "('log')\n", (8748, 8755), True, 'import matplotlib.pyplot as plot\n'), ((8758, 8819), 'matplotlib.pyplot.step', 'plot.step', (['x_l', 'y_l', '"""bo"""'], {'label': '"""log(tail) vs. log(X)"""', 'lw': '(2)'}), "(x_l, y_l, 'bo', label='log(tail) vs. log(X)', lw=2)\n", (8767, 8819), True, 'import matplotlib.pyplot as plot\n'), ((8822, 8842), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""X (s)"""'], {}), "('X (s)')\n", (8833, 8842), True, 'import matplotlib.pyplot as plot\n'), ((8845, 8858), 'matplotlib.pyplot.legend', 'plot.legend', ([], {}), '()\n', (8856, 8858), True, 'import matplotlib.pyplot as plot\n'), ((9463, 9485), 'numpy.sort', 'numpy.sort', (['lifetime_l'], {}), '(lifetime_l)\n', (9473, 9485), False, 'import matplotlib, numpy, pprint\n'), ((11206, 11253), 'matplotlib.pyplot.step', 'plot.step', (['x_l', 'y_l', '"""bo"""'], {'ms': '(10)', 'mew': '(0)', 'ls': '""":"""'}), "(x_l, y_l, 'bo', ms=10, mew=0, ls=':')\n", (11215, 11253), True, 'import matplotlib.pyplot as plot\n'), ((11259, 11277), 'matplotlib.pyplot.xscale', 'plot.xscale', (['"""log"""'], {}), "('log')\n", (11270, 11277), True, 'import matplotlib.pyplot as plot\n'), ((11280, 11298), 'matplotlib.pyplot.yscale', 'plot.yscale', (['"""log"""'], {}), "('log')\n", (11291, 11298), True, 'import matplotlib.pyplot as plot\n'), ((11301, 11342), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Task lifetime"""'], {'fontsize': '(18)'}), "('Task lifetime', fontsize=18)\n", (11312, 11342), True, 'import matplotlib.pyplot as plot\n'), ((11346, 11391), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Tail distribution"""'], {'fontsize': '(18)'}), "('Tail distribution', fontsize=18)\n", (11357, 11391), True, 'import matplotlib.pyplot as plot\n'), ((12324, 12346), 'numpy.sort', 'numpy.sort', (['lifetime_l'], {}), '(lifetime_l)\n', (12334, 12346), False, 'import matplotlib, numpy, pprint\n'), ((1945, 1973), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (1955, 1973), False, 'import gzip, csv, pylab\n'), ((3587, 3600), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3597, 3600), False, 'import gzip, csv, pylab\n'), ((3850, 3878), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (3860, 3878), False, 'import gzip, csv, pylab\n'), ((4108, 4121), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4118, 4121), False, 'import gzip, csv, pylab\n'), ((4385, 4413), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (4395, 4413), False, 'import gzip, csv, pylab\n'), ((4679, 4692), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4689, 4692), False, 'import 
gzip, csv, pylab\n'), ((5968, 5996), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (5978, 5996), False, 'import gzip, csv, pylab\n'), ((6367, 6380), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (6377, 6380), False, 'import gzip, csv, pylab\n'), ((6598, 6626), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (6608, 6626), False, 'import gzip, csv, pylab\n'), ((6891, 6904), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (6901, 6904), False, 'import gzip, csv, pylab\n'), ((7824, 7837), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (7834, 7837), False, 'import gzip, csv, pylab\n'), ((8487, 8516), 'numpy.arange', 'numpy.arange', (['lifetime_l.size'], {}), '(lifetime_l.size)\n', (8499, 8516), False, 'import matplotlib, numpy, pprint\n'), ((9365, 9378), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (9375, 9378), False, 'import gzip, csv, pylab\n'), ((9656, 9685), 'numpy.arange', 'numpy.arange', (['lifetime_l.size'], {}), '(lifetime_l.size)\n', (9668, 9685), False, 'import matplotlib, numpy, pprint\n'), ((10918, 10946), 'matplotlib.pyplot.xlim', 'plot.xlim', (['[10, 2 * 10 ** 5]'], {}), '([10, 2 * 10 ** 5])\n', (10927, 10946), True, 'import matplotlib.pyplot as plot\n'), ((10951, 10985), 'matplotlib.pyplot.ylim', 'plot.ylim', (['[1 / 2 * 10 ** -5, 1.3]'], {}), '([1 / 2 * 10 ** -5, 1.3])\n', (10960, 10985), True, 'import matplotlib.pyplot as plot\n'), ((11761, 11771), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (11769, 11771), True, 'import matplotlib.pyplot as plot\n'), ((12226, 12239), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (12236, 12239), False, 'import gzip, csv, pylab\n'), ((7624, 7634), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (7632, 7634), True, 'import matplotlib.pyplot as plot\n'), ((9020, 9030), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (9028, 9030), True, 'import matplotlib.pyplot as plot\n'), ((11007, 11035), 'matplotlib.pyplot.xlim', 'plot.xlim', (['[10, 2 * 10 ** 4]'], {}), '([10, 2 * 10 ** 4])\n', (11016, 11035), True, 'import matplotlib.pyplot as plot\n'), ((11040, 11066), 'matplotlib.pyplot.ylim', 'plot.ylim', (['[10 ** -6, 1.3]'], {}), '([10 ** -6, 1.3])\n', (11049, 11066), True, 'import matplotlib.pyplot as plot\n'), ((11717, 11727), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (11725, 11727), True, 'import matplotlib.pyplot as plot\n'), ((11924, 11934), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (11932, 11934), True, 'import matplotlib.pyplot as plot\n'), ((1190, 1216), 'gzip.open', 'gzip.open', (['furl'], {'mode': '"""rt"""'}), "(furl, mode='rt')\n", (1199, 1216), False, 'import gzip, csv, pylab\n'), ((1240, 1253), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1250, 1253), False, 'import gzip, csv, pylab\n'), ((2757, 2783), 'gzip.open', 'gzip.open', (['furl'], {'mode': '"""rt"""'}), "(furl, mode='rt')\n", (2766, 2783), False, 'import gzip, csv, pylab\n'), ((2807, 2820), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (2817, 2820), False, 'import gzip, csv, pylab\n'), ((4944, 4970), 'gzip.open', 'gzip.open', (['furl'], {'mode': '"""rt"""'}), "(furl, mode='rt')\n", (4953, 4970), False, 'import gzip, csv, pylab\n'), ((4994, 5007), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (5004, 5007), False, 'import gzip, csv, pylab\n'), ((11093, 11121), 'matplotlib.pyplot.xlim', 'plot.xlim', (['[10, 2 * 10 ** 4]'], {}), '([10, 2 * 10 ** 4])\n', (11102, 11121), True, 'import matplotlib.pyplot as plot\n'), ((11126, 
11152), 'matplotlib.pyplot.ylim', 'plot.ylim', (['[10 ** -6, 1.3]'], {}), '([10 ** -6, 1.3])\n', (11135, 11152), True, 'import matplotlib.pyplot as plot\n'), ((11676, 11691), 'numpy.mean', 'numpy.mean', (['x_l'], {}), '(x_l)\n', (11686, 11691), False, 'import matplotlib, numpy, pprint\n')]
|
from readwrite import get_data
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import numpy as np
def scatter(path, name):
data = get_data(path)
pd_data = pd.DataFrame(data)
plt.title("column 0 " + name)
plt.plot(pd_data[0])
plt.show()
plt.title("column 1 " + name)
plt.plot(pd_data[1])
plt.show()
plt.title("column 2 " + name)
plt.plot(pd_data[2])
plt.show()
return pd_data
def boxplot(datas, names):
column0 = []
column1 = []
column2 = []
labels = []
for i in range(len(datas)):
column0.append(datas[i][0])
column1.append(datas[i][1])
column2.append(datas[i][2])
labels.append(names[i])
plt.title("Boxplot column 0")
plt.boxplot(column0, labels=labels)
plt.show()
plt.title("Boxplot column 1")
plt.boxplot(column1, labels=labels)
plt.show()
plt.title("Boxplot column 2")
plt.boxplot(column2, labels=labels)
plt.show()
def draw_gaussian(datas):
pdf_ticks = np.linspace(0, 10000, 100000, endpoint=False)
density = gaussian_kde(datas)
plt.plot(pdf_ticks, density(pdf_ticks), color='r')
plt.show()
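# --- Editor's illustrative sketch (not part of the original module) ---
# Comparing two runs with the helpers above; 'run_a.csv' and 'run_b.csv' are
# hypothetical paths assumed to be readable by readwrite.get_data.
def _example_compare_runs():
  a = scatter('run_a.csv', 'run A')
  b = scatter('run_b.csv', 'run B')
  boxplot([a, b], ['run A', 'run B'])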
|
[
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.boxplot",
"scipy.stats.gaussian_kde",
"numpy.linspace",
"readwrite.get_data"
] |
[((173, 187), 'readwrite.get_data', 'get_data', (['path'], {}), '(path)\n', (181, 187), False, 'from readwrite import get_data\n'), ((199, 217), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (211, 217), True, 'import pandas as pd\n'), ((220, 249), 'matplotlib.pyplot.title', 'plt.title', (["('column 0 ' + name)"], {}), "('column 0 ' + name)\n", (229, 249), True, 'import matplotlib.pyplot as plt\n'), ((251, 271), 'matplotlib.pyplot.plot', 'plt.plot', (['pd_data[0]'], {}), '(pd_data[0])\n', (259, 271), True, 'import matplotlib.pyplot as plt\n'), ((273, 283), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (281, 283), True, 'import matplotlib.pyplot as plt\n'), ((286, 315), 'matplotlib.pyplot.title', 'plt.title', (["('column 1 ' + name)"], {}), "('column 1 ' + name)\n", (295, 315), True, 'import matplotlib.pyplot as plt\n'), ((317, 337), 'matplotlib.pyplot.plot', 'plt.plot', (['pd_data[1]'], {}), '(pd_data[1])\n', (325, 337), True, 'import matplotlib.pyplot as plt\n'), ((339, 349), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (347, 349), True, 'import matplotlib.pyplot as plt\n'), ((352, 381), 'matplotlib.pyplot.title', 'plt.title', (["('column 2 ' + name)"], {}), "('column 2 ' + name)\n", (361, 381), True, 'import matplotlib.pyplot as plt\n'), ((383, 403), 'matplotlib.pyplot.plot', 'plt.plot', (['pd_data[2]'], {}), '(pd_data[2])\n', (391, 403), True, 'import matplotlib.pyplot as plt\n'), ((405, 415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (413, 415), True, 'import matplotlib.pyplot as plt\n'), ((665, 694), 'matplotlib.pyplot.title', 'plt.title', (['"""Boxplot column 0"""'], {}), "('Boxplot column 0')\n", (674, 694), True, 'import matplotlib.pyplot as plt\n'), ((696, 731), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['column0'], {'labels': 'labels'}), '(column0, labels=labels)\n', (707, 731), True, 'import matplotlib.pyplot as plt\n'), ((733, 743), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (741, 743), True, 'import matplotlib.pyplot as plt\n'), ((746, 775), 'matplotlib.pyplot.title', 'plt.title', (['"""Boxplot column 1"""'], {}), "('Boxplot column 1')\n", (755, 775), True, 'import matplotlib.pyplot as plt\n'), ((777, 812), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['column1'], {'labels': 'labels'}), '(column1, labels=labels)\n', (788, 812), True, 'import matplotlib.pyplot as plt\n'), ((814, 824), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (822, 824), True, 'import matplotlib.pyplot as plt\n'), ((827, 856), 'matplotlib.pyplot.title', 'plt.title', (['"""Boxplot column 2"""'], {}), "('Boxplot column 2')\n", (836, 856), True, 'import matplotlib.pyplot as plt\n'), ((858, 893), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['column2'], {'labels': 'labels'}), '(column2, labels=labels)\n', (869, 893), True, 'import matplotlib.pyplot as plt\n'), ((895, 905), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (903, 905), True, 'import matplotlib.pyplot as plt\n'), ((946, 991), 'numpy.linspace', 'np.linspace', (['(0)', '(10000)', '(100000)'], {'endpoint': '(False)'}), '(0, 10000, 100000, endpoint=False)\n', (957, 991), True, 'import numpy as np\n'), ((1004, 1023), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['datas'], {}), '(datas)\n', (1016, 1023), False, 'from scipy.stats import gaussian_kde\n'), ((1077, 1087), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1085, 1087), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python
"""
# Author: <NAME>
# Created Time : Tue 29 Sep 2020 01:41:23 PM CST
# File Name: function.py
# Description:
"""
import torch
import numpy as np
import os
import scanpy as sc
from anndata import AnnData
from .data import load_data
from .net.vae import VAE
from .net.utils import EarlyStopping
from .metrics import batch_entropy_mixing_score, silhouette_score
from .logger import create_logger
from .plot import embedding
def SCALE(
data_list,
batch_categories=None,
profile='RNA',
join='inner',
batch_key='batch',
batch_name='batch',
min_features=600,
min_cells=3,
n_top_features=2000,
batch_size=64,
lr=2e-4,
max_iteration=30000,
seed=124,
gpu=0,
outdir='output/',
projection=None,
repeat=False,
impute=None,
chunk_size=20000,
ignore_umap=False,
verbose=False,
assess=False,
show=True,
):
"""
Single-Cell integrative Analysis via Latent feature Extraction
Parameters
----------
data_list
A path list of AnnData matrices to concatenate with. Each matrix is referred to as a 'batch'.
batch_categories
Categories for the batch annotation. By default, use increasing numbers.
profile
Specify the single-cell profile, RNA or ATAC. Default: RNA.
join
Use intersection ('inner') or union ('outer') of variables of different batches.
batch_key
Add the batch annotation to obs using this key. By default, batch_key='batch'.
batch_name
Use this annotation in obs as batches for training model. Default: 'batch'.
    min_features
        Filter out cells in which fewer than min_features features are detected. Default: 600.
    min_cells
        Filter out genes that are detected in fewer than min_cells cells. Default: 3.
n_top_features
Number of highly-variable genes to keep. Default: 2000.
batch_size
Number of samples per batch to load. Default: 64.
lr
Learning rate. Default: 2e-4.
max_iteration
Max iterations for training. Training one batch_size samples is one iteration. Default: 30000.
seed
Random seed for torch and numpy. Default: 124.
gpu
Index of GPU to use if GPU is available. Default: 0.
outdir
Output directory. Default: 'output/'.
projection
Use for new dataset projection. Input the folder containing the pre-trained model. If None, don't do projection. Default: None.
repeat
Use with projection. If False, concatenate the reference and projection datasets for downstream analysis. If True, only use projection datasets. Default: False.
impute
If True, calculate the imputed gene expression and store it at adata.layers['impute']. Default: False.
chunk_size
Number of samples from the same batch to transform. Default: 20000.
ignore_umap
If True, do not perform UMAP for visualization and leiden for clustering. Default: False.
verbose
Verbosity, True or False. Default: False.
assess
If True, calculate the entropy_batch_mixing score and silhouette score to evaluate integration results. Default: False.
Returns
-------
The output folder contains:
adata.h5ad
        The AnnData matrix after batch-effect removal. The low-dimensional representation of the data is stored at adata.obsm['latent'].
checkpoint
model.pt contains the variables of the model and config.pt contains the parameters of the model.
log.txt
Records raw data information, filter conditions, model parameters etc.
umap.pdf
UMAP plot for visualization.
"""
np.random.seed(seed) # seed
torch.manual_seed(seed)
if torch.cuda.is_available(): # cuda device
device='cuda'
torch.cuda.set_device(gpu)
else:
device='cpu'
outdir = outdir+'/'
os.makedirs(outdir+'/checkpoint', exist_ok=True)
log = create_logger('', fh=outdir+'log.txt')
if not projection:
adata, trainloader, testloader = load_data(
data_list, batch_categories,
join=join,
profile=profile,
n_top_features=n_top_features,
batch_size=batch_size,
chunk_size=chunk_size,
min_features=min_features,
min_cells=min_cells,
batch_name=batch_name,
batch_key=batch_key,
log=log
)
early_stopping = EarlyStopping(patience=10, checkpoint_file=outdir+'/checkpoint/model.pt')
x_dim, n_domain = adata.shape[1], len(adata.obs['batch'].cat.categories)
# model config
enc = [['fc', 1024, 1, 'relu'],['fc', 10, '', '']] # TO DO
dec = [['fc', x_dim, n_domain, 'sigmoid']]
model = VAE(enc, dec, n_domain=n_domain)
log.info('model\n'+model.__repr__())
model.fit(
trainloader,
lr=lr,
max_iteration=max_iteration,
device=device,
early_stopping=early_stopping,
verbose=verbose,
)
torch.save({'n_top_features':adata.var.index, 'enc':enc, 'dec':dec, 'n_domain':n_domain}, outdir+'/checkpoint/config.pt')
else:
state = torch.load(projection+'/checkpoint/config.pt')
n_top_features, enc, dec, n_domain = state['n_top_features'], state['enc'], state['dec'], state['n_domain']
model = VAE(enc, dec, n_domain=n_domain)
model.load_model(projection+'/checkpoint/model.pt')
model.to(device)
adata, trainloader, testloader = load_data(
data_list, batch_categories,
join='outer',
profile=profile,
chunk_size=chunk_size,
n_top_features=n_top_features,
min_cells=0,
min_features=min_features,
batch_name=batch_name,
batch_key=batch_key,
log = log
)
# log.info('Processed dataset shape: {}'.format(adata.shape))
adata.obsm['latent'] = model.encodeBatch(testloader, device=device) # save latent rep
if impute:
adata.layers['impute'] = model.encodeBatch(testloader, out='impute', batch_id=impute, device=device)
log.info('Output dir: {}'.format(outdir))
if projection and (not repeat):
ref = sc.read_h5ad(projection+'/adata.h5ad')
adata = AnnData.concatenate(
ref, adata,
batch_categories=['reference', 'query'],
batch_key='projection',
index_unique=None
)
adata.write(outdir+'adata.h5ad', compression='gzip')
if not ignore_umap: #and adata.shape[0]<1e6:
log.info('Plot umap')
sc.pp.neighbors(adata, n_neighbors=30, use_rep='latent')
sc.tl.umap(adata, min_dist=0.1)
sc.tl.leiden(adata)
# UMAP visualization
sc.settings.figdir = outdir
sc.set_figure_params(dpi=80, figsize=(10,10), fontsize=20)
cols = ['batch', 'celltype', 'leiden']
color = [c for c in cols if c in adata.obs]
if len(color) > 0:
if projection and (not repeat):
embedding(adata, groupby='projection', save='.pdf', show=show)
else:
sc.pl.umap(adata, color=color, save='.pdf', wspace=0.4, ncols=4, show=show)
if assess:
if len(adata.obs['batch'].cat.categories) > 1:
entropy_score = batch_entropy_mixing_score(adata.obsm['X_umap'], adata.obs['batch'])
log.info('batch_entropy_mixing_score: {:.3f}'.format(entropy_score))
if 'celltype' in adata.obs:
sil_score = silhouette_score(adata.obsm['X_umap'], adata.obs['celltype'].cat.codes)
log.info("silhouette_score: {:.3f}".format(sil_score))
adata.write(outdir+'adata.h5ad', compression='gzip')
return adata
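# --- Editor's illustrative sketch (not part of the SCALE API) ---
# A minimal call with two hypothetical .h5ad paths; every other argument keeps
# the default documented in the docstring above.
def _example_scale_run():
    return SCALE(['batch1.h5ad', 'batch2.h5ad'], batch_categories=['b1', 'b2'], outdir='output/')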
def label_transfer(ref, query, rep='latent', label='celltype'):
"""
Label transfer
Parameters
-----------
ref
reference containing the projected representations and labels
query
query data to transfer label
rep
representations to train the classifier. Default is `latent`
label
        label name. Default is `celltype` stored in ref.obs
Returns
--------
    transferred label
"""
from sklearn.neighbors import KNeighborsClassifier
X_train = ref.obsm[rep]
y_train = ref.obs[label]
X_test = query.obsm[rep]
    knn = KNeighborsClassifier().fit(X_train, y_train)
y_test = knn.predict(X_test)
return y_test
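# --- Editor's illustrative sketch (not part of the original module) ---
# Typical use after projecting a query dataset with SCALE(..., projection=...):
# both AnnData objects are assumed to carry obsm['latent'], and the reference to
# carry obs['celltype'].
def _example_transfer(ref, query):
    query.obs['celltype_transfer'] = label_transfer(ref, query, rep='latent', label='celltype')
    return query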
|
[
"scanpy.tl.umap",
"numpy.random.seed",
"os.makedirs",
"torch.manual_seed",
"torch.load",
"scanpy.pp.neighbors",
"scanpy.read_h5ad",
"scanpy.pl.umap",
"torch.save",
"scanpy.tl.leiden",
"sklearn.neighbors.KNeighborsClassifier",
"torch.cuda.is_available",
"torch.cuda.set_device",
"anndata.AnnData.concatenate",
"scanpy.set_figure_params"
] |
[((3790, 3810), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3804, 3810), True, 'import numpy as np\n'), ((3822, 3845), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3839, 3845), False, 'import torch\n'), ((3854, 3879), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3877, 3879), False, 'import torch\n'), ((4016, 4066), 'os.makedirs', 'os.makedirs', (["(outdir + '/checkpoint')"], {'exist_ok': '(True)'}), "(outdir + '/checkpoint', exist_ok=True)\n", (4027, 4066), False, 'import os\n'), ((3925, 3951), 'torch.cuda.set_device', 'torch.cuda.set_device', (['gpu'], {}), '(gpu)\n', (3946, 3951), False, 'import torch\n'), ((5239, 5370), 'torch.save', 'torch.save', (["{'n_top_features': adata.var.index, 'enc': enc, 'dec': dec, 'n_domain':\n n_domain}", "(outdir + '/checkpoint/config.pt')"], {}), "({'n_top_features': adata.var.index, 'enc': enc, 'dec': dec,\n 'n_domain': n_domain}, outdir + '/checkpoint/config.pt')\n", (5249, 5370), False, 'import torch\n'), ((5392, 5440), 'torch.load', 'torch.load', (["(projection + '/checkpoint/config.pt')"], {}), "(projection + '/checkpoint/config.pt')\n", (5402, 5440), False, 'import torch\n'), ((6490, 6530), 'scanpy.read_h5ad', 'sc.read_h5ad', (["(projection + '/adata.h5ad')"], {}), "(projection + '/adata.h5ad')\n", (6502, 6530), True, 'import scanpy as sc\n'), ((6545, 6664), 'anndata.AnnData.concatenate', 'AnnData.concatenate', (['ref', 'adata'], {'batch_categories': "['reference', 'query']", 'batch_key': '"""projection"""', 'index_unique': 'None'}), "(ref, adata, batch_categories=['reference', 'query'],\n batch_key='projection', index_unique=None)\n", (6564, 6664), False, 'from anndata import AnnData\n'), ((6868, 6924), 'scanpy.pp.neighbors', 'sc.pp.neighbors', (['adata'], {'n_neighbors': '(30)', 'use_rep': '"""latent"""'}), "(adata, n_neighbors=30, use_rep='latent')\n", (6883, 6924), True, 'import scanpy as sc\n'), ((6933, 6964), 'scanpy.tl.umap', 'sc.tl.umap', (['adata'], {'min_dist': '(0.1)'}), '(adata, min_dist=0.1)\n', (6943, 6964), True, 'import scanpy as sc\n'), ((6973, 6992), 'scanpy.tl.leiden', 'sc.tl.leiden', (['adata'], {}), '(adata)\n', (6985, 6992), True, 'import scanpy as sc\n'), ((7075, 7134), 'scanpy.set_figure_params', 'sc.set_figure_params', ([], {'dpi': '(80)', 'figsize': '(10, 10)', 'fontsize': '(20)'}), '(dpi=80, figsize=(10, 10), fontsize=20)\n', (7095, 7134), True, 'import scanpy as sc\n'), ((8706, 8728), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (8726, 8728), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((7417, 7492), 'scanpy.pl.umap', 'sc.pl.umap', (['adata'], {'color': 'color', 'save': '""".pdf"""', 'wspace': '(0.4)', 'ncols': '(4)', 'show': 'show'}), "(adata, color=color, save='.pdf', wspace=0.4, ncols=4, show=show)\n", (7427, 7492), True, 'import scanpy as sc\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 14:19:04 2020
@author: corkep
"""
import numpy as np
import numpy.testing as nt
import unittest
from math import pi
import math
from scipy.linalg import logm, expm
from spatialmath.base.transformsNd import *
from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom
from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2
from spatialmath.base import sym
import matplotlib.pyplot as plt
class TestND(unittest.TestCase):
def test_iseye(self):
self.assertTrue(iseye(np.eye(1)))
self.assertTrue(iseye(np.eye(2)))
self.assertTrue(iseye(np.eye(3)))
self.assertTrue(iseye(np.eye(5)))
self.assertFalse(iseye(2 * np.eye(3)))
self.assertFalse(iseye(-np.eye(3)))
self.assertFalse(iseye(np.array([[1, 0, 0], [0, 1, 0]])))
self.assertFalse(iseye(np.array([1, 0, 0])))
def test_r2t(self):
# 3D
R = rotx(0.3)
T = r2t(R)
nt.assert_array_almost_equal(T[0:3,3], np.r_[0,0,0])
nt.assert_array_almost_equal(T[:3,:3], R)
theta = sym.symbol('theta')
R = rotx(theta)
T = r2t(R)
self.assertEqual(r2t(R).dtype, 'O')
nt.assert_array_almost_equal(T[0:3,3], np.r_[0,0,0])
# nt.assert_array_almost_equal(T[:3,:3], R)
self.assertTrue((T[:3,:3] == R).all())
# 2D
R = rot2(0.3)
T = r2t(R)
nt.assert_array_almost_equal(T[0:2,2], np.r_[0,0])
nt.assert_array_almost_equal(T[:2,:2], R)
theta = sym.symbol('theta')
R = rot2(theta)
T = r2t(R)
self.assertEqual(r2t(R).dtype, 'O')
nt.assert_array_almost_equal(T[0:2,2], np.r_[0,0])
nt.assert_array_almost_equal(T[:2,:2], R)
with self.assertRaises(ValueError):
r2t(3)
with self.assertRaises(ValueError):
r2t(np.eye(3,4))
def test_t2r(self):
# 3D
t=[1,2,3]
T = trotx(0.3, t=t)
R = t2r(T)
nt.assert_array_almost_equal(T[:3,:3], R)
nt.assert_array_almost_equal(transl(T), np.array(t))
# 2D
t=[1,2]
T = trot2(0.3, t=t)
R = t2r(T)
nt.assert_array_almost_equal(T[:2,:2], R)
nt.assert_array_almost_equal(transl2(T), np.array(t))
with self.assertRaises(ValueError):
t2r(3)
with self.assertRaises(ValueError):
            t2r(np.eye(3,4))
def test_rt2tr(self):
# 3D
R = rotx(0.2)
t = [3, 4, 5]
T = rt2tr(R, t)
nt.assert_array_almost_equal(t2r(T), R)
nt.assert_array_almost_equal(transl(T), np.array(t))
theta = sym.symbol('theta')
R = rotx(theta)
self.assertEqual(r2t(R).dtype, 'O')
# 2D
R = rot2(0.2)
t = [3, 4]
T = rt2tr(R, t)
nt.assert_array_almost_equal(t2r(T), R)
nt.assert_array_almost_equal(transl2(T), np.array(t))
theta = sym.symbol('theta')
R = rot2(theta)
self.assertEqual(r2t(R).dtype, 'O')
with self.assertRaises(ValueError):
rt2tr(3, 4)
with self.assertRaises(ValueError):
rt2tr(np.eye(3,4), [1,2,3,4])
def test_tr2rt(self):
# 3D
T = trotx(0.3, t=[1,2,3])
R, t = tr2rt(T)
nt.assert_array_almost_equal(T[:3,:3], R)
nt.assert_array_almost_equal(T[:3,3], t)
# 2D
T = trot2(0.3, t=[1,2])
R, t = tr2rt(T)
nt.assert_array_almost_equal(T[:2,:2], R)
nt.assert_array_almost_equal(T[:2,2], t)
with self.assertRaises(ValueError):
R, t = tr2rt(3)
with self.assertRaises(ValueError):
R, t = tr2rt(np.eye(3,4))
def test_checks(self):
# 3D case, with rotation matrix
R = np.eye(3)
self.assertTrue(isR(R))
self.assertFalse(isrot2(R))
self.assertTrue(isrot(R))
self.assertFalse(ishom(R))
self.assertTrue(ishom2(R))
self.assertFalse(isrot2(R, True))
self.assertTrue(isrot(R, True))
self.assertFalse(ishom(R, True))
self.assertTrue(ishom2(R, True))
# 3D case, invalid rotation matrix
R = np.eye(3)
R[0, 1] = 2
self.assertFalse(isR(R))
self.assertFalse(isrot2(R))
self.assertTrue(isrot(R))
self.assertFalse(ishom(R))
self.assertTrue(ishom2(R))
self.assertFalse(isrot2(R, True))
self.assertFalse(isrot(R, True))
self.assertFalse(ishom(R, True))
self.assertFalse(ishom2(R, True))
# 3D case, with rotation matrix
T = np.array([[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]])
self.assertFalse(isR(T))
self.assertFalse(isrot2(T))
self.assertFalse(isrot(T))
self.assertTrue(ishom(T))
self.assertFalse(ishom2(T))
self.assertFalse(isrot2(T, True))
self.assertFalse(isrot(T, True))
self.assertTrue(ishom(T, True))
self.assertFalse(ishom2(T, True))
# 3D case, invalid rotation matrix
T = np.array([[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [0, 0, 0, 1]])
self.assertFalse(isR(T))
self.assertFalse(isrot2(T))
self.assertFalse(isrot(T))
self.assertTrue(ishom(T),)
self.assertFalse(ishom2(T))
self.assertFalse(isrot2(T, True))
self.assertFalse(isrot(T, True))
self.assertFalse(ishom(T, True))
self.assertFalse(ishom2(T, True))
# 3D case, invalid bottom row
T = np.array([[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [9, 0, 0, 1]])
self.assertFalse(isR(T))
self.assertFalse(isrot2(T))
self.assertFalse(isrot(T))
self.assertTrue(ishom(T))
self.assertFalse(ishom2(T))
self.assertFalse(isrot2(T, True))
self.assertFalse(isrot(T, True))
self.assertFalse(ishom(T, True))
self.assertFalse(ishom2(T, True))
# skew matrices
S = np.array([
[0, 2],
[-2, 0]])
nt.assert_equal(isskew(S), True)
S[0, 0] = 1
nt.assert_equal(isskew(S), False)
S = np.array([
[0, -3, 2],
[3, 0, -1],
[-2, 1, 0]])
nt.assert_equal(isskew(S), True)
S[0, 0] = 1
nt.assert_equal(isskew(S), False)
def test_homog(self):
nt.assert_almost_equal(e2h([1, 2, 3]), np.c_[1, 2, 3, 1].T)
nt.assert_almost_equal(h2e([2, 4, 6, 2]), np.c_[1, 2, 3].T)
def test_homtrans(self):
#3D
T = trotx(pi/2, t=[1,2,3])
v = [10,12,14]
v2 = homtrans(T, v)
nt.assert_almost_equal(v2, np.c_[11, -12, 15].T)
v = np.c_[[10,12,14], [-3,-4,-5]]
v2 = homtrans(T, v)
nt.assert_almost_equal(v2, np.c_[[11, -12, 15], [-2,7,-1]])
#2D
T = trot2(pi/2, t=[1,2])
v = [10,12]
v2 = homtrans(T, v)
nt.assert_almost_equal(v2, np.c_[-11, 12].T)
v = np.c_[[10,12], [-3,-4]]
v2 = homtrans(T, v)
nt.assert_almost_equal(v2, np.c_[[-11, 12], [5, -1]])
with self.assertRaises(ValueError):
T = trotx(pi/2, t=[1,2,3])
v = [10,12]
v2 = homtrans(T, v)
def test_skew(self):
# 3D
sk = skew([1, 2, 3])
self.assertEqual(sk.shape, (3,3))
nt.assert_almost_equal(sk + sk.T, np.zeros((3,3)))
self.assertEqual(sk[2,1], 1)
self.assertEqual(sk[0,2], 2)
self.assertEqual(sk[1,0], 3)
nt.assert_almost_equal(sk.diagonal(), np.r_[0,0,0])
# 2D
sk = skew([1])
self.assertEqual(sk.shape, (2,2))
nt.assert_almost_equal(sk + sk.T, np.zeros((2,2)))
self.assertEqual(sk[1,0], 1)
nt.assert_almost_equal(sk.diagonal(), np.r_[0,0])
with self.assertRaises(ValueError):
sk = skew([1,2])
def test_vex(self):
# 3D
t = [3, 4, 5]
sk = skew(t)
nt.assert_almost_equal(vex(sk), t)
# 2D
t = [3]
sk = skew(t)
nt.assert_almost_equal(vex(sk), t)
def test_isskew(self):
t = [3, 4, 5]
sk = skew(t)
self.assertTrue(isskew(sk))
sk[0,0] = 3
self.assertFalse(isskew(sk))
# 2D
t = [3]
sk = skew(t)
self.assertTrue(isskew(sk))
sk[0,0] = 3
self.assertFalse(isskew(sk))
def test_isskewa(self):
# 3D
t = [3, 4, 5, 6, 7, 8]
sk = skewa(t)
self.assertTrue(isskewa(sk))
sk[0,0] = 3
        self.assertFalse(isskewa(sk))
        sk = skewa(t)
        sk[3,3] = 3
        self.assertFalse(isskewa(sk))
        # 2D
        t = [3, 4, 5]
        sk = skewa(t)
        self.assertTrue(isskewa(sk))
        sk[0,0] = 3
        self.assertFalse(isskewa(sk))
        sk = skewa(t)
        sk[2,2] = 3
        self.assertFalse(isskewa(sk))
def test_skewa(self):
# 3D
sk = skewa([1, 2, 3, 4, 5, 6])
self.assertEqual(sk.shape, (4,4))
nt.assert_almost_equal(sk.diagonal(), np.r_[0,0,0,0])
nt.assert_almost_equal(sk[-1,:], np.r_[0,0,0,0])
nt.assert_almost_equal(sk[:3,3], [1, 2, 3])
nt.assert_almost_equal(vex(sk[:3,:3]), [4,5,6])
# 2D
sk = skewa([1, 2, 3])
self.assertEqual(sk.shape, (3,3))
nt.assert_almost_equal(sk.diagonal(), np.r_[0,0,0])
nt.assert_almost_equal(sk[-1,:], np.r_[0,0,0])
nt.assert_almost_equal(sk[:2,2], [1, 2])
nt.assert_almost_equal(vex(sk[:2,:2]), [3])
with self.assertRaises(ValueError):
sk = skew([1,2])
def test_vexa(self):
# 3D
t = [1, 2, 3, 4, 5, 6]
sk = skewa(t)
nt.assert_almost_equal(vexa(sk), t)
# 2D
t = [1, 2, 3]
sk = skewa(t)
nt.assert_almost_equal(vexa(sk), t)
def test_det(self):
a = np.array([[1, 2], [3, 4]])
self.assertAlmostEqual(np.linalg.det(a), det(a))
x, y = sym.symbol('x y')
a = np.array([[x, y], [y, x]])
self.assertEqual(det(a), x**2 - y**2)
# ---------------------------------------------------------------------------------------#
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"spatialmath.base.transforms3d.rotx",
"spatialmath.base.transforms2d.ishom2",
"spatialmath.base.transforms2d.isrot2",
"numpy.testing.assert_almost_equal",
"spatialmath.base.transforms2d.rot2",
"numpy.zeros",
"spatialmath.base.transforms3d.isrot",
"spatialmath.base.transforms2d.trot2",
"spatialmath.base.sym.symbol",
"spatialmath.base.transforms3d.trotx",
"numpy.array",
"numpy.linalg.det",
"spatialmath.base.transforms3d.transl",
"numpy.eye",
"numpy.testing.assert_array_almost_equal",
"spatialmath.base.transforms3d.ishom",
"spatialmath.base.transforms2d.transl2"
] |
[((10322, 10337), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10335, 10337), False, 'import unittest\n'), ((994, 1003), 'spatialmath.base.transforms3d.rotx', 'rotx', (['(0.3)'], {}), '(0.3)\n', (998, 1003), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((1031, 1086), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[0:3, 3]', 'np.r_[0, 0, 0]'], {}), '(T[0:3, 3], np.r_[0, 0, 0])\n', (1059, 1086), True, 'import numpy.testing as nt\n'), ((1092, 1134), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:3, :3]', 'R'], {}), '(T[:3, :3], R)\n', (1120, 1134), True, 'import numpy.testing as nt\n'), ((1151, 1170), 'spatialmath.base.sym.symbol', 'sym.symbol', (['"""theta"""'], {}), "('theta')\n", (1161, 1170), False, 'from spatialmath.base import sym\n'), ((1183, 1194), 'spatialmath.base.transforms3d.rotx', 'rotx', (['theta'], {}), '(theta)\n', (1187, 1194), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((1266, 1321), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[0:3, 3]', 'np.r_[0, 0, 0]'], {}), '(T[0:3, 3], np.r_[0, 0, 0])\n', (1294, 1321), True, 'import numpy.testing as nt\n'), ((1444, 1453), 'spatialmath.base.transforms2d.rot2', 'rot2', (['(0.3)'], {}), '(0.3)\n', (1448, 1453), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((1481, 1533), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[0:2, 2]', 'np.r_[0, 0]'], {}), '(T[0:2, 2], np.r_[0, 0])\n', (1509, 1533), True, 'import numpy.testing as nt\n'), ((1540, 1582), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:2, :2]', 'R'], {}), '(T[:2, :2], R)\n', (1568, 1582), True, 'import numpy.testing as nt\n'), ((1599, 1618), 'spatialmath.base.sym.symbol', 'sym.symbol', (['"""theta"""'], {}), "('theta')\n", (1609, 1618), False, 'from spatialmath.base import sym\n'), ((1631, 1642), 'spatialmath.base.transforms2d.rot2', 'rot2', (['theta'], {}), '(theta)\n', (1635, 1642), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((1714, 1766), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[0:2, 2]', 'np.r_[0, 0]'], {}), '(T[0:2, 2], np.r_[0, 0])\n', (1742, 1766), True, 'import numpy.testing as nt\n'), ((1773, 1815), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:2, :2]', 'R'], {}), '(T[:2, :2], R)\n', (1801, 1815), True, 'import numpy.testing as nt\n'), ((2021, 2036), 'spatialmath.base.transforms3d.trotx', 'trotx', (['(0.3)'], {'t': 't'}), '(0.3, t=t)\n', (2026, 2036), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((2064, 2106), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:3, :3]', 'R'], {}), '(T[:3, :3], R)\n', (2092, 2106), True, 'import numpy.testing as nt\n'), ((2214, 2229), 'spatialmath.base.transforms2d.trot2', 'trot2', (['(0.3)'], {'t': 't'}), '(0.3, t=t)\n', (2219, 2229), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((2257, 2299), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:2, :2]', 'R'], {}), '(T[:2, :2], R)\n', (2285, 2299), True, 'import numpy.testing as nt\n'), ((2551, 2560), 'spatialmath.base.transforms3d.rotx', 'rotx', (['(0.2)'], {}), '(0.2)\n', (2555, 2560), False, 'from 
spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((2733, 2752), 'spatialmath.base.sym.symbol', 'sym.symbol', (['"""theta"""'], {}), "('theta')\n", (2743, 2752), False, 'from spatialmath.base import sym\n'), ((2765, 2776), 'spatialmath.base.transforms3d.rotx', 'rotx', (['theta'], {}), '(theta)\n', (2769, 2776), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((2847, 2856), 'spatialmath.base.transforms2d.rot2', 'rot2', (['(0.2)'], {}), '(0.2)\n', (2851, 2856), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((3027, 3046), 'spatialmath.base.sym.symbol', 'sym.symbol', (['"""theta"""'], {}), "('theta')\n", (3037, 3046), False, 'from spatialmath.base import sym\n'), ((3059, 3070), 'spatialmath.base.transforms2d.rot2', 'rot2', (['theta'], {}), '(theta)\n', (3063, 3070), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((3323, 3346), 'spatialmath.base.transforms3d.trotx', 'trotx', (['(0.3)'], {'t': '[1, 2, 3]'}), '(0.3, t=[1, 2, 3])\n', (3328, 3346), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((3377, 3419), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:3, :3]', 'R'], {}), '(T[:3, :3], R)\n', (3405, 3419), True, 'import numpy.testing as nt\n'), ((3427, 3468), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:3, 3]', 't'], {}), '(T[:3, 3], t)\n', (3455, 3468), True, 'import numpy.testing as nt\n'), ((3494, 3514), 'spatialmath.base.transforms2d.trot2', 'trot2', (['(0.3)'], {'t': '[1, 2]'}), '(0.3, t=[1, 2])\n', (3499, 3514), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((3546, 3588), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:2, :2]', 'R'], {}), '(T[:2, :2], R)\n', (3574, 3588), True, 'import numpy.testing as nt\n'), ((3596, 3637), 'numpy.testing.assert_array_almost_equal', 'nt.assert_array_almost_equal', (['T[:2, 2]', 't'], {}), '(T[:2, 2], t)\n', (3624, 3637), True, 'import numpy.testing as nt\n'), ((3874, 3883), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3880, 3883), True, 'import numpy as np\n'), ((4276, 4285), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4282, 4285), True, 'import numpy as np\n'), ((4698, 4764), 'numpy.array', 'np.array', (['[[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]])\n', (4706, 4764), True, 'import numpy as np\n'), ((5160, 5226), 'numpy.array', 'np.array', (['[[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [0, 0, 0, 1]])\n', (5168, 5226), True, 'import numpy as np\n'), ((5619, 5685), 'numpy.array', 'np.array', (['[[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [9, 0, 0, 1]]'], {}), '([[1, 0, 0, 3], [0, 1, 1, 4], [0, 0, 1, 5], [9, 0, 0, 1]])\n', (5627, 5685), True, 'import numpy as np\n'), ((6063, 6090), 'numpy.array', 'np.array', (['[[0, 2], [-2, 0]]'], {}), '([[0, 2], [-2, 0]])\n', (6071, 6090), True, 'import numpy as np\n'), ((6232, 6278), 'numpy.array', 'np.array', (['[[0, -3, 2], [3, 0, -1], [-2, 1, 0]]'], {}), '([[0, -3, 2], [3, 0, -1], [-2, 1, 0]])\n', (6240, 6278), True, 'import numpy as np\n'), ((6639, 6665), 'spatialmath.base.transforms3d.trotx', 'trotx', (['(pi / 2)'], {'t': '[1, 2, 3]'}), '(pi / 2, t=[1, 2, 3])\n', (6644, 6665), False, 'from spatialmath.base.transforms3d import 
trotx, transl, rotx, isrot, ishom\n'), ((6721, 6769), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['v2', 'np.c_[11, -12, 15].T'], {}), '(v2, np.c_[11, -12, 15].T)\n', (6743, 6769), True, 'import numpy.testing as nt\n'), ((6848, 6909), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['v2', 'np.c_[[11, -12, 15], [-2, 7, -1]]'], {}), '(v2, np.c_[[11, -12, 15], [-2, 7, -1]])\n', (6870, 6909), True, 'import numpy.testing as nt\n'), ((6935, 6958), 'spatialmath.base.transforms2d.trot2', 'trot2', (['(pi / 2)'], {'t': '[1, 2]'}), '(pi / 2, t=[1, 2])\n', (6940, 6958), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((7012, 7056), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['v2', 'np.c_[-11, 12].T'], {}), '(v2, np.c_[-11, 12].T)\n', (7034, 7056), True, 'import numpy.testing as nt\n'), ((7129, 7182), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['v2', 'np.c_[[-11, 12], [5, -1]]'], {}), '(v2, np.c_[[-11, 12], [5, -1]])\n', (7151, 7182), True, 'import numpy.testing as nt\n'), ((9186, 9238), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['sk[-1, :]', 'np.r_[0, 0, 0, 0]'], {}), '(sk[-1, :], np.r_[0, 0, 0, 0])\n', (9208, 9238), True, 'import numpy.testing as nt\n'), ((9243, 9287), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['sk[:3, 3]', '[1, 2, 3]'], {}), '(sk[:3, 3], [1, 2, 3])\n', (9265, 9287), True, 'import numpy.testing as nt\n'), ((9497, 9546), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['sk[-1, :]', 'np.r_[0, 0, 0]'], {}), '(sk[-1, :], np.r_[0, 0, 0])\n', (9519, 9546), True, 'import numpy.testing as nt\n'), ((9552, 9593), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['sk[:2, 2]', '[1, 2]'], {}), '(sk[:2, 2], [1, 2])\n', (9574, 9593), True, 'import numpy.testing as nt\n'), ((9996, 10022), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (10004, 10022), True, 'import numpy as np\n'), ((10096, 10113), 'spatialmath.base.sym.symbol', 'sym.symbol', (['"""x y"""'], {}), "('x y')\n", (10106, 10113), False, 'from spatialmath.base import sym\n'), ((10126, 10152), 'numpy.array', 'np.array', (['[[x, y], [y, x]]'], {}), '([[x, y], [y, x]])\n', (10134, 10152), True, 'import numpy as np\n'), ((2143, 2152), 'spatialmath.base.transforms3d.transl', 'transl', (['T'], {}), '(T)\n', (2149, 2152), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((2154, 2165), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2162, 2165), True, 'import numpy as np\n'), ((2336, 2346), 'spatialmath.base.transforms2d.transl2', 'transl2', (['T'], {}), '(T)\n', (2343, 2346), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((2348, 2359), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2356, 2359), True, 'import numpy as np\n'), ((2692, 2701), 'spatialmath.base.transforms3d.transl', 'transl', (['T'], {}), '(T)\n', (2698, 2701), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((2703, 2714), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2711, 2714), True, 'import numpy as np\n'), ((2985, 2995), 'spatialmath.base.transforms2d.transl2', 'transl2', (['T'], {}), '(T)\n', (2992, 2995), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((2997, 3008), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (3005, 3008), True, 'import numpy as np\n'), ((3941, 3950), 
'spatialmath.base.transforms2d.isrot2', 'isrot2', (['R'], {}), '(R)\n', (3947, 3950), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((3976, 3984), 'spatialmath.base.transforms3d.isrot', 'isrot', (['R'], {}), '(R)\n', (3981, 3984), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4011, 4019), 'spatialmath.base.transforms3d.ishom', 'ishom', (['R'], {}), '(R)\n', (4016, 4019), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4045, 4054), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['R'], {}), '(R)\n', (4051, 4054), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4081, 4096), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['R', '(True)'], {}), '(R, True)\n', (4087, 4096), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4122, 4136), 'spatialmath.base.transforms3d.isrot', 'isrot', (['R', '(True)'], {}), '(R, True)\n', (4127, 4136), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4163, 4177), 'spatialmath.base.transforms3d.ishom', 'ishom', (['R', '(True)'], {}), '(R, True)\n', (4168, 4177), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4203, 4218), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['R', '(True)'], {}), '(R, True)\n', (4209, 4218), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4364, 4373), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['R'], {}), '(R)\n', (4370, 4373), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4399, 4407), 'spatialmath.base.transforms3d.isrot', 'isrot', (['R'], {}), '(R)\n', (4404, 4407), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4434, 4442), 'spatialmath.base.transforms3d.ishom', 'ishom', (['R'], {}), '(R)\n', (4439, 4442), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4468, 4477), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['R'], {}), '(R)\n', (4474, 4477), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4504, 4519), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['R', '(True)'], {}), '(R, True)\n', (4510, 4519), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4546, 4560), 'spatialmath.base.transforms3d.isrot', 'isrot', (['R', '(True)'], {}), '(R, True)\n', (4551, 4560), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4587, 4601), 'spatialmath.base.transforms3d.ishom', 'ishom', (['R', '(True)'], {}), '(R, True)\n', (4592, 4601), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4628, 4643), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['R', '(True)'], {}), '(R, True)\n', (4634, 4643), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4823, 4832), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T'], {}), '(T)\n', (4829, 4832), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4859, 4867), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T'], {}), '(T)\n', (4864, 4867), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4893, 
4901), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T'], {}), '(T)\n', (4898, 4901), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((4928, 4937), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T'], {}), '(T)\n', (4934, 4937), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((4964, 4979), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T', '(True)'], {}), '(T, True)\n', (4970, 4979), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5006, 5020), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T', '(True)'], {}), '(T, True)\n', (5011, 5020), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5046, 5060), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T', '(True)'], {}), '(T, True)\n', (5051, 5060), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5087, 5102), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T', '(True)'], {}), '(T, True)\n', (5093, 5102), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5285, 5294), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T'], {}), '(T)\n', (5291, 5294), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5321, 5329), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T'], {}), '(T)\n', (5326, 5329), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5355, 5363), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T'], {}), '(T)\n', (5360, 5363), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5391, 5400), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T'], {}), '(T)\n', (5397, 5400), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5427, 5442), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T', '(True)'], {}), '(T, True)\n', (5433, 5442), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5469, 5483), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T', '(True)'], {}), '(T, True)\n', (5474, 5483), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5510, 5524), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T', '(True)'], {}), '(T, True)\n', (5515, 5524), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5551, 5566), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T', '(True)'], {}), '(T, True)\n', (5557, 5566), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5744, 5753), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T'], {}), '(T)\n', (5750, 5753), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5780, 5788), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T'], {}), '(T)\n', (5785, 5788), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5814, 5822), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T'], {}), '(T)\n', (5819, 5822), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5849, 5858), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T'], {}), '(T)\n', (5855, 5858), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), 
((5885, 5900), 'spatialmath.base.transforms2d.isrot2', 'isrot2', (['T', '(True)'], {}), '(T, True)\n', (5891, 5900), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((5927, 5941), 'spatialmath.base.transforms3d.isrot', 'isrot', (['T', '(True)'], {}), '(T, True)\n', (5932, 5941), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((5968, 5982), 'spatialmath.base.transforms3d.ishom', 'ishom', (['T', '(True)'], {}), '(T, True)\n', (5973, 5982), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((6009, 6024), 'spatialmath.base.transforms2d.ishom2', 'ishom2', (['T', '(True)'], {}), '(T, True)\n', (6015, 6024), False, 'from spatialmath.base.transforms2d import trot2, transl2, rot2, isrot2, ishom2\n'), ((7246, 7272), 'spatialmath.base.transforms3d.trotx', 'trotx', (['(pi / 2)'], {'t': '[1, 2, 3]'}), '(pi / 2, t=[1, 2, 3])\n', (7251, 7272), False, 'from spatialmath.base.transforms3d import trotx, transl, rotx, isrot, ishom\n'), ((7477, 7493), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (7485, 7493), True, 'import numpy as np\n'), ((7786, 7802), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (7794, 7802), True, 'import numpy as np\n'), ((10054, 10070), 'numpy.linalg.det', 'np.linalg.det', (['a'], {}), '(a)\n', (10067, 10070), True, 'import numpy as np\n'), ((595, 604), 'numpy.eye', 'np.eye', (['(1)'], {}), '(1)\n', (601, 604), True, 'import numpy as np\n'), ((637, 646), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (643, 646), True, 'import numpy as np\n'), ((679, 688), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (685, 688), True, 'import numpy as np\n'), ((721, 730), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (727, 730), True, 'import numpy as np\n'), ((856, 888), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0]]'], {}), '([[1, 0, 0], [0, 1, 0]])\n', (864, 888), True, 'import numpy as np\n'), ((922, 941), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (930, 941), True, 'import numpy as np\n'), ((1940, 1952), 'numpy.eye', 'np.eye', (['(3)', '(4)'], {}), '(3, 4)\n', (1946, 1952), True, 'import numpy as np\n'), ((2486, 2498), 'numpy.eye', 'np.eye', (['(3)', '(4)'], {}), '(3, 4)\n', (2492, 2498), True, 'import numpy as np\n'), ((3247, 3259), 'numpy.eye', 'np.eye', (['(3)', '(4)'], {}), '(3, 4)\n', (3253, 3259), True, 'import numpy as np\n'), ((3780, 3792), 'numpy.eye', 'np.eye', (['(3)', '(4)'], {}), '(3, 4)\n', (3786, 3792), True, 'import numpy as np\n'), ((769, 778), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (775, 778), True, 'import numpy as np\n'), ((813, 822), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (819, 822), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 16:40:49 2020
@author: krugefr1
"""
import numpy as np
import os
try:
import arthor
except ImportError:
arthor = None
from rdkit import Chem
from rdkit.Chem import rdSubstructLibrary
import pickle
import random
import pandas as pd
import copy
from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep
class Classification:
def __init__(self,
proj,
datapath,
dbpath,
filename,
chembldb,
flimit=1e-3,
MinClusterSize=20,
clustering='UPGMA',
calcDists=True,
calcScores=False,
smilesCol='Smiles',
idCol='ID',
onlyCompleteRings=False,
useArthor=True):
global arthor
if not useArthor:
arthor = None
self.useArthor = useArthor
self.proj = proj
self.datapath = datapath
self.dbpath = dbpath
self.chembldb = chembldb
self.flimit = flimit
self.MinClusterSize = MinClusterSize
self.clustering = clustering
self.calcScores = calcScores
self.calcDists = calcDists
self.smilesCol = smilesCol
self.idCol = idCol
self.onlyCompleteRings = onlyCompleteRings
# load data
self.moldata_proj, self.distdata_proj = utilsDataPrep.PrepareData(
self.proj,
self.datapath,
filename,
distMeasure='Tanimoto',
FP='Morgan2',
calcDists=self.calcDists,
smilesCol=smilesCol)
if arthor is not None:
if not os.path.isdir(dbpath):
os.mkdir(dbpath)
# set up project database for arthor substructure matching
df = self.moldata_proj[[smilesCol, idCol]]
            df.to_csv('{0}{1}.smi'.format(self.dbpath, self.proj),
header=None,
index=None,
sep=' ')
os.system('smi2atdb -j 0 -l {0}{1}.smi {0}{1}.atdb'.format(
self.dbpath, self.proj))
os.system('atdb2fp -j 0 {0}{1}.atdb'.format(
self.dbpath, self.proj))
self.proj_db = arthor.SubDb('{0}{1}.atdb'.format(
self.dbpath, self.proj))
else:
if type(dbpath) == rdSubstructLibrary.SubstructLibrary:
self.proj_db = dbpath
self.db_size = len(self.proj_db)
else:
if not os.path.exists(dbpath):
print("creating database")
mols = rdSubstructLibrary.CachedTrustedSmilesMolHolder()
fps = rdSubstructLibrary.PatternHolder()
for smi in self.moldata_proj[smilesCol]:
m = Chem.MolFromSmiles(smi)
mols.AddSmiles(Chem.MolToSmiles(m))
fps.AddFingerprint(Chem.PatternFingerprint(m))
self.proj_db = rdSubstructLibrary.SubstructLibrary(
mols, fps)
self.db_size = len(mols)
pickle.dump(self.proj_db, open(dbpath, 'wb+'))
else:
self.proj_db = pickle.load(open(dbpath, 'rb'))
self.db_size = len(self.proj_db)
def AssignSeriesToMCS(self, MCSdict):
# assign series to MCS of selected clusters
smartslist = [v[2] for v in MCSdict.values()]
MolAssign_prel = {}
MolAssignment = {}
for s in range(len(smartslist)):
if arthor is not None:
res = self.proj_db.search(smartslist[s])
mols = [int(i) for i in res.to_array()]
else:
mols = self.proj_db.GetMatches(Chem.MolFromSmarts(
smartslist[s]),
maxResults=self.db_size)
MolAssign_prel[list(MCSdict.keys())[s]] = list(mols)
# remove all series that are entirely in another series
for key1 in MolAssign_prel.keys():
add = 1
for key2 in MolAssign_prel.keys():
if key2 != key1:
if set(MolAssign_prel[key1]).issubset(
set(MolAssign_prel[key2])):
if set(MolAssign_prel[key2]).issubset(
set(MolAssign_prel[key1])) and (
MCSdict[key1][0] >= MCSdict[key2][0]):
add = 1
else:
add = 0
break
if add == 1 and MolAssign_prel[key1] not in MolAssignment.values():
MolAssignment[key1] = MolAssign_prel[key1]
MolAssignment = {
k: MolAssignment[k]
for k in MolAssignment.keys()
if len(MolAssignment[k]) > self.MinClusterSize
}
if self.calcScores:
MCSdict = {
k: (MCSdict[k][0], len(MolAssignment[k]), MCSdict[k][2],
MCSdict[k][3], MolAssignment[k])
for k in MolAssignment.keys()
}
else:
MCSdict = {
k: (MCSdict[k][0], len(MolAssignment[k]), MCSdict[k][2],
MolAssignment[k])
for k in MolAssignment.keys()
}
return MolAssignment, MCSdict
def ApplyClustering(self):
        # apply clustering and calculate MCS
if self.clustering == 'UPGMA':
MCSdict = UPGMAclustering.ApplyUPGMA(
self.distdata_proj,
self.moldata_proj,
self.chembldb,
self.flimit,
self.MinClusterSize,
self.calcScores,
onlyCompleteRings=self.onlyCompleteRings,
useArthor=self.useArthor)
elif self.clustering == 'Butina':
distdata = copy.deepcopy(self.distdata_proj)
MCSdict = Butinaclustering.ApplyButina(distdata,
self.moldata_proj,
self.chembldb,
self.flimit,
self.MinClusterSize,
self.calcScores,
useArthor=self.useArthor)
else:
print('Clustering algorithm not implemented.')
return
# assign series through substructure matching and filtering
self.MolAssignment, self.MCSdict = self.AssignSeriesToMCS(MCSdict)
# prepare and save output
self.moldata_proj['ClusterID'] = [
list() for x in range(self.moldata_proj.shape[0])
]
for k, vs in self.MolAssignment.items():
for v in vs:
self.moldata_proj['ClusterID'].iloc[v].append(k)
if self.clustering == 'UPGMA':
self.moldata_proj.to_csv('{0}moldata_UPGMA.csv'.format(
self.datapath))
with open('{0}ClusterData_UPGMA.pkl'.format(self.datapath),
'wb') as fileout:
pickle.dump(self.MCSdict, fileout)
elif self.clustering == 'Butina':
self.moldata_proj.to_csv('{0}moldata_Butina.csv'.format(
self.datapath))
with open('{0}ClusterData_Butina.pkl'.format(self.datapath),
'wb') as fileout:
pickle.dump(self.MCSdict, fileout)
else:
print('Clustering algorithm not implemented.')
return
def CalculatePerformance(self, seriescolumn='series assignment'):
# benchmark the automated classification against a different (probably human-defined) classification
# human-defined compound assignment is specified in the column "seriescolumn" of the dataframe "moldata"
# automated classification assignment specified in dict "MolAssignment"
# calculates F1 score of automatically-identified series w.r.t. to all human-defined series, then links
# each automatically-identified series to the human-defined series with highest F1 score
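        # For each pair (human-defined series s, automated series c) the score used below is the
        # standard F1 measure:
        #   recall = |s & c| / |s|,  precision = |s & c| / |c|,
        #   F1 = 2 * precision * recall / (precision + recall)   (a small epsilon avoids division by zero)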
scaflist = list(set(self.moldata_proj['scaffold'].tolist()))
scaflist.sort()
intersect_matrix = np.zeros((len(scaflist), len(self.MolAssignment)))
NMatchScaf = []
NMatchCluster = np.array([len(v) for v in self.MolAssignment.values()])
for scaf_ind in range(len(scaflist)):
mollist = self.moldata_proj[self.idCol].loc[self.moldata_proj[
seriescolumn].map(lambda x: scaflist[scaf_ind] in x)].tolist()
intersect_scaf = np.array([
len(list(set(mollist) & set(clusterlist)))
for clusterlist in self.MolAssignment.values()
])
intersect_matrix[scaf_ind, :] = intersect_scaf
NMatchScaf.append(len(mollist))
NMatchScaf = np.array(NMatchScaf)
RecallMatrix = intersect_matrix / NMatchScaf[:, None]
PrecMatrix = intersect_matrix / NMatchCluster[None, :]
Fscore = (2 * RecallMatrix * PrecMatrix) / (RecallMatrix + PrecMatrix +
1e-9)
maxscore = np.argmax(Fscore, axis=0)
PrecVector = np.zeros(len(self.MolAssignment))
RecallVector = np.zeros(len(self.MolAssignment))
FscoreVector = np.zeros(len(self.MolAssignment))
LinkVector = []
for col in range(len(self.MolAssignment)):
PrecVector[col] = PrecMatrix[maxscore[col], col]
RecallVector[col] = RecallMatrix[maxscore[col], col]
FscoreVector[col] = Fscore[maxscore[col], col]
LinkVector.append((list(self.MolAssignment.keys())[col],
scaflist[maxscore[col]]))
LinkVector = np.array(LinkVector)
self.PerformanceClusters = {
'recall': RecallVector,
'precision': PrecVector,
'Fscore': FscoreVector,
'linked series': LinkVector
}
if self.clustering == 'UPGMA':
with open('{0}PerformanceData_UPGMA.pkl'.format(self.datapath),
'wb') as fileout:
pickle.dump(self.PerformanceClusters, fileout)
elif self.clustering == 'Butina':
with open('{0}PerformanceData_Butina.pkl'.format(self.datapath),
'wb') as fileout:
pickle.dump(self.PerformanceClusters, fileout)
else:
print('Clustering algorithm not implemented.')
return
def ClassificationCrossValidation(self, fraction_sample, N_sample):
samplerange = np.arange(len(self.moldata_proj))
invfrac = 1 / fraction_sample
self.SampledSeries = {}
for i in range(N_sample):
# random sampling
random.seed((i + 1) * 10)
molinds = random.sample(population=samplerange.tolist(),
k=int(
len(samplerange.tolist()) // invfrac))
moldata_sample = self.moldata_proj.iloc[molinds]
distdata_sample = self.distdata_proj[molinds, :]
distdata_sample = distdata_sample[:, molinds]
            # apply clustering and calculate MCS
if self.clustering == 'UPGMA':
MCSdict_sampled = UPGMAclustering.ApplyUPGMA(
distdata_sample,
moldata_sample,
self.chembldb,
self.flimit,
self.MinClusterSize,
self.calcScores,
useArthor=self.useArthor)
elif self.clustering == 'Butina':
MCSdict_sampled = Butinaclustering.ApplyButina(
distdata_sample,
moldata_sample,
self.chembldb,
self.flimit,
self.MinClusterSize,
self.calcScores,
useArthor=self.useArthor)
else:
print('Clustering algorithm not implemented.')
return
# assign series through substructure matching and filtering
MolAssignment_sampled, MCSdict_sampled = self.AssignSeriesToMCS(
MCSdict_sampled)
self.SampledSeries[i] = MCSdict_sampled
if self.clustering == 'UPGMA':
with open(
'{0}SampledSeries{1}_UPGMA.pkl'.format(
self.datapath, int(fraction_sample * 100)),
'wb') as fileout:
pickle.dump(self.SampledSeries, fileout)
elif self.clustering == 'Butina':
with open(
'{0}SampledSeries{1}_Butina.pkl'.format(
self.datapath, int(fraction_sample * 100)),
'wb') as fileout:
pickle.dump(self.SampledSeries, fileout)
else:
print('Clustering algorithm not implemented.')
return
return
def EvaluationCrossValidation(self):
# Compare the classification obtained from sampling ("SampledSeries") against the original classification ("MCSdict")
self.EvalCrossval = pd.DataFrame(
columns=['series id', 'repetition', 'fscore'])
for rep in self.SampledSeries.keys():
rep_dict = self.SampledSeries[rep]
keylist = [k for k in rep_dict.keys()]
for k in self.MCSdict.keys():
intersect = [
len(set(self.MCSdict[k][-1]) & set(v[-1]))
for v in rep_dict.values()
]
recall = np.array([
intersect[i] / len(rep_dict[keylist[i]][-1])
for i in range(len(keylist))
])
precision = np.array(intersect) / len(self.MCSdict[k][-1])
fscore = max(2 * recall * precision /
(recall + precision + 1e-9))
row = [int(k), int(rep), fscore]
self.EvalCrossval.loc[len(self.EvalCrossval)] = row
self.EvalCrossval['series id'] = self.EvalCrossval['series id'].apply(
int)
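

# --- Usage sketch (not part of the original module) -------------------------------------------
# Minimal, hedged example of driving the Classification class above. All names and paths
# ('my_project', '/path/to/...', 'compounds.csv', chembldb=None) are placeholders, not values
# taken from the original code.
if __name__ == '__main__':
    clf = Classification(proj='my_project',               # hypothetical project name
                         datapath='/path/to/data/',      # folder with the input file; outputs go here
                         dbpath='/path/to/substruct_db', # substructure database path (or a SubstructLibrary)
                         filename='compounds.csv',       # hypothetical input file with SMILES and IDs
                         chembldb=None,                  # ChEMBL reference passed through to the clustering modules
                         clustering='UPGMA')
    clf.ApplyClustering()                                 # cluster, compute MCS and assign series
    # clf.CalculatePerformance(seriescolumn='series assignment')  # optional benchmark vs. a human classification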
|
[
"rdkit.Chem.PatternFingerprint",
"os.mkdir",
"pickle.dump",
"numpy.argmax",
"rdkit.Chem.MolToSmiles",
"pandas.DataFrame",
"os.path.exists",
"random.seed",
"rdkit.Chem.rdSubstructLibrary.PatternHolder",
"copy.deepcopy",
"automated_series_classification.Butinaclustering.ApplyButina",
"automated_series_classification.utilsDataPrep.PrepareData",
"rdkit.Chem.rdSubstructLibrary.CachedTrustedSmilesMolHolder",
"os.path.isdir",
"rdkit.Chem.MolFromSmarts",
"rdkit.Chem.rdSubstructLibrary.SubstructLibrary",
"numpy.array",
"automated_series_classification.UPGMAclustering.ApplyUPGMA",
"rdkit.Chem.MolFromSmiles"
] |
[((1502, 1653), 'automated_series_classification.utilsDataPrep.PrepareData', 'utilsDataPrep.PrepareData', (['self.proj', 'self.datapath', 'filename'], {'distMeasure': '"""Tanimoto"""', 'FP': '"""Morgan2"""', 'calcDists': 'self.calcDists', 'smilesCol': 'smilesCol'}), "(self.proj, self.datapath, filename, distMeasure=\n 'Tanimoto', FP='Morgan2', calcDists=self.calcDists, smilesCol=smilesCol)\n", (1527, 1653), False, 'from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep\n'), ((9204, 9224), 'numpy.array', 'np.array', (['NMatchScaf'], {}), '(NMatchScaf)\n', (9212, 9224), True, 'import numpy as np\n'), ((9507, 9532), 'numpy.argmax', 'np.argmax', (['Fscore'], {'axis': '(0)'}), '(Fscore, axis=0)\n', (9516, 9532), True, 'import numpy as np\n'), ((10112, 10132), 'numpy.array', 'np.array', (['LinkVector'], {}), '(LinkVector)\n', (10120, 10132), True, 'import numpy as np\n'), ((13547, 13606), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['series id', 'repetition', 'fscore']"}), "(columns=['series id', 'repetition', 'fscore'])\n", (13559, 13606), True, 'import pandas as pd\n'), ((5708, 5916), 'automated_series_classification.UPGMAclustering.ApplyUPGMA', 'UPGMAclustering.ApplyUPGMA', (['self.distdata_proj', 'self.moldata_proj', 'self.chembldb', 'self.flimit', 'self.MinClusterSize', 'self.calcScores'], {'onlyCompleteRings': 'self.onlyCompleteRings', 'useArthor': 'self.useArthor'}), '(self.distdata_proj, self.moldata_proj, self.\n chembldb, self.flimit, self.MinClusterSize, self.calcScores,\n onlyCompleteRings=self.onlyCompleteRings, useArthor=self.useArthor)\n', (5734, 5916), False, 'from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep\n'), ((11138, 11163), 'random.seed', 'random.seed', (['((i + 1) * 10)'], {}), '((i + 1) * 10)\n', (11149, 11163), False, 'import random\n'), ((1784, 1805), 'os.path.isdir', 'os.path.isdir', (['dbpath'], {}), '(dbpath)\n', (1797, 1805), False, 'import os\n'), ((1823, 1839), 'os.mkdir', 'os.mkdir', (['dbpath'], {}), '(dbpath)\n', (1831, 1839), False, 'import os\n'), ((6102, 6135), 'copy.deepcopy', 'copy.deepcopy', (['self.distdata_proj'], {}), '(self.distdata_proj)\n', (6115, 6135), False, 'import copy\n'), ((6158, 6316), 'automated_series_classification.Butinaclustering.ApplyButina', 'Butinaclustering.ApplyButina', (['distdata', 'self.moldata_proj', 'self.chembldb', 'self.flimit', 'self.MinClusterSize', 'self.calcScores'], {'useArthor': 'self.useArthor'}), '(distdata, self.moldata_proj, self.chembldb,\n self.flimit, self.MinClusterSize, self.calcScores, useArthor=self.useArthor\n )\n', (6186, 6316), False, 'from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep\n'), ((7407, 7441), 'pickle.dump', 'pickle.dump', (['self.MCSdict', 'fileout'], {}), '(self.MCSdict, fileout)\n', (7418, 7441), False, 'import pickle\n'), ((10501, 10547), 'pickle.dump', 'pickle.dump', (['self.PerformanceClusters', 'fileout'], {}), '(self.PerformanceClusters, fileout)\n', (10512, 10547), False, 'import pickle\n'), ((11661, 11821), 'automated_series_classification.UPGMAclustering.ApplyUPGMA', 'UPGMAclustering.ApplyUPGMA', (['distdata_sample', 'moldata_sample', 'self.chembldb', 'self.flimit', 'self.MinClusterSize', 'self.calcScores'], {'useArthor': 'self.useArthor'}), '(distdata_sample, moldata_sample, self.chembldb,\n self.flimit, self.MinClusterSize, self.calcScores, useArthor=self.useArthor\n )\n', (11687, 11821), False, 'from automated_series_classification import 
UPGMAclustering, Butinaclustering, utilsDataPrep\n'), ((12913, 12953), 'pickle.dump', 'pickle.dump', (['self.SampledSeries', 'fileout'], {}), '(self.SampledSeries, fileout)\n', (12924, 12953), False, 'import pickle\n'), ((2650, 2672), 'os.path.exists', 'os.path.exists', (['dbpath'], {}), '(dbpath)\n', (2664, 2672), False, 'import os\n'), ((2748, 2797), 'rdkit.Chem.rdSubstructLibrary.CachedTrustedSmilesMolHolder', 'rdSubstructLibrary.CachedTrustedSmilesMolHolder', ([], {}), '()\n', (2795, 2797), False, 'from rdkit.Chem import rdSubstructLibrary\n'), ((2824, 2858), 'rdkit.Chem.rdSubstructLibrary.PatternHolder', 'rdSubstructLibrary.PatternHolder', ([], {}), '()\n', (2856, 2858), False, 'from rdkit.Chem import rdSubstructLibrary\n'), ((3138, 3184), 'rdkit.Chem.rdSubstructLibrary.SubstructLibrary', 'rdSubstructLibrary.SubstructLibrary', (['mols', 'fps'], {}), '(mols, fps)\n', (3173, 3184), False, 'from rdkit.Chem import rdSubstructLibrary\n'), ((3922, 3955), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (['smartslist[s]'], {}), '(smartslist[s])\n', (3940, 3955), False, 'from rdkit import Chem\n'), ((7714, 7748), 'pickle.dump', 'pickle.dump', (['self.MCSdict', 'fileout'], {}), '(self.MCSdict, fileout)\n', (7725, 7748), False, 'import pickle\n'), ((10723, 10769), 'pickle.dump', 'pickle.dump', (['self.PerformanceClusters', 'fileout'], {}), '(self.PerformanceClusters, fileout)\n', (10734, 10769), False, 'import pickle\n'), ((12034, 12196), 'automated_series_classification.Butinaclustering.ApplyButina', 'Butinaclustering.ApplyButina', (['distdata_sample', 'moldata_sample', 'self.chembldb', 'self.flimit', 'self.MinClusterSize', 'self.calcScores'], {'useArthor': 'self.useArthor'}), '(distdata_sample, moldata_sample, self.chembldb,\n self.flimit, self.MinClusterSize, self.calcScores, useArthor=self.useArthor\n )\n', (12062, 12196), False, 'from automated_series_classification import UPGMAclustering, Butinaclustering, utilsDataPrep\n'), ((13202, 13242), 'pickle.dump', 'pickle.dump', (['self.SampledSeries', 'fileout'], {}), '(self.SampledSeries, fileout)\n', (13213, 13242), False, 'import pickle\n'), ((14161, 14180), 'numpy.array', 'np.array', (['intersect'], {}), '(intersect)\n', (14169, 14180), True, 'import numpy as np\n'), ((2948, 2971), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (2966, 2971), False, 'from rdkit import Chem\n'), ((3011, 3030), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['m'], {}), '(m)\n', (3027, 3030), False, 'from rdkit import Chem\n'), ((3075, 3101), 'rdkit.Chem.PatternFingerprint', 'Chem.PatternFingerprint', (['m'], {}), '(m)\n', (3098, 3101), False, 'from rdkit import Chem\n')]
|
"""
Helper script to create config files for BlenderProc.
"""
import os
import yaml
import random
import numpy as np
import binascii
# these paths have to be manually set before creating a config
BLENDERPROC_ROOT = '' # /path/to/BlenderProc
SHAPENET_ROOT = '' # /path/to/ShapeNetCore.v2
SUNCG_ROOT = '' # /path/to/suncg
DEST = '' # /path/to/output_folder
def get_random_house_path():
with open(os.path.join(BLENDERPROC_ROOT, 'suncg_houses.txt'), 'r') as f:
house_paths = f.readlines()
return os.path.join(SUNCG_ROOT, random.choice(house_paths)).strip()
def get_base_cfg():
with open(os.path.join(BLENDERPROC_ROOT, 'base_config.yaml'), 'r') as f:
        base_cfg = yaml.load(f, Loader=yaml.SafeLoader)
return base_cfg
def get_random_obj_configs(n=10):
obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs = [], [], [], [], [], []
with open(os.path.join(BLENDERPROC_ROOT, 'shapenet_objects.txt'), 'r') as f:
obj_paths = f.readlines()
for i in range(n):
scale = np.random.uniform(0.1, 0.4)
recalculate_uv = np.random.uniform(0., 1.)
obj_base_cfg = {
"module": "loader.CustomObjectLoader",
"config": {
"path": os.path.join(SHAPENET_ROOT, random.choice(obj_paths)[:-1]),
"scale": [scale, scale, scale],
"add_properties": {
"cp_object_to_scale": True,
"cp_sample_pose": True,
"cp_category_id": int(i+2),
"cp_coarse_grained_class": "selected_object",
"cp_type": "Object",
"cp_physics": True,
"cp_cc_texture": True
},
}
}
scale_base_cfg = {
"module": "manipulators.EntityManipulator",
"config": {
"selector": {
"provider": "getter.Entity",
"conditions": {
"cp_category_id": int(i+2),
}
},
"scale": [scale, scale, scale],
"cf_add_modifier": {
"name": "Solidify",
"thickness": 0.0025
},
"cf_randomize_materials": {
"randomization_level": 1.,
"materials_to_replace_with": {
"provider": "getter.Material",
"conditions": {
"cp_is_cc_texture": True
}
}
},
}
}
mat_base_cfg = {
"module": "manipulators.MaterialManipulator",
"config": {
"selector": {
"provider": "getter.Entity",
"conditions": {
"cp_category_id": int(i + 2),
}
},
"cf_set_Roughness": {
"provider": "sampler.Value",
"type": "float",
"min": 0.05,
"max": 0.5,
},
"cf_set_Specular": {
"provider": "sampler.Value",
"type": "float",
"min": 0.5,
"max": 1.0,
},
"cf_color_link_to_displacement": {
"provider": "sampler.Value",
"type": "float",
"min": 0.001,
"max": 0.15
},
"cf_set_Alpha": 1.0,
"mode": "once_for_each"
}
}
sampler_base_cfg = {
"module": "object.OnSurfaceSampler",
"config": {
"objects_to_sample": {
"provider": "getter.Entity",
"conditions": {
"cp_category_id": int(i+2)
}
},
"surface": {
"provider": "getter.Entity",
"index": 0,
"conditions": {
"name": "selected_table"
}
},
"pos_sampler": {
"provider": "sampler.UpperRegionSampler",
"to_sample_on": {
"provider": "getter.Entity",
"index": 0,
"conditions": {
"name": "selected_table"
}
},
"min_height": 1,
"max_height": 4,
"face_sample_range": [0.4, 0.6],
"use_ray_trace_check": False,
},
"min_distance": 0.1,
"max_distance": 1.5,
"rot_sampler": {
"provider": "sampler.Uniform3d",
"min": [0, 0, 0],
"max": [6.28, 6.28, 6.28]
}
}
}
physics_base_cfg = {
"module": "object.PhysicsPositioning",
"config": {
"min_simulation_time": 0.5,
"max_simulation_time": 2,
"check_object_interval": 1,
}
}
grav_off_cfg = {
"module": "manipulators.EntityManipulator",
"config": {
"selector": {
"provider": "getter.Entity",
"conditions": {
"cp_category_id": int(i + 2),
}
},
"cp_physics": False,
}
}
scale_base_cfg["config"]["cf_add_uv_mapping"] = {
"projection": "cylinder",
"forced_recalc_of_uv_maps": True if recalculate_uv > 0.5 else False
}
mat_base_cfg["config"]["cf_add_uv_mapping"] = {
"projection": "cylinder",
"forced_recalc_of_uv_maps": True if recalculate_uv > 0.5 else False
}
obj_configs.append(obj_base_cfg)
scale_configs.append(scale_base_cfg)
mat_configs.append(mat_base_cfg)
sample_configs.append(sampler_base_cfg)
physic_configs.append(physics_base_cfg)
gravoff_configs.append(grav_off_cfg)
return obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs
def create_config():
base_cfg = get_base_cfg()
baseline = 0.065
focal_length_x = 541.14
focal_length_y = 541.14
base_cfg['modules'][8]['config']['intrinsics']['interocular_distance'] = baseline
base_cfg['modules'][8]['config']['intrinsics']['cam_K'] = [focal_length_x, 0.0, 320.0, 0.0, focal_length_y, 240.0, 0.0, 0.0, 1.0]
# add objects
num_objs = np.random.randint(5, 12)
obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs = get_random_obj_configs(n=num_objs)
for obj_config, scale_config, mat_config, sample_config, physics_config, gravoff_config in zip(obj_configs, scale_configs, mat_configs, sample_configs, physic_configs, gravoff_configs):
base_cfg['modules'].insert(6, obj_config)
base_cfg['modules'].insert(7, scale_config)
base_cfg['modules'].insert(8, sample_config)
base_cfg['modules'].insert(9, physics_config)
base_cfg['modules'].insert(10, gravoff_config)
# set house path
base_cfg['modules'][1]['config']['path'] = get_random_house_path()
# replace house with cctextures
house_cc_texture_config = {
"module": "manipulators.EntityManipulator",
"config": {
"selector": {
"provider": "getter.Entity",
"conditions": {
"type": "MESH"
}
},
"cf_randomize_materials": {
"randomization_level": 0.4,
"materials_to_replace_with": {
"provider": "getter.Material",
"random_samples": 1,
"conditions": {
"cp_is_cc_texture": True # this will return one random loaded cc textures
}
}
}
}
}
base_cfg['modules'].insert(4, house_cc_texture_config)
# set output dir
output_prefix = os.urandom(20)
output_prefix = binascii.hexlify(output_prefix)
output_prefix = str(output_prefix)[2:-1]
output_path = os.path.join(DEST, output_prefix)
os.makedirs(output_path)
base_cfg['modules'][0]['config']['global']['output_dir'] = output_path
with open(os.path.join(DEST, output_prefix + '/config.yaml'), 'w') as f:
yaml.dump(base_cfg, f)
return os.path.join(DEST, output_prefix + '/config.yaml')
if __name__ == '__main__':
path = create_config()
print(path)
|
[
"numpy.random.uniform",
"yaml.load",
"os.makedirs",
"binascii.hexlify",
"yaml.dump",
"random.choice",
"numpy.random.randint",
"os.path.join",
"os.urandom"
] |
[((7027, 7051), 'numpy.random.randint', 'np.random.randint', (['(5)', '(12)'], {}), '(5, 12)\n', (7044, 7051), True, 'import numpy as np\n'), ((8462, 8476), 'os.urandom', 'os.urandom', (['(20)'], {}), '(20)\n', (8472, 8476), False, 'import os\n'), ((8497, 8528), 'binascii.hexlify', 'binascii.hexlify', (['output_prefix'], {}), '(output_prefix)\n', (8513, 8528), False, 'import binascii\n'), ((8592, 8625), 'os.path.join', 'os.path.join', (['DEST', 'output_prefix'], {}), '(DEST, output_prefix)\n', (8604, 8625), False, 'import os\n'), ((8630, 8654), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (8641, 8654), False, 'import os\n'), ((8850, 8900), 'os.path.join', 'os.path.join', (['DEST', "(output_prefix + '/config.yaml')"], {}), "(DEST, output_prefix + '/config.yaml')\n", (8862, 8900), False, 'import os\n'), ((695, 707), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (704, 707), False, 'import yaml\n'), ((1036, 1063), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(0.4)'], {}), '(0.1, 0.4)\n', (1053, 1063), True, 'import numpy as np\n'), ((1089, 1116), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1106, 1116), True, 'import numpy as np\n'), ((8816, 8838), 'yaml.dump', 'yaml.dump', (['base_cfg', 'f'], {}), '(base_cfg, f)\n', (8825, 8838), False, 'import yaml\n'), ((406, 456), 'os.path.join', 'os.path.join', (['BLENDERPROC_ROOT', '"""suncg_houses.txt"""'], {}), "(BLENDERPROC_ROOT, 'suncg_houses.txt')\n", (418, 456), False, 'import os\n'), ((613, 663), 'os.path.join', 'os.path.join', (['BLENDERPROC_ROOT', '"""base_config.yaml"""'], {}), "(BLENDERPROC_ROOT, 'base_config.yaml')\n", (625, 663), False, 'import os\n'), ((896, 950), 'os.path.join', 'os.path.join', (['BLENDERPROC_ROOT', '"""shapenet_objects.txt"""'], {}), "(BLENDERPROC_ROOT, 'shapenet_objects.txt')\n", (908, 950), False, 'import os\n'), ((8745, 8795), 'os.path.join', 'os.path.join', (['DEST', "(output_prefix + '/config.yaml')"], {}), "(DEST, output_prefix + '/config.yaml')\n", (8757, 8795), False, 'import os\n'), ((541, 567), 'random.choice', 'random.choice', (['house_paths'], {}), '(house_paths)\n', (554, 567), False, 'import random\n'), ((1267, 1291), 'random.choice', 'random.choice', (['obj_paths'], {}), '(obj_paths)\n', (1280, 1291), False, 'import random\n')]
|
import h5py
import numpy as np
def load_data(fname):
# load in an hdf5 file and return the X and y values
data_file = h5py.File(fname)
# load in X and y training data, fully into memory
X = data_file['X'][:].reshape(-1, 1) # each row is a data point
y = data_file['y'][:]
return X, y
def eval_fit(y_pred, y_true):
# compute mean absolute error
mae = np.mean(np.abs(y_pred - y_true))
return mae # don't normalize
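

# --- Usage sketch (not part of the original file) ----------------------------------------------
# Hedged example of the two helpers above; 'data.h5' is a placeholder file name, assumed to
# contain datasets named 'X' and 'y'.
if __name__ == '__main__':
    X, y = load_data('data.h5')
    baseline_mae = eval_fit(np.zeros_like(y), y)   # MAE of an all-zero predictor as a sanity check
    print('baseline MAE:', baseline_mae)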
|
[
"h5py.File",
"numpy.abs"
] |
[((127, 143), 'h5py.File', 'h5py.File', (['fname'], {}), '(fname)\n', (136, 143), False, 'import h5py\n'), ((395, 418), 'numpy.abs', 'np.abs', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (401, 418), True, 'import numpy as np\n')]
|
import time
import logging
import cv2
import numpy as np
from deep_sort_realtime.deep_sort import nn_matching
from deep_sort_realtime.deep_sort.detection import Detection
from deep_sort_realtime.deep_sort.tracker import Tracker
from deep_sort_realtime.utils.nms import non_max_suppression
log_level = logging.DEBUG
default_logger = logging.getLogger('DeepSORT')
default_logger.setLevel(log_level)
handler = logging.StreamHandler()
handler.setLevel(log_level)
formatter = logging.Formatter('[%(levelname)s] [%(name)s] %(message)s')
handler.setFormatter(formatter)
default_logger.addHandler(handler)
class DeepSort(object):
def __init__(self, max_age = 30, nms_max_overlap=1.0, max_cosine_distance=0.2, nn_budget=None, override_track_class=None, clock=None, embedder=True, half=True, bgr=True, logger=None, polygon=False):
'''
Parameters
----------
max_age : Optional[int] = 30
Maximum number of missed misses before a track is deleted.
nms_max_overlap : Optional[float] = 1.0
Non-maxima suppression threshold: Maximum detection overlap, if is 1.0, nms will be disabled
max_cosine_distance : Optional[float] = 0.2
Gating threshold for cosine distance
nn_budget : Optional[int] = None
Maximum size of the appearance descriptors, if None, no budget is enforced
override_track_class : Optional[object] = None
Giving this will override default Track class, this must inherit Track
clock : Optional[object] = None
Clock custom object provides date for track naming and facilitates track id reset every day, preventing overflow and overly large track ids. For example clock class, please see `utils/clock.py`
embedder : Optional[bool] = True
Whether to use in-built embedder or not. If False, then embeddings must be given during update
half : Optional[bool] = True
Whether to use half precision for deep embedder
bgr : Optional[bool] = True
Whether frame given to embedder is expected to be BGR or not (RGB)
logger : Optional[object] = None
logger object
polygon: Optional[bool] = False
Whether detections are polygons (e.g. oriented bounding boxes)
'''
if logger is None:
self.logger = default_logger
else:
self.logger = logger
# self.video_info = video_info
# assert clock is not None
self.nms_max_overlap = nms_max_overlap
metric = nn_matching.NearestNeighborDistanceMetric(
"cosine", max_cosine_distance, nn_budget)
self.tracker = Tracker(metric, max_age = max_age, override_track_class=override_track_class, clock=clock, logger=self.logger)
if embedder:
from deep_sort_realtime.embedder.embedder_pytorch import MobileNetv2_Embedder as Embedder
self.embedder = Embedder(half=half, max_batch_size=16, bgr=bgr)
else:
self.embedder = None
self.polygon = polygon
self.logger.info('DeepSort Tracker initialised')
self.logger.info(f'- max age: {max_age}')
self.logger.info(f'- appearance threshold: {max_cosine_distance}')
self.logger.info(f'- nms threshold: {"OFF" if self.nms_max_overlap==1.0 else self.nms_max_overlap }')
self.logger.info(f'- max num of appearance features: {nn_budget}')
self.logger.info(f'- overriding track class : {"No" if override_track_class is None else "Yes"}' )
self.logger.info(f'- clock : {"No" if clock is None else "Yes"}' )
self.logger.info(f'- in-build embedder : {"No" if self.embedder is None else "Yes"}' )
self.logger.info(f'- polygon detections : {"No" if polygon is False else "Yes"}' )
def update_tracks(self, raw_detections, embeds=None, frame=None):
"""Run multi-target tracker on a particular sequence.
Parameters
----------
raw_detections (horizontal bb) : List[ Tuple[ List[float or int], float, str ] ]
List of detections, each in tuples of ( [left,top,w,h] , confidence, detection_class)
raw_detections (polygon) : List[ List[float], List[int or str], List[float] ]
List of Polygons, Classes, Confidences. All 3 sublists of the same length. A polygon defined as a ndarray-like [x1,y1,x2,y2,...].
embeds : Optional[ List[] ] = None
List of appearance features corresponding to detections
frame : Optional [ np.ndarray ] = None
if embeds not given, Image frame must be given here, in [H,W,C].
Returns
-------
list of track objects (Look into track.py for more info or see "main" section below in this script to see simple example)
"""
if embeds is None:
if self.embedder is None:
raise Exception('Embedder not created during init so embeddings must be given now!')
if frame is None:
raise Exception('either embeddings or frame must be given!')
if not self.polygon:
raw_detections = [ d for d in raw_detections if d[0][2] > 0 and d[0][3] > 0]
if embeds is None:
embeds = self.generate_embeds(frame, raw_detections)
# Proper deep sort detection objects that consist of bbox, confidence and embedding.
detections = self.create_detections(raw_detections, embeds)
else:
polygons, bounding_rects = self.process_polygons(raw_detections[0])
if embeds is None:
embeds = self.generate_embeds_poly(frame, polygons, bounding_rects)
# Proper deep sort detection objects that consist of bbox, confidence and embedding.
detections = self.create_detections_poly(raw_detections, embeds, bounding_rects)
# Run non-maxima suppression.
boxes = np.array([d.ltwh for d in detections])
scores = np.array([d.confidence for d in detections])
if self.nms_max_overlap < 1.0:
# nms_tic = time.perf_counter()
indices = non_max_suppression(
boxes, self.nms_max_overlap, scores)
# nms_toc = time.perf_counter()
# logger.debug(f'nms time: {nms_toc-nms_tic}s')
detections = [detections[i] for i in indices]
# Update tracker.
self.tracker.predict()
self.tracker.update(detections)
return self.tracker.tracks
def refresh_track_ids(self):
self.tracker._next_id
def generate_embeds(self, frame, raw_dets):
crops = self.crop_bb(frame, raw_dets)
return self.embedder.predict(crops)
def generate_embeds_poly(self, frame, polygons, bounding_rects):
crops = self.crop_poly_pad_black(frame, polygons, bounding_rects)
return self.embedder.predict(crops)
def create_detections(self, raw_dets, embeds):
detection_list = []
for raw_det, embed in zip(raw_dets,embeds):
detection_list.append(Detection(raw_det[0], raw_det[1], embed, class_name=raw_det[2])) #raw_det = [bbox, conf_score, class]
return detection_list
def create_detections_poly(self, dets, embeds, bounding_rects):
detection_list = []
dets.extend([embeds, bounding_rects])
for raw_polygon, cl, score, embed, bounding_rect in zip(*dets):
x,y,w,h = bounding_rect
x = max(0, x)
y = max(0, y)
bbox = [x,y,w,h]
detection_list.append(Detection(bbox, score, embed, class_name=cl, others=raw_polygon))
return detection_list
@staticmethod
def process_polygons(raw_polygons):
polygons = [ [ polygon[x:x+2] for x in range(0, len(polygon), 2) ]for polygon in raw_polygons ]
bounding_rects = [ cv2.boundingRect(np.array([polygon]).astype(int)) for polygon in polygons ]
return polygons, bounding_rects
@staticmethod
def crop_bb(frame, raw_dets):
crops = []
im_height, im_width = frame.shape[:2]
for detection in raw_dets:
l,t,w,h = [int(x) for x in detection[0]]
r = l + w
b = t + h
crop_l = max(0, l)
crop_r = min(im_width, r)
crop_t = max(0, t)
crop_b = min(im_height, b)
crops.append(frame[crop_t:crop_b, crop_l:crop_r])
return crops
@staticmethod
def crop_poly_pad_black(frame, polygons, bounding_rects):
masked_polys = []
im_height, im_width = frame.shape[:2]
for polygon, bounding_rect in zip(polygons, bounding_rects):
mask = np.zeros(frame.shape, dtype=np.uint8)
polygon_mask = np.array([polygon]).astype(int)
cv2.fillPoly(mask, polygon_mask, color=(255,255,255))
# apply the mask
masked_image = cv2.bitwise_and(frame, mask)
# crop masked image
x,y,w,h = bounding_rect
crop_l = max(0, x)
crop_r = min(im_width, x+w)
crop_t = max(0, y)
crop_b = min(im_height, y+h)
cropped = masked_image[crop_t:crop_b, crop_l:crop_r].copy()
masked_polys.append(np.array(cropped))
return masked_polys
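

# --- Usage sketch (not part of the original file) ----------------------------------------------
# Hedged example assembled from the docstrings above (the original "main" section referenced in
# update_tracks() is not included in this excerpt). The synthetic grey frame and the single
# hand-written detection are placeholders.
if __name__ == '__main__':
    tracker = DeepSort(max_age=30)
    frame = np.full((480, 640, 3), 128, dtype=np.uint8)           # stand-in BGR image, [H, W, C]
    detections = [([100, 120, 50, 80], 0.9, 'person')]            # ([left, top, w, h], confidence, class)
    tracks = tracker.update_tracks(detections, frame=frame)
    for track in tracks:
        print(track.track_id)   # see track.py in the package for the full Track API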
|
[
"deep_sort_realtime.deep_sort.tracker.Tracker",
"deep_sort_realtime.utils.nms.non_max_suppression",
"cv2.bitwise_and",
"deep_sort_realtime.deep_sort.detection.Detection",
"logging.StreamHandler",
"numpy.zeros",
"cv2.fillPoly",
"logging.Formatter",
"deep_sort_realtime.embedder.embedder_pytorch.MobileNetv2_Embedder",
"numpy.array",
"logging.getLogger",
"deep_sort_realtime.deep_sort.nn_matching.NearestNeighborDistanceMetric"
] |
[((336, 365), 'logging.getLogger', 'logging.getLogger', (['"""DeepSORT"""'], {}), "('DeepSORT')\n", (353, 365), False, 'import logging\n'), ((411, 434), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (432, 434), False, 'import logging\n'), ((475, 534), 'logging.Formatter', 'logging.Formatter', (['"""[%(levelname)s] [%(name)s] %(message)s"""'], {}), "('[%(levelname)s] [%(name)s] %(message)s')\n", (492, 534), False, 'import logging\n'), ((2574, 2661), 'deep_sort_realtime.deep_sort.nn_matching.NearestNeighborDistanceMetric', 'nn_matching.NearestNeighborDistanceMetric', (['"""cosine"""', 'max_cosine_distance', 'nn_budget'], {}), "('cosine', max_cosine_distance,\n nn_budget)\n", (2615, 2661), False, 'from deep_sort_realtime.deep_sort import nn_matching\n'), ((2694, 2806), 'deep_sort_realtime.deep_sort.tracker.Tracker', 'Tracker', (['metric'], {'max_age': 'max_age', 'override_track_class': 'override_track_class', 'clock': 'clock', 'logger': 'self.logger'}), '(metric, max_age=max_age, override_track_class=override_track_class,\n clock=clock, logger=self.logger)\n', (2701, 2806), False, 'from deep_sort_realtime.deep_sort.tracker import Tracker\n'), ((5969, 6007), 'numpy.array', 'np.array', (['[d.ltwh for d in detections]'], {}), '([d.ltwh for d in detections])\n', (5977, 6007), True, 'import numpy as np\n'), ((6025, 6069), 'numpy.array', 'np.array', (['[d.confidence for d in detections]'], {}), '([d.confidence for d in detections])\n', (6033, 6069), True, 'import numpy as np\n'), ((2956, 3003), 'deep_sort_realtime.embedder.embedder_pytorch.MobileNetv2_Embedder', 'Embedder', ([], {'half': 'half', 'max_batch_size': '(16)', 'bgr': 'bgr'}), '(half=half, max_batch_size=16, bgr=bgr)\n', (2964, 3003), True, 'from deep_sort_realtime.embedder.embedder_pytorch import MobileNetv2_Embedder as Embedder\n'), ((6175, 6231), 'deep_sort_realtime.utils.nms.non_max_suppression', 'non_max_suppression', (['boxes', 'self.nms_max_overlap', 'scores'], {}), '(boxes, self.nms_max_overlap, scores)\n', (6194, 6231), False, 'from deep_sort_realtime.utils.nms import non_max_suppression\n'), ((8720, 8757), 'numpy.zeros', 'np.zeros', (['frame.shape'], {'dtype': 'np.uint8'}), '(frame.shape, dtype=np.uint8)\n', (8728, 8757), True, 'import numpy as np\n'), ((8829, 8884), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'polygon_mask'], {'color': '(255, 255, 255)'}), '(mask, polygon_mask, color=(255, 255, 255))\n', (8841, 8884), False, 'import cv2\n'), ((8940, 8968), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'mask'], {}), '(frame, mask)\n', (8955, 8968), False, 'import cv2\n'), ((7102, 7165), 'deep_sort_realtime.deep_sort.detection.Detection', 'Detection', (['raw_det[0]', 'raw_det[1]', 'embed'], {'class_name': 'raw_det[2]'}), '(raw_det[0], raw_det[1], embed, class_name=raw_det[2])\n', (7111, 7165), False, 'from deep_sort_realtime.deep_sort.detection import Detection\n'), ((7600, 7664), 'deep_sort_realtime.deep_sort.detection.Detection', 'Detection', (['bbox', 'score', 'embed'], {'class_name': 'cl', 'others': 'raw_polygon'}), '(bbox, score, embed, class_name=cl, others=raw_polygon)\n', (7609, 7664), False, 'from deep_sort_realtime.deep_sort.detection import Detection\n'), ((9285, 9302), 'numpy.array', 'np.array', (['cropped'], {}), '(cropped)\n', (9293, 9302), True, 'import numpy as np\n'), ((8785, 8804), 'numpy.array', 'np.array', (['[polygon]'], {}), '([polygon])\n', (8793, 8804), True, 'import numpy as np\n'), ((7903, 7922), 'numpy.array', 'np.array', (['[polygon]'], {}), '([polygon])\n', (7911, 7922), True, 
'import numpy as np\n')]
|
import cv2
import os
import numpy as np
from PIL import Image
import picamera.array
from picamera import PiCamera
class Face(object):
training_count = 5
threshold = 30
def __init__(self, casc_path, path="./passwords", camera_port=0):
self.path = path
self._cascade = cv2.CascadeClassifier(casc_path)
self._port = camera_port
def __del__(self):
cv2.destroyAllWindows()
def _capture_image(self):
"""
        Capture a single frame from the camera and convert it to grayscale.
        :return: numpy.ndarray -- the grayscale frame
"""
with PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as stream:
camera.resolution = (640, 480)
camera.capture(stream, 'bgr', use_video_port=True)
return cv2.cv.cvtColor(stream.array, cv2.COLOR_BGR2GRAY)
def _get_faces_and_frames(self):
frame = self._capture_image()
faces = self._cascade.detectMultiScale(
frame,
scaleFactor=1.2,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
return faces, frame
def _get_training_faf(self):
"""
        Gets all images required for training.
        :yield: faces, frame
        * Note
        Only frames containing exactly one face are yielded; gives up early if
        enough usable frames cannot be captured.
"""
count = 0
error_count = 0
        while count < self.training_count:  # loop until we have self.training_count usable images (or give up)
error_count += 1
faces, frame = self._get_faces_and_frames()
if len(faces) == 1:
yield faces, frame
count += 1
elif error_count >= 10:
break
def can_unlock(self):
"""
Will return false under the following conditions:
1. More than one face in the image
2. No images in password file
3. Face is not recognized
        :return: True if the face is recognized, False otherwise
"""
face, frame = self._get_faces_and_frames()
# Don't allow more than 1 face in the image
if len(face) != 1:
return False
x, y, w, h = face[0]
face = frame[y: y + h, x: x + w]
recognizer = cv2.face.createLBPHFaceRecognizer()
paths = [os.path.join(self.path, f) for f in os.listdir(self.path) if f.endswith("bmp")]
if not paths:
return False # Return since there are no images saved as a password
        # images will contain the face images
images = []
        # labels will contain the label assigned to each image
labels = []
nbr = 0
for image_path in paths:
# Read the image
image_pil = Image.open(image_path)
# Convert the image format into numpy array
image = np.array(image_pil, 'uint8')
images.append(image)
labels.append(nbr)
nbr += 1
cv2.destroyAllWindows()
        # Perform the training
recognizer.train(images, np.array(labels))
nbr_predicted, conf = recognizer.predict(face)
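        # LBPH 'confidence' is a distance score: lower values mean a closer match,
        # so only predictions below the threshold are accepted as the stored face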
if conf < self.threshold:
return True
return False
def new_pass(self):
count = 0
for face, frame in self._get_training_faf():
filename = "".join(["passwords/", str(count), ".bmp"])
x, y, w, h = face[0]
frame = frame[y: y + h, x: x + w]
count += 1
cv2.imwrite(filename, frame)
def secure_new_pass(self):
if self.can_unlock():
self.new_pass()
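# Example usage (illustrative sketch): the cascade filename below and an existing
# 'passwords' directory are assumptions, not something this module sets up itself.
#
#   face = Face("haarcascade_frontalface_default.xml")
#   face.new_pass()            # capture training frames and store them as the password
#   if face.can_unlock():      # later, compare a fresh capture against the stored faces
#       print("unlocked")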
|
[
"cv2.cv.cvtColor",
"cv2.imwrite",
"PIL.Image.open",
"cv2.face.createLBPHFaceRecognizer",
"numpy.array",
"cv2.CascadeClassifier",
"cv2.destroyAllWindows",
"os.path.join",
"os.listdir",
"picamera.PiCamera"
] |
[((301, 333), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['casc_path'], {}), '(casc_path)\n', (322, 333), False, 'import cv2\n'), ((399, 422), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (420, 422), False, 'import cv2\n'), ((2335, 2370), 'cv2.face.createLBPHFaceRecognizer', 'cv2.face.createLBPHFaceRecognizer', ([], {}), '()\n', (2368, 2370), False, 'import cv2\n'), ((3054, 3077), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3075, 3077), False, 'import cv2\n'), ((579, 589), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (587, 589), False, 'from picamera import PiCamera\n'), ((2389, 2415), 'os.path.join', 'os.path.join', (['self.path', 'f'], {}), '(self.path, f)\n', (2401, 2415), False, 'import os\n'), ((2831, 2853), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (2841, 2853), False, 'from PIL import Image\n'), ((2930, 2958), 'numpy.array', 'np.array', (['image_pil', '"""uint8"""'], {}), "(image_pil, 'uint8')\n", (2938, 2958), True, 'import numpy as np\n'), ((3143, 3159), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3151, 3159), True, 'import numpy as np\n'), ((3577, 3605), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'frame'], {}), '(filename, frame)\n', (3588, 3605), False, 'import cv2\n'), ((800, 849), 'cv2.cv.cvtColor', 'cv2.cv.cvtColor', (['stream.array', 'cv2.COLOR_BGR2GRAY'], {}), '(stream.array, cv2.COLOR_BGR2GRAY)\n', (815, 849), False, 'import cv2\n'), ((2425, 2446), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (2435, 2446), False, 'import os\n')]
|
from __future__ import print_function
import argparse
import os
import csv
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from data_utils.data_util import PointcloudScaleAndTranslate
from data_utils.ModelNetDataLoader import ModelNetDataLoader
from models.pointnet import PointNetCls, feature_transform_regularizer
from models.pointnet2 import PointNet2ClsMsg
from models.dgcnn import DGCNN
from models.pointcnn import PointCNNCls
from utils import progress_bar, log_row
import sys
sys.path.append("./emd/")
import emd_module as emd
def gen_train_log(args):
if not os.path.isdir('logs_train'):
os.mkdir('logs_train')
logname = ('logs_train/%s_%s_%s.csv' % (args.data, args.model, args.name))
if os.path.exists(logname):
with open(logname, 'a') as logfile:
log_row(logname, [''])
log_row(logname, [''])
with open(logname, 'a') as logfile:
logwriter = csv.writer(logfile, delimiter=',')
logwriter.writerow(['model type', 'data set', 'seed', 'train batch size',
'number of points in one batch', 'number of epochs', 'optimizer',
'learning rate', 'resume checkpoint path',
'feature transform', 'lambda for feature transform regularizer', 'data augment'])
logwriter.writerow([args.model, args.data, args.seed, args.batch_size, args.num_points,
args.epochs, args.optimizer, args.lr, args.resume,
args.feature_transform, args.lambda_ft, args.augment])
logwriter.writerow(['Note', args.note])
logwriter.writerow([''])
def save_ckpt(args, epoch, model, optimizer, acc_list):
if not os.path.isdir('checkpoints'):
os.mkdir('checkpoints')
if not os.path.isdir('checkpoints/%s_%s_%s' % (args.data, args.model, args.name)):
os.mkdir('checkpoints/%s_%s_%s' % (args.data, args.model, args.name))
if acc_list[-1] > max(acc_list[:-1]):
print('=====> Saving checkpoint...')
print('the best test acc is', acc_list[-1])
state = {
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'args': args,
'acc_list': acc_list,
}
torch.save(state, 'checkpoints/%s_%s_%s/best.pth' % (args.data, args.model, args.name))
print('Successfully save checkpoint at epoch %d' % epoch)
def cal_loss(pred, gold, smoothing=True):
''' Calculate cross entropy loss, apply label smoothing if needed. '''
gold = gold.contiguous().view(-1)
if smoothing:
eps = 0.2
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
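        # Worked example (illustrative): with eps = 0.2 and n_class = 40, the true
        # class keeps probability 0.8 and each other class gets 0.2 / 39 ~= 0.005
        # before the negative log-likelihood below.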
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(pred, gold, reduction='mean')
return loss
def test(model, test_loader, criterion):
model.eval()
correct = 0
total = 0
for j, data in enumerate(test_loader, 0):
points, label = data
points, label = points.to(device), label.to(device)[:, 0]
if args.model == 'rscnn_kcutmix':
fps_idx = pointnet2_utils.furthest_point_sample(points, args.num_points) # (B, npoint)
points = pointnet2_utils.gather_operation(points.transpose(1, 2).contiguous(), fps_idx).transpose(1,
2).contiguous() # (B, N, 3)
points = points.transpose(2, 1) # to be shape batch_size*3*N
pred, trans_feat = model(points)
loss = criterion(pred, label.long())
pred_choice = pred.data.max(1)[1]
correct += pred_choice.eq(label.data).cpu().sum()
total += label.size(0)
progress_bar(j, len(test_loader), 'Test Loss: %.3f | Test Acc: %.3f%% (%d/%d)'
% (loss.item() / (j + 1), 100. * correct.item() / total, correct, total))
return loss.item() / (j + 1), 100. * correct.item() / total
if __name__ == '__main__':
########################################
    ## Set hyperparameters
########################################
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='pointnet', help='choose model type')
parser.add_argument('--data', type=str, default='modelnet40', help='choose data set')
parser.add_argument('--seed', type=int, default=0, help='manual random seed')
parser.add_argument('--batch_size', type=int, default=16, help='input batch size')
    parser.add_argument('--num_points', type=int, default=1024, help='number of points per point cloud')
parser.add_argument('--epochs', type=int, default=300, help='number of epochs to train for')
parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate in training')
parser.add_argument('--resume', type=str, default='/', help='resume path')
parser.add_argument('--feature_transform', type=int, default=1, help="use feature transform")
parser.add_argument('--lambda_ft', type=float, default=0.001, help="lambda for feature transform")
    parser.add_argument('--augment', type=int, default=1, help='data augmentation to increase robustness')
parser.add_argument('--name', type=str, default='train', help='name of the experiment')
parser.add_argument('--note', type=str, default='', help='notation of the experiment')
parser.add_argument('--normal', action='store_true', default=False,
help='Whether to use normal information [default: False]')
parser.add_argument('--beta', default=1, type=float, help='hyperparameter beta')
parser.add_argument('--cutmix_prob', default=0.5, type=float, help='cutmix probability')
args = parser.parse_args()
args.feature_transform, args.augment = bool(args.feature_transform), bool(args.augment)
### Set random seed
args.seed = args.seed if args.seed > 0 else random.randint(1, 10000)
# dataset path
DATA_PATH = './data/modelnet40_normal_resampled/'
########################################
    ## Instantiate model
########################################
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_classes = 40
if args.model == 'dgcnn_kcutmix':
model = DGCNN(num_classes)
model = model.to(device)
model = nn.DataParallel(model)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr * 100,
momentum=0.9, weight_decay=1e-4)
scheduler_c = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 250,
eta_min=1e-3)
else:
if args.model == 'pointnet_kcutmix':
model = PointNetCls(num_classes, args.feature_transform)
model = model.to(device)
elif args.model == 'pointnet2_kcutmix':
model = PointNet2ClsMsg(num_classes)
model = model.to(device)
model = nn.DataParallel(model)
elif args.model == 'rscnn_kcutmix':
from models.rscnn import RSCNN
import models.rscnn_utils.pointnet2_utils as pointnet2_utils
model = RSCNN(num_classes)
model = model.to(device)
model = nn.DataParallel(model)
optimizer = torch.optim.Adam(
model.parameters(),
lr=args.lr,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=1e-4
)
scheduler_c = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
if len(args.resume) > 1:
print('=====> Loading from checkpoint...')
checkpoint = torch.load('./checkpoints/%s.pth' % args.resume)
args = checkpoint['args']
torch.manual_seed(args.seed)
print("Random Seed: ", args.seed)
"""if args.optimizer == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
elif args.optimizer == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))"""
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
START_EPOCH = checkpoint['epoch'] + 1
acc_list = checkpoint['acc_list']
if args.model == 'dgcnn_kcutmix':
scheduler_c = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 250, eta_min=1e-3)
else:
scheduler_c = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
print('Successfully resumed!')
else:
print('=====> Building new model...')
torch.manual_seed(args.seed)
print("Random Seed: ", args.seed)
START_EPOCH = 0
acc_list = [0]
print('Successfully built!')
########################################
## Load data
########################################
print('======> Loading data')
TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_points, split='train',
normal_channel=args.normal)
TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_points, split='test',
normal_channel=args.normal)
train_loader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True,
num_workers=4, drop_last=True)
test_loader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False,
num_workers=4, drop_last=False)
PointcloudScaleAndTranslate = PointcloudScaleAndTranslate()
print('======> Successfully loaded!')
gen_train_log(args)
logname = ('logs_train/%s_%s_%s.csv' % (args.data, args.model, args.name))
########################################
## Train
########################################
if args.model == 'dgcnn_kcutmix':
criterion = cal_loss
else:
criterion = F.cross_entropy # nn.CrossEntropyLoss()
if args.resume == '/':
log_row(logname, ['Epoch', 'Train Loss', 'Train Acc', 'Test Loss', 'Test Acc', 'learning Rate'])
for epoch in range(START_EPOCH, args.epochs):
print('\nEpoch: %d' % epoch)
scheduler_c.step(epoch)
model.train()
correct = 0
total = 0
for i, data in enumerate(train_loader, 0):
points, target = data
points, target = points.to(device), target.to(device)[:, 0]
points = PointcloudScaleAndTranslate(points)
if args.model == 'rscnn_kcutmix':
fps_idx = pointnet2_utils.furthest_point_sample(points, args.num_points) # (B, npoint)
fps_idx = fps_idx[:, np.random.choice(args.num_points, args.num_points, False)]
points = pointnet2_utils.gather_operation(points.transpose(1, 2).contiguous(), fps_idx).transpose(1,2).contiguous() # (B, N, 3)
# cutmix
optimizer.zero_grad()
r = np.random.rand(1)
if args.beta > 0 and r < args.cutmix_prob:
lam = np.random.beta(args.beta, args.beta)
B = points.size()[0]
rand_index = torch.randperm(B).cuda()
target_a = target
target_b = target[rand_index]
point_a = torch.zeros(B, 1024, 3)
point_b = torch.zeros(B, 1024, 3)
point_c = torch.zeros(B, 1024, 3)
point_a = points
point_b = points[rand_index]
point_c = points[rand_index]
point_a, point_b, point_c = point_a.to(device), point_b.to(device), point_c.to(device)
remd = emd.emdModule()
remd = remd.cuda()
dis, ind = remd(point_a, point_b, 0.005, 300)
for ass in range(B):
point_c[ass, :, :] = point_c[ass, ind[ass].long(), :]
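                # point_c is now point_b reordered by the EMD assignment, so
                # point_c[b, k] is the optimal-transport partner of point_a[b, k];
                # replacing the kNN patch below therefore swaps a spatially
                # coherent region between the two clouds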
int_lam = int(args.num_points * lam)
int_lam = max(1, int_lam)
random_point = torch.from_numpy(np.random.choice(1024, B, replace=False, p=None))
# kNN
ind1 = torch.tensor(range(B))
query = point_a[ind1, random_point].view(B, 1, 3)
dist = torch.sqrt(torch.sum((point_a - query.repeat(1, args.num_points, 1)) ** 2, 2))
idxs = dist.topk(int_lam, dim=1, largest=False, sorted=True).indices
for i2 in range(B):
points[i2, idxs[i2], :] = point_c[i2, idxs[i2], :]
# adjust lambda to exactly match point ratio
lam = int_lam * 1.0 / args.num_points
points = points.transpose(2, 1)
pred, trans_feat = model(points)
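                # mix the two labels in proportion to how many points each cloud
                # contributes: (1 - lam) from the original sample, lam from the donor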
loss = criterion(pred, target_a.long()) * (1. - lam) + criterion(pred, target_b.long()) * lam
else:
points = points.transpose(2, 1)
pred, trans_feat = model(points)
loss = criterion(pred, target.long())
if args.feature_transform and args.model == 'pointnet_kcutmix':
loss += feature_transform_regularizer(trans_feat) * args.lambda_ft
loss.backward()
optimizer.step()
pred_choice = pred.data.max(1)[1]
correct += pred_choice.eq(target.data).cpu().sum()
total += target.size(0)
progress_bar(i, len(train_loader), 'Train Loss: %.3f | Train Acc: %.3f%% (%d/%d)'
% (loss.item() / (i + 1), 100. * correct.item() / total, correct, total))
train_loss, train_acc = loss.item() / (i + 1), 100. * correct.item() / total
### Test in batch
test_loss, test_acc = test(model, test_loader, criterion)
acc_list.append(test_acc)
print('the best test acc is', max(acc_list))
        ### Keep track of results
log_row(logname, [epoch, train_loss, train_acc, test_loss, test_acc,
optimizer.param_groups[0]['lr'], max(acc_list), np.argmax(acc_list) - 1])
save_ckpt(args, epoch, model, optimizer, acc_list)
|
[
"os.mkdir",
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"numpy.argmax",
"models.rscnn.RSCNN",
"data_utils.ModelNetDataLoader.ModelNetDataLoader",
"models.pointnet.PointNetCls",
"sys.path.append",
"models.pointnet2.PointNet2ClsMsg",
"random.randint",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"utils.log_row",
"torch.nn.functional.log_softmax",
"numpy.random.choice",
"torch.zeros",
"csv.writer",
"torch.zeros_like",
"numpy.random.beta",
"torch.manual_seed",
"torch.nn.functional.cross_entropy",
"torch.cuda.is_available",
"torch.randperm",
"data_utils.data_util.PointcloudScaleAndTranslate",
"emd_module.emdModule",
"models.pointnet.feature_transform_regularizer",
"os.path.isdir",
"torch.save",
"models.dgcnn.DGCNN",
"models.rscnn_utils.pointnet2_utils.furthest_point_sample",
"numpy.random.rand",
"torch.nn.DataParallel"
] |
[((637, 662), 'sys.path.append', 'sys.path.append', (['"""./emd/"""'], {}), "('./emd/')\n", (652, 662), False, 'import sys\n'), ((882, 905), 'os.path.exists', 'os.path.exists', (['logname'], {}), '(logname)\n', (896, 905), False, 'import os\n'), ((4609, 4634), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4632, 4634), False, 'import argparse\n'), ((9632, 9737), 'data_utils.ModelNetDataLoader.ModelNetDataLoader', 'ModelNetDataLoader', ([], {'root': 'DATA_PATH', 'npoint': 'args.num_points', 'split': '"""train"""', 'normal_channel': 'args.normal'}), "(root=DATA_PATH, npoint=args.num_points, split='train',\n normal_channel=args.normal)\n", (9650, 9737), False, 'from data_utils.ModelNetDataLoader import ModelNetDataLoader\n'), ((9795, 9899), 'data_utils.ModelNetDataLoader.ModelNetDataLoader', 'ModelNetDataLoader', ([], {'root': 'DATA_PATH', 'npoint': 'args.num_points', 'split': '"""test"""', 'normal_channel': 'args.normal'}), "(root=DATA_PATH, npoint=args.num_points, split='test',\n normal_channel=args.normal)\n", (9813, 9899), False, 'from data_utils.ModelNetDataLoader import ModelNetDataLoader\n'), ((9957, 10076), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['TRAIN_DATASET'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(4)', 'drop_last': '(True)'}), '(TRAIN_DATASET, batch_size=args.batch_size,\n shuffle=True, num_workers=4, drop_last=True)\n', (9984, 10076), False, 'import torch\n'), ((10140, 10260), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['TEST_DATASET'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(4)', 'drop_last': '(False)'}), '(TEST_DATASET, batch_size=args.batch_size,\n shuffle=False, num_workers=4, drop_last=False)\n', (10167, 10260), False, 'import torch\n'), ((10341, 10370), 'data_utils.data_util.PointcloudScaleAndTranslate', 'PointcloudScaleAndTranslate', ([], {}), '()\n', (10368, 10370), False, 'from data_utils.data_util import PointcloudScaleAndTranslate\n'), ((731, 758), 'os.path.isdir', 'os.path.isdir', (['"""logs_train"""'], {}), "('logs_train')\n", (744, 758), False, 'import os\n'), ((769, 791), 'os.mkdir', 'os.mkdir', (['"""logs_train"""'], {}), "('logs_train')\n", (777, 791), False, 'import os\n'), ((1088, 1122), 'csv.writer', 'csv.writer', (['logfile'], {'delimiter': '""","""'}), "(logfile, delimiter=',')\n", (1098, 1122), False, 'import csv\n'), ((1901, 1929), 'os.path.isdir', 'os.path.isdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (1914, 1929), False, 'import os\n'), ((1940, 1963), 'os.mkdir', 'os.mkdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (1948, 1963), False, 'import os\n'), ((1976, 2050), 'os.path.isdir', 'os.path.isdir', (["('checkpoints/%s_%s_%s' % (args.data, args.model, args.name))"], {}), "('checkpoints/%s_%s_%s' % (args.data, args.model, args.name))\n", (1989, 2050), False, 'import os\n'), ((2061, 2130), 'os.mkdir', 'os.mkdir', (["('checkpoints/%s_%s_%s' % (args.data, args.model, args.name))"], {}), "('checkpoints/%s_%s_%s' % (args.data, args.model, args.name))\n", (2069, 2130), False, 'import os\n'), ((2517, 2608), 'torch.save', 'torch.save', (['state', "('checkpoints/%s_%s_%s/best.pth' % (args.data, args.model, args.name))"], {}), "(state, 'checkpoints/%s_%s_%s/best.pth' % (args.data, args.model,\n args.name))\n", (2527, 2608), False, 'import torch\n'), ((3078, 3104), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (3091, 3104), True, 'import torch.nn.functional 
as F\n'), ((3189, 3234), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['pred', 'gold'], {'reduction': '"""mean"""'}), "(pred, gold, reduction='mean')\n", (3204, 3234), True, 'import torch.nn.functional as F\n'), ((6475, 6499), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (6489, 6499), False, 'import random\n'), ((6848, 6866), 'models.dgcnn.DGCNN', 'DGCNN', (['num_classes'], {}), '(num_classes)\n', (6853, 6866), False, 'from models.dgcnn import DGCNN\n'), ((6918, 6940), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (6933, 6940), True, 'import torch.nn as nn\n'), ((7109, 7182), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer', '(250)'], {'eta_min': '(0.001)'}), '(optimizer, 250, eta_min=0.001)\n', (7151, 7182), False, 'import torch\n'), ((8104, 8171), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(20)', 'gamma': '(0.5)'}), '(optimizer, step_size=20, gamma=0.5)\n', (8135, 8171), False, 'import torch\n'), ((8276, 8324), 'torch.load', 'torch.load', (["('./checkpoints/%s.pth' % args.resume)"], {}), "('./checkpoints/%s.pth' % args.resume)\n", (8286, 8324), False, 'import torch\n'), ((8371, 8399), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (8388, 8399), False, 'import torch\n'), ((9297, 9325), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9314, 9325), False, 'import torch\n'), ((10810, 10910), 'utils.log_row', 'log_row', (['logname', "['Epoch', 'Train Loss', 'Train Acc', 'Test Loss', 'Test Acc', 'learning Rate']"], {}), "(logname, ['Epoch', 'Train Loss', 'Train Acc', 'Test Loss',\n 'Test Acc', 'learning Rate'])\n", (10817, 10910), False, 'from utils import progress_bar, log_row\n'), ((965, 987), 'utils.log_row', 'log_row', (['logname', "['']"], {}), "(logname, [''])\n", (972, 987), False, 'from utils import progress_bar, log_row\n'), ((1001, 1023), 'utils.log_row', 'log_row', (['logname', "['']"], {}), "(logname, [''])\n", (1008, 1023), False, 'from utils import progress_bar, log_row\n'), ((3562, 3624), 'models.rscnn_utils.pointnet2_utils.furthest_point_sample', 'pointnet2_utils.furthest_point_sample', (['points', 'args.num_points'], {}), '(points, args.num_points)\n', (3599, 3624), True, 'import models.rscnn_utils.pointnet2_utils as pointnet2_utils\n'), ((6730, 6755), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6753, 6755), False, 'import torch\n'), ((7326, 7374), 'models.pointnet.PointNetCls', 'PointNetCls', (['num_classes', 'args.feature_transform'], {}), '(num_classes, args.feature_transform)\n', (7337, 7374), False, 'from models.pointnet import PointNetCls, feature_transform_regularizer\n'), ((9005, 9078), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer', '(250)'], {'eta_min': '(0.001)'}), '(optimizer, 250, eta_min=0.001)\n', (9047, 9078), False, 'import torch\n'), ((9120, 9187), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(20)', 'gamma': '(0.5)'}), '(optimizer, step_size=20, gamma=0.5)\n', (9151, 9187), False, 'import torch\n'), ((11280, 11315), 'data_utils.data_util.PointcloudScaleAndTranslate', 'PointcloudScaleAndTranslate', (['points'], {}), '(points)\n', (11307, 11315), False, 'from data_utils.data_util import PointcloudScaleAndTranslate\n'), ((11789, 11806), 'numpy.random.rand', 
'np.random.rand', (['(1)'], {}), '(1)\n', (11803, 11806), True, 'import numpy as np\n'), ((2927, 2949), 'torch.zeros_like', 'torch.zeros_like', (['pred'], {}), '(pred)\n', (2943, 2949), False, 'import torch\n'), ((7483, 7511), 'models.pointnet2.PointNet2ClsMsg', 'PointNet2ClsMsg', (['num_classes'], {}), '(num_classes)\n', (7498, 7511), False, 'from models.pointnet2 import PointNet2ClsMsg\n'), ((7571, 7593), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (7586, 7593), True, 'import torch.nn as nn\n'), ((11392, 11454), 'models.rscnn_utils.pointnet2_utils.furthest_point_sample', 'pointnet2_utils.furthest_point_sample', (['points', 'args.num_points'], {}), '(points, args.num_points)\n', (11429, 11454), True, 'import models.rscnn_utils.pointnet2_utils as pointnet2_utils\n'), ((11886, 11922), 'numpy.random.beta', 'np.random.beta', (['args.beta', 'args.beta'], {}), '(args.beta, args.beta)\n', (11900, 11922), True, 'import numpy as np\n'), ((12145, 12168), 'torch.zeros', 'torch.zeros', (['B', '(1024)', '(3)'], {}), '(B, 1024, 3)\n', (12156, 12168), False, 'import torch\n'), ((12196, 12219), 'torch.zeros', 'torch.zeros', (['B', '(1024)', '(3)'], {}), '(B, 1024, 3)\n', (12207, 12219), False, 'import torch\n'), ((12247, 12270), 'torch.zeros', 'torch.zeros', (['B', '(1024)', '(3)'], {}), '(B, 1024, 3)\n', (12258, 12270), False, 'import torch\n'), ((12543, 12558), 'emd_module.emdModule', 'emd.emdModule', ([], {}), '()\n', (12556, 12558), True, 'import emd_module as emd\n'), ((7782, 7800), 'models.rscnn.RSCNN', 'RSCNN', (['num_classes'], {}), '(num_classes)\n', (7787, 7800), False, 'from models.rscnn import RSCNN\n'), ((7860, 7882), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (7875, 7882), True, 'import torch.nn as nn\n'), ((12921, 12969), 'numpy.random.choice', 'np.random.choice', (['(1024)', 'B'], {'replace': '(False)', 'p': 'None'}), '(1024, B, replace=False, p=None)\n', (12937, 12969), True, 'import numpy as np\n'), ((14012, 14053), 'models.pointnet.feature_transform_regularizer', 'feature_transform_regularizer', (['trans_feat'], {}), '(trans_feat)\n', (14041, 14053), False, 'from models.pointnet import PointNetCls, feature_transform_regularizer\n'), ((14929, 14948), 'numpy.argmax', 'np.argmax', (['acc_list'], {}), '(acc_list)\n', (14938, 14948), True, 'import numpy as np\n'), ((11508, 11565), 'numpy.random.choice', 'np.random.choice', (['args.num_points', 'args.num_points', '(False)'], {}), '(args.num_points, args.num_points, False)\n', (11524, 11565), True, 'import numpy as np\n'), ((11993, 12010), 'torch.randperm', 'torch.randperm', (['B'], {}), '(B)\n', (12007, 12010), False, 'import torch\n')]
|
from __future__ import print_function, division
import torch
import os
import pandas as pd
from skimage import io, transform
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import sklearn
import sklearn.metrics as sklm
import csv
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import pickle
import random
from shutil import copyfile
from shutil import rmtree
import torchvision
use_gpu = torch.cuda.is_available()
print("We see GPU:")
print(use_gpu)
print("Let's use", torch.cuda.device_count(), "GPUs!")
from PIL import Image
import CXRDataset as CXR
import Eval as E
from importlib import reload
reload(CXR)
reload(E)
def checkpoint(model_ft, best_acc, best_loss, epoch,PRED_LABEL,LR,RESULT_PATH):
"""
save checkpoint
args:
model_ft: torchvision model
best_acc: best accuracy achieved so far in training
best_loss: best loss achieved so far in training
epoch: last epoch of training
PRED_LABEL: what we're predicting; expect format ["Pneumonia"] or ["Pneumonia","Opacity"]... etc
LR: learning rate
RESULT_PATH: path to save this to
returns:
nothing (saves file)
"""
# Save checkpoint.
print('Saving..')
state = {
'model_ft': model_ft,
'best_acc': best_acc,
'best_loss': best_loss,
'epoch': epoch,
'rng_state': torch.get_rng_state(),
'LR':LR
}
torch.save(state, RESULT_PATH+'checkpoint_'+PRED_LABEL)
def train_model(model, criterion, optimizer, LR, num_epochs=5,dataloaders="x",dataset_sizes="x", PRED_LABEL="x", start_epoch=1,MULTILABEL=True,FOLD_OVERRIDE="",TRAIN_FILTER="",RESULT_PATH="results/",MULTICLASS=False):
"""
performs torchvision model training
args:
model: model to fine tune
criterion: pytorch optimization criteria
optimizer: pytorch optimizer
LR: learning rate
num_epochs: stop after this many epochs
dataloaders: torchvision dataloader
dataset_sizes: length of train/val datasets
PRED_LABEL: targets we're predicting in list format ["PNA","Opacity"] etc
start_epoch: in case of loading saved model; not currently used
MULTILABEL: should be removed - always True - everything is trained using multilabel list format now even single labels ["Pneumonia"]
FOLD_OVERRIDE: columns of scalars with train/val/test split
TRAIN_FILTER: list of data we're training on, used for labeling results
        RESULT_PATH: path at which results are saved; recommend leaving the default to use with other scripts
MULTICLASS: if training on single multiclass n>2 target; currently only implemented for single multiclass target.
returns:
model: trained torchvision model
best_epoch: epoch on which best model was achieved
"""
since = time.time()
best_acc = 0.0
best_loss=999999
best_epoch=-1
last_train_acc=-1
last_train_loss=-1
for epoch in range(start_epoch,num_epochs+1):
print('Epoch {}/{}'.format(epoch, num_epochs))
print('-' * 10)
#small_data flag used to decide on how to decay
small_data=False
if dataset_sizes['train']<=10000: small_data=True
iter_at_lr=0
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train(True) # Set model to training mode
else:
model.train(False) # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
i=0
total_done=0
for data in dataloaders[phase]:
i+=1
# get the inputs
inputs, labels = data
batch_size= inputs.shape[0]
if use_gpu:
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
#needed for multilabel training which uses different loss and expects floats
if not MULTICLASS:
labels = labels.float()
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
if MULTICLASS: # need to fix this for multilabel
running_corrects += torch.sum(preds == labels.long().data)
running_loss += loss.data[0]*batch_size
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
if phase=='train':
last_train_acc=epoch_acc
last_train_loss=epoch_loss
print(phase+' epoch {}:loss {:.4f} acc: {:.4f} with data size {}'.format(
epoch, epoch_loss, epoch_acc, dataset_sizes[phase]))
#decay if not best
if phase == 'val' and epoch_loss > best_loss:
#normally we just decay if no improvement in val loss in epoch, but not ideal with small datasets
#so 'small_data' condition that insists on 5 passes at lr if dataset size <=10k
if small_data==False or iter_at_lr>=4:
print("decay loss from "+str(LR)+" to "+str(LR/10)+" as not seeing improvement in val loss")
LR = LR / 10
#making a new optimizer zeros out momentum
optimizer = optim.SGD(filter(lambda p:p.requires_grad, model.parameters()), lr = LR, momentum=0.9, weight_decay=1e-4)
iter_at_lr=0
else:
iter_at_lr+=1
#below is used for labeling results
trainstring = str(TRAIN_FILTER).replace("_","").replace("[","").replace(",","_").replace("]","").replace(" ","").replace("'","")
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = model.state_dict()
if phase == 'val' and epoch_loss < best_loss:
best_loss = epoch_loss
best_epoch = epoch
#save stuff if we have a best model
write_label = str(PRED_LABEL)
write_label = "Multilabel"
checkpoint(model, best_acc, best_loss, epoch, RESULT_PATH+write_label+"_train_"+trainstring+"_"+FOLD_OVERRIDE,LR,RESULT_PATH=RESULT_PATH)
write_label = "multilabel_" + trainstring + "_" + FOLD_OVERRIDE
if phase== 'val':
with open(RESULT_PATH+"log_train_"+write_label, 'a') as logfile:
logwriter = csv.writer(logfile, delimiter=',')
logwriter.writerow([write_label, epoch, last_train_loss, last_train_acc, epoch_loss, epoch_acc])
total_done+=batch_size
if(total_done % (100*batch_size) == 0): print("completed "+str(total_done)+" so far in epoch")
        # quit if no improvement for 3 epochs (15 epochs for small datasets)
if ((epoch-best_epoch)>=3 and small_data==False) or ((epoch-best_epoch)>=15 and small_data==True):
print("no improvement in 3 epochs, break")
break
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights and return them
checkpoint_best = torch.load(RESULT_PATH+"checkpoint_results/Multilabel_train_"+trainstring+"_"+FOLD_OVERRIDE)
model = checkpoint_best['model_ft']
return model, best_epoch
def give_mean_var(LABEL_PATH, PRED_LABEL,BALANCE_MODE, TRAIN_FILTER,MULTILABEL, FOLD_OVERRIDE, BATCH_SIZE):
"""
args:
LABEL_PATH: path to the scalars file
PRED_LABEL: list of targets we're predicting
BALANCE_MODE: deprecated
TRAIN_FILTER: list of dataset we're training on, needed for dataloader
MULTILABEL: deprecated, always true
FOLD_OVERRIDE: train/val/test split column name in scalars
BATCH_SIZE: passes batch for dataloader
returns:
mean: rgb channel means np array 3x1
        std: rgb channel std np array 3x1
"""
#create set of val transforms
data_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Scale(224),
transforms.CenterCrop(224), #needed to get 224x224
transforms.ToTensor()
])
#make dataloader
transformed_dataset =CXR.CXRDataset(csv_file=LABEL_PATH, fold='train', PRED_LABEL=PRED_LABEL, transform=data_transform, balance_classes=BALANCE_MODE, FILTER=TRAIN_FILTER,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,SAMPLE=0,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH="ignore",MULTICLASS=MULTICLASS)
dataloader = torch.utils.data.DataLoader(transformed_dataset, batch_size=BATCH_SIZE, shuffle=True,num_workers=8)#, sampler=sampler)
#calculate some means and st devs
x = len(dataloader)*BATCH_SIZE
print("len dataloader for give_mean_var:"+str(x))
means = np.empty((x,3))
stds = np.empty((x,3))
means[:,:]=np.nan
stds[:,:]=np.nan
    # use a running row counter so each batch fills new rows instead of
    # overwriting rows 0..batch_size-1
    row = 0
    for data in dataloader:
        inputs, _ = data
        inputs=inputs.numpy()
        for i in range(0,inputs.shape[0]):
            for j in range(0,3):
                means[row,j]=np.mean(inputs[i,j,:,:])
                stds[row,j]=np.std(inputs[i,j,:,:])
            row += 1
mean = np.zeros(3)
std = np.zeros(3)
for j in range (0,3):
x=np.nanmean(means[:,j])
mean[j]=x
x=np.nanmean(stds[:,j])
std[j]=x
return mean, std
def train_one(PRED_LABEL,LR,BATCH_SIZE,LABEL_PATH,RESULT_PATH,BALANCE_MODE,FREEZE_LAYERS, NUM_EPOCHS,TRAIN_FILTER,PRED_FILTER,MULTILABEL,FOLD_OVERRIDE,TRAIN_SAMPLE,PRED_SAMPLE,CUSTOM_NORMALIZE, NET_TYPE,MULTICLASS,OUTPUT1024):
"""
make dataloader, instantiates torchvision model, calls training function, returns results
args:
PRED_LABEL: list of labels to predict ["pna","opacity"] etc
LR: learning rate
BATCH_SIZE: batch size for dataloader; too big and won't fit on gpu
LABEL_PATH: path to scalars
RESULT_PATH: path to write results
BALANCE_MODE: deprecated
FREEZE_LAYERS: deprecated
NUM_EPOCHS: max number of epochs to train for; may quit sooner if not improving
TRAIN_FILTER: list of sites we're training on
PRED_FILTER: list of sites we're predicting
MULTILABEL: deprecated
FOLD_OVERRIDE: train/val/test split column in scalars
TRAIN_SAMPLE: sample training data to get limited sample (for testing)
PRED_SAMPLE: sample test data to get limited sample (for testing)
CUSTOM_NORMALIZE: use normalization mean, std based on data not imagenet
NET_TYPE: deprecated
MULTICLASS: train to single multiclass n>2 target (not implemented for multilabel multiclass)
returns:
x: df with predictions
"""
    # if we were using custom normalization and not imagenet, do this; it didn't help vs imagenet normalization
if CUSTOM_NORMALIZE:
mean, std = give_mean_var(LABEL_PATH, PRED_LABEL,BALANCE_MODE, TRAIN_FILTER,MULTILABEL, FOLD_OVERRIDE, BATCH_SIZE)
print(mean)
print(std)
elif not CUSTOM_NORMALIZE:
mean= [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
#torchvision transforms
df = pd.read_csv(LABEL_PATH,index_col=0)
data_transforms = {
'train': transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Scale(224), #244
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean, std)
]),
'val': transforms.Compose([
transforms.Scale(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean, std)
]),
}
#make dataloader
transformed_datasets={}
transformed_datasets['train'] =CXR.CXRDataset(csv_file=LABEL_PATH, fold='train', PRED_LABEL=PRED_LABEL, transform=data_transforms['train'], balance_classes=BALANCE_MODE, FILTER=TRAIN_FILTER,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,SAMPLE=TRAIN_SAMPLE,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH=RESULT_PATH,MULTICLASS=MULTICLASS)
transformed_datasets['val'] =CXR.CXRDataset(csv_file=LABEL_PATH, fold='val', PRED_LABEL=PRED_LABEL, transform=data_transforms['val'], balance_classes=BALANCE_MODE, FILTER=TRAIN_FILTER,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,SAMPLE=TRAIN_SAMPLE,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH=RESULT_PATH,MULTICLASS=MULTICLASS)
dataloaders={}
dataloaders['train'] = torch.utils.data.DataLoader(transformed_datasets['train'], batch_size=BATCH_SIZE, shuffle=True,num_workers=8)#, sampler=sampler)
dataloaders['val'] = torch.utils.data.DataLoader(transformed_datasets['val'], batch_size=BATCH_SIZE, shuffle=True, num_workers=8)
#instantiate model
if not use_gpu: raise ValueError("Error, requires GPU")
print('==> Building model..')
if(NET_TYPE=="densenet121"):
print("using densenet121")
model_ft = models.densenet121(pretrained=True)
num_ftrs = model_ft.classifier.in_features
if(OUTPUT1024==False):
print("adding bottleneck=15 features")
            # if multiclass, needs a different output structure than regular training to a list of binary targets
if not MULTICLASS:
model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, 15), nn.Linear(15, len(PRED_LABEL)),nn.Sigmoid())
elif MULTICLASS:
model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, 15), nn.Linear(15, transformed_datasets['train'].n_class))
print("n_class "+str(transformed_datasets['train'].n_class))
elif(OUTPUT1024==True):
print("NOT adding bottleneck=15 features")
            # if multiclass, needs a different output structure than regular training to a list of binary targets
if not MULTICLASS:
model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, len(PRED_LABEL)),nn.Sigmoid())
elif MULTICLASS:
model_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, transformed_datasets['train'].n_class))
print("n_class "+str(transformed_datasets['train'].n_class))
start_epoch = 1
print("loading model_ft onto gpu")
model_ft = model_ft.cuda()
if NET_TYPE=="densenet121":
if(MULTICLASS==False):
criterion = nn.BCELoss()
else:
criterion = nn.CrossEntropyLoss() # only using this for predicting site, department
optimizer_ft = optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()), lr=LR, momentum=0.9, weight_decay=1e-4)
dataset_sizes = {x: len(transformed_datasets[x]) for x in ['train', 'val']}
#train
model_ft , best_epoch = train_model(model_ft, criterion, optimizer_ft, LR, num_epochs=NUM_EPOCHS,dataloaders=dataloaders,dataset_sizes=dataset_sizes, PRED_LABEL=PRED_LABEL, start_epoch=start_epoch,MULTILABEL=MULTILABEL,FOLD_OVERRIDE=FOLD_OVERRIDE,TRAIN_FILTER=TRAIN_FILTER,RESULT_PATH=RESULT_PATH,MULTICLASS=MULTICLASS)
#make preds on test
x = E.make_pred_multilabel(data_transforms,model_ft,"pred_"+str(PRED_LABEL), LABEL_PATH,RESULT_PATH,PRED_LABEL,TRAIN_FILTER,PRED_FILTER,FOLD_OVERRIDE,PRED_SAMPLE,MULTICLASS,OUTPUT1024)
return x
def train_cnn(LABEL_PATH, PRED_LABEL,TRAIN_FILTER,PRED_FILTER,BALANCE_MODE,FOLD_OVERRIDE,MULTICLASS=False,OUTPUT1024=False):
"""
main function that gets called externally to train
LABEL_PATH: path to scalars
PRED_LABEL: targets to predict; list ["pna","opacity"] etc as in scalars file
TRAIN_FILTER: list of sites we're training to ["nih","msh"]
PRED_FILTER: list of sites we're predicting ["nih","iu"]
BALANCE_MODE: deprecated
FOLD_OVERRIDE: the column of scalars we use for train val test split
MULTICLASS: train to single multiclass n>2 target
returns:
y: results
"""
NUM_EPOCHS=50
BATCH_SIZE=16
LR = 0.01
RESULT_PATH="results/"
FREEZE_LAYERS="no"
MULTILABEL = not isinstance(PRED_LABEL, str)
TRAIN_SAMPLE=0
PRED_SAMPLE =0
CUSTOM_NORMALIZE=False
NET_TYPE="densenet121"
if not os.path.exists(RESULT_PATH):
os.makedirs(RESULT_PATH)
if not os.path.exists(RESULT_PATH+"checkpoint_results/"):
os.makedirs(RESULT_PATH+"checkpoint_results/")
x = train_one(PRED_LABEL,LR,BATCH_SIZE,LABEL_PATH,RESULT_PATH,BALANCE_MODE,"layer4",NUM_EPOCHS,TRAIN_FILTER,PRED_FILTER,MULTILABEL,FOLD_OVERRIDE,TRAIN_SAMPLE,PRED_SAMPLE,CUSTOM_NORMALIZE, NET_TYPE, MULTICLASS,OUTPUT1024)
y = pd.read_csv(LABEL_PATH)
y=y[['img_id']]
y = y.merge(x,on="img_id",how="inner")
trainlist=str(TRAIN_FILTER).replace("_","").replace("[","").replace(",","_").replace("]","").replace(" ","").replace("'","")
y.to_csv(RESULT_PATH+"preds_train_"+trainlist+"_"+FOLD_OVERRIDE+".csv",index=False)
return y
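# Example call (illustrative sketch; the scalars path, fold column and BALANCE_MODE
# value below are assumptions, not values this script defines):
#
#   results = train_cnn(LABEL_PATH="scalars.csv",
#                       PRED_LABEL=["Pneumonia", "Opacity"],
#                       TRAIN_FILTER=["nih", "msh"],
#                       PRED_FILTER=["nih", "iu"],
#                       BALANCE_MODE=None,
#                       FOLD_OVERRIDE="fold")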
|
[
"pandas.read_csv",
"numpy.empty",
"torch.get_rng_state",
"torch.cuda.device_count",
"numpy.mean",
"torchvision.transforms.Normalize",
"numpy.nanmean",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"torchvision.transforms.Scale",
"numpy.std",
"CXRDataset.CXRDataset",
"torch.load",
"os.path.exists",
"torch.nn.Linear",
"torchvision.transforms.CenterCrop",
"csv.writer",
"torchvision.transforms.RandomHorizontalFlip",
"torch.autograd.Variable",
"torch.cuda.is_available",
"torch.max",
"torch.nn.Sigmoid",
"os.makedirs",
"torchvision.models.densenet121",
"numpy.zeros",
"torch.nn.CrossEntropyLoss",
"time.time",
"torch.save",
"importlib.reload",
"torchvision.transforms.ToTensor"
] |
[((673, 698), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (696, 698), False, 'import torch\n'), ((885, 896), 'importlib.reload', 'reload', (['CXR'], {}), '(CXR)\n', (891, 896), False, 'from importlib import reload\n'), ((897, 906), 'importlib.reload', 'reload', (['E'], {}), '(E)\n', (903, 906), False, 'from importlib import reload\n'), ((754, 779), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (777, 779), False, 'import torch\n'), ((1701, 1760), 'torch.save', 'torch.save', (['state', "(RESULT_PATH + 'checkpoint_' + PRED_LABEL)"], {}), "(state, RESULT_PATH + 'checkpoint_' + PRED_LABEL)\n", (1711, 1760), False, 'import torch\n'), ((3155, 3166), 'time.time', 'time.time', ([], {}), '()\n', (3164, 3166), False, 'import time\n'), ((8337, 8441), 'torch.load', 'torch.load', (["(RESULT_PATH + 'checkpoint_results/Multilabel_train_' + trainstring + '_' +\n FOLD_OVERRIDE)"], {}), "(RESULT_PATH + 'checkpoint_results/Multilabel_train_' +\n trainstring + '_' + FOLD_OVERRIDE)\n", (8347, 8441), False, 'import torch\n'), ((9386, 9687), 'CXRDataset.CXRDataset', 'CXR.CXRDataset', ([], {'csv_file': 'LABEL_PATH', 'fold': '"""train"""', 'PRED_LABEL': 'PRED_LABEL', 'transform': 'data_transform', 'balance_classes': 'BALANCE_MODE', 'FILTER': 'TRAIN_FILTER', 'MULTILABEL': 'MULTILABEL', 'FOLD_OVERRIDE': 'FOLD_OVERRIDE', 'SAMPLE': '(0)', 'TRAIN_FILTER': 'TRAIN_FILTER', 'RESULT_PATH': '"""ignore"""', 'MULTICLASS': 'MULTICLASS'}), "(csv_file=LABEL_PATH, fold='train', PRED_LABEL=PRED_LABEL,\n transform=data_transform, balance_classes=BALANCE_MODE, FILTER=\n TRAIN_FILTER, MULTILABEL=MULTILABEL, FOLD_OVERRIDE=FOLD_OVERRIDE,\n SAMPLE=0, TRAIN_FILTER=TRAIN_FILTER, RESULT_PATH='ignore', MULTICLASS=\n MULTICLASS)\n", (9400, 9687), True, 'import CXRDataset as CXR\n'), ((9686, 9790), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['transformed_dataset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'num_workers': '(8)'}), '(transformed_dataset, batch_size=BATCH_SIZE,\n shuffle=True, num_workers=8)\n', (9713, 9790), False, 'import torch\n'), ((9968, 9984), 'numpy.empty', 'np.empty', (['(x, 3)'], {}), '((x, 3))\n', (9976, 9984), True, 'import numpy as np\n'), ((9996, 10012), 'numpy.empty', 'np.empty', (['(x, 3)'], {}), '((x, 3))\n', (10004, 10012), True, 'import numpy as np\n'), ((10350, 10361), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (10358, 10361), True, 'import numpy as np\n'), ((10372, 10383), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (10380, 10383), True, 'import numpy as np\n'), ((12400, 12436), 'pandas.read_csv', 'pd.read_csv', (['LABEL_PATH'], {'index_col': '(0)'}), '(LABEL_PATH, index_col=0)\n', (12411, 12436), True, 'import pandas as pd\n'), ((13037, 13360), 'CXRDataset.CXRDataset', 'CXR.CXRDataset', ([], {'csv_file': 'LABEL_PATH', 'fold': '"""train"""', 'PRED_LABEL': 'PRED_LABEL', 'transform': "data_transforms['train']", 'balance_classes': 'BALANCE_MODE', 'FILTER': 'TRAIN_FILTER', 'MULTILABEL': 'MULTILABEL', 'FOLD_OVERRIDE': 'FOLD_OVERRIDE', 'SAMPLE': 'TRAIN_SAMPLE', 'TRAIN_FILTER': 'TRAIN_FILTER', 'RESULT_PATH': 'RESULT_PATH', 'MULTICLASS': 'MULTICLASS'}), "(csv_file=LABEL_PATH, fold='train', PRED_LABEL=PRED_LABEL,\n transform=data_transforms['train'], balance_classes=BALANCE_MODE,\n FILTER=TRAIN_FILTER, MULTILABEL=MULTILABEL, FOLD_OVERRIDE=FOLD_OVERRIDE,\n SAMPLE=TRAIN_SAMPLE, TRAIN_FILTER=TRAIN_FILTER, RESULT_PATH=RESULT_PATH,\n MULTICLASS=MULTICLASS)\n", (13051, 13360), True, 'import CXRDataset as CXR\n'), ((13372, 13692), 
'CXRDataset.CXRDataset', 'CXR.CXRDataset', ([], {'csv_file': 'LABEL_PATH', 'fold': '"""val"""', 'PRED_LABEL': 'PRED_LABEL', 'transform': "data_transforms['val']", 'balance_classes': 'BALANCE_MODE', 'FILTER': 'TRAIN_FILTER', 'MULTILABEL': 'MULTILABEL', 'FOLD_OVERRIDE': 'FOLD_OVERRIDE', 'SAMPLE': 'TRAIN_SAMPLE', 'TRAIN_FILTER': 'TRAIN_FILTER', 'RESULT_PATH': 'RESULT_PATH', 'MULTICLASS': 'MULTICLASS'}), "(csv_file=LABEL_PATH, fold='val', PRED_LABEL=PRED_LABEL,\n transform=data_transforms['val'], balance_classes=BALANCE_MODE, FILTER=\n TRAIN_FILTER, MULTILABEL=MULTILABEL, FOLD_OVERRIDE=FOLD_OVERRIDE,\n SAMPLE=TRAIN_SAMPLE, TRAIN_FILTER=TRAIN_FILTER, RESULT_PATH=RESULT_PATH,\n MULTICLASS=MULTICLASS)\n", (13386, 13692), True, 'import CXRDataset as CXR\n'), ((13716, 13831), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["transformed_datasets['train']"], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'num_workers': '(8)'}), "(transformed_datasets['train'], batch_size=\n BATCH_SIZE, shuffle=True, num_workers=8)\n", (13743, 13831), False, 'import torch\n'), ((13870, 13983), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["transformed_datasets['val']"], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'num_workers': '(8)'}), "(transformed_datasets['val'], batch_size=\n BATCH_SIZE, shuffle=True, num_workers=8)\n", (13897, 13983), False, 'import torch\n'), ((17905, 17928), 'pandas.read_csv', 'pd.read_csv', (['LABEL_PATH'], {}), '(LABEL_PATH)\n', (17916, 17928), True, 'import pandas as pd\n'), ((1642, 1663), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (1661, 1663), False, 'import torch\n'), ((8093, 8104), 'time.time', 'time.time', ([], {}), '()\n', (8102, 8104), False, 'import time\n'), ((10425, 10448), 'numpy.nanmean', 'np.nanmean', (['means[:, j]'], {}), '(means[:, j])\n', (10435, 10448), True, 'import numpy as np\n'), ((10476, 10498), 'numpy.nanmean', 'np.nanmean', (['stds[:, j]'], {}), '(stds[:, j])\n', (10486, 10498), True, 'import numpy as np\n'), ((14222, 14257), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (14240, 14257), False, 'from torchvision import datasets, models, transforms\n'), ((17483, 17510), 'os.path.exists', 'os.path.exists', (['RESULT_PATH'], {}), '(RESULT_PATH)\n', (17497, 17510), False, 'import os\n'), ((17520, 17544), 'os.makedirs', 'os.makedirs', (['RESULT_PATH'], {}), '(RESULT_PATH)\n', (17531, 17544), False, 'import os\n'), ((17556, 17607), 'os.path.exists', 'os.path.exists', (["(RESULT_PATH + 'checkpoint_results/')"], {}), "(RESULT_PATH + 'checkpoint_results/')\n", (17570, 17607), False, 'import os\n'), ((17615, 17663), 'os.makedirs', 'os.makedirs', (["(RESULT_PATH + 'checkpoint_results/')"], {}), "(RESULT_PATH + 'checkpoint_results/')\n", (17626, 17663), False, 'import os\n'), ((9172, 9205), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (9203, 9205), False, 'from torchvision import datasets, models, transforms\n'), ((9220, 9241), 'torchvision.transforms.Scale', 'transforms.Scale', (['(224)'], {}), '(224)\n', (9236, 9241), False, 'from torchvision import datasets, models, transforms\n'), ((9251, 9277), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (9272, 9277), False, 'from torchvision import datasets, models, transforms\n'), ((9310, 9331), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (9329, 9331), False, 'from torchvision 
import datasets, models, transforms\n'), ((15637, 15649), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (15647, 15649), True, 'import torch.nn as nn\n'), ((15688, 15709), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (15707, 15709), True, 'import torch.nn as nn\n'), ((4764, 4790), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (4773, 4790), False, 'import torch\n'), ((10246, 10273), 'numpy.mean', 'np.mean', (['inputs[i, j, :, :]'], {}), '(inputs[i, j, :, :])\n', (10253, 10273), True, 'import numpy as np\n'), ((10297, 10323), 'numpy.std', 'np.std', (['inputs[i, j, :, :]'], {}), '(inputs[i, j, :, :])\n', (10303, 10323), True, 'import numpy as np\n'), ((12510, 12543), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (12541, 12543), False, 'from torchvision import datasets, models, transforms\n'), ((12566, 12587), 'torchvision.transforms.Scale', 'transforms.Scale', (['(224)'], {}), '(224)\n', (12582, 12587), False, 'from torchvision import datasets, models, transforms\n'), ((12606, 12632), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (12627, 12632), False, 'from torchvision import datasets, models, transforms\n'), ((12646, 12667), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (12665, 12667), False, 'from torchvision import datasets, models, transforms\n'), ((12681, 12712), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (12701, 12712), False, 'from torchvision import datasets, models, transforms\n'), ((12773, 12794), 'torchvision.transforms.Scale', 'transforms.Scale', (['(224)'], {}), '(224)\n', (12789, 12794), False, 'from torchvision import datasets, models, transforms\n'), ((12808, 12834), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (12829, 12834), False, 'from torchvision import datasets, models, transforms\n'), ((12848, 12869), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (12867, 12869), False, 'from torchvision import datasets, models, transforms\n'), ((12883, 12914), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (12903, 12914), False, 'from torchvision import datasets, models, transforms\n'), ((7540, 7574), 'csv.writer', 'csv.writer', (['logfile'], {'delimiter': '""","""'}), "(logfile, delimiter=',')\n", (7550, 7574), False, 'import csv\n'), ((14584, 14607), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', '(15)'], {}), '(num_ftrs, 15)\n', (14593, 14607), True, 'import torch.nn as nn\n'), ((14640, 14652), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (14650, 14652), True, 'import torch.nn as nn\n'), ((4366, 4382), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (4374, 4382), False, 'from torch.autograd import Variable\n'), ((4384, 4400), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (4392, 4400), False, 'from torch.autograd import Variable\n'), ((14735, 14758), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', '(15)'], {}), '(num_ftrs, 15)\n', (14744, 14758), True, 'import torch.nn as nn\n'), ((14760, 14812), 'torch.nn.Linear', 'nn.Linear', (['(15)', "transformed_datasets['train'].n_class"], {}), "(15, transformed_datasets['train'].n_class)\n", (14769, 14812), True, 'import torch.nn as nn\n'), ((15208, 15220), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (15218, 
15220), True, 'import torch.nn as nn\n'), ((15303, 15361), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', "transformed_datasets['train'].n_class"], {}), "(num_ftrs, transformed_datasets['train'].n_class)\n", (15312, 15361), True, 'import torch.nn as nn\n')]
|
# coding: utf-8
#
# This code is part of lattpy.
#
# Copyright (c) 2021, <NAME>
#
# This code is licensed under the MIT License. The copyright notice in the
# LICENSE file in the root directory and this permission notice shall
# be included in all copies or substantial portions of the Software.
"""Contains miscellaneous utility methods."""
import logging
from typing import Iterable, List, Sequence, Optional, Union, Tuple
import time
import numpy as np
__all__ = [
"ArrayLike", "logger", "LatticeError", "ConfigurationError", "SiteOccupiedError",
"NoAtomsError", "NoBaseNeighborsError", "NotBuiltError", "Timer",
"min_dtype", "chain", "create_lookup_table", "frmt_num", "frmt_bytes", "frmt_time",
]
# define type for numpy `array_like` types
ArrayLike = Union[int, float, Iterable, np.ndarray]
# Configure package logger
logger = logging.getLogger("lattpy")
_CH = logging.StreamHandler()
_CH.setLevel(logging.DEBUG)
_FRMT_STR = "[%(asctime)s] %(levelname)-8s - %(name)-15s - %(funcName)-25s - %(message)s"
_FRMT = logging.Formatter(_FRMT_STR, datefmt='%H:%M:%S')
_CH.setFormatter(_FRMT) # Add formatter to stream handler
logger.addHandler(_CH) # Add stream handler to package logger
logger.setLevel(logging.WARNING) # Set initial logging level
class LatticeError(Exception):
pass
class ConfigurationError(LatticeError):
@property
def msg(self):
return self.args[0]
@property
def hint(self):
return self.args[1]
def __str__(self):
msg, hint = self.args
if hint:
msg += f" ({hint})"
return msg
class SiteOccupiedError(ConfigurationError):
def __init__(self, atom, pos):
super().__init__(f"Can't add {atom} to lattice, position {pos} already occupied!")
class NoAtomsError(ConfigurationError):
def __init__(self):
super().__init__("lattice doesn't contain any atoms",
"use 'add_atom' to add an 'Atom'-object")
class NoBaseNeighborsError(ConfigurationError):
def __init__(self):
msg = "base neighbors not configured"
hint = "call 'set_num_neighbors' after adding atoms or " \
"use the 'neighbors' keyword of 'add_atom'"
super().__init__(msg, hint)
class NotBuiltError(ConfigurationError):
def __init__(self):
msg = "lattice has not been built"
hint = "use the 'build' method to construct a finite size lattice model"
super().__init__(msg, hint)
def create_lookup_table(array: ArrayLike, dtype: Optional[Union[str, np.dtype]] = np.uint8) \
-> Tuple[np.ndarray, np.ndarray]:
"""Converts the given array to an array of indices linked to the unique values.
Parameters
----------
array : array_like
dtype : int or np.dtype, optional
Optional data-type for storing the indices of the unique values.
By default `np.uint8` is used, since it is assumed that the
input-array has only a few unique values.
Returns
-------
values : np.ndarray
        The unique values occurring in the input-array.
indices : np.ndarray
The corresponding indices in the same shape as the input-array.
"""
values = np.sort(np.unique(array))
indices = np.zeros_like(array, dtype=dtype)
for i, x in enumerate(values):
mask = array == x
indices[mask] = i
return values, indices
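# Example (illustrative): for array [2, 2, 7, 2, 7] this returns
# values = [2, 7] and indices = [0, 0, 1, 0, 1].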
def min_dtype(a: Union[int, float, np.ndarray, Iterable],
signed: Optional[bool] = True) -> np.dtype:
"""Returns the minimum required dtype to store the given values.
Parameters
----------
a : array_like
One or more values for determining the dtype.
Should contain the maximal expected values.
signed : bool, optional
If `True` the dtype is forced to be signed. The default is `True`.
Returns
-------
dtype : dtype
The required dtype.
"""
if signed:
a = -np.max(np.abs(a))-1
else:
amin, amax = np.min(a), np.max(a)
if amin < 0:
a = - amax - 1 if abs(amin) <= amax else amin
else:
a = amax
return np.dtype(np.min_scalar_type(a))
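# Usage sketch (illustrative): min_dtype(200) returns dtype('int16'), because the
# signed default maps 200 to -201, which no longer fits into int8, while
# min_dtype(200, signed=False) returns dtype('uint8').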
def chain(items: Sequence, cycle: bool = False) -> List:
"""Creates a chain between items
Parameters
----------
items : Sequence
items to join to chain
cycle : bool, optional
cycle to the start of the chain if True, default: False
Returns
-------
chain: list
chain of items
Example
-------
>>> print(chain(["x", "y", "z"]))
[['x', 'y'], ['y', 'z']]
>>> print(chain(["x", "y", "z"], True))
[['x', 'y'], ['y', 'z'], ['z', 'x']]
"""
result = list()
for i in range(len(items)-1):
result.append([items[i], items[i+1]])
if cycle:
result.append([items[-1], items[0]])
return result
def frmt_num(num: float, dec: Optional[int] = 1, unit: Optional[str] = '',
div: Optional[float] = 1000.) -> str:
"""Returns a formatted string of a number.
Parameters
----------
num : float
The number to format.
dec : int, optional
Number of decimals. The default is 1.
unit : str, optional
        Optional unit suffix. By default no unit-string is used.
div : float, optional
The divider used for units. The default is 1000.
Returns
-------
num_str: str
"""
for prefix in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < div:
return f"{num:.{dec}f}{prefix}{unit}"
num /= div
return f"{num:.{dec}f}Y{unit}"
def frmt_bytes(num: float, dec: Optional[int] = 1) -> str:
"""Returns a formatted string of the number of bytes."""
return frmt_num(num, dec, unit="iB", div=1024)
def frmt_time(seconds: float, short: bool = False, width: int = 0) -> str:
"""Returns a formated string for a given time in seconds.
Parameters
----------
seconds : float
Time value to format
short : bool, optional
Flag if short representation should be used.
width : int, optional
Optional minimum length of the returned string.
Returns
-------
time_str: str
"""
string = "00:00"
# short time string
if short:
if seconds > 0:
mins, secs = divmod(seconds, 60)
if mins > 60:
hours, mins = divmod(mins, 60)
string = f"{hours:02.0f}:{mins:02.0f}h"
else:
string = f"{mins:02.0f}:{secs:02.0f}"
# Full time strings
else:
if seconds < 1e-3:
nanos = 1e6 * seconds
string = f"{nanos:.0f}\u03BCs"
elif seconds < 1:
millis = 1000 * seconds
string = f"{millis:.1f}ms"
elif seconds < 60:
string = f"{seconds:.1f}s"
else:
mins, seconds = divmod(seconds, 60)
if mins < 60:
string = f"{mins:.0f}:{seconds:04.1f}min"
else:
hours, mins = divmod(mins, 60)
string = f"{hours:.0f}:{mins:02.0f}:{seconds:02.0f}h"
if width > 0:
string = f"{string:>{width}}"
return string
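# Usage sketch (illustrative): frmt_time(0.5) -> '500.0ms', frmt_time(90) -> '1:30.0min'
# and frmt_time(90, short=True) -> '01:30'.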
class Timer:
"""Timer object for easy time measuring."""
__slots__ = ["_time", "_t0"]
def __init__(self, method=None):
self._time = method or time.perf_counter
self._t0 = 0
self.start()
@property
def seconds(self) -> float:
"""Returns the time since the timer has been started in seconds."""
return self.time() - self._t0
@property
def millis(self) -> float:
"""Returns the time since the timer has been started in milliseconds."""
return 1000 * (self.time() - self._t0)
def time(self) -> float:
"""Returns the current time as a timestamp."""
return self._time()
def start(self) -> None:
"""Start the timer."""
self._t0 = self._time()
def eta(self, progress: float) -> float:
"""Approximates the time left for a task.
Parameters
----------
progress: float
Progress fraction of task.
Returns
-------
eta: float
Approximation of time left.
"""
if not progress:
return 0.0
        return (1 / progress - 1) * self.seconds
def strfrmt(self, short: bool = False, width: int = 0) -> str:
"""Formats the time since the timer has been started."""
return frmt_time(self.seconds, short, width)
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self.strfrmt(short=True)})'
def __str__(self) -> str:
return self.strfrmt(short=True)
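# Usage sketch (illustrative): time a block of work with the Timer class.
# timer = Timer()          # starts measuring immediately
# ...                      # do some work
# print(timer.strfrmt(short=True))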
|
[
"numpy.zeros_like",
"numpy.abs",
"logging.StreamHandler",
"logging.getLogger",
"logging.Formatter",
"numpy.min_scalar_type",
"numpy.min",
"numpy.max",
"numpy.unique"
] |
[((851, 878), 'logging.getLogger', 'logging.getLogger', (['"""lattpy"""'], {}), "('lattpy')\n", (868, 878), False, 'import logging\n'), ((886, 909), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (907, 909), False, 'import logging\n'), ((1038, 1086), 'logging.Formatter', 'logging.Formatter', (['_FRMT_STR'], {'datefmt': '"""%H:%M:%S"""'}), "(_FRMT_STR, datefmt='%H:%M:%S')\n", (1055, 1086), False, 'import logging\n'), ((3290, 3323), 'numpy.zeros_like', 'np.zeros_like', (['array'], {'dtype': 'dtype'}), '(array, dtype=dtype)\n', (3303, 3323), True, 'import numpy as np\n'), ((3258, 3274), 'numpy.unique', 'np.unique', (['array'], {}), '(array)\n', (3267, 3274), True, 'import numpy as np\n'), ((4197, 4218), 'numpy.min_scalar_type', 'np.min_scalar_type', (['a'], {}), '(a)\n', (4215, 4218), True, 'import numpy as np\n'), ((4042, 4051), 'numpy.min', 'np.min', (['a'], {}), '(a)\n', (4048, 4051), True, 'import numpy as np\n'), ((4053, 4062), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (4059, 4062), True, 'import numpy as np\n'), ((3998, 4007), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (4004, 4007), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import skimage.io as io
import argparse
import os
import sys
import time
# Allow python3 to search for modules outside of this directory
sys.path.append("../")
from models.skip import skip3d
from volumetocube import write_bin_from_array
from volumetocube import write_obj_from_array
import binvox_rw
from tools.Ops import radon
from tools.Ops import tvloss
from tools.Ops import tvloss3d
from tools.Ops import load_binvox
from tools.Ops import volume_proj
from tools.Ops import rotate_volume
from tools.Ops import inv_rotate_volume
from skimage.measure import compare_ssim as ssim
parser = argparse.ArgumentParser(description='Reconstruction using deep prior.')
parser.add_argument("-m", "--method", type=str, help="Prior to be used in the reconstruction (deep | tv | carve)", default="deep")
parser.add_argument("-b", "--binvox", type=str, help="Path to the binvox file.", default="../data/bunny.binvox")
parser.add_argument("-p", "--projection", type=str, help="Type of projection to be used (depth | binary)", default="depth")
parser.add_argument("-n", "--nproj", type=int, help="Number of projections.", default=8)
parser.add_argument("-s", "--sigma", type=float, help="Amount of variance in the gaussian noise.", default=0.0)
parser.add_argument("-k", "--kappa", type=float, help="Dispersion rate of Von Mises noise.", default=4.0)
parser.add_argument("-v", "--viewWeight", type=float, help="Weight of the viewpoint regularization.", default=1.0)
def add_gaussian_noise(img, sigma=1.0):
randv = torch.randn(*(img.shape)).cuda()
return img + sigma*randv
if __name__ == '__main__':
args = parser.parse_args()
use_tv = args.method == 'tv'
use_dp = args.method == 'deep'
kappa = args.kappa
view_weight = args.viewWeight
binvoxname = args.binvox.split('/')[-1].split('.')[0]
fullname = "prob_{}_{}_{}_{}_{}_vw{}_k{}".format(binvoxname, args.method, args.projection,
args.nproj, args.sigma, view_weight, kappa)
input_depth = 3
input_noise = torch.randn(1, input_depth, 128, 128, 128).cuda()
net = skip3d(
input_depth, 1,
num_channels_down = [8, 16, 32, 64, 128],
num_channels_up = [8, 16, 32, 64, 128],
num_channels_skip = [0, 0, 0, 4, 4],
upsample_mode='trilinear',
need_sigmoid=True, need_bias=True, pad='zero', act_fun='LeakyReLU')
net.cuda()
net(input_noise)
out_volume = torch.zeros(1, 1, 128, 128, 128).cuda()
out_volume.requires_grad = True
nviews = args.nproj
method = args.projection
views = torch.FloatTensor(np.random.rand(nviews, 3) * 2*np.pi)
noisy_views = torch.FloatTensor(np.random.vonmises(views, kappa, size=(nviews,3)))
pred_views = nn.Parameter(noisy_views.detach().clone())
if use_dp:
optimizer = optim.Adam(list(net.parameters()) + [pred_views], lr=0.01)
elif use_tv:
optimizer = optim.Adam([out_volume] + [pred_views], lr=0.01)
padder = nn.ConstantPad3d(10, 0.0)
volume = padder(load_binvox(args.binvox).cuda())
gtprojs = volume_proj(volume, method=method, views=views).cuda()
noisyprojs = gtprojs.detach().clone()
noisyprojs.requires_grad = False
results_dir = os.path.join("results", fullname)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
mse = nn.L1Loss()
sigmoid = nn.Sigmoid()
#Space carving
if args.method == 'carve':
gtprojs = volume_proj(volume, method=method, views=views).cuda()
gtprojs.requires_grad = False
noisyprojs = gtprojs.clone()
noisyprojs.requires_grad = False
carve = torch.ones(*(volume.size())).cuda()
for i in range(nviews):
carve = rotate_volume(carve, x=noisy_views[i,0], y=noisy_views[i,1], z=noisy_views[i,2])
p = gtprojs[:, :, i] < 1e-2
coords = np.argwhere(p)
carve[coords[0, :], :, coords[1, :]] = 0.0
carve = inv_rotate_volume(carve, x=noisy_views[i,0], y=noisy_views[i,1], z=noisy_views[i,2])
projs = volume_proj(carve, method=method, views=views).cuda()
for i in range(noisyprojs.size()[2]):
io.imsave(results_dir+"/carve{}.png".format(i), torch.clamp(projs[:, :, i], -1, 1))
io.imsave(results_dir+"/carvegt{}.png".format(i), torch.clamp(gtprojs[:, :, i], -1, 1))
write_bin_from_array("results/{}/data.npy".format(fullname), carve.data.cpu().numpy())
exit(0)
gt_curve = []
noisygt_curve = []
n_iter = 500
out_rec = None
out_projs = None
pred_views_log = []
noisy_views_log = []
gt_views_log = []
print('EXPERIMENT {}'.format(fullname))
for i in range(n_iter):
optimizer.zero_grad()
if use_dp:
out_rec = net(input_noise)[0, 0, :, :, :]
out_projs = volume_proj(out_rec, method=method, views=pred_views)
loss = mse(out_projs, noisyprojs)
loss -= view_weight * torch.cos(pred_views - noisy_views).mean().cuda()
elif use_tv:
out_rec = sigmoid(out_volume[0, 0, :, :, :])
out_projs = volume_proj(out_rec, method=method, views=views)
loss = mse(out_projs, noisyprojs) + tvloss3d(out_rec, weight=1e-7)#
else:
raise ValueError("Unkown method")
pred_views_log.append(pred_views.data.detach().cpu().numpy())
noisy_views_log.append(noisy_views.data.detach().cpu().numpy())
gt_views_log.append(views.data.detach().cpu().numpy())
predloss = mse(out_projs, noisyprojs)
gtloss = torch.abs(out_projs - gtprojs).mean()
noisyloss = torch.abs(noisyprojs - gtprojs).mean()
print("\r({}/{}) Pred->Noisy: {} | Pred->GT: {} | Noisy->GT: {}".format(
str(i).zfill(4), n_iter, predloss.item(), gtloss.item(), noisyloss.item()),
gt_curve.append(gtloss.item()))
noisygt_curve.append(noisyloss.item())
loss.backward()
optimizer.step()
results_dir = os.path.join("results", fullname)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
write_bin_from_array("results/{}/databin.npy".format(fullname),
out_rec.data.cpu().detach().numpy())
np.save("results/{}/data.npy".format(fullname),
out_rec.data.cpu().detach().numpy())
for i in range(out_projs.size()[2]):
print("Saved {}".format("results/{}/proj{}".format(fullname, i)))
io.imsave("results/{}/proj{}.png".format(fullname, i),
out_projs.data.cpu().detach().numpy()[:, :, i])
io.imsave("results/{}/gt{}.png".format(fullname, i),
torch.clamp(gtprojs[:, :, i], -1, 1).data.cpu().detach().numpy())
np.save("results/{}/gtviews.npy".format(fullname), np.array(gt_views_log))
np.save("results/{}/noisyviews.npy".format(fullname), np.array(noisy_views_log))
np.save("results/{}/predviews.npy".format(fullname), np.array(pred_views_log))
|
[
"argparse.ArgumentParser",
"torch.randn",
"torch.cos",
"tools.Ops.volume_proj",
"os.path.join",
"tools.Ops.rotate_volume",
"sys.path.append",
"os.path.exists",
"torch.zeros",
"tools.Ops.load_binvox",
"tools.Ops.tvloss3d",
"torch.nn.ConstantPad3d",
"torch.optim.Adam",
"torch.clamp",
"numpy.argwhere",
"torch.nn.Sigmoid",
"os.makedirs",
"torch.nn.L1Loss",
"tools.Ops.inv_rotate_volume",
"numpy.random.vonmises",
"numpy.array",
"models.skip.skip3d",
"numpy.random.rand",
"torch.abs"
] |
[((257, 279), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (272, 279), False, 'import sys\n'), ((714, 785), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Reconstruciton using deep prior."""'}), "(description='Reconstruciton using deep prior.')\n", (737, 785), False, 'import argparse\n'), ((2186, 2429), 'models.skip.skip3d', 'skip3d', (['input_depth', '(1)'], {'num_channels_down': '[8, 16, 32, 64, 128]', 'num_channels_up': '[8, 16, 32, 64, 128]', 'num_channels_skip': '[0, 0, 0, 4, 4]', 'upsample_mode': '"""trilinear"""', 'need_sigmoid': '(True)', 'need_bias': '(True)', 'pad': '"""zero"""', 'act_fun': '"""LeakyReLU"""'}), "(input_depth, 1, num_channels_down=[8, 16, 32, 64, 128],\n num_channels_up=[8, 16, 32, 64, 128], num_channels_skip=[0, 0, 0, 4, 4],\n upsample_mode='trilinear', need_sigmoid=True, need_bias=True, pad=\n 'zero', act_fun='LeakyReLU')\n", (2192, 2429), False, 'from models.skip import skip3d\n'), ((3075, 3100), 'torch.nn.ConstantPad3d', 'nn.ConstantPad3d', (['(10)', '(0.0)'], {}), '(10, 0.0)\n', (3091, 3100), True, 'import torch.nn as nn\n'), ((3326, 3359), 'os.path.join', 'os.path.join', (['"""results"""', 'fullname'], {}), "('results', fullname)\n", (3338, 3359), False, 'import os\n'), ((3444, 3455), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (3453, 3455), True, 'import torch.nn as nn\n'), ((3470, 3482), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3480, 3482), True, 'import torch.nn as nn\n'), ((6112, 6145), 'os.path.join', 'os.path.join', (['"""results"""', 'fullname'], {}), "('results', fullname)\n", (6124, 6145), False, 'import os\n'), ((2765, 2815), 'numpy.random.vonmises', 'np.random.vonmises', (['views', 'kappa'], {'size': '(nviews, 3)'}), '(views, kappa, size=(nviews, 3))\n', (2783, 2815), True, 'import numpy as np\n'), ((3371, 3398), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (3385, 3398), False, 'import os\n'), ((3408, 3432), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (3419, 3432), False, 'import os\n'), ((6157, 6184), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (6171, 6184), False, 'import os\n'), ((6194, 6218), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (6205, 6218), False, 'import os\n'), ((6889, 6911), 'numpy.array', 'np.array', (['gt_views_log'], {}), '(gt_views_log)\n', (6897, 6911), True, 'import numpy as np\n'), ((6971, 6996), 'numpy.array', 'np.array', (['noisy_views_log'], {}), '(noisy_views_log)\n', (6979, 6996), True, 'import numpy as np\n'), ((7055, 7079), 'numpy.array', 'np.array', (['pred_views_log'], {}), '(pred_views_log)\n', (7063, 7079), True, 'import numpy as np\n'), ((1630, 1653), 'torch.randn', 'torch.randn', (['*img.shape'], {}), '(*img.shape)\n', (1641, 1653), False, 'import torch\n'), ((2126, 2168), 'torch.randn', 'torch.randn', (['(1)', 'input_depth', '(128)', '(128)', '(128)'], {}), '(1, input_depth, 128, 128, 128)\n', (2137, 2168), False, 'import torch\n'), ((2531, 2563), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', '(128)', '(128)', '(128)'], {}), '(1, 1, 128, 128, 128)\n', (2542, 2563), False, 'import torch\n'), ((3008, 3056), 'torch.optim.Adam', 'optim.Adam', (['([out_volume] + [pred_views])'], {'lr': '(0.01)'}), '([out_volume] + [pred_views], lr=0.01)\n', (3018, 3056), True, 'import torch.optim as optim\n'), ((3173, 3220), 'tools.Ops.volume_proj', 'volume_proj', (['volume'], {'method': 'method', 'views': 'views'}), '(volume, 
method=method, views=views)\n', (3184, 3220), False, 'from tools.Ops import volume_proj\n'), ((3828, 3916), 'tools.Ops.rotate_volume', 'rotate_volume', (['carve'], {'x': 'noisy_views[i, 0]', 'y': 'noisy_views[i, 1]', 'z': 'noisy_views[i, 2]'}), '(carve, x=noisy_views[i, 0], y=noisy_views[i, 1], z=\n noisy_views[i, 2])\n', (3841, 3916), False, 'from tools.Ops import rotate_volume\n'), ((3970, 3984), 'numpy.argwhere', 'np.argwhere', (['p'], {}), '(p)\n', (3981, 3984), True, 'import numpy as np\n'), ((4060, 4152), 'tools.Ops.inv_rotate_volume', 'inv_rotate_volume', (['carve'], {'x': 'noisy_views[i, 0]', 'y': 'noisy_views[i, 1]', 'z': 'noisy_views[i, 2]'}), '(carve, x=noisy_views[i, 0], y=noisy_views[i, 1], z=\n noisy_views[i, 2])\n', (4077, 4152), False, 'from tools.Ops import inv_rotate_volume\n'), ((4941, 4994), 'tools.Ops.volume_proj', 'volume_proj', (['out_rec'], {'method': 'method', 'views': 'pred_views'}), '(out_rec, method=method, views=pred_views)\n', (4952, 4994), False, 'from tools.Ops import volume_proj\n'), ((2692, 2717), 'numpy.random.rand', 'np.random.rand', (['nviews', '(3)'], {}), '(nviews, 3)\n', (2706, 2717), True, 'import numpy as np\n'), ((3126, 3150), 'tools.Ops.load_binvox', 'load_binvox', (['args.binvox'], {}), '(args.binvox)\n', (3137, 3150), False, 'from tools.Ops import load_binvox\n'), ((3552, 3599), 'tools.Ops.volume_proj', 'volume_proj', (['volume'], {'method': 'method', 'views': 'views'}), '(volume, method=method, views=views)\n', (3563, 3599), False, 'from tools.Ops import volume_proj\n'), ((4162, 4208), 'tools.Ops.volume_proj', 'volume_proj', (['carve'], {'method': 'method', 'views': 'views'}), '(carve, method=method, views=views)\n', (4173, 4208), False, 'from tools.Ops import volume_proj\n'), ((4322, 4356), 'torch.clamp', 'torch.clamp', (['projs[:, :, i]', '(-1)', '(1)'], {}), '(projs[:, :, i], -1, 1)\n', (4333, 4356), False, 'import torch\n'), ((4420, 4456), 'torch.clamp', 'torch.clamp', (['gtprojs[:, :, i]', '(-1)', '(1)'], {}), '(gtprojs[:, :, i], -1, 1)\n', (4431, 4456), False, 'import torch\n'), ((5227, 5275), 'tools.Ops.volume_proj', 'volume_proj', (['out_rec'], {'method': 'method', 'views': 'views'}), '(out_rec, method=method, views=views)\n', (5238, 5275), False, 'from tools.Ops import volume_proj\n'), ((5686, 5716), 'torch.abs', 'torch.abs', (['(out_projs - gtprojs)'], {}), '(out_projs - gtprojs)\n', (5695, 5716), False, 'import torch\n'), ((5744, 5775), 'torch.abs', 'torch.abs', (['(noisyprojs - gtprojs)'], {}), '(noisyprojs - gtprojs)\n', (5753, 5775), False, 'import torch\n'), ((5324, 5355), 'tools.Ops.tvloss3d', 'tvloss3d', (['out_rec'], {'weight': '(1e-07)'}), '(out_rec, weight=1e-07)\n', (5332, 5355), False, 'from tools.Ops import tvloss3d\n'), ((5075, 5110), 'torch.cos', 'torch.cos', (['(pred_views - noisy_views)'], {}), '(pred_views - noisy_views)\n', (5084, 5110), False, 'import torch\n'), ((6767, 6803), 'torch.clamp', 'torch.clamp', (['gtprojs[:, :, i]', '(-1)', '(1)'], {}), '(gtprojs[:, :, i], -1, 1)\n', (6778, 6803), False, 'import torch\n')]
|
#!/usr/bin/env python
import argparse
from ast import parse
import numpy as np
import bitstring
def to_fixed(x, args):
F = args.fixed_point_bits[0] - args.fixed_point_bits[1]
return np.round(x * 2**F)
def to_float(x, args):
F = args.fixed_point_bits[0] - args.fixed_point_bits[1]
return x * 2**-F
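# Worked example (illustrative): with --fixed_point_bits 16 6 the fractional width is
# F = 16 - 6 = 10, so to_fixed maps 1.5 to round(1.5 * 2**10) = 1536 and
# to_float maps 1536 back to 1536 * 2**-10 = 1.5.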
def vals_to_hex(vals, args):
nb = args.fixed_point_bits[0] # bits of one value
tnb = len(vals) * nb # bitwidth of N values
assert args.link_bitwidth >= tnb, \
"Attempting to pack {} x {} bits ({} bits) into {}".format(
len(vals), nb, tnb, args.link_bitwidth)
pad = args.link_bitwidth - tnb
fmt_string = 'uint:{},'.format(pad) + 'int:{},'.format(nb) * len(vals)
return bitstring.pack(fmt_string, 0, *vals).hex
def row_to_hex(row, args):
# compute the packing factor
pf = args.link_bitwidth // args.fixed_point_bits[0] if args.pack_links else 1
N = int(np.ceil(len(row) / pf))
y = np.array([vals_to_hex(np.flip(row[i*pf:(i+1)*pf]), args)
for i in range(N)])
return y
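# Worked example (illustrative): with a 64-bit link, 16-bit values and --pack_links set,
# the packing factor is pf = 64 // 16 = 4, so a row of 10 values is packed into
# N = ceil(10 / 4) = 3 link words (the last word carries zero padding).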
def main():
parser = argparse.ArgumentParser(
description='Parse numpy file to FPGA testing for MP7 board')
parser.add_argument('--board_name', type=str,
help='A string representing the name of the board')
parser.add_argument('--generate_float_from_fix', type=bool,
help='Specify if you want to obtain the .npy file '
                             'obtained via to_float(to_fix(input)). It is useful as '
                             'input to avoid casting mismatches when using '
'hls_model.predict()')
parser.add_argument('--link_range', choices=range(0,96), type=int, nargs=2,
metavar=('start','stop'), help='Start and stop values '
'for the range related to links')
parser.add_argument('--link_bitwidth', choices=[32,64], type=int,
help='Word size in bits of each link')
parser.add_argument('--invalid_rows', type=int,
                        help='The number of invalid rows that will be generated at '
                             'the beginning of the test')
parser.add_argument('--fixed_point_bits', type=int, nargs=2,
metavar=('word_bits', 'integer_bits'),
                        help='The total number of bits (word_bits) and the number of '
                             'integer bits of the fixed-point representation')
parser.add_argument('--pack_links', type=bool, help='Whether to pack '
'multiple values into one link where possible')
parser.add_argument('--link_map', type=int, nargs='*', help='The link map')
parser.add_argument('--input_data_path', type=str,
help='The path of the numpy file containing data in '
'floating point')
parser.add_argument('--output_data_path', type=str,
help='The path of the produced .txt file containing '
'data in fixed point')
args = parser.parse_args()
fp32_data = np.load(args.input_data_path)
# packing factor
pf = args.link_bitwidth // args.fixed_point_bits[0] if args.pack_links else 1
link_width = args.link_range[1] - args.link_range[0] + 1
if fp32_data.shape[1] > link_width * pf:
raise Exception(
'Trying to fit {} features into {} links with packing factor {}'
.format(fp32_data.shape[1],link_width,pf))
if fp32_data.shape[0] > 1024:
        print('The system expects no more than 1024 rows; the original file will '
'be truncated, keeping the first 1024 rows')
fp32_data = fp32_data[:1024]
output_file = open(args.output_data_path, 'w')
fixed_data = to_fixed(fp32_data, args)
if args.generate_float_from_fix:
float_back_data = to_float(fixed_data, args)
np.save('float_from_fix.npy', float_back_data)
fixed_data = np.array([row_to_hex(row, args) for row in fixed_data])
link_map = list(range(args.link_range[0], args.link_range[1] + 1)) \
if args.link_map is None else args.link_map
assert len(link_map) == link_width, \
'Link map length ({}) does not match link range ({})'.format(
len(link_map), link_width)
# board section
board_string = 'Board {}\n'.format(args.board_name)
# channel section
quad_chan_string = ' Quad/Chan : '
for i in link_map:
if args.link_bitwidth == 32:
quad_chan_string += ' q{:02d}c{} '.format(i // 4, i % 4)
else:
quad_chan_string += ' q{:02d}c{} '.format(i // 4, i % 4)
if i != link_map[-1]:
quad_chan_string += ' '
else:
quad_chan_string += '\n'
# link section
link_string = ' Link : '
for i in link_map:
if args.link_bitwidth == 32:
link_string += ' {:02d} '.format(i)
else:
link_string += ' {:02d} '.format(i)
if i != link_map[-1]:
link_string += ' '
else:
link_string += '\n'
# frame section
frame_start = 'Frame {:04d} : '
frame = ''
if args.invalid_rows > 0:
for i in range(0,args.invalid_rows):
frame += frame_start.format(i)
for j in range(0, args.link_range[1] - args.link_range[0] + 1):
if args.link_bitwidth == 32:
frame += '0v00000000'
else:
frame += '0v0000000000000000'
if j != args.link_range[1] - args.link_range[0]:
frame += ' '
else:
frame += '\n'
dummy_cols = args.link_range[1] - args.link_range[0] - fp32_data.shape[1]
for i, v in enumerate(fixed_data):
frame += frame_start.format(i + args.invalid_rows)
for j, k in enumerate(v):
frame += '1v' + k
frame += ' '
if dummy_cols > 0:
for s in range(0, dummy_cols + 1):
if args.link_bitwidth == 32:
frame += '0v00000000'
else:
frame += '0v0000000000000000'
if s + j != args.link_range[1] - args.link_range[0] - 1:
frame += ' '
frame += '\n'
l = [board_string, quad_chan_string, link_string, frame]
output_file.writelines(l)
output_file.close()
    print('Done!')
if __name__ == '__main__':
    main()
|
[
"numpy.load",
"numpy.save",
"numpy.flip",
"argparse.ArgumentParser",
"bitstring.pack",
"numpy.round"
] |
[((191, 211), 'numpy.round', 'np.round', (['(x * 2 ** F)'], {}), '(x * 2 ** F)\n', (199, 211), True, 'import numpy as np\n'), ((1090, 1180), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse numpy file to FPGA testing for MP7 board"""'}), "(description=\n 'Parse numpy file to FPGA testing for MP7 board')\n", (1113, 1180), False, 'import argparse\n'), ((3071, 3100), 'numpy.load', 'np.load', (['args.input_data_path'], {}), '(args.input_data_path)\n', (3078, 3100), True, 'import numpy as np\n'), ((728, 764), 'bitstring.pack', 'bitstring.pack', (['fmt_string', '(0)', '*vals'], {}), '(fmt_string, 0, *vals)\n', (742, 764), False, 'import bitstring\n'), ((3873, 3919), 'numpy.save', 'np.save', (['"""float_from_fix.npy"""', 'float_back_data'], {}), "('float_from_fix.npy', float_back_data)\n", (3880, 3919), True, 'import numpy as np\n'), ((978, 1011), 'numpy.flip', 'np.flip', (['row[i * pf:(i + 1) * pf]'], {}), '(row[i * pf:(i + 1) * pf])\n', (985, 1011), True, 'import numpy as np\n')]
|
"""
@brief This file holds classes that store information about the endoscopic images that are
going to be segmented.
@author <NAME> (<EMAIL>).
@date 25 Aug 2015.
"""
import numpy as np
import os
import cv2
# import caffe
import sys
import random
import matplotlib.pyplot as plt
import scipy.misc
import imutils
import geometry
import tempfile
import PIL
import skimage.morphology
import skimage.util
# My imports
import common
#
# @brief Perlin noise generator.
#
def perlin(x, y, seed):
# Permutation table
np.random.seed(seed)
p = np.arange(256, dtype = int)
np.random.shuffle(p)
p = np.stack([p, p]).flatten()
# Coordinates of the top-left
xi = x.astype(int)
yi = y.astype(int)
# Internal coordinates
xf = x - xi
yf = y - yi
# Fade factors
u = fade(xf)
v = fade(yf)
# Noise components
n00 = gradient(p[p[xi] + yi], xf, yf)
n01 = gradient(p[p[xi] + yi + 1], xf, yf - 1)
n11 = gradient(p[p[xi + 1] + yi + 1], xf - 1, yf - 1)
n10 = gradient(p[p[xi + 1] + yi], xf - 1, yf)
# Combine noises
x1 = lerp(n00, n10, u)
x2 = lerp(n01, n11, u)
return lerp(x1, x2, v)
#
# @brief Linear interpolation.
#
def lerp(a, b, x):
return a + x * (b - a)
#
# @brief 6t^5 - 15t^4 + 10t^3.
#
def fade(t):
return 6 * t**5 - 15 * t**4 + 10 * t**3
#
# @brief Grad converts h to the right gradient vector and return the dot product with (x, y).
#
def gradient(h, x, y):
vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])
g = vectors[h % 4]
return g[:,:, 0] * x + g[:,:, 1] * y
#
# @brief Perlin noise image.
#
# @param[in] height Height of the output image.
# @param[in] width Width of the output image.
# @param[in] scale Higher means smaller blobs.
# @param[in] minval The minimum noise value.
# @param[in] maxval The maximum noise value.
#
# @returns a 2D numpy array.
def perlin2d_smooth(height, width, scale, minval = 0.0, maxval = 1.0, seed = None):
lin_y = np.linspace(0, scale, height, endpoint = False)
lin_x = np.linspace(0, scale, width, endpoint = False)
x, y = np.meshgrid(lin_x, lin_y)
arr = perlin(x, y, seed)
min_arr = np.min(arr)
max_arr = np.max(arr)
arr = (np.clip((arr - min_arr) / (max_arr - min_arr), 0.0, 1.0) * (maxval - minval)) + minval
return arr
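#
# Usage sketch (illustrative): a 256x256 smooth noise field in [0.2, 0.8].
# noise = perlin2d_smooth(256, 256, scale = 4.0, minval = 0.2, maxval = 0.8, seed = 0)
#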
#
# @brief Given a set of 2D points it finds the center and radius of a circle.
#
# @param[in] x List or array of x coordinates.
# @param[in] y List or array of y coordinates.
#
# @returns (xc, yc, radius).
def fit_circle(x, y):
# Coordinates of the barycenter
x_m = np.mean(x)
y_m = np.mean(y)
# Calculation of the reduced coordinates
u = x - x_m
v = y - y_m
# Linear system defining the center in reduced coordinates (uc, vc):
# Suu * uc + Suv * vc = (Suuu + Suvv)/2
# Suv * uc + Svv * vc = (Suuv + Svvv)/2
Suv = np.sum(u*v)
Suu = np.sum(u**2)
Svv = np.sum(v**2)
Suuv = np.sum(u**2 * v)
Suvv = np.sum(u * v**2)
Suuu = np.sum(u**3)
Svvv = np.sum(v**3)
# Solving the linear system
A = np.array([ [ Suu, Suv ], [Suv, Svv]])
B = np.array([ Suuu + Suvv, Svvv + Suuv ])/2.0
uc, vc = np.linalg.solve(A, B)
xc_1 = x_m + uc
yc_1 = y_m + vc
# Calculation of all distances from the center (xc_1, yc_1)
Ri_1 = np.sqrt((x - xc_1) ** 2 + (y - yc_1) ** 2)
R_1 = np.mean(Ri_1)
residu_1 = np.sum((Ri_1-R_1) ** 2)
residu2_1 = np.sum((Ri_1 ** 2 - R_1 ** 2) ** 2)
return xc_1, yc_1, R_1
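#
# Usage sketch (illustrative): recover centre and radius from points sampled on a circle.
# t = np.linspace(0.0, 2.0 * np.pi, 100)
# xc, yc, r = fit_circle(10.0 + 5.0 * np.cos(t), -3.0 + 5.0 * np.sin(t))
# # xc ~ 10.0, yc ~ -3.0, r ~ 5.0
#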
#
# @brief Zero parameter Canny edge detector.
#
def auto_canny(image, sigma = 0.33):
v = np.median(image)
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edges = cv2.Canny(image, lower, upper)
return edges
#
# @brief Abstract image class. This is not meant to be instantiated and it refers to a general
# multidimensional image or label.
#
class CaffeinatedAbstract(object):
#
    # @brief Every image must have at least data and a name. We ensure that with this abstract
    #        constructor, which will be called by all the children.
#
# @param[in] raw_frame Multidimensional image, at least H x W.
# @param[in] name String with the name of the image. It can also be the frame number
# of a video, but it will be converted to string.
#
def __init__(self, raw_frame, name):
# Assert that the frame has data
if len(raw_frame.shape) <= 1 or raw_frame.shape[0] <= 0 or raw_frame.shape[1] <= 0:
raise RuntimeError('[CaffeinatedAbstract.__init__], the image provided ' \
'does not have data.')
# Assert that the name is valid
if not name:
raise ValueError('[CaffeinatedAbstract.__init__] Error, every caffeinated ' \
'abstract child must have a name.')
# Store attributes in class
self._raw_frame = raw_frame
self._name = str(name)
#
# @brief Access to a copy of the internal BGR image.
#
# @returns a copy of the internal frame, whatever it is, image or label.
def raw_copy(self):
return self._raw_frame.copy()
#
# @brief Saves image to file.
#
# @param[in] path Destination path.
# @param[in] flags Flags that will be passed to OpenCV.
#
def save(self, path, flags):
# Assert that the destination path does not exist
if common.path_exists(path):
raise ValueError('[CaffeinatedImage.save] Error, destination path ' \
+ str(path) + ' already exists.')
if flags:
return cv2.imwrite(path, self._raw_frame, flags)
else:
return cv2.imwrite(path, self._raw_frame)
#
# @brief Crops an image in a rectangular fashion, including both corner pixels in the image.
#
# @param[in] tlx Integer that represents the top left corner column.
# @param[in] tly Integer that represents the top left corner row.
# @param[in] brx Integer that represents the bottom right corner column.
# @param[in] bry Integer that represents the bottom right corner row.
#
# @returns nothing.
def crop(self, tlx, tly, brx, bry):
assert(isinstance(tlx, type(0)) and isinstance(tly, type(1)) and isinstance(brx, type(1)) \
and isinstance(bry, type(1)))
assert(tlx <= brx)
assert(tly <= bry)
self._raw_frame = self._raw_frame[tly:bry + 1, tlx:brx + 1]
def resize_to_width(self, new_w, interp):
self._raw_frame = CaffeinatedAbstract.resize_width(self._raw_frame, new_w, interp)
#
# @brief Convert binary mask into just the mask of its boundary.
#
# @param[in] mask Input mask.
# @param[in] thickness Thickness of the border.
#
# @returns the boundary mask.
@staticmethod
def mask2border(mask, thickness):
# Find the contour of the mask
cnts = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]
# Create a new image with just the contour
new_mask = np.zeros_like(mask)
new_mask = cv2.drawContours(new_mask, cnts, -1, 255, thickness)
return new_mask
#
# @brief Histogram equalisation (CLAHE).
#
# @param[in] im Input image.
# @param[in] clip_limit Contrast limit.
#
# @returns the equalised image.
@staticmethod
def clahe(im, clip_limit = 2.0):
lab = cv2.cvtColor(im, cv2.COLOR_BGR2Lab)
clahe_engine = cv2.createCLAHE(clipLimit = clip_limit, tileGridSize = (8, 8))
lab[:,:, 0] = clahe_engine.apply(lab[:,:, 0])
return cv2.cvtColor(lab, cv2.COLOR_Lab2BGR)
#
# @brief Flip left-right.
#
# @returns the flipped image.
@staticmethod
def fliplr(im):
return np.fliplr(im)
#
# @brief Flip up-down.
#
# @returns the flipped image.
@staticmethod
def flipud(im):
return np.flipud(im)
#
# @brief Thresholds a grayscale image.
#
# @param[in] img Input grayscale image.
    # @param[in] level  Values greater than this level will be set to maxval. Default value is 127.
    # @param[in] maxval The values greater than level will be set to maxval.
# Default value is 255.
#
# @returns the thresholded image.
@staticmethod
def bin_thresh(im, level = 127, maxval = 255):
assert(len(im.shape) == 2 or (len(im.shape) == 3 and im.shape[2] == 1))
_, thresh = cv2.threshold(np.squeeze(im), level, maxval, cv2.THRESH_BINARY)
return thresh
#
# @brief Random crop, both dimensions should be equal or smaller than the original size.
# @details If a list is given, all the images must be larger than the desired new height and
# width.
#
# @param[in] img Ndarray with the image, shape (height, width) or
# (height, width, channels).
# @param[in] new_height Height of the cropped image.
# @param[in] new_width Width of the cropped image.
#
# @returns a cropped patch.
@staticmethod
def random_crop(im, new_height, new_width):
assert(isinstance(im, np.ndarray))
assert(new_height > 0 and new_height <= im.shape[0])
assert(new_width > 0 and new_width <= im.shape[1])
# Choose random coordinates for crop
height_border = im.shape[0] - new_height
width_border = im.shape[1] - new_width
top_y = random.randint(0, height_border - 1) if height_border > 0 else 0
top_x = random.randint(0, width_border - 1) if width_border > 0 else 0
# Crop image
new_im = im[top_y:top_y + new_height, top_x:top_x + new_width].copy()
assert(new_im.shape[0] == new_height)
assert(new_im.shape[1] == new_width)
return new_im
#
    # @brief Performs a random crop. The same random scale is applied to height and width,
    #        so this function preserves the form factor.
#
# @param[in] img Input image, numpy array.
# @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be
# minimum half of the original.
#
# @returns the new image.
@staticmethod
def random_crop_factor(im, delta):
assert(isinstance(im, np.ndarray))
min_scale = 1.0 - delta
max_scale = 1.0
new_scale = random.uniform(min_scale, max_scale)
new_height = int(round(im.shape[0] * new_scale))
new_width = int(round(im.shape[1] * new_scale))
new_im = CaffeinatedAbstract.random_crop(im, new_height, new_width)
return new_im
#
    # @brief Performs a random crop. New height and width are decided independently, so this
# function changes the form factor.
#
# @param[in] img Input image, numpy array.
# @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be
# minimum half of the original.
#
# @returns the new image.
@staticmethod
def random_crop_no_factor(im, delta):
assert(isinstance(im, np.ndarray))
min_scale = 1.0 - delta
max_scale = 1.0
new_height = int(round(im.shape[0] * random.uniform(min_scale, max_scale)))
new_width = int(round(im.shape[1] * random.uniform(min_scale, max_scale)))
new_im = CaffeinatedAbstract.random_crop(im, new_height, new_width)
return new_im
#
# @brief Random crop of a list of images. The crops will be performed in different locations
# for the different images of the list, but all the output images will have the same
# size.
#
# @param[in] im_list List of images to be cropped.
# @param[in] new_height Height of the cropped image.
# @param[in] new_width Width of the cropped image.
#
# @returns a list of cropped images to the desired size.
@staticmethod
def random_crop_list(im_list, new_height, new_width):
assert(isinstance(im_list, list))
assert(len(im_list) > 0)
new_im_list = [ CaffeinatedAbstract.random_crop(im, new_height, new_width) \
for im in im_list ]
return new_im_list
#
# @brief Random crop all the images of the list in the same coordinates for all of them.
# All the input images MUST have the same size.
#
# @param[in] im_list List of images to be cropped.
# @param[in] new_height Height of the cropped image.
# @param[in] new_width Width of the cropped image.
#
# @returns a list of cropped images to the desired size.
@staticmethod
def random_crop_same_coord_list(im_list, new_height, new_width):
assert(isinstance(im_list, list))
assert(len(im_list) > 0)
# Choose random coordinates for crop
height_border = im_list[0].shape[0] - new_height
width_border = im_list[0].shape[1] - new_width
top_y = random.randint(0, height_border - 1) if height_border > 0 else 0
top_x = random.randint(0, width_border - 1) if width_border > 0 else 0
# Crop all the images in the list
new_im_list = [ im[top_y:top_y + new_height, top_x:top_x + new_width].copy() \
for im in im_list ]
return new_im_list
#
# @brief Random crop all the images of the list in the same coordinates for all of them.
# All the images MUST have the same size. The output images will have the same form
# factor.
#
# @param[in] im_list List of images to be cropped.
# @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be
# minimum half of the original.
#
# @returns a list of cropped images to the desired size.
@staticmethod
def random_crop_same_coord_list_factor(im_list, delta):
assert(isinstance(im_list, list))
assert(len(im_list) > 0)
# Get the dimensions of the new images
min_scale = 1.0 - delta
max_scale = 1.0
new_scale = random.uniform(min_scale, max_scale)
        new_height = int(round(im_list[0].shape[0] * new_scale))
        new_width = int(round(im_list[0].shape[1] * new_scale))
return CaffeinatedAbstract.random_crop_same_coord_list(im_list, new_height, new_width)
#
# @brief Random crop all the images of the list in the same coordinates for all of them.
# All the images MUST have the same size. The output images will not have the same
# form factor.
#
# @param[in] im_list List of images to be cropped.
# @param[in] delta Minimum factor of change, e.g. if 0.5 the new height and width will be
# minimum half of the original.
#
# @returns a list of cropped images to the desired size.
@staticmethod
def random_crop_same_coord_list_no_factor(im_list, delta):
assert(isinstance(im_list, list))
assert(len(im_list) > 0)
# Get the dimensions of the new images
min_scale = 1.0 - delta
max_scale = 1.0
new_height = int(round(im_list[0].shape[0] * random.uniform(min_scale, max_scale)))
new_width = int(round(im_list[0].shape[1] * random.uniform(min_scale, max_scale)))
return CaffeinatedAbstract.random_crop_same_coord_list(im_list, new_height, new_width)
#
# @brief Scale an image keeping original size, that is, the output image will have the
# size of the input.
#
# @details If the scale factor is smaller than 1.0, the output image will be padded.
# Otherwise it will be cropped.
#
# @param[in] im Input image or list of images.
# @param[in] scale_factor If 1.0, the image stays as it is.
# @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos.
# @param[in] boder_value Border value. Used when the image is downsized and padded.
# @param[in] clip_sides List of sides to crop out. Used only in case the scaling factor
# is lower than 1.0.
#
# @returns the scaled image.
@staticmethod
def scale_keeping_size(im, scale_factor, interp, border_value, clip_sides = None):
if clip_sides is None:
clip_sides = []
# Resize image to the desired new scale
new_im = CaffeinatedAbstract.resize_factor(im, scale_factor, interp)
# If the new image is larger, we crop it
if new_im.shape[0] > im.shape[0]:
new_im = CaffeinatedAbstract.crop_center(new_im, im.shape[1], im.shape[0])
# If the new image is smaller, we pad it
elif new_im.shape[0] < im.shape[0]:
padded = np.full_like(im, border_value)
start_row = (padded.shape[0] // 2) - (new_im.shape[0] // 2)
start_col = (padded.shape[1] // 2) - (new_im.shape[1] // 2)
end_row = start_row + new_im.shape[0]
end_col = start_col + new_im.shape[1]
padded[start_row:end_row, start_col:end_col] = new_im
new_im = padded
# Move the image to the desired sides (used to downscale tools and still keep them
# attached to the border of the image)
if 'top' in clip_sides:
M = np.float32([[1, 0, 0], [0, 1, -start_row]])
new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]),
interp, cv2.BORDER_CONSTANT, border_value)
if 'left' in clip_sides:
M = np.float32([[1, 0, -start_col], [0, 1, 0]])
new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]),
interp, cv2.BORDER_CONSTANT, border_value)
if 'bottom' in clip_sides:
M = np.float32([[1, 0, 0], [0, 1, start_row]])
new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]),
interp, cv2.BORDER_CONSTANT, border_value)
if 'right' in clip_sides:
M = np.float32([[1, 0, start_col], [0, 1, 0]])
new_im = cv2.warpAffine(new_im, M, (padded.shape[1], padded.shape[0]),
interp, cv2.BORDER_CONSTANT, border_value)
return new_im
#
# @brief Could flip the image or not.
#
# @param[in] im Image or list of images. If list, all images are either flipped or not.
#
# @returns the image (maybe flipped) maybe just the original one.
@staticmethod
def random_fliplr(im, not_used = None):
if common.randbin():
if isinstance(im, list):
return [ CaffeinatedAbstract.fliplr(i) for i in im ]
else:
return CaffeinatedAbstract.fliplr(im)
else:
return im
#
# @brief Could flip the image or not.
#
# @param[in] im Image or list of images. If list, all images are either flipped or not.
#
# @returns the image (maybe flipped) maybe just the original one.
@staticmethod
def random_flipud(im, not_used = None):
if common.randbin():
if isinstance(im, list):
return [ CaffeinatedAbstract.flipud(i) for i in im ]
else:
return CaffeinatedAbstract.flipud(im)
else:
return im
#
# @brief Add motion blur in a specific direction.
#
# @param[in] im Input image.
# @param[in] mask Pass a foreground mask if you wanna apply the motion just in the
# foreground.
# @param[in] apply_on Either 'bg', 'fg' or 'both'.
# @param[in] ks Size of the convolution kernel to be applied.
# @param[in] phi_deg Angle of rotation in degrees. Default is zero, so the motion will be
# horizontal.
#
# @returns the blured images.
@staticmethod
def directional_motion_blur(im, phi_deg = 0, ks = 15):
# Generating the kernel
kernel = np.zeros((ks, ks))
kernel[int((ks - 1) / 2),:] = np.ones(ks) / ks
# Rotate image if the user wants to simulate motion in a particular direction
# rot_im = CaffeinatedAbstract.rotate_bound(im, phi_deg, cv2.INTER_CUBIC)
# rot_im_blur = cv2.filter2D(rot_im, -1, kernel)
# new_im = CaffeinatedAbstract.rotate_bound(rot_im_blur, -phi_deg, cv2.INTER_CUBIC)
# tly = (new_im.shape[0] - im.shape[0]) // 2
# tlx = (new_im.shape[1] - im.shape[1]) // 2
# new_im = new_im[tly:tly + im.shape[0], tlx:tlx + im.shape[1]]
# FIXME: We keep just horizontal motion to investigate drop in performance
new_im = cv2.filter2D(im, -1, kernel)
return new_im
#
# @brief Random motion blur. Both foreground and background images must have the same size.
#
# @param[in] im Input image.
# @param[in] mask Mask of the foreground object that will appear blurred within the
# image.
# @param[in] rho Magnitude in pixels of the foreground motion vector.
# @param[in] phi_deg Angle in degrees of the motion vector.
# @param[in] interlaced Random interlacing will be added. Some lines of the foreground will
# move and others will not.
# @param[in] alpha Weight for the weighted sum. Default value is 0.5.
#
# @returns the blurred image.
@staticmethod
def weighted_sum_motion_blur(im, mask, rho, phi_deg, interlaced = False,
alpha = 0.5):
assert(im.shape[0] == mask.shape[0])
assert(im.shape[1] == mask.shape[1])
# Compute random motion vector
phi = common.deg_to_rad(phi_deg)
tx = rho * np.cos(phi)
ty = rho * np.sin(phi)
# Translation matrix
trans_mat = np.eye(3)
trans_mat[0, 2] = tx
trans_mat[1, 2] = ty
mat = trans_mat[:2, :3]
# Warp current image and mask according to the motion vector
im_warped = cv2.warpAffine(im, mat, (im.shape[1], im.shape[0]), flags = cv2.INTER_CUBIC)
mask_warped = cv2.warpAffine(mask, mat, (im.shape[1], im.shape[0]),
flags = cv2.INTER_NEAREST)
# Interlacing
if interlaced:
mask_warped_orig = mask_warped.copy()
lines_with_mask = np.unique(np.nonzero(mask_warped)[0]).tolist()
if lines_with_mask:
num_lines_to_remove = np.random.randint(len(lines_with_mask))
random.shuffle(lines_with_mask)
lines_with_mask = lines_with_mask[:num_lines_to_remove]
for i in lines_with_mask:
mask_warped[i,:] = 0
# Combine both images
new_im = im.copy()
new_im[mask_warped > 0] = np.round(
alpha * im[mask_warped > 0] + (1. - alpha) * im_warped[mask_warped > 0]
).astype(np.uint8)
# Blur if interlaced
if interlaced:
ksize = 3
blurred = cv2.GaussianBlur(new_im, (ksize, ksize), 0)
new_im[mask_warped_orig > 0] = blurred[mask_warped_orig > 0]
return new_im
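    #
    # Usage sketch (illustrative): blend the foreground with a displaced copy of itself
    # to fake motion blur, assuming `im` is a BGR uint8 frame and `mask` a binary mask
    # of the same height and width.
    # blurred = CaffeinatedAbstract.weighted_sum_motion_blur(im, mask, rho = 16, phi_deg = 45)
    #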
#
# @brief Adds or subtracts intensity in different parts of the image using Perlin noise.
#
# @param[in] im Input image.
#
# @returns the augmented image.
@staticmethod
def random_local_brightness_augmentation(im, intensity_start = 50., intensity_stop = 200.,
intensity_step = 50., shape_start = 1., shape_stop = 5., shape_step = 1.):
# Generate random illumination change range
intensity_options = np.arange(intensity_start, intensity_stop + intensity_step, intensity_step)
change_choice = np.random.choice(intensity_options)
# Generate Perlin blob size, larger numbers mean smaller blobs
shape_options = np.arange(shape_start, shape_stop + shape_step, shape_step)
shape_choice = np.random.choice(shape_options)
# Generate Perlin additive noise mask
pn = perlin2d_smooth(im.shape[0], im.shape[1], shape_choice) * change_choice \
- .5 * change_choice
pn = np.dstack((pn, pn, pn))
# Modify the image: HSV option
# hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV).astype(np.float64)
# hsv[:, :, 2] = np.round(np.clip(hsv[:, :, 2] + pn, 0, 255))
# augmented = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
# Additive value on BGR
augmented = np.round(np.clip(im.astype(np.float64) + pn, 0., 255.)).astype(np.uint8)
return augmented
#
# @brief Adds or subtracts intensity in different parts of the image using Perlin noise.
#
# @param[in] im Input image.
#
# @returns the augmented image.
@staticmethod
def random_local_contrast_augmentation(im, shape_start = 1., shape_stop = 5., shape_step = 1.):
# Choose minimum and maximum contrast randomly
contrast_min = random.choice([0.5, 0.6, 0.7, 0.8])
contrast_max = random.choice([1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0])
# Generate Perlin blob size, larger numbers mean smaller blobs
shape_options = np.arange(shape_start, shape_stop + shape_step, shape_step)
shape_choice = np.random.choice(shape_options)
# Generate Perlin additive noise mask
pn = perlin2d_smooth(im.shape[0], im.shape[1], shape_choice, minval = contrast_min,
maxval = contrast_max)
pn = np.dstack((pn, pn, pn))
# Modify the image
augmented = np.round(np.clip(np.multiply(im.astype(np.float64), pn), 0, 255)).astype(np.uint8)
return augmented
#
# @brief Global (as in same additive value added to all pixels) brightness augmentation.
#
# @param[in] im Input image.
#
# @returns the augmented image.
@staticmethod
def random_global_brightness_augmentation(im, intensity_start = -50, intensity_stop = 50,
intensity_step = 10):
# Generate random illumination change
intensity_options = np.arange(intensity_start, intensity_stop + intensity_step,
intensity_step)
change_choice = np.random.choice(intensity_options)
# Additive change on Value of HSV
# hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV).astype(np.float64)
# hsv[:, :, 2] = np.round(np.clip(hsv[:, :, 2] + change_choice, 0., 255.))
# hsv = hsv.astype(np.uint8)
# augmented = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
# Additive change on all channels of BGR
augmented = np.clip(im.astype(np.float64) + change_choice, 0, 255).astype(np.uint8)
return augmented
#
# @brief Global contrast (multiplicative) augmentation.
#
# @param[in] im Input image.
#
# @returns the augmented image.
@staticmethod
def random_global_contrast_augmentation(im):
contrast_choice = random.choice([0.5, 0.6, 0.7, 0.8, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0])
augmented = np.round(np.clip(np.multiply(im.astype(np.float64),
contrast_choice), 0, 255)).astype(np.uint8)
return augmented
#
# @brief Bernoulli motion blur.
#
# @param[in] im Image or list of images.
# @param[in] mask Mask of moving object.
# @param[in] max_mag Maximum amount of pixels of displacement.
# @param[in] max_ang Maximum angle of the motion vector. Default is 360, i.e. can move in any
# direction.
#
# @returns the images with motion blur with a probability p.
@staticmethod
def random_weighted_sum_motion_blur(im, mask, max_mag = 32, max_ang = 360):
rho = np.random.randint(max_mag)
phi_deg = np.random.randint(max_ang)
interlaced = common.randbin()
if isinstance(im, list):
return [ CaffeinatedAbstract.weighted_sum_motion_blur(i, m, rho, phi_deg,
interlaced) for i, m in zip(im, mask) ]
else:
return CaffeinatedAbstract.weighted_sum_motion_blur(im, mask, rho, phi_deg,
interlaced)
#
# @brief Converts an image from BGR to BRG.
#
# @param[in] im BGR image.
#
# @returns an image converted to BRG.
@staticmethod
def bgr2brg(im):
return im[..., [0, 2, 1]]
#
# @brief Bernoulli BGR to BRG swapping.
#
# @param[in] im Image or list of images.
#
# @returns the image with the green-red channels swapped with a probability of 0.5.
@staticmethod
def random_brg(im):
if common.randbin():
if isinstance(im, list):
return [ CaffeinatedAbstract.bgr2brg(i) for i in im ]
else:
return CaffeinatedAbstract.bgr2brg(im)
else:
return im
#
# @brief Rotates the image over itself a random number of degrees.
#
# @param[in] im Input image, numpy array.
# @param[in] deg_delta The range of possible rotation is +- deg_delta.
# @param[in] interp Interpolation method: lanczos, linear, cubic, nearest.
#
# @returns the rotated image.
@staticmethod
def random_rotation(im, deg_delta, interp):
max_ang = deg_delta
min_ang = -1. * max_ang
ang = random.uniform(min_ang, max_ang)
new_im = None
if isinstance(im, list):
new_im = [ CaffeinatedAbstract.rotate_and_crop(i, ang, interp) for i in im ]
else:
new_im = CaffeinatedAbstract.rotate_and_crop(im, ang, interp)
return new_im
#
# @brief Resizes an imaged to the desired width while keeping proportions.
#
# @param[in] im Image to be resized.
# @param[in] new_w New width.
# @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos.
#
# @returns a resized image.
@staticmethod
def resize_width(im, new_w, interp = None):
assert(im.dtype == np.uint8)
# If no interpolation method is chosen we select the most convenient depending on whether
# the user is upsampling or downsampling the image
if interp is None:
interp = cv2.INTER_AREA if new_w < im.shape[1] else cv2.INTER_LANCZOS4
ratio = float(im.shape[0]) / float(im.shape[1])
new_h = int(round(new_w * ratio))
new_im = cv2.resize(im, (new_w, new_h), interpolation=interp)
return new_im
#
# @brief Resizes an imaged to the desired width while keeping proportions.
#
# @param[in] im Image to be resized.
# @param[in] new_h New height.
# @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos.
#
# @returns a resized image.
@staticmethod
def resize_height(im, new_h, interp):
assert(im.dtype == np.uint8)
ratio = float(im.shape[0]) / float(im.shape[1])
new_w = int(round(new_h / ratio))
# imethod = PIL_interp_method[interp]
# new_im = np.array(PIL.Image.fromarray(im).resize((new_w, new_h), imethod))
new_im = cv2.resize(im, (new_w, new_h), interpolation=interp)
return new_im
#
# @brief Scales an image to a desired factor of the original one.
#
# @param[in] im Image to be resized.
# @param[in] scale_factor Factor to scale up or down the image.
# @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos.
#
# @returns a resized image.
@staticmethod
def resize_factor(im, scale_factor, interp):
new_w = int(round(im.shape[1] * scale_factor))
return CaffeinatedAbstract.resize_width(im, new_w, interp)
#
# @brief Scales an image to a desired factor of the original one.
#
# @param[in] im Image to be resized.
# @param[in] new_w New width.
# @param[in] new_h New width.
# @param[in] interp Method of interpolation: nearest, bilinear, bicubic, lanczos.
#
# @returns a resized image.
@staticmethod
def resize(im, new_w, new_h, interp):
# imethod = PIL_interp_method[interp]
# new_im = scipy.misc.imresize(im, (new_h, new_w), interp = interp).astype(im.dtype)
# return np.array(PIL.Image.fromarray(im).resize((new_w, new_h), imethod),
# dtype = im.dtype)
new_im = cv2.resize(im, (new_w, new_h), interpolation=interp)
return new_im
#
# @returns a crop of shape (new_h, new_w).
#
@staticmethod
def crop_center(im, new_w, new_h):
start_x = im.shape[1] // 2 - (new_w // 2)
start_y = im.shape[0] // 2 - (new_h // 2)
return im[start_y:start_y + new_h, start_x:start_x + new_w].copy()
#
# @brief Rotatation of an image with black bounds around it, as it would be
# expected. A positive rotation angle results in a clockwise rotation.
#
# @param[in] image Numpy ndarray.
# @param[in] angle Angle in degrees.
#
# @returns the rotated image.
@staticmethod
def rotate_bound(image, angle, interp):
# Grab the dimensions of the image and then determine the center
(h, w) = image.shape[:2]
(cX, cY) = (w / 2, h / 2)
# Grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# Compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# Adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# Perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH), flags = interp)
#
# brief Rotates an image over a centre point given and leaves the whole
# image inside. Clockwise rotation of the image.
#
# @param[in] im Numpy ndarray.
# @param[in] centre (x, y) in image coordinates.
# @param[in] angle Angle in degrees.
# @param[in] interp OpenCV interpolation method.
@staticmethod
def rotate_bound_centre(im, centre, deg, interp):
cm_x = centre[0]
cm_y = centre[1]
# Build the rotation matrix
rot_mat = cv2.getRotationMatrix2D((cm_y, cm_x), -deg, 1.0)
rot_mat_hom = np.zeros((3, 3))
rot_mat_hom[:2,:] = rot_mat
rot_mat_hom[2, 2] = 1
# Find the coordinates of the corners in the rotated image
h = im.shape[0]
w = im.shape[1]
tl = np.array([0, 0, 1]).reshape((3, 1))
tr = np.array([w - 1, 0, 1]).reshape((3, 1))
bl = np.array([0, h - 1, 1]).reshape((3, 1))
br = np.array([w - 1, h - 1, 1]).reshape((3, 1))
tl_rot = np.round(np.dot(rot_mat_hom, tl)).astype(np.int)
tr_rot = np.round(np.dot(rot_mat_hom, tr)).astype(np.int)
bl_rot = np.round(np.dot(rot_mat_hom, bl)).astype(np.int)
br_rot = np.round(np.dot(rot_mat_hom, br)).astype(np.int)
# Compute the size of the new image from the coordinates of the rotated one so that
# we add black bounds around the rotated one
min_x = min([tl_rot[0], tr_rot[0], bl_rot[0], br_rot[0]])
max_x = max([tl_rot[0], tr_rot[0], bl_rot[0], br_rot[0]])
min_y = min([tl_rot[1], tr_rot[1], bl_rot[1], br_rot[1]])
max_y = max([tl_rot[1], tr_rot[1], bl_rot[1], br_rot[1]])
new_w = max_x + 1 - min_x
new_h = max_y + 1 - min_y
# Correct the translation so that the rotated image lies inside the window
rot_mat[0, 2] -= min_x
rot_mat[1, 2] -= min_y
return cv2.warpAffine(im, rot_mat, (new_w[0], new_h[0]), flags = interp)
#
# @brief Clockwise rotation plus crop (so that there is no extra added black background).
#
# @details The crop is done based on a rectangle of maximal area inside the rotated region.
#
# @param[in] im Numpy ndarray image. Shape (h, w, 3) or (h, w).
# @param[in] ang Angle in degrees.
# @param[in] interp Interpolation method: lanczos, linear, cubic, nearest.
#
# @returns the rotated image.
@staticmethod
def rotate_and_crop(im, ang, interp):
# Rotate image
rotated = CaffeinatedAbstract.rotate_bound(im, ang, interp)
# Calculate cropping area
wr, hr = geometry.rotated_rect_with_max_area(im.shape[1],
im.shape[0], common.deg_to_rad(ang))
wr = int(np.floor(wr))
hr = int(np.floor(hr))
# Centre crop
rotated = CaffeinatedAbstract.crop_center(rotated, wr, hr)
return rotated
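    #
    # Usage sketch (illustrative): rotate a frame 15 degrees clockwise and crop away the
    # black corners introduced by the rotation.
    # rotated = CaffeinatedAbstract.rotate_and_crop(im, 15.0, cv2.INTER_CUBIC)
    #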
#
# @brief This method deinterlaces an image using ffmpeg.
#
# @param[in] im Numpy ndarray image. Shape (h, w, 3) or (h, w).
#
# @returns the deinterlaced image.
@staticmethod
def deinterlace(im, ext = '.png'):
input_path = tempfile.gettempdir() + '/' + common.gen_rand_str() + ext
output_path = tempfile.gettempdir() + '/' + common.gen_rand_str() + ext
# Save image in a temporary folder
cv2.imwrite(input_path, im)
# Deinterlace using ffmpeg
common.shell('ffmpeg -i ' + input_path + ' -vf yadif ' + output_path)
# Read deinterlaced image
dei = cv2.imread(output_path)
# Remove image from temporary folder
common.rm(input_path)
common.rm(output_path)
return dei
@staticmethod
def gaussian_noise(im, mean=0, std=20):
noise = np.random.normal(mean, std, im.shape)
return np.round(np.clip(im.astype(np.float64) + noise, 0, 255)).astype(np.uint8)
#
    # @returns a gamma-corrected image.
#
@staticmethod
def adjust_gamma(im, gamma = 1.0):
inv_gamma = 1.0 / gamma
table = np.array([((i / 255.0) ** inv_gamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(im, table)
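    #
    # Worked example (hypothetical values): with gamma = 2.0 the lookup table maps an
    # input intensity i to ((i / 255) ** 0.5) * 255, so i = 64 comes out around 127 and
    # mid-tones are brightened, while gamma < 1.0 darkens them.
    #
    #   brighter = CaffeinatedAbstract.adjust_gamma(frame, gamma = 2.0)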
#
    # @brief Draws a horizontal (left-to-right) gradient image.
#
# @returns the image of the gradient.
@staticmethod
def draw_grad_lr(height, width, left_colour, right_colour):
return (np.ones((height, width)) * np.linspace(left_colour, right_colour,
width)).astype(np.uint8)
#
    # @brief Draws a vertical (top-to-bottom) gradient image; 'left_colour' is used as
    #        the top intensity and 'right_colour' as the bottom intensity.
    #
    # @returns the image of the gradient.
    @staticmethod
    def draw_grad_ud(height, width, left_colour, right_colour):
        # Build the gradient with shape (width, height) so that the transpose returns the
        # requested (height, width) image with the gradient running from top to bottom.
        return (np.ones((width, height)) * np.linspace(left_colour, right_colour,
            height)).astype(np.uint8).T
#
# @brief FIXME: does not work properly when image is dark
@staticmethod
def detect_endoscopic_circle_bbox(im):
# Edge detection
max_black_intensity = 10
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
kernel = np.ones((3, 3), np.uint8)
dilation = cv2.dilate(gray, kernel, iterations = 1)
_, thresh = cv2.threshold(dilation, max_black_intensity, 255, cv2.THRESH_BINARY)
# Detect contour of largest area
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt = max(contours, key = cv2.contourArea)
((xc, yc), radius) = cv2.minEnclosingCircle(cnt)
x = xc - radius
y = yc - radius
w = 2 * radius
h = 2 * radius
# x, y, w, h = cv2.boundingRect(cnt)
return int(x), int(y), int(w), int(h)
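    #
    # Usage sketch (hypothetical): the bright endoscopic circle is located by thresholding
    # away the near-black border, keeping the largest contour and fitting its minimum
    # enclosing circle, whose bounding box is returned as (x, y, w, h):
    #
    #   x, y, w, h = CaffeinatedAbstract.detect_endoscopic_circle_bbox(frame)
    #   roi = frame[y:y + h, x:x + w]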
@staticmethod
def crop_endoscopic_circle(im):
# Detect endoscopic circle
has_circle = True
# TODO
if not has_circle:
return im
x, y, w, h = CaffeinatedAbstract.detect_endoscopic_circle_bbox(im)
cropped = im[y:y + h, x:x + w].copy()
return cropped
#
# @brief Function to add specular reflections to an image.
#
# TODO
#
#
@staticmethod
def add_specular_noise():
pass
#
# @brief Skeletonisation of a binary image [0, 255].
#
# @param[in] im Input binary image. Binary means either some values are zero and some
# others different from zero. Different from 0 can be 1 and 255.
#
# @returns a binary image (0, 255) with the skeleton of the image.
@staticmethod
def skeleton(im):
assert(len(im.shape) == 2)
sk = skimage.morphology.skeletonize_3d(im.astype(bool))
return sk
#
    # @brief Pads an image with extra pixels according to a newly specified size.
#
# @param[in] tlx Integer that represents the top left corner column.
# @param[in] tly Integer that represents the top left corner row.
# @param[in] brx Integer that represents the bottom right corner column.
# @param[in] bry Integer that represents the bottom right corner row.
# @param[in] width Width of the new image.
# @param[in] height Height of the new image.
# @param[in] intensity Integer of the padding pixels.
#
# @returns nothing.
def pad(self, tlx, tly, brx, bry, width, height, intensity):
assert(isinstance(tlx, type(0)) and isinstance(tly, type(1)) and isinstance(brx, type(1)) \
and isinstance(bry, type(1)))
assert(tlx <= brx)
assert(tly <= bry)
assert(width >= self.width)
assert(height >= self.height)
assert(isinstance(intensity, type(1)))
# Create image of the new size
new_raw_frame = None
new_pixel = None
if len(self._raw_frame.shape) == 2:
new_raw_frame = np.empty((height, width), dtype=self._raw_frame.dtype)
new_pixel = intensity
elif len(self._raw_frame.shape) == 3:
new_raw_frame = np.empty((height, width, self._raw_frame.shape[2]),
dtype=self._raw_frame.dtype)
new_pixel = np.empty((self._raw_frame.shape[2],), dtype=self._raw_frame.dtype)
new_pixel.fill(intensity)
else:
raise ValueError('[image.CaffeinatedAbstract.pad] Error, image dimension ' \
+ str(self._raw_frame.shape) + ' not supported.')
new_raw_frame[:,:] = new_pixel
# Insert the previous image in the right place
new_raw_frame[tly:bry + 1, tlx:brx + 1] = self._raw_frame
self._raw_frame = new_raw_frame
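    #
    # Usage sketch (hypothetical sizes): to centre a 100x100 image on a black 120x120
    # canvas, the original top-left corner moves to (10, 10) and the bottom-right corner
    # to (109, 109), since the pasted block must keep the original width and height:
    #
    #   caffeinated_image.pad(10, 10, 109, 109, 120, 120, 0)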
#
# @brief Converts the image into a distance transform (L2 norm) to the edges.
#
# @param[in] mask_size Size of the Sobel filter kernel.
#
# @returns nothing.
def shape_transform(self, mask_size):
assert(isinstance(mask_size, type(0)))
# Convert to grayscale
# gray = cv2.cvtColor(self._raw_frame, cv2.COLOR_BGR2GRAY)
# Sobel filter
sobel_x_64f = np.absolute(cv2.Sobel(self._raw_frame, cv2.CV_64F, 1, 0, ksize=mask_size))
sobel_y_64f = np.absolute(cv2.Sobel(self._raw_frame, cv2.CV_64F, 0, 1, ksize=mask_size))
sobel_64f = (sobel_x_64f + sobel_y_64f)
scaled_sobel = np.uint8(255 * sobel_64f / np.max(sobel_64f))
# Dilate borders
kernel = np.ones((mask_size, mask_size), np.uint8)
dilated = cv2.dilate(scaled_sobel, kernel, iterations=1)
# Threshold
_, thresh = cv2.threshold(dilated, 1, 255, cv2.THRESH_BINARY)
# Distance transform
dist = 255 - (cv2.distanceTransform(255 - thresh, cv2.DIST_L2, maskSize=0))
        # Remove background
dist[self._raw_frame == 0] = 0
self._raw_frame = dist
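    #
    # Processing sketch: edges are detected with Sobel derivatives, thickened by dilation
    # and binarised; every pixel then receives 255 minus its L2 distance to the nearest
    # edge, so pixels close to an object boundary end up brighter, and pixels that were
    # background (zero) in the original image are reset to zero. A hypothetical call on a
    # single-channel mask would be:
    #
    #   caffeinated_mask.shape_transform(3)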
#
# @brief Converts image to single channel.
#
# @returns nothing.
def convert_to_single_chan(self):
assert(len(self._raw_frame.shape) == 3)
# Sanity check: assert that all the pixels of the image have the same intensity value in all the
# channels
for channel in range(1, self._raw_frame.shape[2]):
if not np.array_equal(self._raw_frame[:,:, channel], self._raw_frame[:,:, 0]):
raise RuntimeError('[CaffeinatedAbstract] Error, the image ' + self._name + ' has ' \
+ 'channels that are different from each other so it is not clear ' \
+ 'how to convert it to a proper single channel image.')
self._raw_frame = self._raw_frame[:,:, 0]
#
# @brief Changes the intensity of all the pixels in all the channels to zero.
#
# @returns nothing.
def convert_to_black(self):
self._raw_frame.fill(0)
#
# @brief Filter image with ground truth label, background pixels on the ground truth will be blacked.
#
# @param[in] caffe_label CaffeinatedLabel.
#
def filter_with_gt(self, caffe_label):
self._raw_frame[caffe_label.raw == 0] = 0
#
# @brief Builds an object of type CaffeinatedAbstract from an image file.
#
# @param[in] path Path to the image file.
#
@classmethod
def from_file(cls, path, *args):
# return cls(cv2.imread(path, cv2.IMREAD_COLOR), *args)
return cls(cv2.imread(path, cv2.IMREAD_UNCHANGED), *args)
#
# @returns the height of the image.
#
@property
def height(self):
return self._raw_frame.shape[0]
#
# @returns the width of the image.
#
@property
def width(self):
return self._raw_frame.shape[1]
#
# @returns the name of the image.
#
@property
def name(self):
return self._name
#
# @returns the raw internal image.
#
@property
def raw(self):
return self._raw_frame
#
# @returns the data type.
#
@property
def dtype(self):
return self._raw_frame.dtype
#
# @class CaffeinatedImage represents an image that will be used by Caffe so this class should
# provide methods to adapt the original image to the type of input
# Caffe is expecting.
#
class CaffeinatedImage(CaffeinatedAbstract):
#
# @brief Saves the colour image as an attribute of the class.
#
# @param[in] raw_frame Numpy array with a image, shape (h, w) or (h, w, c).
# @param[in] name Id of the image, either the name or the frame number, it will be converted to
# str.
# @param[in] label Id of the class to whom the image belongs. Only used in case the image is used
# for classification purposes. Default value is None.
#
def __init__(self, raw_frame, name, label = None):
# Assert that the image is multi-channel
dim = len(raw_frame.shape)
if dim < 2:
raise RuntimeError('[CaffeinatedImage.__init__], the image provided has [' + \
str(dim) + '] dimensions, only (H x W x C) and (H x W) are supported.')
# Assert that the type of label is correct (i.e. integer) when it is not None
if label is not None:
assert(isinstance(label, type(0)))
self._label = label
# Call CaffeinatedAbstract constructor
super(CaffeinatedImage, self).__init__(raw_frame if dim > 2 else np.expand_dims(raw_frame, axis = 2),
name)
#
# @brief Builds an object of type CaffeinatedImage from file.
#
# @details Only supports 3-channel colour images. It will raise errors for images with a different
# number of channels.
#
# @param[in] path Path to the image file.
# @classmethod
# def from_file(cls, path, name):
# return cls(cv2.imread(path, cv2.IMREAD_UNCHANGED), name)
#
# @brief Convert image to caffe test input, transposing it to the Caffe format (C x H x W) and
# subtracting the training mean.
#
# @details The mean needs to be subtracted because there is no transform_param section in the input
# layer of the test network.
#
# @param[in] mean_values Numpy ndarray with the per channel mean of the training set.
# Shape (channels,).
#
# @returns an image ready to be processed by Caffe.
def convert_to_caffe_input(self, mean_values):
# Sanity check: the mean values should be equal to the number of channels of the input image
dim = len(self._raw_frame.shape)
no_mean_values = mean_values.shape[0]
if dim < 3: # 1D or 2D images should have only one channel mean
if no_mean_values != 1:
raise ValueError('[convert_to_caffe_input] Error, [' + str(no_mean_values) + '] mean ' + \
' values provided, but the image is only 1D or 2D, so only one mean value is required.')
elif dim == 3:
channels = self._raw_frame.shape[-1]
if channels != no_mean_values:
raise ValueError('[convert_to_caffe_input] Error, [' + str(no_mean_values) \
+ '] mean values have been provided but the given image has [' + str(channels) \
+ '] channels.')
else:
raise ValueError('[convert_to_caffe_input] Error, high dimensional image not supported.')
return np.transpose(self._raw_frame.astype(np.float32) - mean_values, (2, 0, 1))
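    #
    # Usage sketch (hypothetical mean values): Caffe expects channel-first float data, so
    # a (H, W, 3) BGR frame becomes a (3, H, W) float32 blob with the per-channel training
    # mean already subtracted:
    #
    #   mean_bgr = np.array([104.0, 117.0, 123.0], dtype = np.float32)
    #   blob = caffeinated_image.convert_to_caffe_input(mean_bgr)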
#
# @brief Resize the image to the desired new width and height.
#
# @param[in] new_h New height.
# @param[in] new_w New width.
#
# @returns nothing.
def resize(self, new_h, new_w):
self._raw_frame = cv2.resize(self._raw_frame, (new_w, new_h))
#
# @brief Resize the image and keep the original aspect ratio, padding if required.
#
# @param[in] new_h Height of the new image.
# @param[in] new_w Width of the new image.
#
# @returns nothing.
def resize_keeping_aspect(self, new_h, new_w):
# Store aspect ratio, width and height about the previous dimensions
w = self.width
h = self.height
ar = float(w) / float(h)
# Create new frame respecting the desired new dimensions
new_frame = np.zeros((new_h, new_w, self._raw_frame.shape[2]), self._raw_frame.dtype)
# We scale the larger size of the image and adapt the other one to the aspect ratio
temp_w = None
temp_h = None
y_start = 0
x_start = 0
if w >= h:
temp_w = new_w
temp_h = int(temp_w / ar)
y_start = int((new_h - temp_h) / 2.0)
else:
temp_h = new_h
temp_w = int(temp_h * ar)
x_start = int((new_w - temp_w) / 2.0)
# We add black padding if there is free space
new_frame[y_start:temp_h + y_start, x_start:temp_w + x_start] = cv2.resize(self._raw_frame,
(temp_w, temp_h))
# Copy the final image to the internal buffer that will be displayed
self._raw_frame = new_frame
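    #
    # Worked example (hypothetical sizes): resizing a 1920x1080 frame into a 512x512
    # canvas keeps the aspect ratio (~1.78), so the content is scaled to 512x288 and
    # centred vertically, leaving (512 - 288) / 2 = 112 rows of black padding at the top
    # and at the bottom:
    #
    #   caffeinated_image.resize_keeping_aspect(512, 512)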
#
# @brief Converts BGR image to a Caffe datum with shape (C x H x W).
#
# @returns the Caffe datum serialised as a string.
def serialise_to_string(self, jpeg_quality=100):
assert(self._raw_frame.dtype == np.uint8)
import caffe
# caffe_image = self._raw_frame.astype(np.float32)
# Convert image to Caffe datum
datum = caffe.proto.caffe_pb2.Datum()
datum.height, datum.width, datum.channels = self._raw_frame.shape
# datum.data = caffe_image.tostring()
flags = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality]
datum.data = cv2.imencode('.jpg', self._raw_frame, flags)[1].tostring()
# If the image has a label, it must be an integer
if self._label is not None:
assert(isinstance(self._label, type(0)))
datum.label = self._label
return datum.SerializeToString()
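    #
    # Usage sketch (hypothetical): the frame is JPEG-encoded inside the datum, which keeps
    # database entries small at the cost of lossy compression; the returned string is what
    # would typically be stored as the value of an LMDB record:
    #
    #   datum_str = caffeinated_image.serialise_to_string(jpeg_quality = 95)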
#
# @brief Convert image from uint16 to uint8.
#
def uint16_to_uint8(self):
self._raw_frame = np.round((self._raw_frame.astype(np.float32) / 65535.0) * 255.0).astype(np.uint8)
#
# @brief Add Gaussian noise to image.
#
# @param[in] mean Default value is 0.
# @param[in] std Default value is 10.
#
# @returns nothing.
def add_gaussian_noise(self, mean = 0, std = 10):
# Get image dimensions
row, col, ch = self._raw_frame.shape
# Add Gaussian noise to the internal image
gauss = np.random.normal(mean, std, (row, col, ch)).reshape(row, col, ch)
        # Convert image to float, add Gaussian noise, clip to [0, 255] and convert back to uint8
        self._raw_frame = np.round(np.clip(self._raw_frame.astype(np.float64) + gauss, 0, 255)).astype(np.uint8)
#
# @brief Converts a green screen image with tools to grayscale
# adding a bit of noise so that BGR are not kept equal.
#
@classmethod
def gray_tools(cls, im, noise_delta=3):
assert(isinstance(im, np.ndarray))
new_im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
new_im = cv2.cvtColor(new_im, cv2.COLOR_GRAY2BGR)
noise = np.random.randint(-noise_delta,
noise_delta + 1, size=new_im.shape)
new_im = np.clip(new_im + noise, 0, 255).astype(np.uint8)
return new_im
#
# @brief Convert it to a noisy grayscale image.
#
def noisy_gray(self, noise_delta=3):
self._raw_frame = CaffeinatedImage.gray_tools(self._raw_frame, noise_delta)
def random_crop(self, height, width):
self._raw_frame = CaffeinatedAbstract.random_crop(self._raw_frame, height, width)
@property
def shape(self):
return self._raw_frame.shape
#
# @class Caffeinated8UC3Image represents a colour (H x W x 3) CaffeinatedImage.
#
class Caffeinated8UC3Image(CaffeinatedImage):
#
# @brief Saves the colour image as an attribute of the class.
#
# @param[in] frame_bgr Numpy array with a BGR image, shape (h, w, c).
def __init__(self, frame_bgr, name):
# Check that it is a 3-channel BGR image
EXPECTED_DIM = 3
EXPECTED_CHANNELS = 3
if len(frame_bgr.shape) != EXPECTED_DIM or frame_bgr.shape[EXPECTED_DIM - 1] != EXPECTED_CHANNELS:
raise RuntimeError('[Caffeinated8UC3Image] Error, the image provided has a shape of ' + \
str(frame_bgr.shape) + '. We expect an image of shape (H x W x ' + \
str(EXPECTED_CHANNELS) + ').')
# Check that the image is uint8
EXPECTED_TYPE = np.uint8
if frame_bgr.dtype != EXPECTED_TYPE:
raise RuntimeError('[Caffeinated8UC3Image] Error, the image provided has a type of ' + \
str(frame_bgr.dtype) + ' and we expect ' + str(EXPECTED_TYPE) + '.')
super(self.__class__, self).__init__(frame_bgr, name)
#
# @class CaffeinatedLabel represents a segmentation label that will be used by Caffe so this
# class should provide methods to adapt the original image to the type of
# input Caffe is expecting.
#
# @details This class does not support labels that are not grayscale or colour images, that is,
# the images provided must be (H x W) or (H x W x C). In case that you provide a label
# with shape (H x W x C) this class will make sure that all the channels C have the same
# values. This is because a priori it does not make any sense for a pixel to belong to
# different classes.
class CaffeinatedLabel(CaffeinatedAbstract):
#
# @brief Stores the label and checks that both dimensions and type are correct for a label.
# @details To make a safe conversion to single channel this method will check that all the
# pixels of the image have exactly the same intensity value in all the BGR channels.
# If this does not happen an exception will be raised.
#
# @param[in] label_image Single channel OpenCV/Numpy image. Shape (H x W) or (H x W x C).
# @param[in] name Name of the label, usually stores the id of the related image.
# @param[in] classes Integer that represents the maximum number of classes in the labels,
# used for both validation purposes and to convert back/forth to Caffe
# input.
# @param[in] class_map Integer (pixel intensity) -> Integer (class, [0, K - 1]),
# where K is the maximum number of classes.
# @param[in] proba_map Probability maps for all the classes, shape (c, h, w).
#
def __init__(self, label_image, name, classes, class_map, proba_map = None):
# This is 2 because we expect the image to be of shape (H x W) and the intensity of the
# pixel to indicate the class that the pixel belongs to
EXPECTED_DIM = 2
EXPECTED_LABEL_TYPE = np.uint8
# Store the maximum number of classes after validating that it is in the range [2, 256]
assert(isinstance(classes, type(0)) and classes >= 2 and classes <= 256)
self._classes = classes
# Store the dictionary for class mappings after validating it
classes_present = [False] * classes
assert(len(class_map.keys()) == classes)
for k, v in class_map.items():
assert(isinstance(k, type(0)))
assert(isinstance(v, type(0)))
assert(k >= 0 and k <= 255)
assert(v >= 0 and v < self._classes)
classes_present[v] = True
assert(all(classes_present))
self._class_map = class_map
# Sanity check: labels that are neither (H x W) nor (H x W x C) are not supported
dim = len(label_image.shape)
if not (dim == 2 or dim == 3):
raise RuntimeError('[CaffeinatedLabel] Error, the label provided has a dimension of ' + \
str(dim) + ', which is not supported. Only (H x W) and (H x W x C) are supported.')
# Sanity check: if the label provided is multiple-channel, assert that all the pixels of the image
# have the same intensity value in all the channels
if dim > EXPECTED_DIM:
for channel in range(1, label_image.shape[2]):
if not np.array_equal(label_image[:,:, channel], label_image[:,:, 0]):
raise RuntimeError('[CaffeinatedLabel] Error, the label provided in ' + name + ' has channels that are ' + \
'different from each other so it is not clear how to convert it to a proper ' + \
'single channel label in which the intensity defines the pixel class.')
# Sanity check: the image must be uint8, this essentially means that there is a maximum of 256 labels
if label_image.dtype != EXPECTED_LABEL_TYPE:
raise RuntimeError('[CaffeinatedLabel] Error, a label must be ' + str(EXPECTED_LABEL_TYPE) + '.')
# If the image has several channels, we just get one (we already know that all the channels have the
# same values
if dim == EXPECTED_DIM:
raw_label = label_image
else:
raw_label = label_image[:,:, 0]
# Assert that there are no more unique labels than classes
unique_classes = np.unique(raw_label)
if unique_classes.shape[0] > self._classes:
raise ValueError('[CaffeinatedLabel] Error, label ' + str(name) + ' is said to have ' \
+ str(self._classes) + ' classes but there are more unique values in it, exactly: ' \
+ str(unique_classes))
        # Assert that the intensities in the label are all present in the class_map dictionary
for i in unique_classes:
if not i in self._class_map:
raise ValueError('[CaffeinatedLabel] Error, label ' + str(name) + ' has a pixel with ' \
+ 'intensity ' + str(i) + ' but this intensity is not present in the class map.')
        # Store probability map if provided (and None otherwise, so the attribute always exists)
        if proba_map is not None:
            assert(len(proba_map.shape) == 3)
            assert(proba_map.shape[0] == classes)
            assert(proba_map.shape[1] == raw_label.shape[0])
            assert(proba_map.shape[2] == raw_label.shape[1])
        self._predicted_map = proba_map
# Call CaffeinatedAbstract constructor
super(CaffeinatedLabel, self).__init__(raw_label, name)
#
# @brief Builds an object of type CaffeinatedLabel from an image file.
#
# @param[in] fmaps array_like, shape (c, h, w).
#
# @param[in] classes Integer that represents the maximum number of classes in the labels, used for
# both validation purposes and to convert back/forth to Caffe input.
#
# @param[in] class_map Integer (pixel intensity) -> Integer (class, [0, K - 1]), where K is the
# maximum number of classes.
#
@classmethod
def from_network_output(cls, fmaps, name, classes, class_map):
label_image = fmaps.argmax(axis=0).astype(np.uint8)
for k, v in class_map.items():
label_image[label_image == v] = k
return cls(label_image, name, classes, class_map, fmaps)
#
# @brief Convert label to CaffeinatedImage for displaying purposes.
#
# @param[in] cn Channels of the new image. The labels will be replicated across channels.
#
# @returns the label converted into a cn-channel CaffeinatedImage.
def to_image(self, cn = 3):
new_image = np.ndarray((self._raw_frame.shape[0], self._raw_frame.shape[1], cn),
self._raw_frame.dtype)
for k in range(cn):
new_image[:,:, k] = self._raw_frame
return CaffeinatedImage(new_image, self._name)
#
# @brief Converts the label to a Caffe datum.
#
# @returns a Caffe datum label serialised to string.
def serialise_to_string(self):
# Sanity check: assert that the type of the label is correct
import caffe
assert(self._raw_frame.dtype == np.uint8)
# Create Caffe datum
datum = caffe.proto.caffe_pb2.Datum()
datum.height, datum.width = self._raw_frame.shape
# if self._classes == 2:
# Convert (h, w) -> (1, h, w)
# caffe_label = np.expand_dims(self._raw_frame, axis = 0)
# caffe_label = self._raw_frame
# else:
# Create ndarray of binary maps
fmaps = np.zeros([self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]],
dtype = np.uint8)
# k is intensity
# v is the class number
for k, v in self._class_map.items():
fmaps[v, self._raw_frame == k] = 1
# if self._classes == 2:
# Binary case, only one feature map
# datum.channels = 1
# caffe_label = np.expand_dims(fmaps[1], axis = 0)
# else:
# Multi-class case, one feature map per class
# datum.channels = self._classes
# caffe_label = fmaps
# Multi-class case, one feature map per class
datum.channels = self._classes
caffe_label = fmaps
# Convert label[s] to string
datum.data = caffe_label.tostring()
return datum.SerializeToString()
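    #
    # Worked example (hypothetical mapping): with classes = 2 and class_map = {0: 0, 255: 1},
    # a pixel of intensity 255 sets a one in feature map 1 and a pixel of intensity 0 sets a
    # one in feature map 0, so the datum carries one binary (h, w) map per class stacked
    # into a (classes, h, w) volume.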
#
# @brief Binarises the label. It will be thresholded so that only 0/maxval values are present.
#
# @param[in] thresh Values greater or equal than 'thresh' will be transformed to 'maxval'.
# @param[in] maxval Integer that will be given to those pixels higher or equal than 'thresh'.
#
# @returns nothing.
def binarise(self, thresh = 10, maxval = 1):
_, self._raw_frame = cv2.threshold(self._raw_frame, thresh, maxval, cv2.THRESH_BINARY)
#
# @brief Convert intensity-based labels into proper class-index labels.
#
# @returns an array_like, shape (h, w).
def to_classes(self):
class_index_frame = self._raw_frame.copy()
for k, v in self._class_map.items():
class_index_frame[self._raw_frame == k] = v
return class_index_frame
#
# @brief Maps between intensities [0, 255] to classes [0, K] using the JSON info provided.
#
# @param[in] intensity Typically an integer [0, 255].
#
    # @returns the class index of the given pixel intensity according to the provided class map.
def map_intensity_to_class(self, intensity):
return self._class_map[intensity]
#
# @brief Maps between classes and JSON intensities.
#
# @param[in] class_id Id of the class whose intensity you want to retrieve.
#
# @returns the intensity corresponding to the given class.
def map_class_to_intensity(self, class_id):
return {v: k for k, v in self._class_map.items()}[class_id]
#
# @brief Retrieves a normalised probability map for a particular class.
#
# @param[in] class_id Id of the class whose probability map you want to retrieve.
#
# @returns an array_like probability map, shape (h, w).
def softmax_predicted_map(self, class_id):
        assert(self._predicted_map is not None)
pmap = np.exp(self._predicted_map - np.amax(self._predicted_map, axis = 0))
pmap /= np.sum(pmap, axis = 0)
return pmap[class_id, ...]
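    #
    # The returned map is the softmax over the class axis, computed in the usual numerically
    # stable way by subtracting the per-pixel maximum before exponentiating:
    # p_k = exp(s_k - max_j s_j) / sum_j exp(s_j - max_j s_j). A hypothetical call for the
    # foreground class of a binary model would be:
    #
    #   fg_prob = caffeinated_label.softmax_predicted_map(1)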
#
# @brief Converts all the feature maps to contour images.
#
# @param[in] pixel_width Thickness of the border in pixels.
#
# @returns nothing.
def convert_to_contours(self, pixel_width = 5):
new_raw_frame = np.zeros_like(self._raw_frame)
# If self._predicted_map does not exist, we create it, shape (c, h, w)
        if self._predicted_map is None:
self._predicted_map = np.zeros((self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]),
dtype=np.uint8)
for k in range(self._classes):
self._predicted_map[k,:,:][self._raw_frame == self.map_class_to_intensity(k)] = 1
# Draw contours in the new raw frame
for k in range(self._classes):
(_, cnts, _) = cv2.findContours(self._predicted_map[k], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
# cv2.drawContours(new_raw_frame, [c], -1, (self.map_class_to_intensity(k)), pixel_width)
cv2.drawContours(new_raw_frame, [c], -1, self.map_class_to_intensity(k), pixel_width)
self._raw_frame = new_raw_frame
def random_crop(self, height, width):
self._raw_frame = CaffeinatedAbstract.random_crop(self._raw_frame, height, width)
#
# @brief Calculates the number of classes in the frame, that is the quantity of unique labels.
#
# @returns an integer that indicates the number of different pixel labels.
@property
def classes(self):
return self._classes
# return np.unique(self._raw_frame).shape[0]
#
# @returns the unnormalised predicted map for all the classes (class_id, height, width).
#
@property
def predicted_map(self):
return self._predicted_map
@property
def class_map(self):
return self._class_map
#
# @class CaffeinatedBinaryLabel behaves as a CaffeinatedLabel but makes sure that the label images provided
# only contain two different types or labels. Furthermore, it makes them 0's
# and 1's (np.uint8) in case that they are different from these two values.
# Say that you provide an image with 0's and 255's as typical ground truth
# images, this class will make it 0's and 1's.
#
class CaffeinatedBinaryLabel(CaffeinatedLabel):
#
# @brief Stores the label and checks that both, dimensions and type, are correct for a label.
    # @details If the label provided contains more than two unique values, it is thresholded
    #          so that the stored label is strictly binary.
#
# @param[in] label_image Single channel OpenCV/Numpy image. Shape (H x W) or (H x W x C).
# @param[in] name String that identifies the label, usually a frame number.
# @param[in] thresh Values greater or equal than 'thresh' will be transformed to 'maxval'.
# @param[in] maxval Integer that will be given to those pixels higher or equal than 'thresh'.
#
# @returns nothing.
def __init__(self, label_image, name, thresh = 10, maxval = 1):
# Call CaffeinatedLabel constructor
super(self.__class__, self).__init__(label_image, name)
# Sanity check: labels that are neither (H x W) nor (H x W x C) are not supported
# dim = len(label_image.shape)
# if not (dim == 2 or dim == 3):
# raise RuntimeError('[CaffeinatedLabel] Error, the label provided has a dimension of ' + \
# str(dim) + ', which is not supported. Only (H x W) and (H x W x C) are supported.')
# If we received a colour image as a label, we convert it to grayscale
# if dim == 3:
# label_image = cv2.cvtColor(label_image, cv2.COLOR_BGR2GRAY)
# If the label image is not binary, that is, if it has more than two unique values, we thresholded it
# to ensure that the labels are binary
EXPECTED_NO_UNIQUE_VALUES = 2 # As we expect a binary label
no_unique_values = np.unique(self._raw_frame).shape[0]
if no_unique_values > EXPECTED_NO_UNIQUE_VALUES:
_, self._raw_frame = cv2.threshold(self._raw_frame, thresh, maxval, cv2.THRESH_BINARY)
#
# @returns the number of foreground pixels.
@property
def count_fg_pixels(self):
return np.count_nonzero(self._raw_frame)
#
# @returns the number of background pixels.
@property
def count_bg_pixels(self):
return np.count_nonzero(self._raw_frame == 0)
#
# @class CaffeinatedImagePair represents a pair of consecutive frames that will be used by Caffe so this
# class should provide methods to adapt the original images to the type of
# input Caffe is expecting.
#
class CaffeinatedImagePair(object):
#
# @brief Saves the colour image as an attribute of the class.
#
# @param[in] frame_bgr_prev Numpy array with the previous BGR image in the video sequence, shape (h, w, c).
    # @param[in] frame_bgr_next Numpy array with the current BGR image in the video sequence, shape (h, w, c).
#
def __init__(self, frame_bgr_prev, frame_bgr_next):
# Sanity check: both images must have 3 dimensions (h, w, c)
if len(frame_bgr_prev.shape) != 3 or len(frame_bgr_next.shape) != 3:
raise RuntimeError('[CaffeinatedImagePair.__init__] The images provided must have ' + \
'three dimensions (i.e. H x W x C).')
# Sanity check: both images must have 3 channels
if frame_bgr_prev.shape[2] != 3 or frame_bgr_next.shape[2] != 3:
raise RuntimeError('[CaffeinatedImagePair.__init__] The images provided must have three ' + \
'channels (i. e. BGR)')
# Sanity check: both images must have the same height and width
if frame_bgr_prev.shape[0] != frame_bgr_next.shape[0] or \
frame_bgr_prev.shape[1] != frame_bgr_next.shape[1]:
            raise RuntimeError('[CaffeinatedImagePair.__init__] The images provided must have the same ' + \
'dimensions (i.e. height and width).')
self._frame_bgr_prev = frame_bgr_prev
self._frame_bgr_next = frame_bgr_next
#
# @brief Builds an object of type CaffeinatedImage from file.
#
# @details Only supports 3-channel colour images. It will raise errors for images with a different
# number of channels.
#
# @param[in] path_prev Path to the previous image file.
# @param[in] path_next Path to the next image file.
#
@classmethod
def from_file(cls, path_prev, path_next):
return cls(cv2.imread(path_prev), cv2.imread(path_next))
#
# @brief Convert image to caffe test input, transposing it to the Caffe format (C x H x W) and
# subtracting the training mean.
#
# @details The mean needs to be subtracted because there is no transform_param section in the input
# layer of the test network.
#
# @param[in] mean_values Numpy ndarray with the per channel mean of the training set. Shape (channels,).
#
# @returns an image ready to be processed by Caffe.
def convert_to_caffe_input(self, mean_values):
no_mean_values = mean_values.shape[0]
# Sanity check: the mean values should be equal to the number of channels of the input image
if no_mean_values != 6:
raise ValueError('[CaffeinatedImagePair.convert_to_caffe_input()] Error, six means are required.')
# Subtract mean values from previous frame
norm_prev = self._frame_bgr_prev.astype(np.float32) - mean_values[:3]
# Subtract mean values from next frame
norm_next = self._frame_bgr_next.astype(np.float32) - mean_values[3:]
# Sanity checks: both images must have the same shape and be of the same datatype
assert(norm_prev.shape[0] == norm_next.shape[0])
assert(norm_prev.shape[1] == norm_next.shape[1])
assert(norm_prev.shape[2] == norm_next.shape[2])
assert(norm_prev.dtype == norm_next.dtype)
# Combine both images in a 6-channel image
combined_image = np.empty((norm_prev.shape[0], norm_prev.shape[1], 6), dtype = norm_prev.dtype)
combined_image[:,:, 0:3] = norm_prev
combined_image[:,:, 3:6] = norm_next
# Transpose to channel-first Caffe style
combined_transposed = np.transpose(combined_image, (2, 0, 1))
return combined_transposed
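    #
    # Usage sketch (hypothetical means): six per-channel means are required, the first three
    # for the previous frame and the last three for the next one; the result is a (6, H, W)
    # float32 blob with both normalised frames stacked channel-wise:
    #
    #   means = np.array([104.0, 117.0, 123.0, 104.0, 117.0, 123.0], dtype = np.float32)
    #   blob = caffeinated_pair.convert_to_caffe_input(means)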
#
# @brief Converts BGR image to a Caffe datum with shape (C x H x W).
#
# @details The training mean is not subtracted from the image because Caffe does this automatically for
# the data layer used for training (see the transform_param section of the 'data' layer in the
# training prototxt).
#
# @returns the Caffe datum serialised as a string.
@property
def serialise_to_string(self):
# Sanity checks: both images must have the same shape and be of the same datatype
import caffe
assert(self._frame_bgr_prev.shape[0] == self._frame_bgr_next.shape[0])
assert(self._frame_bgr_prev.shape[1] == self._frame_bgr_next.shape[1])
assert(self._frame_bgr_prev.shape[2] == self._frame_bgr_next.shape[2])
assert(self._frame_bgr_prev.dtype == self._frame_bgr_next.dtype)
# Combine the two images in a single 6-channel image
channels = 6
combined_image = np.empty((self._frame_bgr_prev.shape[0], self._frame_bgr_prev.shape[1], channels), \
dtype = self._frame_bgr_prev.dtype)
combined_image[:,:, 0:3] = self._frame_bgr_prev
combined_image[:,:, 3:6] = self._frame_bgr_next
caffe_image = combined_image.astype(np.float32)
# Convert image to Caffe datum
datum = caffe.proto.caffe_pb2.Datum()
datum.height, datum.width, _ = caffe_image.shape
datum.channels = channels
datum.data = caffe_image.tostring()
return datum.SerializeToString()
#
# @returns the height of the image.
@property
def height(self):
return self._frame_bgr_prev.shape[0]
#
# @returns the width of the image.
@property
def width(self):
return self._frame_bgr_prev.shape[1]
#
# @class CaffeinatedImagePrevSeg represents a BGR image with a fourth channel that contains the segmentation of the
# previous frame in the video sequence.
#
class CaffeinatedImagePrevSeg(object):
#
# @brief Saves the colour image and the previous segmentation as attributes of the class.
#
# @param[in] prev_seg Numpy array with the predicted segmentation of the previous frame in the sequence,
# shape (h, w, c).
# @param[in] frame_bgr Numpy array with a BGR image, shape (h, w, c).
#
def __init__(self, prev_seg, frame_bgr):
# Sanity check: the image must have three dimensions (h, w, c) and three channels (c = 3)
if len(frame_bgr.shape) != 3 or frame_bgr.shape[2] != 3:
raise RuntimeError('[CaffeinatedImagePlusPrevSeg.__init__] Error, the image provided must ' + \
' have three dimensions (i.e. H x W x 3).')
# Sanity check: the previous mask must have a dimension of two
if len(prev_seg.shape) != 2:
raise RuntimeError('[CaffeinatedImagePlusPrevSeg.__init__] Error, the previous mask must have ' + \
'two dimensions.')
# Sanity check: the frame and the previous mask must have the same dimensions
if frame_bgr.shape[0] != prev_seg.shape[0] or frame_bgr.shape[1] != prev_seg.shape[1]:
raise RuntimeError('[CaffeinatedImagePlusPrevSeg.__init__] Error, the current image and the ' + \
'previous segmentation must have the same height and width.')
self._prev_seg = prev_seg
self._frame_bgr = frame_bgr
#
# @brief Builds an object of type CaffeinatedImage from file.
#
# @details Only supports 3-channel colour images. It will raise errors for images with a different
# number of channels.
#
# @param[in] path Path to the image file.
@classmethod
def from_file(cls, path_prev_seg, path_frame_bgr):
caffeinated_prev_label = CaffeinatedBinaryLabel.from_file(path_prev_seg)
return cls(caffeinated_prev_label.single_channel_label_copy(), cv2.imread(path_frame_bgr))
#
# @brief Convert image to caffe test input, transposing it to the Caffe format (C x H x W) and
# subtracting the training mean.
#
# @details The mean needs to be subtracted because there is no transform_param section in the input
# layer of the test network.
#
# @param[in] mean_values Numpy ndarray with the per channel mean of the training set. Shape (channels,).
#
# @returns an image ready to be processed by Caffe.
def convert_to_caffe_input(self, mean_values):
colour_channels = 3
no_mean_values = mean_values.shape[0]
# Sanity check: the mean values should be equal to the number of channels of the input image
if no_mean_values != colour_channels:
raise ValueError('[CaffeinatedImagePlusPrevSeg.convert_to_caffe_input] Error, three means are ' + \
'required.')
# Subtract mean values from the current frame
norm_frame_bgr = self._frame_bgr.astype(np.float32) - mean_values
# Convert previous segmentation to float
norm_prev_seg = self._prev_seg.astype(np.float32)
# Sanity check: the current normalised image and the segmentation mask must have the same shape and
# datatype
total_channels = colour_channels + 1
assert(norm_frame_bgr.shape[0] == norm_prev_seg.shape[0])
assert(norm_frame_bgr.shape[1] == norm_prev_seg.shape[1])
assert(norm_frame_bgr.shape[2] == colour_channels)
assert(norm_frame_bgr.dtype == norm_prev_seg.dtype)
# Combine the current frame with the previous segmentation in a 4-channel image
combined_image = np.empty((norm_frame_bgr.shape[0], norm_frame_bgr.shape[1], total_channels),
dtype = norm_frame_bgr.dtype)
combined_image[:,:, :colour_channels] = norm_frame_bgr
combined_image[:,:, colour_channels] = norm_prev_seg
# Transpose to channel-first Caffe style
combined_transposed = np.transpose(combined_image, (2, 0, 1))
return combined_transposed
#
# @brief Converts BGR image to a Caffe datum with shape (C x H x W).
#
# @details The training mean is not subtracted from the image because Caffe does this automatically for
# the data layer used for training (see the transform_param section of the 'data' layer in the
# training prototxt).
#
# @returns the Caffe datum serialised as a string.
@property
def serialise_to_string(self):
# Sanity checks: both images must have the same shape and be of the same datatype
import caffe
assert(self._frame_bgr.shape[0] == self._prev_seg.shape[0])
assert(self._frame_bgr.shape[1] == self._prev_seg.shape[1])
assert(self._frame_bgr.dtype == self._prev_seg.dtype)
# Combine the current image and the previous segmentation in a single 4-channel image
colour_channels = 3
total_channels = colour_channels + 1
combined_image = np.empty((self._frame_bgr.shape[0], self._frame_bgr.shape[1], total_channels), \
dtype = self._frame_bgr.dtype)
combined_image[:,:, :colour_channels] = self._frame_bgr
combined_image[:,:, colour_channels] = self._prev_seg
caffe_image = combined_image.astype(np.float32)
# Convert image to Caffe datum
datum = caffe.proto.caffe_pb2.Datum()
datum.height, datum.width, _ = caffe_image.shape
datum.channels = total_channels
datum.data = caffe_image.tostring()
return datum.SerializeToString()
#
# @returns the height of the image.
@property
def height(self):
return self._frame_bgr.shape[0]
#
# @returns the width of the image.
@property
def width(self):
return self._frame_bgr.shape[1]
#
# @brief Convert a binary probability map into a beautiful image.
#
# @param[in] probmap 2D floating point probability map, shape (height, width).
#
# @returns a fancy BGR image.
def make_it_pretty(probmap, vmin = 0, vmax = 1, colourmap = 'plasma', eps = 1e-3):
assert(len(probmap.shape) == 2)
assert(np.max(probmap) < vmax + eps)
assert(np.min(probmap) > vmin - eps)
height = probmap.shape[0]
width = probmap.shape[1]
# Create figure without axes
fig = plt.figure(frameon = False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
# Plot figure
    plt.imshow(probmap, cmap = colourmap, vmin = vmin, vmax = vmax) # vmin/vmax adjust thresholds
fig.canvas.draw()
# Convert plot to numpy array
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype = np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
# Remove left/right borders
left_right_offset = 0
i = 0
left_intensity = data[0, left_right_offset, 0]
right_intensity = data[0, -1, 0]
min_intensity = 255
# Assert that the values for all the rows are equal for the columns 'offset' and '-offset'
left_side_equal = True if np.unique(data[:, left_right_offset, 0]).shape[0] == 1 else False
right_side_equal = True if np.unique(data[:, -left_right_offset, 0]).shape[0] == 1 else False
while left_intensity == right_intensity and left_intensity >= min_intensity and left_side_equal and right_side_equal:
left_right_offset += 1
left_intensity = data[0, left_right_offset, 0]
right_intensity = data[0, -left_right_offset - 1, 0]
left_side_equal = True if np.unique(data[:, left_right_offset, 0]).shape[0] == 1 else False
right_side_equal = True if np.unique(data[:, -left_right_offset, 0]).shape[0] == 1 else False
# Remove top/bottom borders
top_bottom_offset = 0
i = 0
top_intensity = data[top_bottom_offset, 0, 0]
bottom_intensity = data[-1, 0, 0]
min_intensity = 255
    # Assert that the values for all the columns are equal for the rows 'offset' and '-offset'
top_side_equal = True if np.unique(data[top_bottom_offset,:, 0]).shape[0] == 1 else False
bottom_side_equal = True if np.unique(data[-top_bottom_offset,:, 0]).shape[0] == 1 else False
while top_intensity == bottom_intensity and top_intensity >= min_intensity and top_side_equal and bottom_side_equal:
top_bottom_offset += 1
top_intensity = data[top_bottom_offset, 0, 0]
bottom_intensity = data[-top_bottom_offset - 1, 0, 0]
top_side_equal = True if np.unique(data[top_bottom_offset,:, 0]).shape[0] == 1 else False
bottom_side_equal = True if np.unique(data[-top_bottom_offset,:, 0]).shape[0] == 1 else False
# Note: 1 is added to 'left_right_offset' because matplotlib tends to leave a border on the left one
# pixel thicker than on the right
cropped_image = data[top_bottom_offset:data.shape[0] - top_bottom_offset,
left_right_offset + 1:data.shape[1] - left_right_offset]
# Resize to original size
resized_image = cv2.resize(cropped_image, (width, height))
assert(resized_image.shape[0] == height)
assert(resized_image.shape[1] == width)
assert(resized_image.shape[2] == 3)
# Convert RGB to BGR
final_image = cv2.cvtColor(resized_image, cv2.COLOR_RGB2BGR)
return final_image
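#
# Usage sketch (hypothetical): render a (h, w) floating point probability map in [0, 1] as
# a BGR heat map of the same size, e.g. to save next to the corresponding input frame:
#
#   pretty = make_it_pretty(fg_prob, vmin = 0, vmax = 1, colourmap = 'plasma')
#   cv2.imwrite('probmap_vis.png', pretty)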
# This module cannot be executed as a script because it is not a script :)
if __name__ == "__main__":
    print('Error, this module is not supposed to be executed by itself.', file=sys.stderr)
sys.exit(1)
|
[
"numpy.sum",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"common.randbin",
"cv2.imencode",
"cv2.filter2D",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"cv2.imwrite",
"numpy.max",
"cv2.LUT",
"cv2.minEnclosingCircle",
"numpy.flipud",
"numpy.min",
"cv2.createCLAHE",
"numpy.squeeze",
"sys.exit",
"tempfile.gettempdir",
"random.choice",
"common.rm",
"numpy.nonzero",
"numpy.array",
"numpy.abs",
"cv2.getRotationMatrix2D",
"numpy.unique",
"numpy.meshgrid",
"common.shell",
"numpy.transpose",
"numpy.dstack",
"numpy.stack",
"cv2.distanceTransform",
"numpy.expand_dims",
"numpy.eye",
"cv2.GaussianBlur",
"numpy.empty",
"numpy.clip",
"cv2.warpAffine",
"numpy.mean",
"numpy.sin",
"numpy.ndarray",
"random.randint",
"cv2.dilate",
"numpy.linspace",
"cv2.drawContours",
"cv2.resize",
"cv2.Canny",
"numpy.dot",
"cv2.Sobel",
"numpy.count_nonzero",
"common.gen_rand_str",
"numpy.zeros",
"numpy.amax",
"numpy.array_equal",
"cv2.findContours",
"numpy.random.seed",
"random.shuffle",
"numpy.floor",
"numpy.random.randint",
"numpy.random.normal",
"common.deg_to_rad",
"numpy.linalg.solve",
"numpy.round",
"numpy.full_like",
"numpy.zeros_like",
"caffe.proto.caffe_pb2.Datum",
"numpy.random.choice",
"numpy.random.shuffle",
"common.path_exists",
"numpy.median",
"numpy.fliplr",
"numpy.cos",
"random.uniform",
"cv2.threshold",
"numpy.float32",
"cv2.imread",
"numpy.sqrt"
] |
[((533, 553), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (547, 553), True, 'import numpy as np\n'), ((562, 587), 'numpy.arange', 'np.arange', (['(256)'], {'dtype': 'int'}), '(256, dtype=int)\n', (571, 587), True, 'import numpy as np\n'), ((594, 614), 'numpy.random.shuffle', 'np.random.shuffle', (['p'], {}), '(p)\n', (611, 614), True, 'import numpy as np\n'), ((1483, 1527), 'numpy.array', 'np.array', (['[[0, 1], [0, -1], [1, 0], [-1, 0]]'], {}), '([[0, 1], [0, -1], [1, 0], [-1, 0]])\n', (1491, 1527), True, 'import numpy as np\n'), ((1996, 2041), 'numpy.linspace', 'np.linspace', (['(0)', 'scale', 'height'], {'endpoint': '(False)'}), '(0, scale, height, endpoint=False)\n', (2007, 2041), True, 'import numpy as np\n'), ((2056, 2100), 'numpy.linspace', 'np.linspace', (['(0)', 'scale', 'width'], {'endpoint': '(False)'}), '(0, scale, width, endpoint=False)\n', (2067, 2100), True, 'import numpy as np\n'), ((2114, 2139), 'numpy.meshgrid', 'np.meshgrid', (['lin_x', 'lin_y'], {}), '(lin_x, lin_y)\n', (2125, 2139), True, 'import numpy as np\n'), ((2183, 2194), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (2189, 2194), True, 'import numpy as np\n'), ((2209, 2220), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (2215, 2220), True, 'import numpy as np\n'), ((2611, 2621), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2618, 2621), True, 'import numpy as np\n'), ((2632, 2642), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (2639, 2642), True, 'import numpy as np\n'), ((2902, 2915), 'numpy.sum', 'np.sum', (['(u * v)'], {}), '(u * v)\n', (2908, 2915), True, 'import numpy as np\n'), ((2925, 2939), 'numpy.sum', 'np.sum', (['(u ** 2)'], {}), '(u ** 2)\n', (2931, 2939), True, 'import numpy as np\n'), ((2949, 2963), 'numpy.sum', 'np.sum', (['(v ** 2)'], {}), '(v ** 2)\n', (2955, 2963), True, 'import numpy as np\n'), ((2973, 2991), 'numpy.sum', 'np.sum', (['(u ** 2 * v)'], {}), '(u ** 2 * v)\n', (2979, 2991), True, 'import numpy as np\n'), ((3001, 3019), 'numpy.sum', 'np.sum', (['(u * v ** 2)'], {}), '(u * v ** 2)\n', (3007, 3019), True, 'import numpy as np\n'), ((3029, 3043), 'numpy.sum', 'np.sum', (['(u ** 3)'], {}), '(u ** 3)\n', (3035, 3043), True, 'import numpy as np\n'), ((3053, 3067), 'numpy.sum', 'np.sum', (['(v ** 3)'], {}), '(v ** 3)\n', (3059, 3067), True, 'import numpy as np\n'), ((3107, 3141), 'numpy.array', 'np.array', (['[[Suu, Suv], [Suv, Svv]]'], {}), '([[Suu, Suv], [Suv, Svv]])\n', (3115, 3141), True, 'import numpy as np\n'), ((3209, 3230), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (3224, 3230), True, 'import numpy as np\n'), ((3353, 3395), 'numpy.sqrt', 'np.sqrt', (['((x - xc_1) ** 2 + (y - yc_1) ** 2)'], {}), '((x - xc_1) ** 2 + (y - yc_1) ** 2)\n', (3360, 3395), True, 'import numpy as np\n'), ((3412, 3425), 'numpy.mean', 'np.mean', (['Ri_1'], {}), '(Ri_1)\n', (3419, 3425), True, 'import numpy as np\n'), ((3442, 3467), 'numpy.sum', 'np.sum', (['((Ri_1 - R_1) ** 2)'], {}), '((Ri_1 - R_1) ** 2)\n', (3448, 3467), True, 'import numpy as np\n'), ((3482, 3517), 'numpy.sum', 'np.sum', (['((Ri_1 ** 2 - R_1 ** 2) ** 2)'], {}), '((Ri_1 ** 2 - R_1 ** 2) ** 2)\n', (3488, 3517), True, 'import numpy as np\n'), ((3641, 3657), 'numpy.median', 'np.median', (['image'], {}), '(image)\n', (3650, 3657), True, 'import numpy as np\n'), ((3758, 3788), 'cv2.Canny', 'cv2.Canny', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (3767, 3788), False, 'import cv2\n'), ((81954, 81979), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'frameon': 
'(False)'}), '(frameon=False)\n', (81964, 81979), True, 'import matplotlib.pyplot as plt\n'), ((82060, 82117), 'matplotlib.pyplot.imshow', 'plt.imshow', (['probmap'], {'cmap': 'colourmap', 'vmin': 'vmin', 'vmax': 'vmax'}), '(probmap, cmap=colourmap, vmin=vmin, vmax=vmax)\n', (82070, 82117), True, 'import matplotlib.pyplot as plt\n'), ((84579, 84621), 'cv2.resize', 'cv2.resize', (['cropped_image', '(width, height)'], {}), '(cropped_image, (width, height))\n', (84589, 84621), False, 'import cv2\n'), ((84795, 84841), 'cv2.cvtColor', 'cv2.cvtColor', (['resized_image', 'cv2.COLOR_RGB2BGR'], {}), '(resized_image, cv2.COLOR_RGB2BGR)\n', (84807, 84841), False, 'import cv2\n'), ((85061, 85072), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (85069, 85072), False, 'import sys\n'), ((3153, 3189), 'numpy.array', 'np.array', (['[Suuu + Suvv, Svvv + Suuv]'], {}), '([Suuu + Suvv, Svvv + Suuv])\n', (3161, 3189), True, 'import numpy as np\n'), ((5475, 5499), 'common.path_exists', 'common.path_exists', (['path'], {}), '(path)\n', (5493, 5499), False, 'import common\n'), ((7125, 7144), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (7138, 7144), True, 'import numpy as np\n'), ((7164, 7216), 'cv2.drawContours', 'cv2.drawContours', (['new_mask', 'cnts', '(-1)', '(255)', 'thickness'], {}), '(new_mask, cnts, -1, 255, thickness)\n', (7180, 7216), False, 'import cv2\n'), ((7496, 7531), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2Lab'], {}), '(im, cv2.COLOR_BGR2Lab)\n', (7508, 7531), False, 'import cv2\n'), ((7555, 7613), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': 'clip_limit', 'tileGridSize': '(8, 8)'}), '(clipLimit=clip_limit, tileGridSize=(8, 8))\n', (7570, 7613), False, 'import cv2\n'), ((7687, 7723), 'cv2.cvtColor', 'cv2.cvtColor', (['lab', 'cv2.COLOR_Lab2BGR'], {}), '(lab, cv2.COLOR_Lab2BGR)\n', (7699, 7723), False, 'import cv2\n'), ((7854, 7867), 'numpy.fliplr', 'np.fliplr', (['im'], {}), '(im)\n', (7863, 7867), True, 'import numpy as np\n'), ((7995, 8008), 'numpy.flipud', 'np.flipud', (['im'], {}), '(im)\n', (8004, 8008), True, 'import numpy as np\n'), ((10445, 10481), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (10459, 10481), False, 'import random\n'), ((14112, 14148), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (14126, 14148), False, 'import random\n'), ((18588, 18604), 'common.randbin', 'common.randbin', ([], {}), '()\n', (18602, 18604), False, 'import common\n'), ((19116, 19132), 'common.randbin', 'common.randbin', ([], {}), '()\n', (19130, 19132), False, 'import common\n'), ((20010, 20028), 'numpy.zeros', 'np.zeros', (['(ks, ks)'], {}), '((ks, ks))\n', (20018, 20028), True, 'import numpy as np\n'), ((20681, 20709), 'cv2.filter2D', 'cv2.filter2D', (['im', '(-1)', 'kernel'], {}), '(im, -1, kernel)\n', (20693, 20709), False, 'import cv2\n'), ((21694, 21720), 'common.deg_to_rad', 'common.deg_to_rad', (['phi_deg'], {}), '(phi_deg)\n', (21711, 21720), False, 'import common\n'), ((21833, 21842), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (21839, 21842), True, 'import numpy as np\n'), ((22023, 22097), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'mat', '(im.shape[1], im.shape[0])'], {'flags': 'cv2.INTER_CUBIC'}), '(im, mat, (im.shape[1], im.shape[0]), flags=cv2.INTER_CUBIC)\n', (22037, 22097), False, 'import cv2\n'), ((22122, 22200), 'cv2.warpAffine', 'cv2.warpAffine', (['mask', 'mat', '(im.shape[1], im.shape[0])'], {'flags': 'cv2.INTER_NEAREST'}), '(mask, 
mat, (im.shape[1], im.shape[0]), flags=cv2.INTER_NEAREST)\n', (22136, 22200), False, 'import cv2\n'), ((23613, 23688), 'numpy.arange', 'np.arange', (['intensity_start', '(intensity_stop + intensity_step)', 'intensity_step'], {}), '(intensity_start, intensity_stop + intensity_step, intensity_step)\n', (23622, 23688), True, 'import numpy as np\n'), ((23713, 23748), 'numpy.random.choice', 'np.random.choice', (['intensity_options'], {}), '(intensity_options)\n', (23729, 23748), True, 'import numpy as np\n'), ((23845, 23904), 'numpy.arange', 'np.arange', (['shape_start', '(shape_stop + shape_step)', 'shape_step'], {}), '(shape_start, shape_stop + shape_step, shape_step)\n', (23854, 23904), True, 'import numpy as np\n'), ((23928, 23959), 'numpy.random.choice', 'np.random.choice', (['shape_options'], {}), '(shape_options)\n', (23944, 23959), True, 'import numpy as np\n'), ((24140, 24163), 'numpy.dstack', 'np.dstack', (['(pn, pn, pn)'], {}), '((pn, pn, pn))\n', (24149, 24163), True, 'import numpy as np\n'), ((24951, 24986), 'random.choice', 'random.choice', (['[0.5, 0.6, 0.7, 0.8]'], {}), '([0.5, 0.6, 0.7, 0.8])\n', (24964, 24986), False, 'import random\n'), ((25010, 25070), 'random.choice', 'random.choice', (['[1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]'], {}), '([1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0])\n', (25023, 25070), False, 'import random\n'), ((25167, 25226), 'numpy.arange', 'np.arange', (['shape_start', '(shape_stop + shape_step)', 'shape_step'], {}), '(shape_start, shape_stop + shape_step, shape_step)\n', (25176, 25226), True, 'import numpy as np\n'), ((25250, 25281), 'numpy.random.choice', 'np.random.choice', (['shape_options'], {}), '(shape_options)\n', (25266, 25281), True, 'import numpy as np\n'), ((25469, 25492), 'numpy.dstack', 'np.dstack', (['(pn, pn, pn)'], {}), '((pn, pn, pn))\n', (25478, 25492), True, 'import numpy as np\n'), ((26052, 26127), 'numpy.arange', 'np.arange', (['intensity_start', '(intensity_stop + intensity_step)', 'intensity_step'], {}), '(intensity_start, intensity_stop + intensity_step, intensity_step)\n', (26061, 26127), True, 'import numpy as np\n'), ((26164, 26199), 'numpy.random.choice', 'np.random.choice', (['intensity_options'], {}), '(intensity_options)\n', (26180, 26199), True, 'import numpy as np\n'), ((26902, 26987), 'random.choice', 'random.choice', (['[0.5, 0.6, 0.7, 0.8, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]'], {}), '([0.5, 0.6, 0.7, 0.8, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]\n )\n', (26915, 26987), False, 'import random\n'), ((27680, 27706), 'numpy.random.randint', 'np.random.randint', (['max_mag'], {}), '(max_mag)\n', (27697, 27706), True, 'import numpy as np\n'), ((27725, 27751), 'numpy.random.randint', 'np.random.randint', (['max_ang'], {}), '(max_ang)\n', (27742, 27751), True, 'import numpy as np\n'), ((27773, 27789), 'common.randbin', 'common.randbin', ([], {}), '()\n', (27787, 27789), False, 'import common\n'), ((28557, 28573), 'common.randbin', 'common.randbin', ([], {}), '()\n', (28571, 28573), False, 'import common\n'), ((29264, 29296), 'random.uniform', 'random.uniform', (['min_ang', 'max_ang'], {}), '(min_ang, max_ang)\n', (29278, 29296), False, 'import random\n'), ((30334, 30386), 'cv2.resize', 'cv2.resize', (['im', '(new_w, new_h)'], {'interpolation': 'interp'}), '(im, (new_w, new_h), interpolation=interp)\n', (30344, 30386), False, 'import cv2\n'), ((31050, 31102), 'cv2.resize', 'cv2.resize', (['im', '(new_w, new_h)'], {'interpolation': 'interp'}), '(im, (new_w, new_h), interpolation=interp)\n', (31060, 31102), 
False, 'import cv2\n'), ((32298, 32350), 'cv2.resize', 'cv2.resize', (['im', '(new_w, new_h)'], {'interpolation': 'interp'}), '(im, (new_w, new_h), interpolation=interp)\n', (32308, 32350), False, 'import cv2\n'), ((33366, 33412), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cX, cY)', '(-angle)', '(1.0)'], {}), '((cX, cY), -angle, 1.0)\n', (33389, 33412), False, 'import cv2\n'), ((33427, 33442), 'numpy.abs', 'np.abs', (['M[0, 0]'], {}), '(M[0, 0])\n', (33433, 33442), True, 'import numpy as np\n'), ((33457, 33472), 'numpy.abs', 'np.abs', (['M[0, 1]'], {}), '(M[0, 1])\n', (33463, 33472), True, 'import numpy as np\n'), ((33825, 33873), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(nW, nH)'], {'flags': 'interp'}), '(image, M, (nW, nH), flags=interp)\n', (33839, 33873), False, 'import cv2\n'), ((34385, 34433), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cm_y, cm_x)', '(-deg)', '(1.0)'], {}), '((cm_y, cm_x), -deg, 1.0)\n', (34408, 34433), False, 'import cv2\n'), ((34456, 34472), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (34464, 34472), True, 'import numpy as np\n'), ((35771, 35834), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'rot_mat', '(new_w[0], new_h[0])'], {'flags': 'interp'}), '(im, rot_mat, (new_w[0], new_h[0]), flags=interp)\n', (35785, 35834), False, 'import cv2\n'), ((37214, 37241), 'cv2.imwrite', 'cv2.imwrite', (['input_path', 'im'], {}), '(input_path, im)\n', (37225, 37241), False, 'import cv2\n'), ((37286, 37355), 'common.shell', 'common.shell', (["('ffmpeg -i ' + input_path + ' -vf yadif ' + output_path)"], {}), "('ffmpeg -i ' + input_path + ' -vf yadif ' + output_path)\n", (37298, 37355), False, 'import common\n'), ((37405, 37428), 'cv2.imread', 'cv2.imread', (['output_path'], {}), '(output_path)\n', (37415, 37428), False, 'import cv2\n'), ((37483, 37504), 'common.rm', 'common.rm', (['input_path'], {}), '(input_path)\n', (37492, 37504), False, 'import common\n'), ((37513, 37535), 'common.rm', 'common.rm', (['output_path'], {}), '(output_path)\n', (37522, 37535), False, 'import common\n'), ((37635, 37672), 'numpy.random.normal', 'np.random.normal', (['mean', 'std', 'im.shape'], {}), '(mean, std, im.shape)\n', (37651, 37672), True, 'import numpy as np\n'), ((38036, 38054), 'cv2.LUT', 'cv2.LUT', (['im', 'table'], {}), '(im, table)\n', (38043, 38054), False, 'import cv2\n'), ((38871, 38907), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (38883, 38907), False, 'import cv2\n'), ((38925, 38950), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (38932, 38950), True, 'import numpy as np\n'), ((38970, 39008), 'cv2.dilate', 'cv2.dilate', (['gray', 'kernel'], {'iterations': '(1)'}), '(gray, kernel, iterations=1)\n', (38980, 39008), False, 'import cv2\n'), ((39031, 39099), 'cv2.threshold', 'cv2.threshold', (['dilation', 'max_black_intensity', '(255)', 'cv2.THRESH_BINARY'], {}), '(dilation, max_black_intensity, 255, cv2.THRESH_BINARY)\n', (39044, 39099), False, 'import cv2\n'), ((39175, 39243), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (39191, 39243), False, 'import cv2\n'), ((39324, 39351), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['cnt'], {}), '(cnt)\n', (39346, 39351), False, 'import cv2\n'), ((43182, 43223), 'numpy.ones', 'np.ones', (['(mask_size, mask_size)', 'np.uint8'], {}), '((mask_size, mask_size), np.uint8)\n', (43189, 
43223), True, 'import numpy as np\n'), ((43242, 43288), 'cv2.dilate', 'cv2.dilate', (['scaled_sobel', 'kernel'], {'iterations': '(1)'}), '(scaled_sobel, kernel, iterations=1)\n', (43252, 43288), False, 'import cv2\n'), ((43330, 43379), 'cv2.threshold', 'cv2.threshold', (['dilated', '(1)', '(255)', 'cv2.THRESH_BINARY'], {}), '(dilated, 1, 255, cv2.THRESH_BINARY)\n', (43343, 43379), False, 'import cv2\n'), ((49470, 49513), 'cv2.resize', 'cv2.resize', (['self._raw_frame', '(new_w, new_h)'], {}), '(self._raw_frame, (new_w, new_h))\n', (49480, 49513), False, 'import cv2\n'), ((50034, 50107), 'numpy.zeros', 'np.zeros', (['(new_h, new_w, self._raw_frame.shape[2])', 'self._raw_frame.dtype'], {}), '((new_h, new_w, self._raw_frame.shape[2]), self._raw_frame.dtype)\n', (50042, 50107), True, 'import numpy as np\n'), ((50675, 50720), 'cv2.resize', 'cv2.resize', (['self._raw_frame', '(temp_w, temp_h)'], {}), '(self._raw_frame, (temp_w, temp_h))\n', (50685, 50720), False, 'import cv2\n'), ((51230, 51259), 'caffe.proto.caffe_pb2.Datum', 'caffe.proto.caffe_pb2.Datum', ([], {}), '()\n', (51257, 51259), False, 'import caffe\n'), ((52830, 52866), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (52842, 52866), False, 'import cv2\n'), ((52884, 52924), 'cv2.cvtColor', 'cv2.cvtColor', (['new_im', 'cv2.COLOR_GRAY2BGR'], {}), '(new_im, cv2.COLOR_GRAY2BGR)\n', (52896, 52924), False, 'import cv2\n'), ((52941, 53008), 'numpy.random.randint', 'np.random.randint', (['(-noise_delta)', '(noise_delta + 1)'], {'size': 'new_im.shape'}), '(-noise_delta, noise_delta + 1, size=new_im.shape)\n', (52958, 53008), True, 'import numpy as np\n'), ((59087, 59107), 'numpy.unique', 'np.unique', (['raw_label'], {}), '(raw_label)\n', (59096, 59107), True, 'import numpy as np\n'), ((61355, 61451), 'numpy.ndarray', 'np.ndarray', (['(self._raw_frame.shape[0], self._raw_frame.shape[1], cn)', 'self._raw_frame.dtype'], {}), '((self._raw_frame.shape[0], self._raw_frame.shape[1], cn), self.\n _raw_frame.dtype)\n', (61365, 61451), True, 'import numpy as np\n'), ((61934, 61963), 'caffe.proto.caffe_pb2.Datum', 'caffe.proto.caffe_pb2.Datum', ([], {}), '()\n', (61961, 61963), False, 'import caffe\n'), ((62285, 62383), 'numpy.zeros', 'np.zeros', (['[self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]]'], {'dtype': 'np.uint8'}), '([self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]\n ], dtype=np.uint8)\n', (62293, 62383), True, 'import numpy as np\n'), ((63521, 63586), 'cv2.threshold', 'cv2.threshold', (['self._raw_frame', 'thresh', 'maxval', 'cv2.THRESH_BINARY'], {}), '(self._raw_frame, thresh, maxval, cv2.THRESH_BINARY)\n', (63534, 63586), False, 'import cv2\n'), ((65049, 65069), 'numpy.sum', 'np.sum', (['pmap'], {'axis': '(0)'}), '(pmap, axis=0)\n', (65055, 65069), True, 'import numpy as np\n'), ((65354, 65384), 'numpy.zeros_like', 'np.zeros_like', (['self._raw_frame'], {}), '(self._raw_frame)\n', (65367, 65384), True, 'import numpy as np\n'), ((69472, 69505), 'numpy.count_nonzero', 'np.count_nonzero', (['self._raw_frame'], {}), '(self._raw_frame)\n', (69488, 69505), True, 'import numpy as np\n'), ((69621, 69659), 'numpy.count_nonzero', 'np.count_nonzero', (['(self._raw_frame == 0)'], {}), '(self._raw_frame == 0)\n', (69637, 69659), True, 'import numpy as np\n'), ((73313, 73389), 'numpy.empty', 'np.empty', (['(norm_prev.shape[0], norm_prev.shape[1], 6)'], {'dtype': 'norm_prev.dtype'}), '((norm_prev.shape[0], norm_prev.shape[1], 6), dtype=norm_prev.dtype)\n', (73321, 
73389), True, 'import numpy as np\n'), ((73562, 73601), 'numpy.transpose', 'np.transpose', (['combined_image', '(2, 0, 1)'], {}), '(combined_image, (2, 0, 1))\n', (73574, 73601), True, 'import numpy as np\n'), ((74617, 74737), 'numpy.empty', 'np.empty', (['(self._frame_bgr_prev.shape[0], self._frame_bgr_prev.shape[1], channels)'], {'dtype': 'self._frame_bgr_prev.dtype'}), '((self._frame_bgr_prev.shape[0], self._frame_bgr_prev.shape[1],\n channels), dtype=self._frame_bgr_prev.dtype)\n', (74625, 74737), True, 'import numpy as np\n'), ((74974, 75003), 'caffe.proto.caffe_pb2.Datum', 'caffe.proto.caffe_pb2.Datum', ([], {}), '()\n', (75001, 75003), False, 'import caffe\n'), ((79298, 79406), 'numpy.empty', 'np.empty', (['(norm_frame_bgr.shape[0], norm_frame_bgr.shape[1], total_channels)'], {'dtype': 'norm_frame_bgr.dtype'}), '((norm_frame_bgr.shape[0], norm_frame_bgr.shape[1], total_channels),\n dtype=norm_frame_bgr.dtype)\n', (79306, 79406), True, 'import numpy as np\n'), ((79621, 79660), 'numpy.transpose', 'np.transpose', (['combined_image', '(2, 0, 1)'], {}), '(combined_image, (2, 0, 1))\n', (79633, 79660), True, 'import numpy as np\n'), ((80649, 80760), 'numpy.empty', 'np.empty', (['(self._frame_bgr.shape[0], self._frame_bgr.shape[1], total_channels)'], {'dtype': 'self._frame_bgr.dtype'}), '((self._frame_bgr.shape[0], self._frame_bgr.shape[1],\n total_channels), dtype=self._frame_bgr.dtype)\n', (80657, 80760), True, 'import numpy as np\n'), ((81011, 81040), 'caffe.proto.caffe_pb2.Datum', 'caffe.proto.caffe_pb2.Datum', ([], {}), '()\n', (81038, 81040), False, 'import caffe\n'), ((81780, 81795), 'numpy.max', 'np.max', (['probmap'], {}), '(probmap)\n', (81786, 81795), True, 'import numpy as np\n'), ((81821, 81836), 'numpy.min', 'np.min', (['probmap'], {}), '(probmap)\n', (81827, 81836), True, 'import numpy as np\n'), ((623, 639), 'numpy.stack', 'np.stack', (['[p, p]'], {}), '([p, p])\n', (631, 639), True, 'import numpy as np\n'), ((2232, 2288), 'numpy.clip', 'np.clip', (['((arr - min_arr) / (max_arr - min_arr))', '(0.0)', '(1.0)'], {}), '((arr - min_arr) / (max_arr - min_arr), 0.0, 1.0)\n', (2239, 2288), True, 'import numpy as np\n'), ((5671, 5712), 'cv2.imwrite', 'cv2.imwrite', (['path', 'self._raw_frame', 'flags'], {}), '(path, self._raw_frame, flags)\n', (5682, 5712), False, 'import cv2\n'), ((5746, 5780), 'cv2.imwrite', 'cv2.imwrite', (['path', 'self._raw_frame'], {}), '(path, self._raw_frame)\n', (5757, 5780), False, 'import cv2\n'), ((8554, 8568), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (8564, 8568), True, 'import numpy as np\n'), ((9523, 9559), 'random.randint', 'random.randint', (['(0)', '(height_border - 1)'], {}), '(0, height_border - 1)\n', (9537, 9559), False, 'import random\n'), ((9604, 9639), 'random.randint', 'random.randint', (['(0)', '(width_border - 1)'], {}), '(0, width_border - 1)\n', (9618, 9639), False, 'import random\n'), ((13004, 13040), 'random.randint', 'random.randint', (['(0)', '(height_border - 1)'], {}), '(0, height_border - 1)\n', (13018, 13040), False, 'import random\n'), ((13085, 13120), 'random.randint', 'random.randint', (['(0)', '(width_border - 1)'], {}), '(0, width_border - 1)\n', (13099, 13120), False, 'import random\n'), ((20067, 20078), 'numpy.ones', 'np.ones', (['ks'], {}), '(ks)\n', (20074, 20078), True, 'import numpy as np\n'), ((21740, 21751), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (21746, 21751), True, 'import numpy as np\n'), ((21771, 21782), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (21777, 21782), True, 'import 
numpy as np\n'), ((23011, 23054), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['new_im', '(ksize, ksize)', '(0)'], {}), '(new_im, (ksize, ksize), 0)\n', (23027, 23054), False, 'import cv2\n'), ((36557, 36579), 'common.deg_to_rad', 'common.deg_to_rad', (['ang'], {}), '(ang)\n', (36574, 36579), False, 'import common\n'), ((36598, 36610), 'numpy.floor', 'np.floor', (['wr'], {}), '(wr)\n', (36606, 36610), True, 'import numpy as np\n'), ((36629, 36641), 'numpy.floor', 'np.floor', (['hr'], {}), '(hr)\n', (36637, 36641), True, 'import numpy as np\n'), ((41670, 41724), 'numpy.empty', 'np.empty', (['(height, width)'], {'dtype': 'self._raw_frame.dtype'}), '((height, width), dtype=self._raw_frame.dtype)\n', (41678, 41724), True, 'import numpy as np\n'), ((42860, 42921), 'cv2.Sobel', 'cv2.Sobel', (['self._raw_frame', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'mask_size'}), '(self._raw_frame, cv2.CV_64F, 1, 0, ksize=mask_size)\n', (42869, 42921), False, 'import cv2\n'), ((42957, 43018), 'cv2.Sobel', 'cv2.Sobel', (['self._raw_frame', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'mask_size'}), '(self._raw_frame, cv2.CV_64F, 0, 1, ksize=mask_size)\n', (42966, 43018), False, 'import cv2\n'), ((43432, 43492), 'cv2.distanceTransform', 'cv2.distanceTransform', (['(255 - thresh)', 'cv2.DIST_L2'], {'maskSize': '(0)'}), '(255 - thresh, cv2.DIST_L2, maskSize=0)\n', (43453, 43492), False, 'import cv2\n'), ((45089, 45127), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_UNCHANGED'], {}), '(path, cv2.IMREAD_UNCHANGED)\n', (45099, 45127), False, 'import cv2\n'), ((65535, 65633), 'numpy.zeros', 'np.zeros', (['(self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1])'], {'dtype': 'np.uint8'}), '((self._classes, self._raw_frame.shape[0], self._raw_frame.shape[1]\n ), dtype=np.uint8)\n', (65543, 65633), True, 'import numpy as np\n'), ((65898, 65983), 'cv2.findContours', 'cv2.findContours', (['self._predicted_map[k]', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(self._predicted_map[k], cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n', (65914, 65983), False, 'import cv2\n'), ((69291, 69356), 'cv2.threshold', 'cv2.threshold', (['self._raw_frame', 'thresh', 'maxval', 'cv2.THRESH_BINARY'], {}), '(self._raw_frame, thresh, maxval, cv2.THRESH_BINARY)\n', (69304, 69356), False, 'import cv2\n'), ((71796, 71817), 'cv2.imread', 'cv2.imread', (['path_prev'], {}), '(path_prev)\n', (71806, 71817), False, 'import cv2\n'), ((71819, 71840), 'cv2.imread', 'cv2.imread', (['path_next'], {}), '(path_next)\n', (71829, 71840), False, 'import cv2\n'), ((77584, 77610), 'cv2.imread', 'cv2.imread', (['path_frame_bgr'], {}), '(path_frame_bgr)\n', (77594, 77610), False, 'import cv2\n'), ((16746, 16776), 'numpy.full_like', 'np.full_like', (['im', 'border_value'], {}), '(im, border_value)\n', (16758, 16776), True, 'import numpy as np\n'), ((22514, 22545), 'random.shuffle', 'random.shuffle', (['lines_with_mask'], {}), '(lines_with_mask)\n', (22528, 22545), False, 'import random\n'), ((22793, 22880), 'numpy.round', 'np.round', (['(alpha * im[mask_warped > 0] + (1.0 - alpha) * im_warped[mask_warped > 0])'], {}), '(alpha * im[mask_warped > 0] + (1.0 - alpha) * im_warped[\n mask_warped > 0])\n', (22801, 22880), True, 'import numpy as np\n'), ((34668, 34687), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (34676, 34687), True, 'import numpy as np\n'), ((34717, 34740), 'numpy.array', 'np.array', (['[w - 1, 0, 1]'], {}), '([w - 1, 0, 1])\n', (34725, 34740), True, 'import numpy as np\n'), ((34770, 34793), 'numpy.array', 'np.array', (['[0, 
h - 1, 1]'], {}), '([0, h - 1, 1])\n', (34778, 34793), True, 'import numpy as np\n'), ((34823, 34850), 'numpy.array', 'np.array', (['[w - 1, h - 1, 1]'], {}), '([w - 1, h - 1, 1])\n', (34831, 34850), True, 'import numpy as np\n'), ((37054, 37075), 'common.gen_rand_str', 'common.gen_rand_str', ([], {}), '()\n', (37073, 37075), False, 'import common\n'), ((37134, 37155), 'common.gen_rand_str', 'common.gen_rand_str', ([], {}), '()\n', (37153, 37155), False, 'import common\n'), ((41833, 41918), 'numpy.empty', 'np.empty', (['(height, width, self._raw_frame.shape[2])'], {'dtype': 'self._raw_frame.dtype'}), '((height, width, self._raw_frame.shape[2]), dtype=self._raw_frame.dtype\n )\n', (41841, 41918), True, 'import numpy as np\n'), ((41954, 42020), 'numpy.empty', 'np.empty', (['(self._raw_frame.shape[2],)'], {'dtype': 'self._raw_frame.dtype'}), '((self._raw_frame.shape[2],), dtype=self._raw_frame.dtype)\n', (41962, 42020), True, 'import numpy as np\n'), ((43120, 43137), 'numpy.max', 'np.max', (['sobel_64f'], {}), '(sobel_64f)\n', (43126, 43137), True, 'import numpy as np\n'), ((43967, 44039), 'numpy.array_equal', 'np.array_equal', (['self._raw_frame[:, :, channel]', 'self._raw_frame[:, :, 0]'], {}), '(self._raw_frame[:, :, channel], self._raw_frame[:, :, 0])\n', (43981, 44039), True, 'import numpy as np\n'), ((47154, 47187), 'numpy.expand_dims', 'np.expand_dims', (['raw_frame'], {'axis': '(2)'}), '(raw_frame, axis=2)\n', (47168, 47187), True, 'import numpy as np\n'), ((52320, 52363), 'numpy.random.normal', 'np.random.normal', (['mean', 'std', '(row, col, ch)'], {}), '(mean, std, (row, col, ch))\n', (52336, 52363), True, 'import numpy as np\n'), ((53042, 53073), 'numpy.clip', 'np.clip', (['(new_im + noise)', '(0)', '(255)'], {}), '(new_im + noise, 0, 255)\n', (53049, 53073), True, 'import numpy as np\n'), ((64993, 65029), 'numpy.amax', 'np.amax', (['self._predicted_map'], {'axis': '(0)'}), '(self._predicted_map, axis=0)\n', (65000, 65029), True, 'import numpy as np\n'), ((69165, 69191), 'numpy.unique', 'np.unique', (['self._raw_frame'], {}), '(self._raw_frame)\n', (69174, 69191), True, 'import numpy as np\n'), ((11286, 11322), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (11300, 11322), False, 'import random\n'), ((11369, 11405), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (11383, 11405), False, 'import random\n'), ((15172, 15208), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (15186, 15208), False, 'import random\n'), ((15263, 15299), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (15277, 15299), False, 'import random\n'), ((17318, 17361), 'numpy.float32', 'np.float32', (['[[1, 0, 0], [0, 1, -start_row]]'], {}), '([[1, 0, 0], [0, 1, -start_row]])\n', (17328, 17361), True, 'import numpy as np\n'), ((17387, 17496), 'cv2.warpAffine', 'cv2.warpAffine', (['new_im', 'M', '(padded.shape[1], padded.shape[0])', 'interp', 'cv2.BORDER_CONSTANT', 'border_value'], {}), '(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.\n BORDER_CONSTANT, border_value)\n', (17401, 17496), False, 'import cv2\n'), ((17570, 17613), 'numpy.float32', 'np.float32', (['[[1, 0, -start_col], [0, 1, 0]]'], {}), '([[1, 0, -start_col], [0, 1, 0]])\n', (17580, 17613), True, 'import numpy as np\n'), ((17639, 17748), 'cv2.warpAffine', 'cv2.warpAffine', (['new_im', 'M', '(padded.shape[1], padded.shape[0])', 'interp', 
'cv2.BORDER_CONSTANT', 'border_value'], {}), '(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.\n BORDER_CONSTANT, border_value)\n', (17653, 17748), False, 'import cv2\n'), ((17824, 17866), 'numpy.float32', 'np.float32', (['[[1, 0, 0], [0, 1, start_row]]'], {}), '([[1, 0, 0], [0, 1, start_row]])\n', (17834, 17866), True, 'import numpy as np\n'), ((17892, 18001), 'cv2.warpAffine', 'cv2.warpAffine', (['new_im', 'M', '(padded.shape[1], padded.shape[0])', 'interp', 'cv2.BORDER_CONSTANT', 'border_value'], {}), '(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.\n BORDER_CONSTANT, border_value)\n', (17906, 18001), False, 'import cv2\n'), ((18076, 18118), 'numpy.float32', 'np.float32', (['[[1, 0, start_col], [0, 1, 0]]'], {}), '([[1, 0, start_col], [0, 1, 0]])\n', (18086, 18118), True, 'import numpy as np\n'), ((18144, 18253), 'cv2.warpAffine', 'cv2.warpAffine', (['new_im', 'M', '(padded.shape[1], padded.shape[0])', 'interp', 'cv2.BORDER_CONSTANT', 'border_value'], {}), '(new_im, M, (padded.shape[1], padded.shape[0]), interp, cv2.\n BORDER_CONSTANT, border_value)\n', (18158, 18253), False, 'import cv2\n'), ((34893, 34916), 'numpy.dot', 'np.dot', (['rot_mat_hom', 'tl'], {}), '(rot_mat_hom, tl)\n', (34899, 34916), True, 'import numpy as np\n'), ((34959, 34982), 'numpy.dot', 'np.dot', (['rot_mat_hom', 'tr'], {}), '(rot_mat_hom, tr)\n', (34965, 34982), True, 'import numpy as np\n'), ((35025, 35048), 'numpy.dot', 'np.dot', (['rot_mat_hom', 'bl'], {}), '(rot_mat_hom, bl)\n', (35031, 35048), True, 'import numpy as np\n'), ((35091, 35114), 'numpy.dot', 'np.dot', (['rot_mat_hom', 'br'], {}), '(rot_mat_hom, br)\n', (35097, 35114), True, 'import numpy as np\n'), ((37024, 37045), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (37043, 37045), False, 'import tempfile\n'), ((37104, 37125), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (37123, 37125), False, 'import tempfile\n'), ((38257, 38281), 'numpy.ones', 'np.ones', (['(height, width)'], {}), '((height, width))\n', (38264, 38281), True, 'import numpy as np\n'), ((38284, 38329), 'numpy.linspace', 'np.linspace', (['left_colour', 'right_colour', 'width'], {}), '(left_colour, right_colour, width)\n', (38295, 38329), True, 'import numpy as np\n'), ((51463, 51507), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'self._raw_frame', 'flags'], {}), "('.jpg', self._raw_frame, flags)\n", (51475, 51507), False, 'import cv2\n'), ((58059, 58123), 'numpy.array_equal', 'np.array_equal', (['label_image[:, :, channel]', 'label_image[:, :, 0]'], {}), '(label_image[:, :, channel], label_image[:, :, 0])\n', (58073, 58123), True, 'import numpy as np\n'), ((82666, 82706), 'numpy.unique', 'np.unique', (['data[:, left_right_offset, 0]'], {}), '(data[:, left_right_offset, 0])\n', (82675, 82706), True, 'import numpy as np\n'), ((82763, 82804), 'numpy.unique', 'np.unique', (['data[:, -left_right_offset, 0]'], {}), '(data[:, -left_right_offset, 0])\n', (82772, 82804), True, 'import numpy as np\n'), ((83608, 83648), 'numpy.unique', 'np.unique', (['data[top_bottom_offset, :, 0]'], {}), '(data[top_bottom_offset, :, 0])\n', (83617, 83648), True, 'import numpy as np\n'), ((83705, 83746), 'numpy.unique', 'np.unique', (['data[-top_bottom_offset, :, 0]'], {}), '(data[-top_bottom_offset, :, 0])\n', (83714, 83746), True, 'import numpy as np\n'), ((38562, 38586), 'numpy.ones', 'np.ones', (['(height, width)'], {}), '((height, width))\n', (38569, 38586), True, 'import numpy as np\n'), ((38589, 38634), 'numpy.linspace', 'np.linspace', 
(['left_colour', 'right_colour', 'width'], {}), '(left_colour, right_colour, width)\n', (38600, 38634), True, 'import numpy as np\n'), ((83134, 83174), 'numpy.unique', 'np.unique', (['data[:, left_right_offset, 0]'], {}), '(data[:, left_right_offset, 0])\n', (83143, 83174), True, 'import numpy as np\n'), ((83235, 83276), 'numpy.unique', 'np.unique', (['data[:, -left_right_offset, 0]'], {}), '(data[:, -left_right_offset, 0])\n', (83244, 83276), True, 'import numpy as np\n'), ((84073, 84113), 'numpy.unique', 'np.unique', (['data[top_bottom_offset, :, 0]'], {}), '(data[top_bottom_offset, :, 0])\n', (84082, 84113), True, 'import numpy as np\n'), ((84174, 84215), 'numpy.unique', 'np.unique', (['data[-top_bottom_offset, :, 0]'], {}), '(data[-top_bottom_offset, :, 0])\n', (84183, 84215), True, 'import numpy as np\n'), ((22351, 22374), 'numpy.nonzero', 'np.nonzero', (['mask_warped'], {}), '(mask_warped)\n', (22361, 22374), True, 'import numpy as np\n'), ((37984, 38001), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (37993, 38001), True, 'import numpy as np\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from test_utils import ReportJSON
from tensorflow.compiler.tests import xla_test
from tensorflow.python.platform import googletest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
TYPES = (np.float16, np.float32, np.int32)
TESTCASES = [{"testcase_name": np.dtype(x).name, "dtype": x} for x in TYPES]
def _get_random_input(dtype, shape):
if np.issubdtype(dtype, np.integer):
info_fn = np.iinfo
random_fn = np.random.random_integers
else:
info_fn = np.finfo
random_fn = np.random.uniform
return random_fn(info_fn(dtype).min, info_fn(dtype).max,
size=shape).astype(dtype)
class ArgMinMax(xla_test.XLATestCase, parameterized.TestCase):
@parameterized.named_parameters(*TESTCASES)
def testArgMaxBasic(self, dtype):
cfg = IPUConfig()
cfg._profiling.enable_ipu_events = True # pylint: disable=protected-access
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
def model(a):
return math_ops.argmax(a, output_type=dtypes.int32)
with self.session() as sess:
report_json = ReportJSON(self, sess)
report_json.reset()
with ops.device('cpu'):
pa = array_ops.placeholder(dtype, [3, 5, 2])
with ops.device("/device:IPU:0"):
out = model(pa)
input = _get_random_input(dtype, (3, 5, 2))
fd = {pa: input}
result = sess.run(out, fd)
self.assertAllClose(result, np.argmax(input, axis=0))
report_json.parse_log(assert_len=4)
@parameterized.named_parameters(*TESTCASES)
def testArgMaxHalf(self, dtype):
cfg = IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
def model(a):
return math_ops.argmax(a, output_type=dtypes.int32)
with self.session() as sess:
with ops.device('cpu'):
pa = array_ops.placeholder(dtype, [3, 5, 2])
with ops.device("/device:IPU:0"):
out = model(pa)
input = _get_random_input(dtype, (3, 5, 2))
fd = {pa: input}
result = sess.run(out, fd)
self.assertAllClose(result, np.argmax(input, axis=0))
@parameterized.named_parameters(*TESTCASES)
def testArgMaxMultiDimensional(self, dtype):
cfg = IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
def model(a, axis):
return math_ops.argmax(a, axis=axis, output_type=dtypes.int32)
for axis in range(6):
with self.session() as sess:
with ops.device('cpu'):
pa = array_ops.placeholder(dtype, [1, 2, 3, 4, 5, 6])
p_axis = array_ops.placeholder(np.int32, shape=())
with ops.device("/device:IPU:0"):
out = model(pa, p_axis)
input = _get_random_input(dtype, (1, 2, 3, 4, 5, 6))
fd = {pa: input, p_axis: axis}
result = sess.run(out, fd)
self.assertAllClose(result, np.argmax(input, axis=axis))
@parameterized.named_parameters(*TESTCASES)
def testArgMinBasic(self, dtype):
cfg = IPUConfig()
cfg._profiling.enable_ipu_events = True # pylint: disable=protected-access
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
def model(a):
return math_ops.argmin(a, output_type=dtypes.int32)
with self.session() as sess:
report_json = ReportJSON(self, sess)
with ops.device('cpu'):
pa = array_ops.placeholder(dtype, [3, 5, 2])
with ops.device("/device:IPU:0"):
out = model(pa)
report_json.reset()
input = _get_random_input(dtype, (3, 5, 2))
fd = {pa: input}
result = sess.run(out, fd)
self.assertAllClose(result, np.argmin(input, axis=0))
report_json.parse_log(assert_len=4)
@parameterized.named_parameters(*TESTCASES)
def testArgMinHalf(self, dtype):
cfg = IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
def model(a):
return math_ops.argmin(a, output_type=dtypes.int32)
with self.session() as sess:
with ops.device('cpu'):
pa = array_ops.placeholder(dtype, [3, 5, 2])
with ops.device("/device:IPU:0"):
out = model(pa)
input = _get_random_input(dtype, (3, 5, 2))
fd = {pa: input}
result = sess.run(out, fd)
self.assertAllClose(result, np.argmin(input, axis=0))
@parameterized.named_parameters(*TESTCASES)
def testArgMinMultiDimensional(self, dtype):
cfg = IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
def model(a, axis):
return math_ops.argmin(a, axis=axis, output_type=dtypes.int32)
for axis in range(6):
with self.session() as sess:
with ops.device('cpu'):
pa = array_ops.placeholder(dtype, [1, 2, 3, 4, 5, 6])
p_axis = array_ops.placeholder(np.int32, shape=())
with ops.device("/device:IPU:0"):
out = model(pa, p_axis)
input = _get_random_input(dtype, (1, 2, 3, 4, 5, 6))
fd = {pa: input, p_axis: axis}
result = sess.run(out, fd)
self.assertAllClose(result, np.argmin(input, axis=axis))
@parameterized.named_parameters(*TESTCASES)
def testArgMaxNegativeDim(self, dtype):
cfg = IPUConfig()
cfg._profiling.enable_ipu_events = True # pylint: disable=protected-access
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
def model(a):
return math_ops.argmax(a, axis=-1, output_type=dtypes.int32)
with self.session() as sess:
report_json = ReportJSON(self, sess)
report_json.reset()
with ops.device('cpu'):
pa = array_ops.placeholder(dtype, [3, 5, 2])
with ops.device("/device:IPU:0"):
out = model(pa)
input = _get_random_input(dtype, (3, 5, 2))
fd = {pa: input}
result = sess.run(out, fd)
self.assertAllClose(result, np.argmax(input, axis=-1))
report_json.parse_log(assert_len=4)
@parameterized.named_parameters(*TESTCASES)
def testArgMaxVector(self, dtype):
cfg = IPUConfig()
cfg._profiling.enable_ipu_events = True # pylint: disable=protected-access
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
def model(a):
return math_ops.argmax(a, axis=0, output_type=dtypes.int32)
with self.session() as sess:
report_json = ReportJSON(self, sess)
report_json.reset()
with ops.device('cpu'):
pa = array_ops.placeholder(dtype, [3])
with ops.device("/device:IPU:0"):
out = model(pa)
input = _get_random_input(dtype, (3))
fd = {pa: input}
result = sess.run(out, fd)
self.assertAllClose(result, np.argmax(input))
report_json.parse_log(assert_len=4)
if __name__ == "__main__":
os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=1 ' +
os.environ.get('TF_XLA_FLAGS', ''))
googletest.main()
|
[
"tensorflow.python.ops.math_ops.argmin",
"tensorflow.python.ipu.config.IPUConfig",
"tensorflow.python.ops.math_ops.argmax",
"numpy.argmax",
"numpy.dtype",
"numpy.argmin",
"tensorflow.python.platform.googletest.main",
"os.environ.get",
"test_utils.ReportJSON",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.array_ops.placeholder",
"absl.testing.parameterized.named_parameters",
"numpy.issubdtype"
] |
[((1394, 1426), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (1407, 1426), True, 'import numpy as np\n'), ((1730, 1772), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (1760, 1772), False, 'from absl.testing import parameterized\n'), ((2529, 2571), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (2559, 2571), False, 'from absl.testing import parameterized\n'), ((3134, 3176), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (3164, 3176), False, 'from absl.testing import parameterized\n'), ((3916, 3958), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (3946, 3958), False, 'from absl.testing import parameterized\n'), ((4716, 4758), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (4746, 4758), False, 'from absl.testing import parameterized\n'), ((5321, 5363), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (5351, 5363), False, 'from absl.testing import parameterized\n'), ((6103, 6145), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (6133, 6145), False, 'from absl.testing import parameterized\n'), ((6918, 6960), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (['*TESTCASES'], {}), '(*TESTCASES)\n', (6948, 6960), False, 'from absl.testing import parameterized\n'), ((7866, 7883), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (7881, 7883), False, 'from tensorflow.python.platform import googletest\n'), ((1819, 1830), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (1828, 1830), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((2617, 2628), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (2626, 2628), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((3234, 3245), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (3243, 3245), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((4005, 4016), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (4014, 4016), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((4804, 4815), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (4813, 4815), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((5421, 5432), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (5430, 5432), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((6198, 6209), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (6207, 6209), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((7008, 7019), 'tensorflow.python.ipu.config.IPUConfig', 'IPUConfig', ([], {}), '()\n', (7017, 7019), False, 'from tensorflow.python.ipu.config import IPUConfig\n'), ((7828, 7862), 'os.environ.get', 'os.environ.get', (['"""TF_XLA_FLAGS"""', '""""""'], {}), "('TF_XLA_FLAGS', '')\n", (7842, 7862), False, 'import os\n'), ((1304, 1315), 'numpy.dtype', 'np.dtype', (['x'], {}), '(x)\n', (1312, 1315), True, 'import numpy as np\n'), ((2017, 2061), 
'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['a'], {'output_type': 'dtypes.int32'}), '(a, output_type=dtypes.int32)\n', (2032, 2061), False, 'from tensorflow.python.ops import math_ops\n'), ((2116, 2138), 'test_utils.ReportJSON', 'ReportJSON', (['self', 'sess'], {}), '(self, sess)\n', (2126, 2138), False, 'from test_utils import ReportJSON\n'), ((2735, 2779), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['a'], {'output_type': 'dtypes.int32'}), '(a, output_type=dtypes.int32)\n', (2750, 2779), False, 'from tensorflow.python.ops import math_ops\n'), ((3358, 3413), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['a'], {'axis': 'axis', 'output_type': 'dtypes.int32'}), '(a, axis=axis, output_type=dtypes.int32)\n', (3373, 3413), False, 'from tensorflow.python.ops import math_ops\n'), ((4203, 4247), 'tensorflow.python.ops.math_ops.argmin', 'math_ops.argmin', (['a'], {'output_type': 'dtypes.int32'}), '(a, output_type=dtypes.int32)\n', (4218, 4247), False, 'from tensorflow.python.ops import math_ops\n'), ((4302, 4324), 'test_utils.ReportJSON', 'ReportJSON', (['self', 'sess'], {}), '(self, sess)\n', (4312, 4324), False, 'from test_utils import ReportJSON\n'), ((4922, 4966), 'tensorflow.python.ops.math_ops.argmin', 'math_ops.argmin', (['a'], {'output_type': 'dtypes.int32'}), '(a, output_type=dtypes.int32)\n', (4937, 4966), False, 'from tensorflow.python.ops import math_ops\n'), ((5545, 5600), 'tensorflow.python.ops.math_ops.argmin', 'math_ops.argmin', (['a'], {'axis': 'axis', 'output_type': 'dtypes.int32'}), '(a, axis=axis, output_type=dtypes.int32)\n', (5560, 5600), False, 'from tensorflow.python.ops import math_ops\n'), ((6396, 6449), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['a'], {'axis': '(-1)', 'output_type': 'dtypes.int32'}), '(a, axis=-1, output_type=dtypes.int32)\n', (6411, 6449), False, 'from tensorflow.python.ops import math_ops\n'), ((6504, 6526), 'test_utils.ReportJSON', 'ReportJSON', (['self', 'sess'], {}), '(self, sess)\n', (6514, 6526), False, 'from test_utils import ReportJSON\n'), ((7206, 7258), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['a'], {'axis': '(0)', 'output_type': 'dtypes.int32'}), '(a, axis=0, output_type=dtypes.int32)\n', (7221, 7258), False, 'from tensorflow.python.ops import math_ops\n'), ((7313, 7335), 'test_utils.ReportJSON', 'ReportJSON', (['self', 'sess'], {}), '(self, sess)\n', (7323, 7335), False, 'from test_utils import ReportJSON\n'), ((2177, 2194), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (2187, 2194), False, 'from tensorflow.python.framework import ops\n'), ((2209, 2248), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3, 5, 2]'], {}), '(dtype, [3, 5, 2])\n', (2230, 2248), False, 'from tensorflow.python.ops import array_ops\n'), ((2261, 2288), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (2271, 2288), False, 'from tensorflow.python.framework import ops\n'), ((2456, 2480), 'numpy.argmax', 'np.argmax', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (2465, 2480), True, 'import numpy as np\n'), ((2825, 2842), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (2835, 2842), False, 'from tensorflow.python.framework import ops\n'), ((2857, 2896), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3, 5, 2]'], {}), '(dtype, [3, 5, 2])\n', (2878, 2896), False, 
'from tensorflow.python.ops import array_ops\n'), ((2909, 2936), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (2919, 2936), False, 'from tensorflow.python.framework import ops\n'), ((3104, 3128), 'numpy.argmax', 'np.argmax', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (3113, 3128), True, 'import numpy as np\n'), ((4337, 4354), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (4347, 4354), False, 'from tensorflow.python.framework import ops\n'), ((4369, 4408), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3, 5, 2]'], {}), '(dtype, [3, 5, 2])\n', (4390, 4408), False, 'from tensorflow.python.ops import array_ops\n'), ((4421, 4448), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (4431, 4448), False, 'from tensorflow.python.framework import ops\n'), ((4643, 4667), 'numpy.argmin', 'np.argmin', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (4652, 4667), True, 'import numpy as np\n'), ((5012, 5029), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (5022, 5029), False, 'from tensorflow.python.framework import ops\n'), ((5044, 5083), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3, 5, 2]'], {}), '(dtype, [3, 5, 2])\n', (5065, 5083), False, 'from tensorflow.python.ops import array_ops\n'), ((5096, 5123), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (5106, 5123), False, 'from tensorflow.python.framework import ops\n'), ((5291, 5315), 'numpy.argmin', 'np.argmin', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (5300, 5315), True, 'import numpy as np\n'), ((6565, 6582), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (6575, 6582), False, 'from tensorflow.python.framework import ops\n'), ((6597, 6636), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3, 5, 2]'], {}), '(dtype, [3, 5, 2])\n', (6618, 6636), False, 'from tensorflow.python.ops import array_ops\n'), ((6649, 6676), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (6659, 6676), False, 'from tensorflow.python.framework import ops\n'), ((6844, 6869), 'numpy.argmax', 'np.argmax', (['input'], {'axis': '(-1)'}), '(input, axis=-1)\n', (6853, 6869), True, 'import numpy as np\n'), ((7374, 7391), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (7384, 7391), False, 'from tensorflow.python.framework import ops\n'), ((7406, 7439), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[3]'], {}), '(dtype, [3])\n', (7427, 7439), False, 'from tensorflow.python.ops import array_ops\n'), ((7452, 7479), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (7462, 7479), False, 'from tensorflow.python.framework import ops\n'), ((7641, 7657), 'numpy.argmax', 'np.argmax', (['input'], {}), '(input)\n', (7650, 7657), True, 'import numpy as np\n'), ((3489, 3506), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (3499, 3506), False, 'from tensorflow.python.framework import ops\n'), ((3523, 3571), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[1, 2, 3, 4, 5, 6]'], {}), 
'(dtype, [1, 2, 3, 4, 5, 6])\n', (3544, 3571), False, 'from tensorflow.python.ops import array_ops\n'), ((3591, 3632), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['np.int32'], {'shape': '()'}), '(np.int32, shape=())\n', (3612, 3632), False, 'from tensorflow.python.ops import array_ops\n'), ((3647, 3674), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (3657, 3674), False, 'from tensorflow.python.framework import ops\n'), ((3883, 3910), 'numpy.argmax', 'np.argmax', (['input'], {'axis': 'axis'}), '(input, axis=axis)\n', (3892, 3910), True, 'import numpy as np\n'), ((5676, 5693), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""cpu"""'], {}), "('cpu')\n", (5686, 5693), False, 'from tensorflow.python.framework import ops\n'), ((5710, 5758), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype', '[1, 2, 3, 4, 5, 6]'], {}), '(dtype, [1, 2, 3, 4, 5, 6])\n', (5731, 5758), False, 'from tensorflow.python.ops import array_ops\n'), ((5778, 5819), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['np.int32'], {'shape': '()'}), '(np.int32, shape=())\n', (5799, 5819), False, 'from tensorflow.python.ops import array_ops\n'), ((5834, 5861), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (5844, 5861), False, 'from tensorflow.python.framework import ops\n'), ((6070, 6097), 'numpy.argmin', 'np.argmin', (['input'], {'axis': 'axis'}), '(input, axis=axis)\n', (6079, 6097), True, 'import numpy as np\n')]
|
import pyHiChi as pfc
import numpy as np
import math as ma
def valueEx(x, y, z):
Ex = 0 #for x or y
#Ex=np.sin(z) #for z
return Ex
def valueEy(x, y, z):
#Ey = 0 #for y or z
#Ey = np.sin(x) #for x
Ey = np.sin(x - z) #for xz
return Ey
def valueEz(x, y, z):
Ez = 0 #for x or z or xz
#Ez = np.sin(y) #for y
return Ez
def valueBx(x, y, z):
#Bx = 0 #for x or z
#Bx = np.sin(y) #for y
Bx = np.sin(x - z)/np.sqrt(2) #for xz
return Bx
def valueBy(x, y, z):
By = 0 #for x or y or xz
#By = np.sin(z) #for z
return By
def valueBz(x, y, z):
#Bz = 0 #for y or z
#Bz = np.sin(x) #for x
Bz = np.sin(x - z)/np.sqrt(2) #for xz
return Bz
def step(minCoords, maxCoords, gridSize):
steps = pfc.vector3d(1, 1, 1)
steps.x = (maxCoords.x - minCoords.x)/(gridSize.x)
steps.y = (maxCoords.y - minCoords.y)/(gridSize.y)
steps.z = (maxCoords.z - minCoords.z)/(gridSize.z)
return steps
gridSize = pfc.vector3d(20, 20, 20)
minCoords = pfc.vector3d(0.0, 0.0, 0.0)
maxCoords = pfc.vector3d(2*ma.pi, 2*ma.pi, 2*ma.pi)
stepsGrid = step(minCoords, maxCoords, gridSize)
timeStep = 1e-14
grid = pfc.YeeGrid(gridSize, timeStep, minCoords, stepsGrid)
grid.setE(valueEx, valueEy, valueEz)
grid.setB(valueBx, valueBy, valueBz)
fieldSolver = pfc.FDTD(grid)
fieldSolver.setPML(0, 0, 0)
periodicalBC = pfc.PeriodicalBC(fieldSolver)
#show
import matplotlib.pyplot as plt
import matplotlib.animation as animation
N = 50
eps = 0.0
x = np.arange(eps, 2*ma.pi - eps, 2*(ma.pi-eps)/N)
z = np.arange(eps, 2*ma.pi - eps, 2*(ma.pi-eps)/N)
def getFields():
global grid, x, z, N
#print(grid)
Ex = np.zeros(shape=(N,N))
Ey = np.zeros(shape=(N,N))
Ez = np.zeros(shape=(N,N))
Bx = np.zeros(shape=(N,N))
By = np.zeros(shape=(N,N))
Bz = np.zeros(shape=(N,N))
for ix in range(N):
for iy in range(N):
coordXZ = pfc.vector3d(x[ix], 0.0, z[iy]) #for x or z or xz
#coordXZ = pfc.vector3d(x[ix], z[iy], 0.0) #for y or x
E = grid.getE(coordXZ)
Ex[ix, iy] = E.x
Ey[ix, iy] = E.y
Ez[ix, iy] = E.z
B = grid.getB(coordXZ)
Bx[ix, iy] = B.x
By[ix, iy] = B.y
Bz[ix, iy] = B.z
return Ex, Ey, Ez, Bx, By, Bz
def updateData():
for i in range(1000):
fieldSolver.updateFields()
(Ex, Ey, Ez, Bx, By, Bz) = getFields()
fig, axes = plt.subplots(ncols=3, nrows=2)
im11 = axes[0, 0].imshow(Ex, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im11, ax=axes[0, 0])
im12 = axes[0, 1].imshow(Ey, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im12, ax=axes[0, 1])
im13 = axes[0, 2].imshow(Ez, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im13, ax=axes[0, 2])
im21 = axes[1, 0].imshow(Bx, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im21, ax=axes[1, 0])
im22 = axes[1, 1].imshow(By, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im22, ax=axes[1, 1])
im23 = axes[1, 2].imshow(Bz, cmap='RdBu', interpolation='none', extent=(0, 2*ma.pi, 0, 2*ma.pi), animated = True)
fig.colorbar(im23, ax=axes[1, 2])
i = 0
def updatefig(*args):
global i
updateData()
(Ex, Ey, Ez, Bx, By, Bz) = getFields()
im11.set_array(Ex)
im12.set_array(Ey)
im13.set_array(Ez)
im21.set_array(Bx)
im22.set_array(By)
im23.set_array(Bz)
i = i + 1
return im11, im12, im13, im21, im22, im23,
ani = animation.FuncAnimation(fig, updatefig, interval=50, blit=True)
plt.show()
|
[
"matplotlib.pyplot.show",
"numpy.zeros",
"pyHiChi.PeriodicalBC",
"matplotlib.animation.FuncAnimation",
"numpy.sin",
"numpy.arange",
"pyHiChi.vector3d",
"pyHiChi.YeeGrid",
"numpy.sqrt",
"matplotlib.pyplot.subplots",
"pyHiChi.FDTD"
] |
[((902, 926), 'pyHiChi.vector3d', 'pfc.vector3d', (['(20)', '(20)', '(20)'], {}), '(20, 20, 20)\n', (914, 926), True, 'import pyHiChi as pfc\n'), ((939, 966), 'pyHiChi.vector3d', 'pfc.vector3d', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (951, 966), True, 'import pyHiChi as pfc\n'), ((979, 1024), 'pyHiChi.vector3d', 'pfc.vector3d', (['(2 * ma.pi)', '(2 * ma.pi)', '(2 * ma.pi)'], {}), '(2 * ma.pi, 2 * ma.pi, 2 * ma.pi)\n', (991, 1024), True, 'import pyHiChi as pfc\n'), ((1093, 1146), 'pyHiChi.YeeGrid', 'pfc.YeeGrid', (['gridSize', 'timeStep', 'minCoords', 'stepsGrid'], {}), '(gridSize, timeStep, minCoords, stepsGrid)\n', (1104, 1146), True, 'import pyHiChi as pfc\n'), ((1237, 1251), 'pyHiChi.FDTD', 'pfc.FDTD', (['grid'], {}), '(grid)\n', (1245, 1251), True, 'import pyHiChi as pfc\n'), ((1295, 1324), 'pyHiChi.PeriodicalBC', 'pfc.PeriodicalBC', (['fieldSolver'], {}), '(fieldSolver)\n', (1311, 1324), True, 'import pyHiChi as pfc\n'), ((1427, 1481), 'numpy.arange', 'np.arange', (['eps', '(2 * ma.pi - eps)', '(2 * (ma.pi - eps) / N)'], {}), '(eps, 2 * ma.pi - eps, 2 * (ma.pi - eps) / N)\n', (1436, 1481), True, 'import numpy as np\n'), ((1478, 1532), 'numpy.arange', 'np.arange', (['eps', '(2 * ma.pi - eps)', '(2 * (ma.pi - eps) / N)'], {}), '(eps, 2 * ma.pi - eps, 2 * (ma.pi - eps) / N)\n', (1487, 1532), True, 'import numpy as np\n'), ((2245, 2275), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(3)', 'nrows': '(2)'}), '(ncols=3, nrows=2)\n', (2257, 2275), True, 'import matplotlib.pyplot as plt\n'), ((3481, 3544), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'updatefig'], {'interval': '(50)', 'blit': '(True)'}), '(fig, updatefig, interval=50, blit=True)\n', (3504, 3544), True, 'import matplotlib.animation as animation\n'), ((3546, 3556), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3554, 3556), True, 'import matplotlib.pyplot as plt\n'), ((209, 222), 'numpy.sin', 'np.sin', (['(x - z)'], {}), '(x - z)\n', (215, 222), True, 'import numpy as np\n'), ((698, 719), 'pyHiChi.vector3d', 'pfc.vector3d', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (710, 719), True, 'import pyHiChi as pfc\n'), ((1586, 1608), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1594, 1608), True, 'import numpy as np\n'), ((1614, 1636), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1622, 1636), True, 'import numpy as np\n'), ((1642, 1664), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1650, 1664), True, 'import numpy as np\n'), ((1670, 1692), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1678, 1692), True, 'import numpy as np\n'), ((1698, 1720), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1706, 1720), True, 'import numpy as np\n'), ((1726, 1748), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (1734, 1748), True, 'import numpy as np\n'), ((400, 413), 'numpy.sin', 'np.sin', (['(x - z)'], {}), '(x - z)\n', (406, 413), True, 'import numpy as np\n'), ((414, 424), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (421, 424), True, 'import numpy as np\n'), ((602, 615), 'numpy.sin', 'np.sin', (['(x - z)'], {}), '(x - z)\n', (608, 615), True, 'import numpy as np\n'), ((616, 626), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (623, 626), True, 'import numpy as np\n'), ((1804, 1835), 'pyHiChi.vector3d', 'pfc.vector3d', (['x[ix]', '(0.0)', 'z[iy]'], {}), '(x[ix], 0.0, z[iy])\n', (1816, 1835), True, 
'import pyHiChi as pfc\n')]
|
# ORIE 7590
import numpy as np
from bd_sim_cython import discrete_bessel_sim, discrete_laguerre_sim, cmeixner
from scipy.special import jv, laguerre, poch, eval_laguerre, j0
from scipy.integrate import quad
from math import comb, factorial, exp, sqrt, log
import hankel
def bd_simulator(t, x0, num_paths, method='bessel', num_threads=4):
"""
:param t: terminal time, double
:param x0: initial state, callable or int
:param num_paths: number of paths
:param method: method of simulating birth-death chain, currently support 'bessel' and 'laguerre'
:param num_threads: number of threads for multiprocessing
:return: ndarray of simulated result at terminal time
"""
if isinstance(x0, int):
x0_array = np.array([x0]*num_paths, dtype=np.int64)
else:
x0_array = np.array([x0() for _ in range(num_paths)], dtype=np.int64)
output = np.zeros(dtype=np.int64, shape=num_paths)
if method == 'bessel':
discrete_bessel_sim(t, x0_array, num_paths, output, int(num_threads))
else:
discrete_laguerre_sim(t, x0_array, num_paths, output, int(num_threads))
return output
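# Illustrative usage sketch added for clarity (not part of the original
# module): shows that bd_simulator accepts either a fixed integer initial
# state or a callable sampler for x0. The helper name and the Poisson start
# below are assumptions chosen to mirror the gateway functions further down.
def _example_bd_simulator_usage(t=1.0, num_paths=1000):
    # Fixed integer start, simulated with the discrete Bessel chain.
    fixed_start = bd_simulator(t, x0=5, num_paths=num_paths, method='bessel')
    # Random start drawn per path, simulated with the discrete Laguerre chain.
    random_start = bd_simulator(t, x0=lambda: int(np.random.poisson(3)),
                                num_paths=num_paths, method='laguerre')
    return fixed_start.mean(), random_start.mean()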
def MC_BESQ_gateway(N = 10**6, t = 0, x0 = 0, test = 'bessel', method = 'bessel', args = [], num_decimal = 4):
"""
Monte Carlo estimator of expected BESQ using dBESQ simulation or dLaguerre simulation
:param N: int, Number of simulations
    :param t: positive float, Simulation horizon
    :param x0: initial value of X
    :param method: simulation method, currently supports {'bessel', 'laguerre', 'bessel-delay', 'laguerre-delay'}
    :param test: defines test function
    :param args: arguments to define test function
"""
if method == 'bessel':
if test == 'bessel':
f = lambda n : eval_laguerre(n, 1)
s = t
elif test == 'custom':
            f = args[0]
s = t
elif method == 'laguerre':
if test == 'bessel':
f = lambda n : eval_laguerre(n, 1+t)
s = log(t + 1)
elif method == 'bessel-delay':
method = 'bessel'
if test == 'bessel':
f = lambda n : j0(2*np.sqrt(np.random.gamma(n+1)))
s = t - 1
elif test == 'custom':
f = lambda n : args[0](np.random.gamma(n + 1))
s = t - 1
elif method == 'laguerre-delay':
method = 'laguerre'
if test == 'bessel':
f = lambda n : j0(2*np.sqrt(np.random.gamma(n+1) * (t/2 + 1/2)))
s = log(t/2 + 1/2)
def poisson_x0():
return np.random.poisson(x0)
xt_array = bd_simulator(s, x0=poisson_x0, num_paths=N, method=method, num_threads=4)
return np.mean(f(xt_array)).round(num_decimal)
def MC_Laguerre_gateway(N = 10**6, t = 0, x0 = 0, test = 'laguerre', method = 'laguerre', args = [], num_decimal = 4):
"""
    Monte Carlo estimator of expected Laguerre using dLaguerre simulation or delayed dLaguerre simulation
    :param N: int, Number of simulations
    :param t: positive float, Simulation horizon
    :param x0: initial value of X
    :param method: simulation method, currently supports {'laguerre', 'laguerre-delay'}
    :param test: defines test function
    :param args: arguments to define test function
"""
if method == 'laguerre':
if test == 'laguerre':
f = lambda m : eval_meixner(args['n'], m)
s = t
elif method == 'laguerre-delay':
if test == 'laguerre':
f = lambda m : eval_laguerre(args['n'], np.random.gamma(m+1)/2)
s = t - log(2)
elif test == 'relu':
f = lambda m : np.maximum(0, np.random.gamma(m+1)/2)
s = t - log(2)
def poisson_x0():
return np.random.poisson(x0)
xt_array = bd_simulator(s, x0=poisson_x0, num_paths=N, method='laguerre', num_threads=4)
return np.mean(f(xt_array)).round(num_decimal)
def MC_Laguerre(N = 10**6, t = 0, x0 = 0, test = 'laguerre', args = [], num_decimal = 4):
"""
Monte Carlo estimator of expected Laguerre using Brownian motion simulation
:param N: int, Number of simulations
    :param t: positive float, Simulation horizon
    :param x0: initial value of X
    :param test: defines test function
    :param args: arguments to define test function
"""
if test == 'laguerre':
f = lambda x : eval_laguerre(args['n'], x)
elif test == 'relu':
f = lambda x : np.maximum(0, x)
s = exp(t) - 1
xt_array = exp(-t)/2 * np.sum(np.square(np.random.multivariate_normal(np.zeros(2), s*np.eye(2), size=N)
+ np.sqrt(x0)*np.ones((N,2))), axis=1)
return np.mean(f(xt_array)).round(num_decimal)
def MC_dBESQ_gateway(N = 10**6, t = 0, n0 = 0, test = 'laguerre', method = 'laguerre', args = [], num_decimal = 4):
"""
Monte Carlo estimator of expected dBESQ using birth-death simulation, exact BESQ solution, dLaguerre simulation
or PDE systems.
:param N: int, Number of simulations
    :param t: positive float, Simulation horizon
    :param n0: initial state of the dBESQ chain
    :param method: simulation method, currently supports {'birth-death', 'exact-besq', 'laguerre', 'pde'}
    :param test: defines test function
    :param args: arguments to define test function
"""
if method == 'birth-death':
if test == 'laguerre':
f = lambda n : eval_laguerre(n, 1)
xt_array = bd_simulator(t, x0=n0, num_paths=N, method='bessel', num_threads=4)
return np.mean(f(xt_array)).round(num_decimal)
elif method == 'exact-besq':
if test == 'laguerre':
return np.mean(exp(-t+1)*jv(0, 2*np.sqrt(np.random.gamma(n0+1)))).round(num_decimal)
elif method == 'laguerre':
if test == 'laguerre':
f = lambda n : eval_laguerre(n, 1)
s = log(t / 2)
def poisson_x0():
return np.random.poisson(np.random.gamma(n0+1))
xt_array = bd_simulator(s, x0=poisson_x0, num_paths=N, method='laguerre', num_threads=4)
return np.mean(f(np.random.poisson(t/2 *np.random.gamma(xt_array+1)))).round(num_decimal)
def MC_BESQ_hankel(N = 10**6, t = 0, x0 = 0, test = 'custom', function = lambda x : 0, args = [], num_decimal = 4):
"""
Monte Carlo estimator of expected BESQ using Hankel transform and Exponential r.v.
:param N: int, Number of simulations
    :param t: positive float, Simulation horizon
    :param x0: initial value of X
    :param test: defines test function
    :param function: custom test function
    :param args: arguments to define test function
"""
j0 = lambda x : jv(0, 2*np.sqrt(x))
if test == 'bessel':
f = j0
elif test == 'poly':
if len(args) < 1:
print('No coefficients provided')
coef = []
else:
coef = args[0]
f = lambda x : np.polyval(coef, x)
else:
f = function
estimates = np.zeros(N)
for n in range(N):
Z = np.random.exponential(1/t)
estimates[n] = j0(x0*Z)*hankel_reparam(Z, f)/t
return np.mean(estimates).round(num_decimal)
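# Illustrative check added for clarity (not in the original file): runs the
# Hankel/exponential Monte Carlo estimator above next to the closed-form
# value exact_BESQ defined below. The helper name and parameter values are
# arbitrary assumptions.
def _example_hankel_vs_exact(t=0.5, x0=1.0, N=10**4):
    mc_value = MC_BESQ_hankel(N=N, t=t, x0=x0, test='bessel')
    exact_value = exact_BESQ(t=t, x0=x0)
    return mc_value, exact_value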
def discrete_poly(n, coef):
return sum([coef[i]*poch(n - i + 1, i) for i in range(len(coef)) if n >= i])
def exact_BESQ(t = 0, x0 = 0, num_decimal = 4):
return (exp(-t)*jv(0, 2*np.sqrt(x0))).round(num_decimal)
def exact_Laguerre(t = 0, x0 = 0, n = 0, num_decimal = 4):
return (exp(-t*n)*eval_laguerre(n, x0)).round(num_decimal)
def eval_meixner(n, m):
output = np.zeros(dtype=np.int64, shape=len(m))
cmeixner(n, m, len(m), output)
return output
def hankel_reparam(z, f):
"""
    Reparametrized order-0 Hankel transform of f evaluated at z; helper for MC_BESQ_hankel.
Based on <NAME> and <NAME>, “hankel: A Python library for performing simple and accurate Hankel transformations”, Journal of Open Source Software, 4(37), 1397, https://doi.org/10.21105/joss.01397
:param z: positive float
:param f: function in L^2(R_+)
"""
ht = hankel.HankelTransform(
nu= 0, # The order of the bessel function
N = 120, # Number of steps in the integration
h = 0.03 # Proxy for "size" of steps in integration
)
return 2*ht.transform(lambda x: f(x**2), 2*np.sqrt(z), ret_err = False)
# exp = np.random.exponential
# def bd_one_path(t, x0):
# """
# simulate a birth-death proecss X at time t.
#
# :param t: float, terminal time
# :param x0: initial value of X
# :return: one realization of X_t
# """
#
# s = 0
# state = x0
#
# while True:
# birth_rate = state + 1
# death_rate = state
# arrival_rate = birth_rate + death_rate
# time_to_arrival = exp(1/arrival_rate)
# s += time_to_arrival
# # stop and return when exceeds target time
# if s > t:
# return state
# # update
# if np.random.rand() < death_rate / arrival_rate:
# state -= 1
# else:
# state += 1
#
#
# def bd_simulator(t, x0):
# """
# :param t: terminal time
# :param x0: list of initial values from certain distribution
# :return: list of simulated X_t
# """
#
# num_iter = len(x0)
# result = np.zeros(num_iter, dtype = np.int64)
#
# for i in range(num_iter):
# result[i] = bd_one_path(t, x0[i])
#
# return result
|
[
"math.exp",
"hankel.HankelTransform",
"numpy.maximum",
"scipy.special.eval_laguerre",
"numpy.polyval",
"numpy.random.exponential",
"numpy.zeros",
"numpy.ones",
"numpy.random.gamma",
"scipy.special.poch",
"numpy.mean",
"numpy.array",
"numpy.random.poisson",
"scipy.special.j0",
"numpy.eye",
"math.log",
"numpy.sqrt"
] |
[((887, 928), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'np.int64', 'shape': 'num_paths'}), '(dtype=np.int64, shape=num_paths)\n', (895, 928), True, 'import numpy as np\n'), ((6930, 6941), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (6938, 6941), True, 'import numpy as np\n'), ((7984, 8027), 'hankel.HankelTransform', 'hankel.HankelTransform', ([], {'nu': '(0)', 'N': '(120)', 'h': '(0.03)'}), '(nu=0, N=120, h=0.03)\n', (8006, 8027), False, 'import hankel\n'), ((744, 786), 'numpy.array', 'np.array', (['([x0] * num_paths)'], {'dtype': 'np.int64'}), '([x0] * num_paths, dtype=np.int64)\n', (752, 786), True, 'import numpy as np\n'), ((2542, 2563), 'numpy.random.poisson', 'np.random.poisson', (['x0'], {}), '(x0)\n', (2559, 2563), True, 'import numpy as np\n'), ((3699, 3720), 'numpy.random.poisson', 'np.random.poisson', (['x0'], {}), '(x0)\n', (3716, 3720), True, 'import numpy as np\n'), ((4416, 4422), 'math.exp', 'exp', (['t'], {}), '(t)\n', (4419, 4422), False, 'from math import comb, factorial, exp, sqrt, log\n'), ((6977, 7005), 'numpy.random.exponential', 'np.random.exponential', (['(1 / t)'], {}), '(1 / t)\n', (6998, 7005), True, 'import numpy as np\n'), ((4310, 4337), 'scipy.special.eval_laguerre', 'eval_laguerre', (["args['n']", 'x'], {}), "(args['n'], x)\n", (4323, 4337), False, 'from scipy.special import jv, laguerre, poch, eval_laguerre, j0\n'), ((4442, 4449), 'math.exp', 'exp', (['(-t)'], {}), '(-t)\n', (4445, 4449), False, 'from math import comb, factorial, exp, sqrt, log\n'), ((7070, 7088), 'numpy.mean', 'np.mean', (['estimates'], {}), '(estimates)\n', (7077, 7088), True, 'import numpy as np\n'), ((1765, 1784), 'scipy.special.eval_laguerre', 'eval_laguerre', (['n', '(1)'], {}), '(n, 1)\n', (1778, 1784), False, 'from scipy.special import jv, laguerre, poch, eval_laguerre, j0\n'), ((2000, 2010), 'math.log', 'log', (['(t + 1)'], {}), '(t + 1)\n', (2003, 2010), False, 'from math import comb, factorial, exp, sqrt, log\n'), ((4386, 4402), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (4396, 4402), True, 'import numpy as np\n'), ((5341, 5360), 'scipy.special.eval_laguerre', 'eval_laguerre', (['n', '(1)'], {}), '(n, 1)\n', (5354, 5360), False, 'from scipy.special import jv, laguerre, poch, eval_laguerre, j0\n'), ((6626, 6636), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (6633, 6636), True, 'import numpy as np\n'), ((6862, 6881), 'numpy.polyval', 'np.polyval', (['coef', 'x'], {}), '(coef, x)\n', (6872, 6881), True, 'import numpy as np\n'), ((7027, 7037), 'scipy.special.j0', 'j0', (['(x0 * Z)'], {}), '(x0 * Z)\n', (7029, 7037), False, 'from scipy.special import jv, laguerre, poch, eval_laguerre, j0\n'), ((7161, 7179), 'scipy.special.poch', 'poch', (['(n - i + 1)', 'i'], {}), '(n - i + 1, i)\n', (7165, 7179), False, 'from scipy.special import jv, laguerre, poch, eval_laguerre, j0\n'), ((7279, 7286), 'math.exp', 'exp', (['(-t)'], {}), '(-t)\n', (7282, 7286), False, 'from math import comb, factorial, exp, sqrt, log\n'), ((7400, 7411), 'math.exp', 'exp', (['(-t * n)'], {}), '(-t * n)\n', (7403, 7411), False, 'from math import comb, factorial, exp, sqrt, log\n'), ((7410, 7430), 'scipy.special.eval_laguerre', 'eval_laguerre', (['n', 'x0'], {}), '(n, x0)\n', (7423, 7430), False, 'from scipy.special import jv, laguerre, poch, eval_laguerre, j0\n'), ((8233, 8243), 'numpy.sqrt', 'np.sqrt', (['z'], {}), '(z)\n', (8240, 8243), True, 'import numpy as np\n'), ((1962, 1985), 'scipy.special.eval_laguerre', 'eval_laguerre', (['n', '(1 + t)'], {}), '(n, 1 + t)\n', (1975, 1985), False, 
'from scipy.special import jv, laguerre, poch, eval_laguerre, j0\n'), ((3529, 3535), 'math.log', 'log', (['(2)'], {}), '(2)\n', (3532, 3535), False, 'from math import comb, factorial, exp, sqrt, log\n'), ((5815, 5825), 'math.log', 'log', (['(t / 2)'], {}), '(t / 2)\n', (5818, 5825), False, 'from math import comb, factorial, exp, sqrt, log\n'), ((2485, 2503), 'math.log', 'log', (['(t / 2 + 1 / 2)'], {}), '(t / 2 + 1 / 2)\n', (2488, 2503), False, 'from math import comb, factorial, exp, sqrt, log\n'), ((3650, 3656), 'math.log', 'log', (['(2)'], {}), '(2)\n', (3653, 3656), False, 'from math import comb, factorial, exp, sqrt, log\n'), ((4501, 4512), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4509, 4512), True, 'import numpy as np\n'), ((4580, 4591), 'numpy.sqrt', 'np.sqrt', (['x0'], {}), '(x0)\n', (4587, 4591), True, 'import numpy as np\n'), ((4592, 4607), 'numpy.ones', 'np.ones', (['(N, 2)'], {}), '((N, 2))\n', (4599, 4607), True, 'import numpy as np\n'), ((5779, 5798), 'scipy.special.eval_laguerre', 'eval_laguerre', (['n', '(1)'], {}), '(n, 1)\n', (5792, 5798), False, 'from scipy.special import jv, laguerre, poch, eval_laguerre, j0\n'), ((7295, 7306), 'numpy.sqrt', 'np.sqrt', (['x0'], {}), '(x0)\n', (7302, 7306), True, 'import numpy as np\n'), ((3485, 3507), 'numpy.random.gamma', 'np.random.gamma', (['(m + 1)'], {}), '(m + 1)\n', (3500, 3507), True, 'import numpy as np\n'), ((4516, 4525), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (4522, 4525), True, 'import numpy as np\n'), ((5902, 5925), 'numpy.random.gamma', 'np.random.gamma', (['(n0 + 1)'], {}), '(n0 + 1)\n', (5917, 5925), True, 'import numpy as np\n'), ((2252, 2274), 'numpy.random.gamma', 'np.random.gamma', (['(n + 1)'], {}), '(n + 1)\n', (2267, 2274), True, 'import numpy as np\n'), ((3606, 3628), 'numpy.random.gamma', 'np.random.gamma', (['(m + 1)'], {}), '(m + 1)\n', (3621, 3628), True, 'import numpy as np\n'), ((5611, 5622), 'math.exp', 'exp', (['(-t + 1)'], {}), '(-t + 1)\n', (5614, 5622), False, 'from math import comb, factorial, exp, sqrt, log\n'), ((2141, 2163), 'numpy.random.gamma', 'np.random.gamma', (['(n + 1)'], {}), '(n + 1)\n', (2156, 2163), True, 'import numpy as np\n'), ((2432, 2454), 'numpy.random.gamma', 'np.random.gamma', (['(n + 1)'], {}), '(n + 1)\n', (2447, 2454), True, 'import numpy as np\n'), ((5637, 5660), 'numpy.random.gamma', 'np.random.gamma', (['(n0 + 1)'], {}), '(n0 + 1)\n', (5652, 5660), True, 'import numpy as np\n'), ((6078, 6107), 'numpy.random.gamma', 'np.random.gamma', (['(xt_array + 1)'], {}), '(xt_array + 1)\n', (6093, 6107), True, 'import numpy as np\n')]
|
import os.path as osp
import sys
import numpy as np
import torch
from matplotlib import pyplot as plt
from scipy.stats import norm
sys.path.append(osp.dirname(sys.path[0]))
from neko import neko_utils
class utils(neko_utils.neko_utils):
def __init__(self):
super(utils, self).__init__()
def plot_latent_image(self, model, latent_dim, patch_count, patch_side_size):
# 2σ原则
xs = norm.ppf(np.linspace(0.05, 0.95, patch_count))
ys = norm.ppf(np.linspace(0.05, 0.95, patch_count))
image_size = [patch_count * patch_side_size, patch_count * patch_side_size]
image = np.zeros(image_size)
for x_index, x in enumerate(xs):
for y_index, y in enumerate(ys):
z = np.tile(np.array([[x, y]]), latent_dim).reshape(-1, latent_dim)
z = torch.Tensor(z).cuda()
decoder_image = model.decoder(z)
decoder_image = decoder_image.reshape(-1, patch_side_size, patch_side_size)
image[x_index * patch_side_size:(x_index + 1) * patch_side_size,
y_index * patch_side_size:(y_index + 1) * patch_side_size] = decoder_image[0].cpu().detach().numpy()
plt.figure(figsize=(10, 10))
plt.imshow(image, cmap="gray")
plt.savefig("latent-{}_space_image_{}.png".format(latent_dim, self.get_now_time()))
self.divide_line("save latent space images !")
plt.show()
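# Hedged usage sketch added for illustration (not in the original file).
# Assumes a trained VAE-style model whose .decoder maps a (batch, latent_dim)
# CUDA tensor to flattened square patches; `trained_model` is a placeholder.
def _example_plot_usage(trained_model):
    # patch_count x patch_count grid of decoded patches over a 2-D latent space.
    utils().plot_latent_image(trained_model, latent_dim=2, patch_count=20, patch_side_size=28)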
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"os.path.dirname",
"numpy.zeros",
"matplotlib.pyplot.figure",
"torch.Tensor",
"numpy.array",
"numpy.linspace"
] |
[((149, 173), 'os.path.dirname', 'osp.dirname', (['sys.path[0]'], {}), '(sys.path[0])\n', (160, 173), True, 'import os.path as osp\n'), ((621, 641), 'numpy.zeros', 'np.zeros', (['image_size'], {}), '(image_size)\n', (629, 641), True, 'import numpy as np\n'), ((1203, 1231), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1213, 1231), True, 'from matplotlib import pyplot as plt\n'), ((1240, 1270), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (1250, 1270), True, 'from matplotlib import pyplot as plt\n'), ((1426, 1436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1434, 1436), True, 'from matplotlib import pyplot as plt\n'), ((423, 459), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.95)', 'patch_count'], {}), '(0.05, 0.95, patch_count)\n', (434, 459), True, 'import numpy as np\n'), ((483, 519), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.95)', 'patch_count'], {}), '(0.05, 0.95, patch_count)\n', (494, 519), True, 'import numpy as np\n'), ((833, 848), 'torch.Tensor', 'torch.Tensor', (['z'], {}), '(z)\n', (845, 848), False, 'import torch\n'), ((757, 775), 'numpy.array', 'np.array', (['[[x, y]]'], {}), '([[x, y]])\n', (765, 775), True, 'import numpy as np\n')]
|