repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
futscdav/Chunkmogrify | [
"efdfdf3df0bb15e5e64de8575ab89baaaa9f5340"
]
| [
"setup_cpp_ext.py"
]
| [
"#\n# Author: David Futschik\n# Provided as part of the Chunkmogrify project, 2021.\n#\n\nimport platform\nimport setuptools\nimport numpy as np\nfrom setuptools import sandbox\n\nplatform_specific_flags = []\nif platform.system() == \"Windows\":\n platform_specific_flags += [\"/permissive-\", \"/Ox\", \"/std:c++11\"]\nelse:\n platform_specific_flags += [\"-O3\", \"--std=c++11\"]\n\next_modules = [\n setuptools.Extension('_C_canvas',\n sources=['extensions/canvas_to_masks.cpp'],\n include_dirs=[np.get_include()],\n extra_compile_args=platform_specific_flags,\n language='c++'),\n setuptools.Extension('_C_heatmap',\n sources=['extensions/heatmap.cpp'],\n include_dirs=[np.get_include()],\n extra_compile_args=platform_specific_flags,\n language='c++')\n]\n\ndef checked_build(force=False):\n def do_build():\n sandbox.run_setup('setup_cpp_ext.py', ['build_ext', '--inplace'])\n try:\n import _C_canvas\n import _C_heatmap\n if force: do_build()\n except ImportError:\n do_build()\n\nif __name__ == \"__main__\":\n setuptools.setup(\n ext_modules=ext_modules\n )"
]
| [
[
"numpy.get_include"
]
]
|
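The `apis` cell flags `numpy.get_include`, which the setup script uses to point its C++ extensions at NumPy's headers. A minimal, self-contained sketch of that pattern (module and source names here are hypothetical, not from Chunkmogrify):

```python
# numpy.get_include() returns the directory holding the NumPy C headers
# (numpy/arrayobject.h and friends), required to compile any extension
# that uses the NumPy C API.
import numpy as np
from setuptools import Extension

ext = Extension(
    "_example",                       # hypothetical extension name
    sources=["example.c"],            # hypothetical source file
    include_dirs=[np.get_include()],
)
print(np.get_include())
```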
gitter-badger/GeoMetrics | [
"8f33a7da1db88ea49f10772c4bf63b357e9f066c"
]
| [
"src/GUI/compile_space/fermat_sprial.py"
]
| [
"import matplotlib.pyplot as plt\nfrom matplotlib import *\nfrom numpy import *\nfrom matplotlib.animation import *\n\nname = \"Fermat Spiral\"\n\ndef r_(u):\n\tr = (a**m * u)\n\treturn r\n\ndef r2_(u):\n\tr2 = 1/((a**-m) * u)\n\treturn r2\n\nm = 2\na = 6\nu = linspace(0.001, 2 * pi,1000)\nr = r_(u)\nr2 = r2_(u)\n\nplt.subplot(111, projection='polar')\nplt.plot(u, r**(1/m))\nplt.show()\n"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot"
]
]
|
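The three `matplotlib.pyplot` calls listed above cover the whole plotting path in `fermat_sprial.py`. A minimal sketch with explicit imports in place of the file's wildcard imports, keeping its constants (a = 6, m = 2, so the plotted curve is 6·sqrt(theta)):

```python
import numpy as np
import matplotlib.pyplot as plt

theta = np.linspace(0.001, 2 * np.pi, 1000)
r = 6 * np.sqrt(theta)  # Fermat spiral r = a * sqrt(theta), with a = 6

plt.subplot(111, projection="polar")  # polar axes, as in the source
plt.plot(theta, r)
plt.show()
```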
compsciencelab/ppo_D | [
"1870c908f498ceb29295e5625ff5598bed82cbb3"
]
| [
"main/wrappers.py"
]
| [
"import os\nimport sys\nimport gym\nimport torch\nimport glob\nfrom os.path import join\nimport random\nimport numpy as np\nfrom gym import error, spaces\nfrom baselines.bench import load_results\nfrom baselines import bench\nfrom gym.spaces.box import Box\nfrom baselines.common.vec_env import VecEnvWrapper\nimport animalai\nfrom animalai.envs.gym.environment import AnimalAIEnv\nimport time\nfrom animalai.envs.arena_config import ArenaConfig\nfrom animalai.envs.gym.environment import ActionFlattener\nfrom ppo.envs import FrameSkipEnv,TransposeImage\nfrom PIL import Image\n\nSTATEFUL_BASE_SIZE = 1+3+1+1 # and hotbit for actions\nclass Stateful(gym.Wrapper):\n def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n # self.observation_space = spaces.Dict(\n # {'obs': env.observation_space,\n # 'timeleft': spaces.Box(low=0, high=1, shape=()),\n # 'speed': spaces.Box(low=0, high=10, shape=()) ,\n # 'direction': spaces.Box(low=-1, high=1, shape=(3,))})\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n vel = info['vector_obs']\n mag = np.sqrt(vel.dot(vel))\n timeleft = (self.max_time - self.steps)/1000 #normalized to a fixed time unit (0.25, 0.5, 1.0)\n o = vel/mag if mag>0 else vel\n state = np.array([mag,o[0],o[1],o[2],timeleft,self.env_reward],dtype=np.float32) \n actions = np.zeros(self.action_space.n,dtype=np.float32)\n actions[action] = 1 #hotbit\n state = np.concatenate((state,actions))\n info['states'] = state\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\nclass RetroEnv(gym.Wrapper):\n def __init__(self,env):\n gym.Wrapper.__init__(self, env)\n self.flattener = ActionFlattener([3,3])\n self.action_space = self.flattener.action_space\n self.observation_space = gym.spaces.Box(0, 255,dtype=np.uint8,shape=(84, 84, 3))\n\n def step(self, action): \n action = int(action)\n action = self.flattener.lookup_action(action) # convert to multi\n obs, reward, done, info = self.env.step(action) #non-retro\n visual_obs, vector_obs = self._preprocess_obs(obs)\n info['vector_obs']=vector_obs\n return visual_obs,reward,done,info\n\n def reset(self, **kwargs):\n obs = self.env.reset(**kwargs)\n visual_obs, _ = self._preprocess_obs(obs)\n return visual_obs\n\n def _preprocess_obs(self,obs):\n visual_obs, vector_obs = obs\n visual_obs = self._preprocess_single(visual_obs)\n visual_obs = self._resize_observation(visual_obs)\n return visual_obs, vector_obs\n\n @staticmethod\n def _preprocess_single(single_visual_obs):\n return (255.0 * single_visual_obs).astype(np.uint8)\n\n @staticmethod\n def _resize_observation(observation):\n \"\"\"\n Re-sizes visual observation to 84x84\n \"\"\"\n obs_image = Image.fromarray(observation)\n obs_image = obs_image.resize((84, 84), Image.NEAREST)\n return np.array(obs_image)\n\n\n\n#{0: [0, 0], 1: [0, 1], 2: [0, 2], 3: [1, 0], 4: [1, 1], 5: [1, 2], 6: [2, 0], 7: [2, 1], 8: [2, 2]}\nclass FilterActionEnv(gym.ActionWrapper):\n \"\"\"\n An environment wrapper that limits the action space.\n \"\"\"\n _ACTIONS = (0, 1, 2, 3, 4, 5, 6)\n\n def __init__(self, env):\n super().__init__(env)\n self.actions = self._ACTIONS\n self.action_space = gym.spaces.Discrete(len(self.actions))\n\n def action(self, act):\n return self.actions[act]\n\n\nclass VecVisionState(VecEnvWrapper):\n def __init__(self, venv, visnet):\n wos = venv.observation_space[1] # wrapped state space\n #output_size = visnet.output_size\n output_size = visnet.posangles_size \n low = np.concatenate((wos.low, np.full((output_size,), 
-np.inf,dtype=np.float32)) )\n high = np.concatenate((wos.high, np.full((output_size,), np.inf,dtype=np.float32)) )\n observation_space = gym.spaces.Tuple( \n (venv.observation_space[0],\n gym.spaces.Box(low=low, high=high, dtype=np.float32)) \n )\n\n VecEnvWrapper.__init__(self, venv, observation_space=observation_space)\n\n self.visnet = visnet\n\n def step_wait(self):\n (viz,states), rews, news, infos = self.venv.step_wait()\n with torch.no_grad():\n posangles,_,h = self.visnet(viz[:,-self.visnet.num_inputs:,:,:]) #match network viz take the last obs\n states = torch.cat((states,posangles),dim=1)\n return (viz,states), rews, news, infos\n\n def reset(self):\n (viz,states) = self.venv.reset()\n with torch.no_grad():\n posangles,_,h = self.visnet(viz[:,-self.visnet.num_inputs:,:,:]) #match network viz take the last obs\n states = torch.cat((states,posangles),dim=1)\n return (viz,states)\n \n\nclass VecObjectState(VecEnvWrapper):\n def __init__(self, venv, objnet):\n wos = venv.observation_space[1] # wrapped state space\n output_size = objnet.num_classes \n low = np.concatenate((wos.low, np.full((output_size,), -np.inf,dtype=np.float32)) )\n high = np.concatenate((wos.high, np.full((output_size,), np.inf,dtype=np.float32)) )\n observation_space = gym.spaces.Tuple( \n (venv.observation_space[0],\n gym.spaces.Box(low=low, high=high, dtype=np.float32)) \n )\n\n VecEnvWrapper.__init__(self, venv, observation_space=observation_space)\n\n self.objnet = objnet\n\n def step_wait(self):\n (viz,states), rews, news, infos = self.venv.step_wait()\n with torch.no_grad():\n _,classes,_,h = self.objnet(viz[:,-self.objnet.num_inputs:,:,:]) #match network viz take the last obs\n states = torch.cat((states,classes),dim=1)\n return (viz,states), rews, news, infos\n\n def reset(self):\n (viz,states) = self.venv.reset()\n with torch.no_grad():\n _,classes,_,h = self.objnet(viz[:,-self.objnet.num_inputs:,:,:]) #match network viz take the last obs\n states = torch.cat((states,classes),dim=1)\n return (viz,states)\n "
]
| [
[
"numpy.concatenate",
"numpy.full",
"numpy.array",
"torch.cat",
"numpy.zeros",
"torch.no_grad"
]
]
|
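The NumPy and Torch calls above are the core of the `Stateful` wrapper and the `Vec*State` wrappers: build a state vector, append a one-hot action, and concatenate network features under `torch.no_grad()`. A minimal sketch of that data flow (sizes are illustrative, not the AnimalAI ones):

```python
import numpy as np
import torch

# Stateful.step(): scalar features plus a one-hot encoding of the action.
state = np.array([1.0, 0.0, 0.0, 1.0], dtype=np.float32)
one_hot = np.zeros(9, dtype=np.float32)  # 9 discrete actions, as an example
one_hot[4] = 1.0
state = np.concatenate((state, one_hot))

# VecVisionState.__init__: extend observation bounds with +/- inf.
low = np.full((8,), -np.inf, dtype=np.float32)

# VecVisionState.step_wait(): append features without tracking gradients.
states = torch.from_numpy(state).unsqueeze(0)   # shape [1, 13]
features = torch.zeros(1, 8)                    # stand-in for visnet output
with torch.no_grad():
    combined = torch.cat((states, features), dim=1)
print(combined.shape)  # torch.Size([1, 21])
```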
sghill/sureal | [
"df4bc7a9cfd380569ecf2252be014977c68c792b"
]
| [
"sureal/tools/stats.py"
]
| [
"import numpy as np\nimport scipy\nimport scipy.signal\nfrom .inverse import inversefunc\nimport warnings\n\n# import multiprocessing\n# pool = multiprocessing.Pool()\n\nfrom .misc import parallel_map\n\n__copyright__ = \"Copyright 2016-2018, Netflix, Inc.\"\n__license__ = \"Apache, Version 2.0\"\n\n\ndef vectorized_gaussian(xs, locs, scales):\n # f = lambda x, scale: stats.norm.pdf(x, scale=scale)\n # ff = np.vectorize(f)\n # return ff(xs - locs, scales)\n\n return 1.0 / np.sqrt(2 * np.pi) / scales * np.exp(- (xs - locs)**2 / (2* scales**2))\n\n\ndef vectorized_logistic(xs, locs, scales):\n # f = lambda x, scale: stats.logistic.pdf(x, scale=scale)\n # ff = np.vectorize(f)\n # return ff(xs - locs, scales)\n\n return 1.0 / 4.0 / scales / np.cosh((xs - locs) / 2.0 / scales)**2\n\n\ndef sech(x):\n return 2. / ( np.exp(x) + np.exp(-x) )\n\n\ndef vectorized_convolution_of_two_logistics(xs, locs1, scales1, locs2, scales2):\n\n f = lambda x, loc1, scale1, loc2, scale2: \\\n ConvolveTwoPdf(\n lambda x: 1.0 / 4.0 / scale1 * sech(x / 2.0 / scale1)**2,\n lambda x: 1.0 / 4.0 / scale2 * sech(x / 2.0 / scale2)**2,\n\n # lambda x: 1.0 / 4.0 / scale1 / np.cosh(x / 2.0 / scale1)**2,\n # lambda x: 1.0 / 4.0 / scale2 / np.cosh(x / 2.0 / scale2)**2,\n\n # lambda x: 1.0 / np.sqrt(2 * np.pi * (scale1**2) * (np.pi**2 / 3.)) * np.exp(- x**2 / (2* (scale1**2) * (np.pi**2 / 3.))), # test gaussian\n # lambda x: 1.0 / np.sqrt(2 * np.pi * (scale2**2) * (np.pi**2 / 3.)) * np.exp(- x**2 / (2* (scale2**2) * (np.pi**2 / 3.))), # test gaussian\n\n f_truncation=1e-12,\n g_truncation=1e-12,\n delta=3.0e-3,\n ).pdf(x - loc1 - loc2)\n\n # # === way 1: parallel_map (each job too small, bottlenecked by passing context) ===\n # f2 = lambda x: f(*x)\n # xshape = xs.shape\n # assert xshape == locs1.shape == scales1.shape == locs2.shape == scales2.shape\n # res = parallel_map(f2, zip(xs.ravel(), locs1.ravel(), scales1.ravel(), locs2.ravel(), scales2.ravel()), pause_sec=None)\n # return np.reshape(res, xshape)\n\n # # === way 2: vectorize (sequential execution) ===\n # ff = np.vectorize(f)\n # return ff(xs, locs1, scales1, locs2, scales2)\n\n # === way 3: parallel map combined with vectorize (best speed) ===\n ff = np.vectorize(f)\n ff2 = lambda x: ff(*x)\n xshape = xs.shape\n assert xshape == locs1.shape == scales1.shape == locs2.shape == scales2.shape\n with np.errstate(over='ignore'):\n res = parallel_map(ff2, zip(xs, locs1, scales1, locs2, scales2), pause_sec=None)\n return np.array(res)\n\n # === test: test one gaussian ===\n # return 1.0 / np.sqrt(2 * np.pi * (scales1**2 + scales2**2) * (np.pi**2 / 3.)) * np.exp(- (xs - locs1 - locs2)**2 / (2* (scales1**2 + scales2**2) * (np.pi**2 / 3.)))\n\n\ndef convolution_of_two_uniforms(x, loc1, s1, loc2, s2):\n \"\"\"\n >>> convolution_of_two_uniforms(-2, 0, 1, 0, 2)\n 0.0\n >>> convolution_of_two_uniforms(-1.5, 0, 1, 0, 2)\n 0.0\n >>> convolution_of_two_uniforms(-1.49, 0, 1, 0, 2)\n 0.0050000000000000044\n >>> convolution_of_two_uniforms(-0.51, 0, 1, 0, 2)\n 0.495\n >>> convolution_of_two_uniforms(-0.49, 0, 1, 0, 2)\n 0.5\n >>> convolution_of_two_uniforms(0, 0, 1, 0, 2)\n 0.5\n >>> convolution_of_two_uniforms(0.49, 0, 1, 0, 2)\n 0.5\n >>> convolution_of_two_uniforms(0.51, 0, 1, 0, 2)\n 0.495\n >>> convolution_of_two_uniforms(1.49, 0, 1, 0, 2)\n 0.0050000000000000044\n >>> convolution_of_two_uniforms(1.5, 0, 1, 0, 2)\n 0.0\n >>> convolution_of_two_uniforms(2, 0, 1, 0, 2)\n 0.0\n \"\"\"\n z = x - loc1 - loc2\n d = abs(s1 - s2)\n s = s1 + s2\n h = 2. / (d + s)\n\n if - s/2. 
<= z < - d/2.:\n x0, y0 = - s / 2., 0\n x1, y1 = -d / 2., h\n return (y1 - y0) / (x1 - x0) * (x - x0) + y0\n elif -d/2. <= z < d/2.:\n return h\n elif d/2. <= z < s/2.:\n x0, y0 = s / 2., 0\n x1, y1 = d / 2., h\n return (y1 - y0) / (x1 - x0) * (x - x0) + y0\n else:\n return 0.\n\n\ndef vectorized_convolution_of_two_uniforms(xs, locs1, scales1, locs2, scales2):\n return np.vectorize(convolution_of_two_uniforms)(xs, locs1, scales1, locs2, scales2)\n\n\nclass ConvolveTwoPdf(object):\n \"\"\"\n Generate a object of probability density function, which is the convolution of two\n valid probability density function. The resulting object is able to evaluate its\n probability density at any real value.\n \"\"\"\n\n def __init__(self, f, g, delta=1e-2, f_truncation=1e-5, g_truncation=1e-5):\n self.f = f\n self.g = g\n self.delta = delta\n self.f_truncation=f_truncation\n self.g_truncation=g_truncation\n\n self.model = None\n\n def pdf(self, x):\n if self.model is None:\n self._get_model()\n\n return self._pdf(x)\n\n def _get_model(self):\n inv_f = inversefunc(self.f, self.f_truncation)\n inv_g = inversefunc(self.g, self.g_truncation)\n assert inv_f > 0\n assert inv_g > 0\n reach = max(inv_f, inv_g)\n big_grid = np.arange(-reach, reach, self.delta)\n pmf_f = self.f(big_grid) * self.delta\n pmf_f = (pmf_f + np.hstack([pmf_f[1:], pmf_f[-1]])) / 2. # trapezoidal rule for better accuracy\n pmf_g = self.g(big_grid) * self.delta\n pmf_g = (pmf_g + np.hstack([pmf_g[1:], pmf_g[-1]])) / 2. # trapezoidal rule for better accuracy\n conv_pmf = scipy.signal.fftconvolve(pmf_f, pmf_g, 'same')\n\n # try:\n # np.testing.assert_almost_equal(sum(conv_pmf), 1, decimal=3)\n # except AssertionError:\n # warnings.warn('expect sum(conv_pmf) close to 1.0 but is {}'.format(sum(conv_pmf)))\n\n conv_pdf = conv_pmf / self.delta\n\n self.model = {\n 'grid': big_grid,\n 'pdf': conv_pdf,\n }\n\n def _pdf(self, x):\n assert self.model is not None\n return np.interp(x, self.model['grid'], self.model['pdf'],\n left=self.f_truncation*self.g_truncation,\n right=self.f_truncation*self.g_truncation)\n\n\ndef get_cdf(x, bins=100):\n x = np.array(x)\n counts, bin_edges = np.histogram(x, bins=bins)\n cdf = np.cumsum(counts)\n cdf = cdf / float(cdf[-1]) # normalize\n bin_edges = bin_edges[1:] # make size\n return cdf, bin_edges\n\n\ndef get_pdf(data, bins=20, density=True):\n pdf, bin_edges = np.histogram(data, density=density, bins=bins)\n bin_centres = (bin_edges[:-1] + bin_edges[1:])/2\n return pdf, bin_centres\n\n\n"
]
| [
[
"numpy.histogram",
"numpy.array",
"numpy.vectorize",
"numpy.errstate",
"scipy.signal.fftconvolve",
"numpy.cosh",
"numpy.exp",
"numpy.interp",
"numpy.arange",
"numpy.sqrt",
"numpy.cumsum",
"numpy.hstack"
]
]
|
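The heart of `ConvolveTwoPdf._get_model` is: sample both densities on one grid, convert to pmfs, convolve with `scipy.signal.fftconvolve`, and interpolate the result. A minimal sketch using two standard normals, whose convolution is N(0, 2) and therefore easy to check:

```python
import numpy as np
import scipy.signal

delta = 1e-2
grid = np.arange(-10, 10, delta)
pdf = np.exp(-grid**2 / 2) / np.sqrt(2 * np.pi)  # standard normal density

pmf = pdf * delta                                # density -> grid pmf
conv_pdf = scipy.signal.fftconvolve(pmf, pmf, "same") / delta

# The peak of N(0, 2) is 1 / sqrt(4 * pi) ~= 0.2821; the estimate agrees.
print(conv_pdf.max())
print(np.interp(0.5, grid, conv_pdf))  # evaluate anywhere, as in _pdf()
```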
applelms/guofei9987.github.io | [
"30bbebbda077de08bcb306420fe4e1129cd03e3e"
]
| [
"reading/tools/auto_generat_sidebar_tree.py"
]
| [
"# 用二叉树自动print sidebar\n\n# %%\nimport os\nimport re\nimport string\n\n# 字数统计\nregex_chinese = re.compile('[\\u4e00-\\u9fa5]') # 汉字\nregex_English = re.compile('[0-9a-zA_Z]+') # 数字和英语单词\n# 去掉中文标点和英文标点\nregex_punctuation = re.compile('[!\"()*+,./:;<=>?{|}~。;,:“”()、?《》]')\n\n\ndef word_count(file_name_md):\n '''\n 返回文件的字数,(新增)二级目录\n '''\n f = open(file_name_md, 'r', encoding='utf-8')\n passages = f.readlines()\n # word_num = sum([len(passage.replace('\\n', '').replace(' ', '')) for passage in passages])\n word_num = sum([len(regex_chinese.findall(passage))\n + len(regex_English.findall(passage))\n + len(regex_punctuation.findall(passage))\n for passage in passages])\n\n title_level_2 = [line.replace('## ', '').replace('\\n', '') for line in passages if line.startswith('## ')]\n f.close()\n return word_num, title_level_2\n\n\nclass TreeNode:\n def __init__(self, name, type, layer, word_num=0, title_level_2=None):\n self.name = name\n self.type = type # 'file' or 'path'\n self.layer = layer\n self.word_num = word_num\n self.title_level_2 = title_level_2\n self.children = dict()\n\n def __repr__(self):\n # return self.name+self.type+str(self.layer)+str([i for i in self.children])\n return 'name={name},type={type},layer={layer},word_num={word_num},children={children}'. \\\n format(name=self.name, type=self.type, layer=self.layer, word_num=self.word_num,\n children=[i for i in self.children])\n\n\nclass Tree:\n def __init__(self, path):\n path_walker = os.walk(path, topdown=True)\n self.path1, self.path2 = '\\\\'.join(path.split('\\\\')[:-1]), path.split('\\\\')[-1]\n # 'C:\\\\Users\\\\guofei8\\\\Desktop\\\\git\\\\GitHub\\\\reading', 'docs' 这种\n self.root = TreeNode(self.path2, 'path', 0)\n self.add_all_tree_node(path_walker)\n\n def addTreeNode(self, path, dirs, nondirs):\n pointer = self.root\n for i in path:\n if i not in pointer.children:\n pointer.children[i] = TreeNode(i, 'path', pointer.layer + 1)\n pointer = pointer.children[i]\n for i in dirs:\n pointer.children[i] = TreeNode(name='* ' + i, type='path', layer=pointer.layer + 1)\n for i in nondirs:\n # 每个节点的 name 是规整后的 markdown语句,这样前序遍历不需要太多处理就可以满足需求\n word_num, title_level_2 = word_count('\\\\'.join([self.path1] + path + [i]))\n\n file_name_md = '* [' + i.replace('.md', '') + \\\n ('<sup style = \"color:red\">' + str(word_num) + '字<sup>' if word_num else '') \\\n + ']' \\\n + '(' + '/'.join(path) + '/' + i + ')'\n pointer.children[i] = TreeNode(name=file_name_md,\n type='file',\n layer=pointer.layer + 1,\n word_num=word_num,\n title_level_2=title_level_2)\n\n def add_all_tree_node(self, path_walker):\n for top, dirs, nondirs in path_walker:\n path = top.replace(self.path1, '').split('\\\\')[1:] # 0号位是一个空字符串\n self.addTreeNode(path, dirs, nondirs)\n\n def pre_order(self, root):\n '''\n 左侧边栏\n '''\n return '' if (root is None) \\\n else ((root.layer - 2) * ' ' if root.layer > 1 else '# ') + root.name + '\\n' + \\\n ''.join([self.pre_order(i) for i in root.children.values()])\n\n def pre_order2(self, root):\n '''\n 总字数\n '''\n return 0 if (root is None) else root.word_num + sum([self.pre_order2(i) for i in root.children.values()])\n\n def pre_order3(self, root):\n '''\n 目录-二级目录信息\n '''\n return '' if (root is None) \\\n else ((root.layer - 2) * ' ' if root.layer > 1 else '# ') \\\n + root.name + '\\n' \\\n + ('' if root.title_level_2 is None else '、'.join(root.title_level_2)) + '\\n' \\\n + ''.join([self.pre_order3(i) for i in root.children.values()])\n\n\npath = os.getcwd() + r'\\docs'\ntree = Tree(path)\nsidebar = 
tree.pre_order(tree.root.children[tree.path2])\nprint(sidebar)\n\n# 总字数\nc = tree.pre_order2(tree.root.children[tree.path2])\nprint('总字数:', c)\n# %%\nhead = '''\n<a href=\"http://www.guofei.site\" target='blog'>\n<img src=\"http://www.guofei.site/public/img/me.png\" alt=\"回到blog\" height=\"64\" width=\"64\">\n</a>\n\n'''\n\ntail = '''\n\n* 书单\n * [书单](书单/书单.md)\n * [读完的书单](书单/读完的书单.md)\n* 建站日志\n * [快速开始](建站日志/quickstart.md)\n * [配置项](建站日志/configuration.md)\n * [主题](建站日志/themes.md)\n * [扩展Markdown语法<sup style=\"color:red\">(new)<sup>](建站日志/markdown.md)\n * [mermaid语法](建站日志/mermaid.md)\n'''\n\ncontent = '\\n'.join(sidebar.split('\\n')[1:])\n\nf = open('sidebar.md', 'w', encoding='utf-8')\n# print(head+content)\n# f.write(head+content.encode('utf-8').decode('utf-8'))\nf.write(head + content + tail)\nf.close()\n\nf = open('homepage.md', 'w', encoding='utf-8')\n# print(head+content)\n# f.write(head+content.encode('utf-8').decode('utf-8'))\nf.write(content)\nf.close()\n\n\n# %%\n# 统计每个板块的字数\ndef word_ana():\n import re\n\n regex = re.compile(\"[0-9]+['字']\")\n\n total_analys = []\n\n for i in sidebar.split('\\n')[1:]:\n if len(i) > 0:\n if i[0] == '*':\n chapter = i[2:]\n else:\n k = regex.findall(i)\n word_num = int(k[0].replace('字', '')) if len(k) > 0 else 0\n total_analys.append([chapter, word_num])\n\n import pandas as pd\n total_analys_pd = pd.DataFrame(total_analys, columns=['chapter', 'word_num'])\n a = total_analys_pd.groupby('chapter').sum()\n\n import plotly.graph_objs as go\n import plotly\n\n # 拆成画图所需数据格式\n data1 = go.Bar(\n x=a.index,\n y=a.word_num,\n name='v1'\n )\n\n layout = go.Layout(title=\"bar charts\", xaxis={'title': 'x'}, yaxis={'title': 'value'})\n fig = go.Figure(data=[data1], layout=layout)\n plotly.offline.plot(fig, filename='c:\\\\abc\\\\example.html')\n\n# word_ana()\n"
]
| [
[
"pandas.DataFrame"
]
]
|
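`word_ana()` is the only place this row's `apis` cell fires: it tabulates per-chapter word counts with `pandas.DataFrame` and aggregates with `groupby`. A minimal sketch with made-up counts:

```python
import pandas as pd

rows = [["math", 1200], ["math", 800], ["cs", 400]]  # hypothetical counts
df = pd.DataFrame(rows, columns=["chapter", "word_num"])
print(df.groupby("chapter").sum())  # total words per chapter
```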
americanas-data-platform/data-discovery-cidamo | [
"9fe42dcc13b3e6f3fc12a06b181d1207b0be0e74"
]
| [
"data_quality/src/transformer/general_transformer.py"
]
| [
"import pandas as pd\nfrom data_quality.src.transformer.base_transformer import BaseTransformer\nfrom data_quality.src.transformer.aggregation_functions.categorical import describe_categorical\nfrom data_quality.src.transformer.aggregation_functions.continuous import describe_continuous, describe_datetime\nfrom data_quality.src.transformer.aggregation_functions.discrete import describe_discrete\n\n\nclass GeneralTransformer(BaseTransformer):\n def __init__(self, dataframe: pd.DataFrame) -> None:\n self.dataframe = dataframe\n\n def transform(self) -> dict:\n metadata_dict = {}\n metadata_dict['categorical_features'] = []\n metadata_dict['discrete_features'] = []\n metadata_dict['continuous_features'] = []\n metadata_dict['datetime_features'] = []\n\n for feature in self.dataframe.columns:\n if self.dataframe[feature].dtype in ('object',):\n try:\n datetime_serie = pd.to_datetime(self.dataframe[feature])\n metadata_dict['datetime_features'].append(describe_datetime(datetime_serie))\n except Exception as e:\n metadata_dict['categorical_features'].append(describe_categorical(self.dataframe[feature]))\n if self.dataframe[feature].dtype in ('float', 'float32', 'float64'):\n metadata_dict['continuous_features'].append(describe_continuous(self.dataframe[feature]))\n if self.dataframe[feature].dtype in ('int', 'int32', 'int64'):\n metadata_dict['discrete_features'].append(describe_discrete(self.dataframe[feature]))\n return metadata_dict\n"
]
| [
[
"pandas.to_datetime"
]
]
|
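`GeneralTransformer.transform` dispatches on dtype and probes object columns with `pandas.to_datetime`, falling back to categorical handling when parsing fails. A minimal sketch of that probe, catching the concrete exceptions rather than the source's bare `Exception`:

```python
import pandas as pd

df = pd.DataFrame({"when": ["2021-01-01", "2021-02-01"],
                   "color": ["red", "blue"]})
for col in df.columns:
    if df[col].dtype == "object":
        try:
            parsed = pd.to_datetime(df[col])   # succeeds for "when"
            print(col, "-> datetime feature")
        except (ValueError, TypeError):        # raised for "color"
            print(col, "-> categorical feature")
```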
Yanan-Clarifai/automl | [
"be9d9677145220f2c622ae57e78827902373afcd"
]
| [
"efficientdet/keras/train.py"
]
| [
"# Lint as: python3\n# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The main training script.\"\"\"\nimport os\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n\nimport dataloader\nimport hparams_config\nimport utils\nfrom keras import train_lib\n\n# Cloud TPU Cluster Resolvers\nflags.DEFINE_string(\n 'tpu',\n default=None,\n help='The Cloud TPU to use for training. This should be either the name '\n 'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '\n 'url.')\nflags.DEFINE_string(\n 'gcp_project',\n default=None,\n help='Project name for the Cloud TPU-enabled project. If not specified, we '\n 'will attempt to automatically detect the GCE project from metadata.')\nflags.DEFINE_string(\n 'tpu_zone',\n default=None,\n help='GCE zone where the Cloud TPU is located in. If not specified, we '\n 'will attempt to automatically detect the GCE project from metadata.')\n\n# Model specific paramenters\nflags.DEFINE_string(\n 'eval_master',\n default='',\n help='GRPC URL of the eval master. Set to an appropriate value when running'\n ' on CPU/GPU')\nflags.DEFINE_string('eval_name', default=None, help='Eval job name')\nflags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],\n 'Training: gpus for multi-gpu, if None, use TF default.')\n\nflags.DEFINE_integer(\n 'num_cores', default=8, help='Number of TPU cores for training')\n\nflags.DEFINE_bool('use_fake_data', False, 'Use fake input.')\nflags.DEFINE_bool(\n 'use_xla', False,\n 'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '\n 'and this flag has no effect.')\nflags.DEFINE_string('model_dir', None, 'Location of model_dir')\n\nflags.DEFINE_string(\n 'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'\n ' containing attributes to use as hyperparameters.')\nflags.DEFINE_integer('batch_size', 64, 'training batch size')\nflags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '\n 'evaluation.')\nflags.DEFINE_integer('iterations_per_loop', 100,\n 'Number of iterations per TPU training loop')\nflags.DEFINE_string(\n 'training_file_pattern', None,\n 'Glob for training data files (e.g., COCO train - minival set)')\nflags.DEFINE_string('validation_file_pattern', None,\n 'Glob for evaluation tfrecords (e.g., COCO val2017 set)')\nflags.DEFINE_string(\n 'val_json_file', None,\n 'COCO validation JSON containing golden bounding boxes. If None, use the '\n 'ground truth from the dataloader. Ignored if testdev_dir is not None.')\nflags.DEFINE_string('testdev_dir', None,\n 'COCO testdev dir. 
If not None, ignorer val_json_file.')\nflags.DEFINE_integer('num_examples_per_epoch', 120000,\n 'Number of examples in one epoch')\nflags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')\nflags.DEFINE_string('mode', 'train',\n 'Mode to run: train or eval (default: train)')\nflags.DEFINE_string('model_name', 'efficientdet-d1',\n 'Model name: retinanet or efficientdet')\nflags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '\n 'training finishes.')\nflags.DEFINE_bool('debug', False, 'Enable debug mode')\nflags.DEFINE_bool('profile', False, 'Enable profile mode')\n\n# For Eval mode\nflags.DEFINE_integer('min_eval_interval', 180,\n 'Minimum seconds between evaluations.')\nflags.DEFINE_integer(\n 'eval_timeout', None,\n 'Maximum seconds between checkpoints before evaluation terminates.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n # Parse and override hparams\n config = hparams_config.get_detection_config(FLAGS.model_name)\n config.override(FLAGS.hparams)\n if FLAGS.num_epochs: # NOTE: remove this flag after updating all docs.\n config.num_epochs = FLAGS.num_epochs\n\n # Parse image size in case it is in string format.\n config.image_size = utils.parse_image_size(config.image_size)\n\n if FLAGS.use_xla and FLAGS.strategy != 'tpu':\n tf.config.optimizer.set_jit(True)\n for gpu in tf.config.list_physical_devices('GPU'):\n tf.config.experimental.set_memory_growth(gpu, True)\n\n if FLAGS.debug:\n tf.config.experimental_run_functions_eagerly(True)\n tf.debugging.set_log_device_placement(True)\n tf.random.set_seed(111111)\n logging.set_verbosity(logging.DEBUG)\n\n if FLAGS.strategy == 'tpu':\n tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)\n tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)\n ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)\n logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))\n elif FLAGS.strategy == 'gpus':\n ds_strategy = tf.distribute.MirroredStrategy()\n logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))\n else:\n if tf.config.list_physical_devices('GPU'):\n ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')\n else:\n ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')\n\n # Check data path\n if FLAGS.mode in ('train',\n 'train_and_eval') and FLAGS.training_file_pattern is None:\n raise RuntimeError('You must specify --training_file_pattern for training.')\n if FLAGS.mode in ('eval', 'train_and_eval'):\n if FLAGS.validation_file_pattern is None:\n raise RuntimeError('You must specify --validation_file_pattern '\n 'for evaluation.')\n\n params = dict(\n config.as_dict(),\n model_name=FLAGS.model_name,\n iterations_per_loop=FLAGS.iterations_per_loop,\n model_dir=FLAGS.model_dir,\n num_examples_per_epoch=FLAGS.num_examples_per_epoch,\n strategy=FLAGS.strategy,\n batch_size=FLAGS.batch_size // ds_strategy.num_replicas_in_sync,\n num_shards=ds_strategy.num_replicas_in_sync,\n val_json_file=FLAGS.val_json_file,\n testdev_dir=FLAGS.testdev_dir,\n mode=FLAGS.mode)\n\n # set mixed precision policy by keras api.\n precision = utils.get_precision(params['strategy'], params['mixed_precision'])\n policy = tf.keras.mixed_precision.experimental.Policy(precision)\n tf.keras.mixed_precision.experimental.set_policy(policy)\n\n def get_dataset(is_training, params):\n file_pattern = (\n FLAGS.training_file_pattern\n if 
is_training else FLAGS.validation_file_pattern)\n return dataloader.InputReader(\n file_pattern,\n is_training=is_training,\n use_fake_data=FLAGS.use_fake_data,\n max_instances_per_image=config.max_instances_per_image)(\n params)\n\n with ds_strategy.scope():\n model = train_lib.EfficientDetNetTrain(params['model_name'], config)\n height, width = utils.parse_image_size(params['image_size'])\n model.build((params['batch_size'], height, width, 3))\n model.compile(\n optimizer=train_lib.get_optimizer(params),\n loss={\n 'box_loss':\n train_lib.BoxLoss(\n params['delta'], reduction=tf.keras.losses.Reduction.NONE),\n 'box_iou_loss':\n train_lib.BoxIouLoss(\n params['iou_loss_type'],\n params['min_level'],\n params['max_level'],\n params['num_scales'],\n params['aspect_ratios'],\n params['anchor_scale'],\n params['image_size'],\n reduction=tf.keras.losses.Reduction.NONE),\n 'class_loss':\n train_lib.FocalLoss(\n params['alpha'],\n params['gamma'],\n label_smoothing=params['label_smoothing'],\n reduction=tf.keras.losses.Reduction.NONE)\n })\n ckpt_path = tf.train.latest_checkpoint(FLAGS.model_dir)\n if ckpt_path:\n model.load_weights(ckpt_path)\n model.freeze_vars(params['var_freeze_expr'])\n model.fit(\n get_dataset(True, params=params),\n steps_per_epoch=FLAGS.num_examples_per_epoch,\n callbacks=train_lib.get_callbacks(params, FLAGS.profile),\n validation_data=get_dataset(False, params=params),\n validation_steps=FLAGS.eval_samples)\n model.save_weights(os.path.join(FLAGS.model_dir, 'model'))\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.WARNING)\n app.run(main)\n"
]
| [
[
"tensorflow.keras.mixed_precision.experimental.set_policy",
"tensorflow.distribute.MirroredStrategy",
"tensorflow.train.latest_checkpoint",
"tensorflow.random.set_seed",
"tensorflow.config.optimizer.set_jit",
"tensorflow.config.experimental_run_functions_eagerly",
"tensorflow.config.experimental_connect_to_cluster",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.list_logical_devices",
"tensorflow.keras.mixed_precision.experimental.Policy",
"tensorflow.debugging.set_log_device_placement",
"tensorflow.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.config.list_physical_devices",
"tensorflow.tpu.experimental.initialize_tpu_system",
"tensorflow.distribute.OneDeviceStrategy",
"tensorflow.distribute.TPUStrategy"
]
]
|
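Most of the TensorFlow calls listed above belong to the strategy-selection block in `main()`. A minimal sketch of that logic for TF 2.x (the TPU branch needs real TPU hardware, so treat it as illustrative only):

```python
import tensorflow as tf

def pick_strategy(strategy_flag=None, tpu_name=None):
    if strategy_flag == "tpu":
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu_name)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        return tf.distribute.TPUStrategy(resolver)
    if strategy_flag == "gpus":
        return tf.distribute.MirroredStrategy()
    if tf.config.list_physical_devices("GPU"):
        return tf.distribute.OneDeviceStrategy("device:GPU:0")
    return tf.distribute.OneDeviceStrategy("device:CPU:0")

print(pick_strategy().num_replicas_in_sync)
```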
stanfordmlgroup/CheXseg | [
"fb5c411ce08e394cd4a2a87d963843942bdc2021"
]
| [
"chexpert-model/segmentation_test_save_output.py"
]
| [
"import wandb\nfrom args import SegTestArgParser\nimport segmentation_models_pytorch as smp\nfrom data import get_seg_loader\nimport torch\nimport pandas as pd\nimport util\nimport json\nfrom argparse import Namespace\nfrom tqdm import tqdm\nfrom pycocotools import mask\nimport os\n\nimport numpy as np\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndef test(args):\n train_args = args.model_args.config_path + '/args.json'\n ckpt_path = args.model_args.config_path + '/best_model.pth'\n with open(train_args) as f:\n train_args = json.load(f, object_hook=dict_to_namespace)\n model_fn = util.get_seg_model_fn(train_args.model_args.architecture)\n model = model_fn(encoder_name=train_args.model_args.encoder, \n encoder_weights=train_args.model_args.encoder_weights, \n classes=len(train_args.data_args.classes), \n activation=train_args.model_args.activation)\n model.load_state_dict(torch.load(ckpt_path))\n \n thresholds_file = open(args.model_args.config_path + '/thresholds.txt', 'r')\n thresholds = json.load(thresholds_file)\n\n ## TODO:Customize this with args\n # loss = smp.utils.losses.DiceLoss()\n loss = smp.utils.losses.MyDiceLoss(activation=torch.nn.Sigmoid())\n metrics = [\n smp.utils.metrics.IoUWithThresholds(thresholds, activation=torch.nn.Sigmoid()),\n ]\n\n classes = train_args.data_args.classes.copy()\n # classes.insert(0, 'Overall')\n # classes_formatted = [x.lower().replace(' ', '_') for x in args.data_args.classes]\n # for i in range(len(args.data_args.classes)):\n # ignore_channels = list(range(0, len(args.data_args.classes)))\n # ignore_channels.remove(i)\n # metrics.append(smp.utils.metrics.IoU(threshold=0.5, ignore_channels=ignore_channels, task=classes_formatted[i]))\n\n test_loader = get_seg_loader(phase=args.data_args.test_set,\n data_args=train_args.data_args,\n transform_args=args.transform_args,\n model_args=train_args.model_args,\n is_training=False)\n test_epoch = smp.utils.train.ValidEpoch(model=model,\n loss=loss,\n metrics=metrics,\n thresholds=thresholds,\n device=args.model_args.device,\n num_channels=len(train_args.data_args.classes),\n verbose=True)\n thresholds_tensor = torch.FloatTensor(thresholds).to(device='cuda')\n predicted_test_seg_maps = {}\n\n for index, (img, img_id) in tqdm(enumerate(test_loader)):\n img_name = img_id[0]\n img = img.to(device)\n predictions = model.predict(img)\n predicted_seg_maps = smp.utils.functional._taskwise_threshold(predictions, thresholds)\n\n for i in range(len(classes)):\n task = classes[i]\n predicted_seg_map = np.asfortranarray(predicted_seg_maps[0][i].cpu().numpy().astype('uint8'))\n # print(predicted_seg_map.shape)\n encoded_map = mask.encode(predicted_seg_map)\n encoded_map['counts'] = encoded_map['counts'].decode()\n\n\n if img_name in predicted_test_seg_maps:\n if task in predicted_test_seg_maps[img_name]:\n print(f'Check for duplicates for {task} for {img_name}')\n break\n else:\n predicted_test_seg_maps[img_name][task] = encoded_map\n else:\n predicted_test_seg_maps[img_name] = {}\n predicted_test_seg_maps[img_name][task] = encoded_map\n \n with open(os.path.join(args.logger_args.output_labels_save_dir, \"pred_semi_supervised_test_seg_labels.json\"), \"w\") as f:\n json.dump(predicted_test_seg_maps, f)\n \n\n \n # logs = test_epoch.run(test_loader)\n # eps = 1e-7\n # intersection, union = logs['iou_thresh_score']\n # ious = np.divide(intersection, union)\n # miou = np.insert(ious, 0, np.mean(ious))\n # for i in range(len(classes)):\n # print(\"Task:\", classes[i], \"iou:\", miou[i])\n # 
results = pd.DataFrame([miou], columns=classes, index=[args.logger_args.experiment_name])\n # results = results.sort_index(axis=1)\n # results.to_csv(args.logger_args.results_dir / 'results.csv')\n\n # classes = args.data_args.classes.copy()\n # if args.data_args.task: # only test one task if specified\n # classes = [args.data_args.task]\n # else:\n # classes.insert(2, 'Overall')\n # ious = []\n # for task in classes: # test all classes seperately to get individual ious\n # if task != 'Overall':\n # test_loader = get_seg_loader(phase=args.data_args.test_set,\n # data_args=args.data_args,\n # transform_args=args.transform_args,\n # model_args=train_args.model_args,\n # is_training=False,\n # task=task)\n # else:\n # test_loader = get_seg_loader(phase=args.data_args.test_set,\n # data_args=args.data_args,\n # transform_args=args.transform_args,\n # model_args=train_args.model_args,\n # is_training=False)\n # test_epoch = smp.utils.train.ValidEpoch(model=model,\n # loss=loss,\n # metrics=metrics,\n # device=args.model_args.device,\n # verbose=False)\n # logs = test_epoch.run(test_loader)\n # ious.append(logs['iou_score'])\n # print('Task:', task, logs)\n # results = pd.DataFrame([ious], columns=classes, index=[args.logger_args.experiment_name])\n # results = results.sort_index(axis=1)\n # results.to_csv(args.logger_args.results_dir / 'results.csv')\n\ndef dict_to_namespace(d):\n return Namespace(**d)\n\nif __name__ == \"__main__\":\n parser = SegTestArgParser()\n test(parser.parse_args())"
]
| [
[
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.load",
"torch.nn.Sigmoid"
]
]
|
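The Torch calls above come from the test harness: choose a device, restore a checkpoint, and threshold sigmoid outputs per class. A minimal sketch with a stand-in model (the checkpoint path is hypothetical and left commented out):

```python
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = torch.nn.Linear(4, 2).to(device)  # stand-in for the seg model
# model.load_state_dict(torch.load("best_model.pth"))  # hypothetical path

thresholds = torch.FloatTensor([0.5, 0.5]).to(device)
x = torch.randn(1, 4, device=device)
probs = torch.nn.Sigmoid()(model(x))
mask = (probs > thresholds).int()  # per-class thresholding, as in the tests
print(mask)
```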
adrien-berchet/GpsDataAnalyzer | [
"88a0dab83b186430c05d3e0976c5beac796ebc0d"
]
| [
"tests/fixtures/poi_points.py"
]
| [
"import pytest\n\nimport pandas as pd\n\nimport gps_data_analyzer as gda\n\n\[email protected]\ndef simple_poi_raw_data():\n x = [0.15]\n y = [1.15]\n r = [0.1]\n return x, y, r\n\n\[email protected]\ndef simple_poi_df(simple_poi_raw_data):\n x, y, r = simple_poi_raw_data\n df = pd.DataFrame({\"x\": x, \"y\": y, \"radius\": r})\n return df\n\n\[email protected]\ndef simple_poi_data(simple_poi_df):\n return gda.PoiPoints(simple_poi_df, x_col=\"x\", y_col=\"y\")\n"
]
| [
[
"pandas.DataFrame"
]
]
|
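The fixture file itself is already the clearest usage example; for completeness, here is the same chain without the project-specific `gda.PoiPoints` step, replaced by a plain assertion:

```python
import pandas as pd
import pytest

@pytest.fixture
def poi_raw_data():
    return [0.15], [1.15], [0.1]

@pytest.fixture
def poi_df(poi_raw_data):
    # Build the DataFrame exactly as simple_poi_df does.
    x, y, r = poi_raw_data
    return pd.DataFrame({"x": x, "y": y, "radius": r})

def test_poi_df(poi_df):
    assert list(poi_df.columns) == ["x", "y", "radius"]
```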
ephsmith/darts | [
"0e5b5ad184ed8e83e703e5c955156400930d8afa"
]
| [
"darts/tests/models/forecasting/test_NBEATS.py"
]
| [
"import numpy as np\n\nfrom darts.tests.base_test_class import DartsBaseTestClass\nfrom darts.utils import timeseries_generation as tg\nfrom darts.logging import get_logger\n\nlogger = get_logger(__name__)\n\ntry:\n from darts.models.forecasting.nbeats import NBEATSModel\n\n TORCH_AVAILABLE = True\nexcept ImportError:\n logger.warning(\"Torch not available. TCN tests will be skipped.\")\n TORCH_AVAILABLE = False\n\n\nif TORCH_AVAILABLE:\n\n class NBEATSModelTestCase(DartsBaseTestClass):\n def test_creation(self):\n with self.assertRaises(ValueError):\n # if a list is passed to the `layer_widths` argument, it must have a length equal to `num_stacks`\n NBEATSModel(\n input_chunk_length=1,\n output_chunk_length=1,\n num_stacks=3,\n layer_widths=[1, 2],\n )\n\n def test_fit(self):\n large_ts = tg.constant_timeseries(length=100, value=1000)\n small_ts = tg.constant_timeseries(length=100, value=10)\n\n # Test basic fit and predict\n model = NBEATSModel(\n input_chunk_length=1,\n output_chunk_length=1,\n n_epochs=10,\n num_stacks=1,\n num_blocks=1,\n layer_widths=20,\n )\n model.fit(large_ts[:98])\n pred = model.predict(n=2).values()[0]\n\n # Test whether model trained on one series is better than one trained on another\n model2 = NBEATSModel(\n input_chunk_length=1,\n output_chunk_length=1,\n n_epochs=10,\n num_stacks=1,\n num_blocks=1,\n layer_widths=20,\n )\n model2.fit(small_ts[:98])\n pred2 = model2.predict(n=2).values()[0]\n self.assertTrue(abs(pred2 - 10) < abs(pred - 10))\n\n # test short predict\n pred3 = model2.predict(n=1)\n self.assertEqual(len(pred3), 1)\n\n def test_multivariate(self):\n\n # testing a 2-variate linear ts, first one from 0 to 1, second one from 0 to 0.5, length 100\n series_multivariate = tg.linear_timeseries(length=100).stack(\n tg.linear_timeseries(length=100, start_value=0, end_value=0.5)\n )\n model = NBEATSModel(\n input_chunk_length=3, output_chunk_length=1, n_epochs=20\n )\n\n model.fit(series_multivariate)\n res = model.predict(n=2).values()\n\n # the theoretical result should be [[1.01, 1.02], [0.505, 0.51]].\n # We just test if the given result is not too far in average.\n self.assertTrue(\n abs(np.average(res - np.array([[1.01, 1.02], [0.505, 0.51]])) < 0.03)\n )\n\n # Test Covariates\n series_covariates = tg.linear_timeseries(length=100).stack(\n tg.linear_timeseries(length=100, start_value=0, end_value=0.1)\n )\n model = NBEATSModel(input_chunk_length=3, output_chunk_length=4, n_epochs=5)\n model.fit(series_multivariate, past_covariates=series_covariates)\n\n res = model.predict(\n n=3, series=series_multivariate, past_covariates=series_covariates\n ).values()\n\n self.assertEqual(len(res), 3)\n self.assertTrue(abs(np.average(res)) < 5)\n\n def test_logtensorboard(self):\n ts = tg.constant_timeseries(length=50, value=10)\n\n # testing if both the modes (generic and interpretable) runs with tensorboard\n architectures = [True, False]\n for architecture in architectures:\n # Test basic fit and predict\n model = NBEATSModel(\n input_chunk_length=1,\n output_chunk_length=1,\n n_epochs=1,\n log_tensorboard=True,\n generic_architecture=architecture,\n )\n model.fit(ts)\n model.predict(n=2)\n"
]
| [
[
"numpy.average",
"numpy.array"
]
]
|
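The two NumPy calls above implement the tolerance check in `test_multivariate`: average the residuals against the expected values and assert they stay small. A minimal sketch with hypothetical predictions:

```python
import numpy as np

res = np.array([[1.008, 1.018], [0.507, 0.512]])   # hypothetical predictions
expected = np.array([[1.01, 1.02], [0.505, 0.51]])
assert abs(np.average(res - expected)) < 0.03
```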
zzzace2000/robust_cls_model | [
"c2b9a79dd5ccbb8aa8beaa08baaded6ed45d0410"
]
| [
"arch/inpainting/InpaintingBase.py"
]
| [
"import torch.nn as nn\nimport torch\n\nfrom .Baseline import InpaintTemplate\n\n\nclass InpaintingBase(InpaintTemplate):\n def __init__(self):\n super(InpaintingBase, self).__init__()\n\n pth_mean = torch.FloatTensor([0.485, 0.456, 0.406]).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n pth_std = torch.FloatTensor([0.229, 0.224, 0.225]).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n\n self.pth_mean = nn.Parameter(pth_mean, requires_grad=False)\n self.pth_std = nn.Parameter(pth_std, requires_grad=False)\n\n # Set up loss record system\n self.zero_loss_record()\n\n def generate_background(self, x, mask):\n '''\n Use to generate whole blurry images with pytorch normalization.\n '''\n outputs = self.forward(x, mask)\n return outputs[0].data\n\n def impute_missing_imgs(self, x, mask):\n '''\n Generate images but replace the part that don't need to impute by original img.\n Used in test time.\n '''\n generated_img = self.generate_background(x, mask)\n\n if mask.ndimension() == 3:\n mask = mask.unsqueeze(0)\n\n expand_mask = mask.expand_as(x)\n generated_img[expand_mask == 1] = x[expand_mask == 1]\n return generated_img\n\n '''\n The following functions are used to train and used in train_gen_model.py\n '''\n def loss_fn(self, outputs, targets, mask):\n loss = ((1. - mask) * (outputs[0] - targets) ** 2).sum()\n self.total_loss += loss.data[0]\n self.num_instances += outputs[0].size(0)\n return loss / outputs[0].size(0)\n\n def zero_loss_record(self):\n self.total_loss = 0.\n self.num_instances = 0\n\n def report_loss(self):\n return 'training loss: {}'.format(self.total_loss / self.num_instances)\n\n def total_avg_loss(self):\n return self.total_loss / self.num_instances\n\n\nclass VAEInpaintingBase(InpaintingBase):\n def __init__(self, num_training=100000):\n super(VAEInpaintingBase, self).__init__()\n\n print('num_training:', num_training)\n self.num_training = num_training\n\n @staticmethod\n def reparametrize(mu, logvar):\n std = logvar.mul(0.5).exp_()\n\n eps = std.data.new(std.size()).normal_()\n return eps.mul(std).add_(mu)\n\n def zero_loss_record(self):\n self.total_loss = 0.\n self.pred_loss = 0.\n self.reg_loss = 0.\n self.num_instances = 0\n\n def report_loss(self):\n return 'loss: {} ({}, {})'.format(self.total_loss / self.num_instances,\n self.pred_loss / self.num_instances,\n self.reg_loss / self.num_instances)\n"
]
| [
[
"torch.FloatTensor",
"torch.nn.Parameter"
]
]
|
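`InpaintingBase.__init__` registers the standard ImageNet normalization constants as non-trainable `nn.Parameter`s so they move with the module across devices. A minimal sketch of that pattern (`reshape` standing in for the source's unsqueeze chain):

```python
import torch
import torch.nn as nn

class Normalizer(nn.Module):
    def __init__(self):
        super().__init__()
        mean = torch.FloatTensor([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
        std = torch.FloatTensor([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)
        # requires_grad=False: the constants travel with .to(device) and
        # the state_dict, but are never updated by the optimizer.
        self.mean = nn.Parameter(mean, requires_grad=False)
        self.std = nn.Parameter(std, requires_grad=False)

    def forward(self, x):
        return (x - self.mean) / self.std

print(Normalizer()(torch.rand(2, 3, 8, 8)).shape)  # torch.Size([2, 3, 8, 8])
```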
AmirAliEbrahimi/PyTorch-ENet | [
"22860676e9f05c06c6babf89001c0226ac146b16"
]
| [
"models/bden.py"
]
| [
"import torch.nn as nn\nimport torchvision.transforms as transforms\nfrom .binarized_modules import BinarizeConv2d,InputScale,SignumActivation,BinarizeTransposedConv2d\n\nclass BDEN(nn.Module):\n\n def __init__(self, num_classes):\n super().__init__()\n self.ratioInfl=16\n self.numOfClasses=num_classes\n\n self.FrontLayer = nn.Sequential(\n InputScale(),\n BinarizeConv2d(3, int(4*self.ratioInfl), kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(int(4*self.ratioInfl)),\n SignumActivation(),\n BinarizeConv2d(int(4*self.ratioInfl), int(4*self.ratioInfl), kernel_size=3, padding=1,stride=1),\n nn.BatchNorm2d(int(4*self.ratioInfl)),\n SignumActivation(),\n\n BinarizeConv2d(int(4*self.ratioInfl), int(8*self.ratioInfl), kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(int(8*self.ratioInfl)),\n SignumActivation(),\n BinarizeConv2d(int(8*self.ratioInfl), int(8*self.ratioInfl), kernel_size=3, padding=1,stride=1),\n nn.BatchNorm2d(int(8*self.ratioInfl)),\n SignumActivation(),\n BinarizeConv2d(int(8*self.ratioInfl), int(16*self.ratioInfl), kernel_size=3, padding=1,stride=1),\n nn.BatchNorm2d(int(16*self.ratioInfl)),\n SignumActivation(),\n\n BinarizeTransposedConv2d(int(16*self.ratioInfl), int(16*self.ratioInfl), kernel_size=3, stride=2 , padding=1,output_padding=1),\n nn.BatchNorm2d(int(16*self.ratioInfl)),\n SignumActivation(),\n BinarizeConv2d(int(16*self.ratioInfl), int(8*self.ratioInfl), kernel_size=3, padding=1,stride=1),\n nn.BatchNorm2d(int(8*self.ratioInfl)),\n SignumActivation(),\n\n BinarizeTransposedConv2d(int(8*self.ratioInfl), int(8*self.ratioInfl), kernel_size=3, padding=1,stride=2,output_padding=1),\n nn.BatchNorm2d(int(8*self.ratioInfl)),\n SignumActivation(),\n BinarizeConv2d(int(8*self.ratioInfl), int(4*self.ratioInfl), kernel_size=3, padding=1,stride=1),\n nn.BatchNorm2d(int(4*self.ratioInfl)),\n SignumActivation()\n )\n\n self.TailLayer = nn.Sequential(\n BinarizeConv2d(int(4*self.ratioInfl), self.numOfClasses, kernel_size=3, padding=1,stride=1),\n nn.BatchNorm2d(self.numOfClasses),\n nn.Softmax(dim=0)\n )\n\n def forward(self, x):\n x = self.FrontLayer(x)\n x = self.TailLayer(x)\n return x\n"
]
| [
[
"torch.nn.BatchNorm2d",
"torch.nn.Softmax"
]
]
|
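The tail of BDEN is conv -> `BatchNorm2d` -> `Softmax`. A minimal sketch with a plain `Conv2d` standing in for the project's `BinarizeConv2d`; note the source passes `dim=0` to `Softmax`, which normalizes over the batch axis, whereas `dim=1` yields per-pixel class probabilities:

```python
import torch
import torch.nn as nn

num_classes = 12  # hypothetical
tail = nn.Sequential(
    nn.Conv2d(64, num_classes, kernel_size=3, padding=1),
    nn.BatchNorm2d(num_classes),
    nn.Softmax(dim=1),  # normalize over the class channel
)
out = tail(torch.rand(2, 64, 16, 16))
print(out.shape, out.sum(dim=1)[0, 0, 0].item())  # sums to 1.0 over classes
```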
Darth-Ozak/pvlib-python | [
"510f08ef8b2d0ee543c197a1433c6294ce410cde"
]
| [
"pvlib/temperature.py"
]
| [
"\"\"\"\nThe ``temperature`` module contains functions for modeling temperature of\nPV modules and cells.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom pvlib.tools import sind\n\nTEMPERATURE_MODEL_PARAMETERS = {\n 'sapm': {\n 'open_rack_glass_glass': {'a': -3.47, 'b': -.0594, 'deltaT': 3},\n 'close_mount_glass_glass': {'a': -2.98, 'b': -.0471, 'deltaT': 1},\n 'open_rack_glass_polymer': {'a': -3.56, 'b': -.0750, 'deltaT': 3},\n 'insulated_back_glass_polymer': {'a': -2.81, 'b': -.0455, 'deltaT': 0},\n },\n 'pvsyst': {'freestanding': {'u_c': 29.0, 'u_v': 0},\n 'insulated': {'u_c': 15.0, 'u_v': 0}}\n}\n\"\"\"Dictionary of temperature parameters organized by model.\n\nThere are keys for each model at the top level. Currently there are two models,\n``'sapm'`` for the Sandia Array Performance Model, and ``'pvsyst'``. Each model\nhas a dictionary of configurations; a value is itself a dictionary containing\nmodel parameters. Retrieve parameters by indexing the model and configuration\nby name. Note: the keys are lower-cased and case sensitive.\n\nExample\n-------\nRetrieve the open rack glass-polymer configuration for SAPM::\n\n from pvlib.temperature import TEMPERATURE_MODEL_PARAMETERS\n temperature_model_parameters = (\n TEMPERATURE_MODEL_PARAMETERS['sapm']['open_rack_glass_polymer'])\n # {'a': -3.56, 'b': -0.075, 'deltaT': 3}\n\"\"\"\n\n\ndef _temperature_model_params(model, parameter_set):\n try:\n params = TEMPERATURE_MODEL_PARAMETERS[model]\n return params[parameter_set]\n except KeyError:\n msg = ('{} is not a named set of parameters for the {} cell'\n ' temperature model.'\n ' See pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS'\n ' for names'.format(parameter_set, model))\n raise KeyError(msg)\n\n\ndef sapm_cell(poa_global, temp_air, wind_speed, a, b, deltaT,\n irrad_ref=1000.):\n r'''\n Calculate cell temperature per the Sandia Array Performance Model.\n\n See [1]_ for details on the Sandia Array Performance Model.\n\n Parameters\n ----------\n poa_global : numeric\n Total incident irradiance [W/m^2].\n\n temp_air : numeric\n Ambient dry bulb temperature [C].\n\n wind_speed : numeric\n Wind speed at a height of 10 meters [m/s].\n\n a : float\n Parameter :math:`a` in :eq:`sapm1`.\n\n b : float\n Parameter :math:`b` in :eq:`sapm1`.\n\n deltaT : float\n Parameter :math:`\\Delta T` in :eq:`sapm2` [C].\n\n irrad_ref : float, default 1000\n Reference irradiance, parameter :math:`E_{0}` in\n :eq:`sapm2` [W/m^2].\n\n Returns\n -------\n numeric, values in degrees C.\n\n Notes\n -----\n The model for cell temperature :math:`T_{C}` is given by a pair of\n equations (Eq. 11 and 12 in [1]_).\n\n .. math::\n :label: sapm1\n\n T_{m} = E \\times \\exp (a + b \\times WS) + T_{a}\n\n .. math::\n :label: sapm2\n\n T_{C} = T_{m} + \\frac{E}{E_{0}} \\Delta T\n\n The module back surface temperature :math:`T_{m}` is implemented in\n :py:func:`~pvlib.temperature.sapm_module`.\n\n Inputs to the model are plane-of-array irradiance :math:`E` (W/m2) and\n ambient air temperature :math:`T_{a}` (C). Model parameters depend both on\n the module construction and its mounting. 
Parameter sets are provided in\n [1]_ for representative modules and mounting, and are coded for convenience\n in :data:`~pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS`.\n\n +---------------+----------------+-------+---------+---------------------+\n | Module | Mounting | a | b | :math:`\\Delta T [C]`|\n +===============+================+=======+=========+=====================+\n | glass/glass | open rack | -3.47 | -0.0594 | 3 |\n +---------------+----------------+-------+---------+---------------------+\n | glass/glass | close roof | -2.98 | -0.0471 | 1 |\n +---------------+----------------+-------+---------+---------------------+\n | glass/polymer | open rack | -3.56 | -0.075 | 3 |\n +---------------+----------------+-------+---------+---------------------+\n | glass/polymer | insulated back | -2.81 | -0.0455 | 0 |\n +---------------+----------------+-------+---------+---------------------+\n\n References\n ----------\n .. [1] King, D. et al, 2004, \"Sandia Photovoltaic Array Performance\n Model\", SAND Report 3535, Sandia National Laboratories, Albuquerque,\n NM.\n\n See also\n --------\n sapm_cell_from_module\n sapm_module\n\n Examples\n --------\n >>> from pvlib.temperature import sapm_cell, TEMPERATURE_MODEL_PARAMETERS\n >>> params = TEMPERATURE_MODEL_PARAMETERS['sapm']['open_rack_glass_glass']\n >>> sapm_cell(1000, 10, 0, **params)\n 44.11703066106086\n '''\n module_temperature = sapm_module(poa_global, temp_air, wind_speed,\n a, b)\n return sapm_cell_from_module(module_temperature, poa_global, deltaT,\n irrad_ref)\n\n\ndef sapm_module(poa_global, temp_air, wind_speed, a, b):\n r'''\n Calculate module back surface temperature per the Sandia Array\n Performance Model.\n\n See [1]_ for details on the Sandia Array Performance Model.\n\n Parameters\n ----------\n poa_global : numeric\n Total incident irradiance [W/m^2].\n\n temp_air : numeric\n Ambient dry bulb temperature [C].\n\n wind_speed : numeric\n Wind speed at a height of 10 meters [m/s].\n\n a : float\n Parameter :math:`a` in :eq:`sapm1mod`.\n\n b : float\n Parameter :math:`b` in :eq:`sapm1mod`.\n\n Returns\n -------\n numeric, values in degrees C.\n\n Notes\n -----\n The model for module temperature :math:`T_{m}` is given by Eq. 11 in [1]_.\n\n .. math::\n :label: sapm1mod\n\n T_{m} = E \\times \\exp (a + b \\times WS) + T_{a}\n\n Inputs to the model are plane-of-array irradiance :math:`E` (W/m2) and\n ambient air temperature :math:`T_{a}` (C). Model outputs are surface\n temperature at the back of the module :math:`T_{m}` and cell temperature\n :math:`T_{C}`. Model parameters depend both on the module construction and\n its mounting. 
Parameter sets are provided in [1]_ for representative\n modules and mounting, and are coded for convenience in\n :data:`~pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS`.\n\n +---------------+----------------+-------+---------+---------------------+\n | Module | Mounting | a | b | :math:`\\Delta T [C]`|\n +===============+================+=======+=========+=====================+\n | glass/glass | open rack | -3.47 | -0.0594 | 3 |\n +---------------+----------------+-------+---------+---------------------+\n | glass/glass | close roof | -2.98 | -0.0471 | 1 |\n +---------------+----------------+-------+---------+---------------------+\n | glass/polymer | open rack | -3.56 | -0.075 | 3 |\n +---------------+----------------+-------+---------+---------------------+\n | glass/polymer | insulated back | -2.81 | -0.0455 | 0 |\n +---------------+----------------+-------+---------+---------------------+\n\n References\n ----------\n .. [1] King, D. et al, 2004, \"Sandia Photovoltaic Array Performance\n Model\", SAND Report 3535, Sandia National Laboratories, Albuquerque,\n NM.\n\n See also\n --------\n sapm_cell\n sapm_cell_from_module\n '''\n return poa_global * np.exp(a + b * wind_speed) + temp_air\n\n\ndef sapm_cell_from_module(module_temperature, poa_global, deltaT,\n irrad_ref=1000.):\n r'''\n Calculate cell temperature from module temperature using the Sandia Array\n Performance Model.\n\n See [1]_ for details on the Sandia Array Performance Model.\n\n Parameters\n ----------\n module_temperature : numeric\n Temperature of back of module surface [C].\n\n poa_global : numeric\n Total incident irradiance [W/m^2].\n\n deltaT : float\n Parameter :math:`\\Delta T` in :eq:`sapm2_cell_from_mod` [C].\n\n irrad_ref : float, default 1000\n Reference irradiance, parameter :math:`E_{0}` in\n :eq:`sapm2` [W/m^2].\n\n Returns\n -------\n numeric, values in degrees C.\n\n Notes\n -----\n The model for cell temperature :math:`T_{C}` is given by Eq. 12 in [1]_.\n\n .. math::\n :label: sapm2_cell_from_mod\n\n T_{C} = T_{m} + \\frac{E}{E_{0}} \\Delta T\n\n The module back surface temperature :math:`T_{m}` is implemented in\n :py:func:`~pvlib.temperature.sapm_module`.\n\n Model parameters depend both on the module construction and its mounting.\n Parameter sets are provided in [1]_ for representative modules and\n mounting, and are coded for convenience in\n :data:`~pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS`.\n\n +---------------+----------------+-------+---------+---------------------+\n | Module | Mounting | a | b | :math:`\\Delta T [C]`|\n +===============+================+=======+=========+=====================+\n | glass/glass | open rack | -3.47 | -0.0594 | 3 |\n +---------------+----------------+-------+---------+---------------------+\n | glass/glass | close roof | -2.98 | -0.0471 | 1 |\n +---------------+----------------+-------+---------+---------------------+\n | glass/polymer | open rack | -3.56 | -0.075 | 3 |\n +---------------+----------------+-------+---------+---------------------+\n | glass/polymer | insulated back | -2.81 | -0.0455 | 0 |\n +---------------+----------------+-------+---------+---------------------+\n\n References\n ----------\n .. [1] King, D. 
et al, 2004, \"Sandia Photovoltaic Array Performance\n Model\", SAND Report 3535, Sandia National Laboratories, Albuquerque,\n NM.\n\n See also\n --------\n sapm_cell\n sapm_module\n '''\n return module_temperature + (poa_global / irrad_ref) * deltaT\n\n\ndef pvsyst_cell(poa_global, temp_air, wind_speed=1.0, u_c=29.0, u_v=0.0,\n eta_m=0.1, alpha_absorption=0.9):\n r\"\"\"\n Calculate cell temperature using an empirical heat loss factor model\n as implemented in PVsyst.\n\n Parameters\n ----------\n poa_global : numeric\n Total incident irradiance [W/m^2].\n\n temp_air : numeric\n Ambient dry bulb temperature [C].\n\n wind_speed : numeric, default 1.0\n Wind speed in m/s measured at the same height for which the wind loss\n factor was determined. The default value 1.0 m/2 is the wind\n speed at module height used to determine NOCT. [m/s]\n\n u_c : float, default 29.0\n Combined heat loss factor coefficient. The default value is\n representative of freestanding modules with the rear surfaces exposed\n to open air (e.g., rack mounted). Parameter :math:`U_{c}` in\n :eq:`pvsyst`.\n :math:`\\left[\\frac{\\text{W}/{\\text{m}^2}}{\\text{C}}\\right]`\n\n u_v : float, default 0.0\n Combined heat loss factor influenced by wind. Parameter :math:`U_{v}`\n in :eq:`pvsyst`.\n :math:`\\left[ \\frac{\\text{W}/\\text{m}^2}{\\text{C}\\ \\left( \\text{m/s} \\right)} \\right]`\n\n eta_m : numeric, default 0.1\n Module external efficiency as a fraction, i.e., DC power / poa_global.\n Parameter :math:`\\eta_{m}` in :eq:`pvsyst`.\n\n alpha_absorption : numeric, default 0.9\n Absorption coefficient. Parameter :math:`\\alpha` in :eq:`pvsyst`.\n\n Returns\n -------\n numeric, values in degrees Celsius\n\n Notes\n -----\n The Pvsyst model for cell temperature :math:`T_{C}` is given by\n\n .. math::\n :label: pvsyst\n\n T_{C} = T_{a} + \\frac{\\alpha E (1 - \\eta_{m})}{U_{c} + U_{v} \\times WS}\n\n Inputs to the model are plane-of-array irradiance :math:`E` (W/m2), ambient\n air temperature :math:`T_{a}` (C) and wind speed :math:`WS` (m/s). Model\n output is cell temperature :math:`T_{C}`. Model parameters depend both on\n the module construction and its mounting. Parameters are provided in\n [1]_ for open (freestanding) and close (insulated) mounting configurations,\n , and are coded for convenience in\n :data:`~pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS`. The heat loss\n factors provided represent the combined effect of convection, radiation and\n conduction, and their values are experimentally determined.\n\n +--------------+---------------+---------------+\n | Mounting | :math:`U_{c}` | :math:`U_{v}` |\n +==============+===============+===============+\n | freestanding | 29.0 | 0.0 |\n +--------------+---------------+---------------+\n | insulated | 15.0 | 0.0 |\n +--------------+---------------+---------------+\n\n References\n ----------\n .. [1] \"PVsyst 6 Help\", Files.pvsyst.com, 2018. [Online]. Available:\n http://files.pvsyst.com/help/index.html. [Accessed: 10- Dec- 2018].\n\n .. [2] Faiman, D. (2008). 
\"Assessing the outdoor operating temperature of\n photovoltaic modules.\" Progress in Photovoltaics 16(4): 307-315.\n\n Examples\n --------\n >>> from pvlib.temperature import pvsyst_cell, TEMPERATURE_MODEL_PARAMETERS\n >>> params = TEMPERATURE_MODEL_PARAMETERS['pvsyst']['freestanding']\n >>> pvsyst_cell(1000, 10, **params)\n 37.93103448275862\n \"\"\"\n\n total_loss_factor = u_c + u_v * wind_speed\n heat_input = poa_global * alpha_absorption * (1 - eta_m)\n temp_difference = heat_input / total_loss_factor\n return temp_air + temp_difference\n\n\ndef faiman(poa_global, temp_air, wind_speed=1.0, u0=25.0, u1=6.84):\n r'''\n Calculate cell or module temperature using the Faiman model.\n\n The Faiman model uses an empirical heat loss factor model [1]_ and is\n adopted in the IEC 61853 standards [2]_ and [3]_.\n\n Usage of this model in the IEC 61853 standard does not distinguish\n between cell and module temperature.\n\n Parameters\n ----------\n poa_global : numeric\n Total incident irradiance [W/m^2].\n\n temp_air : numeric\n Ambient dry bulb temperature [C].\n\n wind_speed : numeric, default 1.0\n Wind speed in m/s measured at the same height for which the wind loss\n factor was determined. The default value 1.0 m/s is the wind\n speed at module height used to determine NOCT. [m/s]\n\n u0 : numeric, default 25.0\n Combined heat loss factor coefficient. The default value is one\n determined by Faiman for 7 silicon modules.\n :math:`\\left[\\frac{\\text{W}/{\\text{m}^2}}{\\text{C}}\\right]`\n\n u1 : numeric, default 6.84\n Combined heat loss factor influenced by wind. The default value is one\n determined by Faiman for 7 silicon modules.\n :math:`\\left[ \\frac{\\text{W}/\\text{m}^2}{\\text{C}\\ \\left( \\text{m/s} \\right)} \\right]`\n\n Returns\n -------\n numeric, values in degrees Celsius\n\n Notes\n -----\n All arguments may be scalars or vectors. If multiple arguments\n are vectors they must be the same length.\n\n References\n ----------\n .. [1] Faiman, D. (2008). \"Assessing the outdoor operating temperature of\n photovoltaic modules.\" Progress in Photovoltaics 16(4): 307-315.\n\n .. [2] \"IEC 61853-2 Photovoltaic (PV) module performance testing and energy\n rating - Part 2: Spectral responsivity, incidence angle and module\n operating temperature measurements\". IEC, Geneva, 2018.\n\n .. [3] \"IEC 61853-3 Photovoltaic (PV) module performance testing and energy\n rating - Part 3: Energy rating of PV modules\". IEC, Geneva, 2018.\n\n '''\n # Contributed by Anton Driesse (@adriesse), PV Performance Labs. Dec., 2019\n\n # The following lines may seem odd since u0 & u1 are probably scalar,\n # but it serves an indirect and easy way of allowing lists and\n # tuples for the other function arguments.\n u0 = np.asanyarray(u0)\n u1 = np.asanyarray(u1)\n\n total_loss_factor = u0 + u1 * wind_speed\n heat_input = poa_global\n temp_difference = heat_input / total_loss_factor\n return temp_air + temp_difference\n\n\ndef ross(poa_global, temp_air, noct):\n r'''\n Calculate cell temperature using the Ross model.\n\n The Ross model [1]_ assumes the difference between cell temperature\n and ambient temperature is proportional to the plane of array irradiance,\n and assumes wind speed of 1 m/s. The model implicitly assumes steady or\n slowly changing irradiance conditions.\n\n Parameters\n ----------\n poa_global : numeric\n Total incident irradiance. [W/m^2]\n\n temp_air : numeric\n Ambient dry bulb temperature. 
[C]\n\n noct : numeric\n Nominal operating cell temperature [C], determined at conditions of\n 800 W/m^2 irradiance, 20 C ambient air temperature and 1 m/s wind.\n\n Returns\n -------\n cell_temperature : numeric\n Cell temperature. [C]\n\n Notes\n -----\n The Ross model for cell temperature :math:`T_{C}` is given in [1]_ as\n\n .. math::\n\n T_{C} = T_{a} + \\frac{NOCT - 20}{80} S\n\n where :math:`S` is the plane of array irradiance in :math:`mW/{cm}^2`.\n This function expects irradiance in :math:`W/m^2`.\n\n References\n ----------\n .. [1] Ross, R. G. Jr., (1981). \"Design Techniques for Flat-Plate\n Photovoltaic Arrays\". 15th IEEE Photovoltaic Specialist Conference,\n Orlando, FL.\n '''\n # factor of 0.1 converts irradiance from W/m2 to mW/cm2\n return temp_air + (noct - 20.) / 80. * poa_global * 0.1\n\n\ndef _fuentes_hconv(tave, windmod, tinoct, temp_delta, xlen, tilt,\n check_reynold):\n # Calculate the convective coefficient as in Fuentes 1987 -- a mixture of\n # free, laminar, and turbulent convection.\n densair = 0.003484 * 101325.0 / tave # density\n visair = 0.24237e-6 * tave**0.76 / densair # kinematic viscosity\n condair = 2.1695e-4 * tave**0.84 # thermal conductivity\n reynold = windmod * xlen / visair\n # the boundary between laminar and turbulent is modeled as an abrupt\n # change at Re = 1.2e5:\n if check_reynold and reynold > 1.2e5:\n # turbulent convection\n hforce = 0.0282 / reynold**0.2 * densair * windmod * 1007 / 0.71**0.4\n else:\n # laminar convection\n hforce = 0.8600 / reynold**0.5 * densair * windmod * 1007 / 0.71**0.67\n # free convection via Grashof number\n # NB: Fuentes hardwires sind(tilt) as 0.5 for tilt=30\n grashof = 9.8 / tave * temp_delta * xlen**3 / visair**2 * sind(tilt)\n # product of Nusselt number and (k/l)\n hfree = 0.21 * (grashof * 0.71)**0.32 * condair / xlen\n # combine free and forced components\n hconv = (hfree**3 + hforce**3)**(1/3)\n return hconv\n\n\ndef _hydraulic_diameter(width, height):\n # calculate the hydraulic diameter of a rectangle\n return 2 * (width * height) / (width + height)\n\n\ndef fuentes(poa_global, temp_air, wind_speed, noct_installed, module_height=5,\n wind_height=9.144, emissivity=0.84, absorption=0.83,\n surface_tilt=30, module_width=0.31579, module_length=1.2):\n \"\"\"\n Calculate cell or module temperature using the Fuentes model.\n\n The Fuentes model is a first-principles heat transfer energy balance\n model [1]_ that is used in PVWatts for cell temperature modeling [2]_.\n\n Parameters\n ----------\n poa_global : pandas Series\n Total incident irradiance [W/m^2]\n\n temp_air : pandas Series\n Ambient dry bulb temperature [C]\n\n wind_speed : pandas Series\n Wind speed [m/s]\n\n noct_installed : float\n The \"installed\" nominal operating cell temperature as defined in [1]_.\n PVWatts assumes this value to be 45 C for rack-mounted arrays and\n 49 C for roof mount systems with restricted air flow around the\n module. [C]\n\n module_height : float, default 5.0\n The height above ground of the center of the module. The PVWatts\n default is 5.0 [m]\n\n wind_height : float, default 9.144\n The height above ground at which ``wind_speed`` is measured. The\n PVWatts defauls is 9.144 [m]\n\n emissivity : float, default 0.84\n The effectiveness of the module at radiating thermal energy. [unitless]\n\n absorption : float, default 0.83\n The fraction of incident irradiance that is converted to thermal\n energy in the module. [unitless]\n\n surface_tilt : float, default 30\n Module tilt from horizontal. 
If not provided, the default value\n of 30 degrees from [1]_ and [2]_ is used. [degrees]\n\n module_width : float, default 0.31579\n Module width. The default value of 0.31579 meters in combination with\n the default `module_length` gives a hydraulic diameter of 0.5 as\n assumed in [1]_ and [2]_. [m]\n\n module_length : float, default 1.2\n Module length. The default value of 1.2 meters in combination with\n the default `module_width` gives a hydraulic diameter of 0.5 as\n assumed in [1]_ and [2]_. [m]\n\n Returns\n -------\n temperature_cell : pandas Series\n The modeled cell temperature [C]\n\n Notes\n -----\n This function returns slightly different values from PVWatts at night\n and just after dawn. This is because the SAM SSC assumes that module\n temperature equals ambient temperature when irradiance is zero so it can\n skip the heat balance calculation at night.\n\n References\n ----------\n .. [1] Fuentes, M. K., 1987, \"A Simplifed Thermal Model for Flat-Plate\n Photovoltaic Arrays\", SAND85-0330, Sandia National Laboratories,\n Albuquerque NM.\n http://prod.sandia.gov/techlib/access-control.cgi/1985/850330.pdf\n .. [2] Dobos, A. P., 2014, \"PVWatts Version 5 Manual\", NREL/TP-6A20-62641,\n National Renewable Energy Laboratory, Golden CO.\n doi:10.2172/1158421.\n \"\"\"\n # ported from the FORTRAN77 code provided in Appendix A of Fuentes 1987;\n # nearly all variable names are kept the same for ease of comparison.\n\n boltz = 5.669e-8\n emiss = emissivity\n absorp = absorption\n xlen = _hydraulic_diameter(module_width, module_length)\n # cap0 has units of [J / (m^2 K)], equal to mass per unit area times\n # specific heat of the module.\n cap0 = 11000\n tinoct = noct_installed + 273.15\n\n # convective coefficient of top surface of module at NOCT\n windmod = 1.0\n tave = (tinoct + 293.15) / 2\n hconv = _fuentes_hconv(tave, windmod, tinoct, tinoct - 293.15, xlen,\n surface_tilt, False)\n\n # determine the ground temperature ratio and the ratio of the total\n # convection to the top side convection\n hground = emiss * boltz * (tinoct**2 + 293.15**2) * (tinoct + 293.15)\n backrat = (\n absorp * 800.0\n - emiss * boltz * (tinoct**4 - 282.21**4)\n - hconv * (tinoct - 293.15)\n ) / ((hground + hconv) * (tinoct - 293.15))\n tground = (tinoct**4 - backrat * (tinoct**4 - 293.15**4))**0.25\n tground = np.clip(tground, 293.15, tinoct)\n\n tgrat = (tground - 293.15) / (tinoct - 293.15)\n convrat = (absorp * 800 - emiss * boltz * (\n 2 * tinoct**4 - 282.21**4 - tground**4)) / (hconv * (tinoct - 293.15))\n\n # adjust the capacitance (thermal mass) of the module based on the INOCT.\n # It is a function of INOCT because high INOCT implies thermal coupling\n # with the racking (e.g. roofmount), so the thermal mass is increased.\n # `cap` has units J/(m^2 C) -- see Table 3, Equations 26 & 27\n cap = cap0\n if tinoct > 321.15:\n cap = cap * (1 + (tinoct - 321.15) / 12)\n\n # iterate through timeseries inputs\n sun0 = 0\n tmod0 = 293.15\n\n # n.b. 
the way Fuentes calculates the first timedelta makes it seem like\n    # the value doesn't matter -- rather than recreate it here, just assume\n    # it's the same as the second timedelta:\n    timedelta_seconds = poa_global.index.to_series().diff().dt.total_seconds()\n    timedelta_hours = timedelta_seconds / 3600\n    timedelta_hours.iloc[0] = timedelta_hours.iloc[1]\n\n    tamb_array = temp_air + 273.15\n    sun_array = poa_global * absorp\n\n    # Two of the calculations are easily vectorized, so precalculate them:\n    # sky temperature -- Equation 24\n    tsky_array = 0.68 * (0.0552 * tamb_array**1.5) + 0.32 * tamb_array\n    # wind speed at module height -- Equation 22\n    # not sure why the 1e-4 factor is included -- maybe the equations don't\n    # behave well if wind == 0?\n    windmod_array = wind_speed * (module_height/wind_height)**0.2 + 1e-4\n\n    tmod0 = 293.15\n    tmod_array = np.zeros_like(poa_global)\n\n    iterator = zip(tamb_array, sun_array, windmod_array, tsky_array,\n                   timedelta_hours)\n    for i, (tamb, sun, windmod, tsky, dtime) in enumerate(iterator):\n        # solve the heat transfer equation, iterating because the heat loss\n        # terms depend on tmod. NB Fuentes doesn't show that 10 iterations is\n        # sufficient for convergence.\n        tmod = tmod0\n        for j in range(10):\n            # overall convective coefficient\n            tave = (tmod + tamb) / 2\n            hconv = convrat * _fuentes_hconv(tave, windmod, tinoct,\n                                             abs(tmod-tamb), xlen,\n                                             surface_tilt, True)\n            # sky radiation coefficient (Equation 3)\n            hsky = emiss * boltz * (tmod**2 + tsky**2) * (tmod + tsky)\n            # ground radiation coefficient (Equation 4)\n            tground = tamb + tgrat * (tmod - tamb)\n            hground = emiss * boltz * (tmod**2 + tground**2) * (tmod + tground)\n            # thermal lag -- Equation 8\n            eigen = - (hconv + hsky + hground) / cap * dtime * 3600\n            # not sure why this check is done, maybe as a speed optimization?\n            if eigen > -10:\n                ex = np.exp(eigen)\n            else:\n                ex = 0\n            # Equation 7 -- note that `sun` and `sun0` already account for\n            # absorption (alpha)\n            tmod = tmod0 * ex + (\n                (1 - ex) * (\n                    hconv * tamb\n                    + hsky * tsky\n                    + hground * tground\n                    + sun0\n                    + (sun - sun0) / eigen\n                ) + sun - sun0\n            ) / (hconv + hsky + hground)\n        tmod_array[i] = tmod\n        tmod0 = tmod\n        sun0 = sun\n\n    return pd.Series(tmod_array - 273.15, index=poa_global.index, name='tmod')\n"
]
| [
[
"numpy.zeros_like",
"numpy.clip",
"numpy.exp",
"pandas.Series",
"numpy.asanyarray"
]
]
|
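The pvlib record above documents three closed-form cell-temperature models (PVsyst, Faiman, Ross) plus the iterative Fuentes heat balance. A minimal sketch, assuming a standard pvlib installation, that reproduces the docstring's PVsyst example and works the other two formulas by hand:

```python
# A minimal sketch, assuming a standard pvlib installation, exercising the
# three closed-form models documented above. Expected values are worked out
# from the docstring equations.
from pvlib.temperature import pvsyst_cell, faiman, ross

# PVsyst: T_C = T_a + alpha*E*(1 - eta_m) / (U_c + U_v*WS)
print(pvsyst_cell(1000, 10))             # 10 + 0.9*1000*0.9/29 ~= 37.93 C

# Faiman: T = T_a + E / (u0 + u1*WS)
print(faiman(1000, 25, wind_speed=1.0))  # 25 + 1000/31.84 ~= 56.41 C

# Ross: T_C = T_a + (NOCT - 20)/80 * S, with S in mW/cm^2 (= 0.1 * W/m^2)
print(ross(800, 25, noct=45))            # 25 + (25/80)*80 = 50.0 C
```

`fuentes`, by contrast, is stateful: it expects pandas Series with a DatetimeIndex and iterates over timesteps, which is why the API list for this record includes `pandas.Series` and `numpy.zeros_like`.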
vadam5/NeMo | [
"3c5db09539293c3c19a6bb7437011f91261119af"
]
| [
"nemo/collections/asr/parts/numba/rnnt_loss/rnnt.py"
]
| [
"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Copyright 2018-2019, Mingkun Huang\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport multiprocessing\n\nimport torch\nfrom numba import cuda\n\nfrom nemo.collections.asr.parts.numba.rnnt_loss.utils import global_constants, rnnt_helper\nfrom nemo.collections.asr.parts.numba.rnnt_loss.utils.cpu_utils import cpu_rnnt\nfrom nemo.collections.asr.parts.numba.rnnt_loss.utils.cuda_utils import gpu_rnnt\n\n\ndef rnnt_loss_cpu(\n acts: torch.Tensor,\n labels: torch.Tensor,\n input_lengths: torch.Tensor,\n label_lengths: torch.Tensor,\n costs: torch.Tensor,\n grads: torch.Tensor,\n blank_label: int,\n num_threads: int,\n):\n \"\"\"\n Wrapper method for accessing CPU RNNT loss.\n\n CPU implementation ported from [HawkAaron/warp-transducer](https://github.com/HawkAaron/warp-transducer).\n\n Args:\n acts: Activation tensor of shape [B, T, U, V+1].\n labels: Ground truth labels of shape [B, U].\n input_lengths: Lengths of the acoustic sequence as a vector of ints [B].\n label_lengths: Lengths of the target sequence as a vector of ints [B].\n costs: Zero vector of length [B] in which costs will be set.\n grads: Zero tensor of shape [B, T, U, V+1] where the gradient will be set.\n blank_label: Index of the blank token in the vocabulary.\n num_threads: Number of threads for OpenMP.\n \"\"\"\n # aliases\n log_probs = acts\n flat_labels = labels\n\n minibatch_size = log_probs.shape[0]\n maxT = log_probs.shape[1]\n maxU = log_probs.shape[2]\n alphabet_size = log_probs.shape[3]\n\n if num_threads < 0:\n num_threads = multiprocessing.cpu_count()\n\n num_threads = max(1, num_threads) # have to use at least 1 thread\n\n gpu_size, status = rnnt_helper.get_workspace_size(maxT, maxU, minibatch_size, gpu=False)\n if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:\n raise RuntimeError(\"Invalid parameter passed when calculating working space memory\")\n\n cpu_workspace = torch.zeros(gpu_size, device=log_probs.device, dtype=log_probs.dtype, requires_grad=False)\n\n ### VIEW TENSORS AS VECTORS FOR POINTER INDEXING ###\n log_probs, acts_shape = rnnt_helper.flatten_tensor(log_probs)\n flat_labels, labels_shape = rnnt_helper.flatten_tensor(flat_labels)\n\n wrapper = cpu_rnnt.CPURNNT(\n minibatch=minibatch_size,\n maxT=maxT,\n maxU=maxU,\n alphabet_size=alphabet_size,\n workspace=cpu_workspace,\n blank=blank_label,\n num_threads=num_threads,\n batch_first=True,\n )\n\n if grads is None:\n 
status = wrapper.score_forward(\n log_probs=log_probs.data,\n costs=costs,\n flat_labels=flat_labels.data,\n label_lengths=label_lengths.data,\n input_lengths=input_lengths.data,\n )\n\n if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:\n raise RuntimeError(\"Could not calculate forward scores\")\n\n else:\n ### FLATTEN GRAD TENSOR ###\n grads, grads_shape = rnnt_helper.flatten_tensor(grads)\n\n status = wrapper.cost_and_grad(\n log_probs=log_probs.data,\n grads=grads.data,\n costs=costs,\n flat_labels=flat_labels.data,\n label_lengths=label_lengths.data,\n input_lengths=input_lengths.data,\n )\n\n if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:\n raise RuntimeError(\"Could not calculate forward scores\")\n\n del cpu_workspace, wrapper\n return True\n\n\ndef rnnt_loss_gpu(\n acts: torch.Tensor,\n labels: torch.Tensor,\n input_lengths: torch.Tensor,\n label_lengths: torch.Tensor,\n costs: torch.Tensor,\n grads: torch.Tensor,\n blank_label: int,\n num_threads: int,\n):\n \"\"\"\n Wrapper method for accessing GPU RNNT loss.\n\n CUDA implementation ported from [HawkAaron/warp-transducer](https://github.com/HawkAaron/warp-transducer).\n\n Args:\n acts: Activation tensor of shape [B, T, U, V+1].\n labels: Ground truth labels of shape [B, U].\n input_lengths: Lengths of the acoustic sequence as a vector of ints [B].\n label_lengths: Lengths of the target sequence as a vector of ints [B].\n costs: Zero vector of length [B] in which costs will be set.\n grads: Zero tensor of shape [B, T, U, V+1] where the gradient will be set.\n blank_label: Index of the blank token in the vocabulary.\n num_threads: Number of threads for OpenMP.\n \"\"\"\n minibatch_size = acts.shape[0]\n maxT = acts.shape[1]\n maxU = acts.shape[2]\n alphabet_size = acts.shape[3]\n\n if hasattr(cuda, 'external_stream'):\n stream = cuda.external_stream(torch.cuda.current_stream(acts.device).cuda_stream)\n else:\n stream = cuda.default_stream()\n\n if num_threads < 0:\n num_threads = multiprocessing.cpu_count()\n\n num_threads = max(1, num_threads) # have to use at least 1 thread\n\n gpu_size, status = rnnt_helper.get_workspace_size(maxT, maxU, minibatch_size, gpu=True)\n if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:\n raise RuntimeError(\"Invalid parameter passed when calculating working space memory\")\n\n # Select GPU index\n cuda.select_device(acts.device.index)\n gpu_workspace = torch.zeros(gpu_size, device=acts.device, dtype=acts.dtype, requires_grad=False)\n\n ### VIEW TENSORS AS VECTORS FOR POINTER INDEXING ###\n acts, acts_shape = rnnt_helper.flatten_tensor(acts)\n\n ### REPRESENT THE CUDA ARRAY INTERFACE OF COSTS VECTOR ###\n costs_repr = cuda.as_cuda_array(costs) # NO COPY OF DATA, JUST CHANGE REPRESENTATION\n\n wrapper = gpu_rnnt.GPURNNT(\n minibatch=minibatch_size,\n maxT=maxT,\n maxU=maxU,\n alphabet_size=alphabet_size,\n workspace=gpu_workspace,\n blank=blank_label,\n num_threads=num_threads,\n stream=stream,\n )\n\n if grads is None:\n status = wrapper.score_forward(\n acts=acts.data,\n costs=costs_repr,\n pad_labels=labels.data,\n label_lengths=label_lengths.data,\n input_lengths=input_lengths.data,\n )\n\n if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:\n raise RuntimeError(\"Could not calculate forward scores\")\n\n else:\n ### FLATTEN GRAD TENSOR ###\n grads, grads_shape = rnnt_helper.flatten_tensor(grads)\n\n status = wrapper.cost_and_grad(\n acts=acts.data,\n grads=grads.data,\n costs=costs_repr,\n pad_labels=labels.data,\n 
label_lengths=label_lengths.data,\n input_lengths=input_lengths.data,\n )\n\n if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:\n raise RuntimeError(\"Could not calculate forward scores\")\n\n del gpu_workspace, wrapper\n return True\n"
]
| [
[
"torch.zeros",
"torch.cuda.current_stream"
]
]
|
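Both wrappers above fill the caller-supplied `costs` and `grads` tensors in place and return `True` on success. The following is a minimal shape sketch for the CPU entry point, assuming NeMo with its compiled numba extensions is installed; the `[B, T, U, V+1]` layout comes from the docstring, while the int32 dtypes and the `U-1` targets-per-sequence convention are assumptions based on standard RNNT usage, not guarantees from this file.

```python
# Shape sketch for rnnt_loss_cpu above -- illustrative, not a verified recipe.
# Assumes NeMo (with its numba extensions) is installed and importable.
import torch
from nemo.collections.asr.parts.numba.rnnt_loss.rnnt import rnnt_loss_cpu

B, T, U, V = 2, 8, 5, 10                # batch, frames, max targets + 1, vocab
acts = torch.randn(B, T, U, V + 1)      # [B, T, U, V+1] joint-network outputs
labels = torch.randint(1, V + 1, (B, U - 1), dtype=torch.int32)  # padded targets
input_lengths = torch.full((B,), T, dtype=torch.int32)
label_lengths = torch.full((B,), U - 1, dtype=torch.int32)
costs = torch.zeros(B)                  # filled in place with per-sample losses
grads = torch.zeros_like(acts)          # filled in place with dloss/dacts

rnnt_loss_cpu(acts=acts, labels=labels, input_lengths=input_lengths,
              label_lengths=label_lengths, costs=costs, grads=grads,
              blank_label=0, num_threads=-1)  # -1 -> multiprocessing.cpu_count()
print(costs)
```

`rnnt_loss_gpu` takes the same arguments but additionally wraps `costs` via `cuda.as_cuda_array` and binds the kernel to the current CUDA stream, as shown in the source.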
gecko17/project-sailor | [
"7a35eeec2a6a8ec9bc998e39e8ffad4703cec5d7"
]
| [
"sailor/sap_iot/fetch.py"
]
| [
"\"\"\"\nTimeseries module can be used to retrieve timeseries data from the SAP iot abstract timeseries api.\n\nInterfaces for retrieval are aligned with AssetCentral objects such as equipment_set and indicator_set.\nTimeseries data is generally stored in a pandas dataframe, wrapped in a convenience class to make it easier\nto interact with the data in AssetCentral terms (see wrappers.py for the convenience class).\n\"\"\"\nfrom __future__ import annotations\n\nfrom collections import defaultdict\nfrom functools import partial\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING, Union, BinaryIO\nimport logging\nimport warnings\nimport time\nimport json\nimport zipfile\nimport gzip\nfrom io import BytesIO\n\nimport pandas as pd\n\nimport sailor.assetcentral.indicators as ac_indicators\nfrom ..utils.oauth_wrapper import get_oauth_client, RequestError\nfrom ..utils.timestamps import _any_to_timestamp, _timestamp_to_date_string\nfrom ..utils.config import SailorConfig\nfrom ..utils.utils import DataNotFoundWarning\nfrom .wrappers import TimeseriesDataset\n\nif TYPE_CHECKING:\n from ..assetcentral.indicators import IndicatorSet\n from ..assetcentral.equipment import EquipmentSet\n\nLOG = logging.getLogger(__name__)\nLOG.addHandler(logging.NullHandler())\n\nfixed_timeseries_columns = {\n '_TIME': 'timestamp',\n 'modelId': 'model_id',\n 'equipmentId': 'equipment_id'\n}\n\n\ndef _start_bulk_timeseries_data_export(start_date: str, end_date: str, liot_indicator_group: str) -> str:\n LOG.debug(\"Triggering raw indicator data export for indicator group: %s.\", liot_indicator_group)\n oauth_iot = get_oauth_client('sap_iot')\n base_url = SailorConfig.get('sap_iot', 'export_url') # todo: figure out what to do about these urls\n request_url = f'{base_url}/v1/InitiateDataExport/{liot_indicator_group}?timerange={start_date}-{end_date}'\n\n resp = oauth_iot.request('POST', request_url)\n return resp['RequestId']\n\n\ndef _check_bulk_timeseries_export_status(export_id: str) -> bool:\n LOG.debug(\"Checking export status for export id: %s.\", export_id)\n oauth_iot = get_oauth_client('sap_iot')\n base_url = SailorConfig.get('sap_iot', 'export_url') # todo: figure out what to do about these urls\n request_url = f'{base_url}/v1/DataExportStatus?requestId={export_id}'\n\n resp = oauth_iot.request('GET', request_url)\n\n if resp['Status'] == 'The file is available for download.':\n return True\n elif resp['Status'] in ['Request for data download is submitted.', 'Request for data download is initiated.']:\n return False\n else:\n raise RuntimeError(resp['Status'])\n\n\ndef _process_one_file(ifile: BinaryIO, indicator_set: IndicatorSet, equipment_set: EquipmentSet) -> pd.DataFrame:\n # each processed file contains data for some time range (one day it seems), one indicator group and all\n # equipment holding any data for that group in that time period.\n # Since the user might not have requested all indicators in the group we'll filter out any results that were not\n # requested. This is complicated by the fact that it's possible that the same indicator_id is present in the\n # indicator_group through two different templates. 
If it is requested only through one template it needs to be\n # filtered out after parsing the csv into a pandas dataframe, and converting to a\n # columnar format (one column for each (indicator_id, indicator_group_id, template_id)).\n\n float_types = ['numeric', 'numericflexible']\n\n selected_equipment_ids = [equipment.id for equipment in equipment_set] # noqa: F841\n dtypes = {indicator._liot_id: float for indicator in indicator_set if indicator.datatype in float_types}\n dtypes.update({'equipmentId': 'object', 'indicatorGroupId': 'object', 'templateId': 'object'})\n df = pd.read_csv(ifile,\n usecols=lambda x: x != 'modelId',\n parse_dates=['_TIME'], date_parser=partial(pd.to_datetime, utc=True, unit='ms', errors='coerce'),\n dtype=dtypes)\n\n df = df.pivot(index=['_TIME', 'equipmentId'], columns=['indicatorGroupId', 'templateId'])\n\n columns_to_keep = {}\n columns_flat = df.columns.to_flat_index()\n for indicator in indicator_set:\n id_tuple = (indicator._liot_id, indicator._liot_group_id, indicator.template_id)\n if id_tuple in columns_flat:\n columns_to_keep[id_tuple] = indicator._unique_id\n\n df.columns = columns_flat\n df = (\n df.filter(items=columns_to_keep.keys())\n .reset_index()\n .rename(columns=columns_to_keep)\n .rename(columns=fixed_timeseries_columns)\n .query('equipment_id in @selected_equipment_ids')\n )\n return df\n\n\ndef _get_exported_bulk_timeseries_data(export_id: str,\n indicator_set: IndicatorSet,\n equipment_set: EquipmentSet) -> pd.DataFrame:\n oauth_iot = get_oauth_client('sap_iot')\n base_url = SailorConfig.get('sap_iot', 'download_url') # todo: figure out what to do about these urls\n request_url = f\"{base_url}/v1/DownloadData('{export_id}')\"\n\n resp = oauth_iot.request('GET', request_url, headers={'Accept': 'application/octet-stream'})\n\n ifile = BytesIO(resp)\n try:\n zip_content = zipfile.ZipFile(ifile)\n except zipfile.BadZipFile:\n raise RuntimeError('Downloaded file is corrupted, can not process contents.')\n\n frames = []\n for i, inner_file in enumerate(zip_content.filelist):\n # the end marker below allows us to keep updating the current line for a nicer 'progress update'\n print(f'processing compressed file {i + 1}/{len(zip_content.filelist)}', end='\\x1b[2K\\r')\n gzip_file = zip_content.read(inner_file)\n if not gzip_file:\n continue\n\n try:\n gzip_content = gzip.GzipFile(fileobj=BytesIO(gzip_file))\n frames.append(_process_one_file(gzip_content, indicator_set, equipment_set))\n except gzip.BadGzipFile:\n raise RuntimeError('Downloaded file is corrupted, can not process contents.')\n\n if frames:\n return pd.concat(frames)\n else:\n raise RuntimeError('Downloaded File did not have any content.')\n\n\ndef get_indicator_data(start_date: Union[str, pd.Timestamp, datetime.timestamp, datetime.date],\n end_date: Union[str, pd.Timestamp, datetime.timestamp, datetime.date],\n indicator_set: IndicatorSet, equipment_set: EquipmentSet) -> TimeseriesDataset:\n \"\"\"\n Read indicator data for a certain time period, a set of equipments and a set of indicators.\n\n Parameters\n ----------\n start_date:\n Date of beginning of requested timeseries data. Time components of the date will be ignored.\n end_date:\n Date of end of requested timeseries data. 
Time components of the date will be ignored.\n indicator_set:\n IndicatorSet for which timeseries data is returned.\n equipment_set:\n Equipment set for which the timeseries data is read.\n\n Example\n -------\n Get the indicator set 'my_indicator_set' timeseries data for equipments in\n the equipment set 'my_equipment_set' for a period from '2020-07-02' to '2021-01-10'::\n\n get_indicator_data('2020-07-02','2021-01-10', my_indicator_set, my_equipment_set)\n \"\"\"\n # some notes:\n # the bulk export api *only* works on indicator groups. No filtering for equipment_set or indicator_set.\n # so we always need to download data for the whole group. We filter on individual indicator-template combinations\n # as well as individual equipment in `_process_one_file`.\n start_date = _any_to_timestamp(start_date)\n end_date = _any_to_timestamp(end_date)\n\n query_groups = defaultdict(list)\n for indicator in indicator_set:\n query_groups[indicator._liot_group_id].append(indicator)\n\n request_ids = {}\n for indicator_group, indicator_subset in sorted(query_groups.items()): # sorted to make query order reproducable\n formatted_start_date = _timestamp_to_date_string(start_date)\n formatted_end_date = _timestamp_to_date_string(end_date)\n try:\n request_id = _start_bulk_timeseries_data_export(formatted_start_date, formatted_end_date, indicator_group)\n request_ids[request_id] = indicator_subset\n except RequestError as e:\n try:\n error_message = json.loads(e.error_text)['message']\n except (json.JSONDecodeError, KeyError):\n raise e\n\n if error_message == 'Data not found for the requested date range':\n warning = DataNotFoundWarning(\n f'No data for indicator group {indicator_group} found in the requested time interval!')\n warnings.warn(warning)\n continue\n else:\n raise e\n\n LOG.info('Data export triggered for %s indicator group(s).', len(query_groups))\n print(f'Data export triggered for {len(query_groups)} indicator group(s).')\n\n # string (or really uuid?) might be better data types for equipment_id\n # unfortunately, support for native string datatypes in pandas is still experimental\n # and if we cast to string right when reading the csv files it gets 'upcast' back to object\n # in `pivot` and `merge`. Hence we'll just stick with object for now.\n schema = {'equipment_id': 'object', 'timestamp': pd.DatetimeTZDtype(tz='UTC')}\n results = pd.DataFrame(columns=schema.keys()).astype(schema)\n\n print('Waiting for data export:')\n while request_ids:\n for request_id in list(request_ids):\n if _check_bulk_timeseries_export_status(request_id):\n indicator_subset = ac_indicators.IndicatorSet(request_ids.pop(request_id))\n\n print(f'\\nNow downloading export for indicator group {indicator_subset[0].indicator_group_name}.')\n data = _get_exported_bulk_timeseries_data(request_id, indicator_subset, equipment_set)\n print('\\nDownload complete')\n\n for indicator in indicator_subset:\n if indicator._unique_id not in data.columns:\n warning = DataNotFoundWarning(f'Could not find any data for indicator {indicator}')\n warnings.warn(warning)\n\n results = pd.merge(results, data, on=['equipment_id', 'timestamp'], how='outer')\n\n if request_ids:\n time.sleep(5)\n print('.', end='')\n print()\n\n wrapper = TimeseriesDataset(results, indicator_set, equipment_set, start_date, end_date)\n return wrapper\n"
]
| [
[
"pandas.DatetimeTZDtype",
"pandas.merge",
"pandas.concat"
]
]
|
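Stripped of the SAP specifics, `get_indicator_data` above is a poll-and-merge loop: one bulk export is started per indicator group, `DataExportStatus` is polled every 5 seconds, and each finished export is downloaded and outer-merged into the result on `('equipment_id', 'timestamp')`. A standalone sketch of that polling pattern (the callback names are illustrative stand-ins, not part of the sailor API):

```python
# The polling pattern at the heart of get_indicator_data, factored into a
# standalone sketch. check_status / handle_ready are illustrative callbacks,
# not part of the sailor API.
import time

def wait_for_exports(request_ids, check_status, handle_ready, poll_seconds=5):
    """Poll all pending export ids; hand each one off as soon as it is ready."""
    pending = set(request_ids)
    while pending:
        for request_id in list(pending):
            if check_status(request_id):   # cf. _check_bulk_timeseries_export_status
                pending.remove(request_id)
                handle_ready(request_id)   # cf. download, filter, merge into result
        if pending:
            time.sleep(poll_seconds)       # the module also sleeps 5 s per round
```

Keeping the status check and the download as separate steps, as the module does, lets slow exports finish in the background while completed ones are already being processed.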
m-mostafavi/Arshad | [
"ca9bff4f66562be8cd50b3703f51061f48ee1612"
]
| [
"Practice2/LogesticRegression.py"
]
| [
"import pandas as pd\nimport numpy as np\nfrom matplotlib import gridspec\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn import linear_model\nimport sklearn.metrics as met\nimport matplotlib.pyplot as plt\n\n\n#Insert data set\ndata=pd.read_csv('tae.csv',sep=',',header=None)\ntrain=data.ix[:,0:4]\ntarget=data.ix[:,5]\n\nrecall=[]\nprecision=[]\naccuracy=[]\nf1score=[]\n\n#method : SVC , LinerSVC\nfor i in np.arange(0.1, 4,0.2):\n clf = linear_model.LogisticRegression(C=i)\n print(clf)\n print('--------------------{}---------------------------------'.format(i))\n print('cross_val_predict')\n predicted = cross_val_predict(clf, train, target, cv=10, ) # predict y values for the test fold\n print('mean recall in all classes:')\n re=met.recall_score(target, predicted, average='macro')\n recall.append([i,re])\n print(re)\n print('mean precision in all classes')\n pre=met.precision_score(target, predicted, average='macro')\n precision.append([i,pre])\n print(pre)\n print('mean accuracy in all classes:')\n acc=met.accuracy_score(target, predicted)\n accuracy.append([i,acc])\n print(acc)\n print('mean f1score in all classes:')\n f1=met.f1_score(target, predicted, average='macro')\n f1score.append([i,f1])\n print(f1)\n print('----------------------------------------')\ngs = gridspec.GridSpec(3, 2)\nfig = plt.figure()\nax1 = plt.subplot(gs[0,:-1])\nr = np.array(recall)\nax1.plot(r[:,0], r[:,1], 'ro')\nax1.set_xlabel('C parametr')\nax1.set_ylabel('recall')\n# ------------------------------------\nax2 = plt.subplot(gs[0,-1:])\np=np.array(precision)\nax2.plot(p[:,0], p[:,1], 'ro')\nax2.set_xlabel('C parametr ')\nax2.set_ylabel('precision')\n# ------------------------------------\nax3 = plt.subplot(gs[1,:])\na=np.array(accuracy)\nax3.plot(a[:,0], a[:,1], 'ro')\nax3.set_xlabel('C parametr ')\nax3.set_ylabel('accuracy')\n# ------------------------------------\nax4 = plt.subplot(gs[2,:])\nf=np.array(f1score)\nax4.plot(f[:,0], f[:,1], 'ro')\nax4.set_xlabel('C parametr ')\nax4.set_ylabel('f1score')\n#------------------------------------\nplt.show()\nprint(max( f[:,1]))\n\n"
]
| [
[
"numpy.array",
"sklearn.metrics.precision_score",
"matplotlib.pyplot.figure",
"sklearn.metrics.accuracy_score",
"sklearn.linear_model.LogisticRegression",
"numpy.arange",
"sklearn.metrics.recall_score",
"matplotlib.pyplot.show",
"pandas.read_csv",
"sklearn.metrics.f1_score",
"sklearn.model_selection.cross_val_predict",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplot"
]
]
|
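The script above sweeps LogisticRegression's inverse regularization strength C from 0.1 to 3.9 and plots macro recall, precision, accuracy, and F1 from 10-fold `cross_val_predict` on what appears to be the UCI Teaching Assistant Evaluation data. The same sweep, condensed and made self-contained with synthetic data so it runs without `tae.csv` (the `make_classification` parameters are stand-ins chosen for illustration):

```python
# Self-contained version of the C sweep above, on synthetic 3-class data.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_predict

X, y = make_classification(n_samples=150, n_features=5, n_informative=4,
                           n_redundant=0, n_classes=3, random_state=0)
scores = []
for C in np.arange(0.1, 4, 0.2):
    pred = cross_val_predict(LogisticRegression(C=C, max_iter=1000), X, y, cv=10)
    scores.append((C, f1_score(y, pred, average='macro')))
best_C, best_f1 = max(scores, key=lambda t: t[1])
print(f'best C = {best_C:.1f}, macro-F1 = {best_f1:.3f}')
```

`max_iter` is raised above scikit-learn's default of 100 because the default solver may not converge on unscaled features at every C in the sweep.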
DarthThomas/PyRep | [
"5430f57e57af036e02753a7db8f816a0409571ff"
]
| [
"pyrep/robots/configuration_paths/arm_configuration_path.py"
]
| [
"from pyrep.backend import sim\nfrom pyrep.robots.configuration_paths.configuration_path import (\n ConfigurationPath)\nimport numpy as np\nfrom typing import List\n\n\nclass ArmConfigurationPath(ConfigurationPath):\n \"\"\"A path expressed in joint configuration space.\n\n Paths are retrieved from an :py:class:`Arm`, and are associated with the\n arm that generated the path.\n\n This class is used for executing motion along a path via the\n Reflexxes Motion Library type II or IV. The Reflexxes Motion Library\n provides instantaneous trajectory generation capabilities for motion\n control systems.\n \"\"\"\n\n def __init__(self, arm: 'Arm', path_points: List[float]):\n self._arm = arm\n self._path_points = np.array(path_points)\n self._rml_handle = None\n self._drawing_handle = None\n self._path_done = False\n self._num_joints = arm.get_joint_count()\n\n def step(self) -> bool:\n \"\"\"Makes a step along the trajectory.\n\n This function steps forward a trajectory generation algorithm from\n Reflexxes Motion Library.\n NOTE: This does not step the physics engine. This is left to the user.\n\n :return: If the end of the trajectory has been reached.\n \"\"\"\n if self._path_done:\n raise RuntimeError('This path has already been completed. '\n 'If you want to re-run, then call set_to_start.')\n if self._rml_handle is None:\n self._rml_handle = self._get_rml_handle()\n done = self._step_motion() == 1\n self._path_done = done\n return done\n\n def set_to_start(self, allow_force_mode=True) -> None:\n \"\"\"Sets the arm to the beginning of this path.\n\n :param allow_force_mode: If True, then the position can be set even\n when the joint mode is in Force mode. It will disable dynamics,\n move the joint, and then re-enable dynamics.\n \"\"\"\n start_config = self._path_points[:len(self._arm.joints)]\n self._arm.set_joint_positions(start_config, allow_force_mode)\n self._path_done = False\n\n def set_to_end(self, allow_force_mode=True) -> None:\n \"\"\"Sets the arm to the end of this path.\n\n :param allow_force_mode: If True, then the position can be set even\n when the joint mode is in Force mode. 
It will disable dynamics,\n move the joint, and then re-enable dynamics.\n \"\"\"\n final_config = self._path_points[-len(self._arm.joints):]\n self._arm.set_joint_positions(final_config, allow_force_mode)\n\n def visualize(self) -> None:\n \"\"\"Draws a visualization of the path in the scene.\n\n The visualization can be removed\n with :py:meth:`ConfigurationPath.clear_visualization`.\n \"\"\"\n if len(self._path_points) <= 0:\n raise RuntimeError(\"Can't visualise a path with no points.\")\n\n tip = self._arm.get_tip()\n self._drawing_handle = sim.simAddDrawingObject(\n objectType=sim.sim_drawing_lines, size=3, duplicateTolerance=0,\n parentObjectHandle=-1, maxItemCount=99999,\n ambient_diffuse=[1, 0, 1])\n sim.simAddDrawingObjectItem(self._drawing_handle, None)\n init_angles = self._arm.get_joint_positions()\n self._arm.set_joint_positions(\n self._path_points[0: len(self._arm.joints)], allow_force_mode=False)\n prev_point = tip.get_position()\n\n for i in range(len(self._arm.joints), len(self._path_points),\n len(self._arm.joints)):\n points = self._path_points[i:i + len(self._arm.joints)]\n self._arm.set_joint_positions(points, allow_force_mode=False)\n p = tip.get_position()\n sim.simAddDrawingObjectItem(self._drawing_handle, prev_point + p)\n prev_point = p\n\n # Set the arm back to the initial config\n self._arm.set_joint_positions(init_angles, allow_force_mode=False)\n\n def clear_visualization(self) -> None:\n \"\"\"Clears/removes a visualization of the path in the scene.\n \"\"\"\n if self._drawing_handle is not None:\n sim.simAddDrawingObjectItem(self._drawing_handle, None)\n\n def _get_rml_handle(self) -> int:\n dt = sim.simGetSimulationTimeStep()\n limits = np.array(self._arm.get_joint_upper_velocity_limits())\n vel_correction = 1.0\n max_vel = self._arm.max_velocity\n max_accel = self._arm.max_acceleration\n max_jerk = self._arm.max_jerk\n lengths = self._get_path_point_lengths()\n target_pos_vel = [lengths[-1],0]\n previous_q = self._path_points[0:len(self._arm.joints)]\n\n while True:\n pos_vel_accel = [0, 0, 0]\n rMax = 0\n rml_handle = sim.simRMLPos(\n 1, 0.0001, -1, pos_vel_accel,\n [max_vel * vel_correction, max_accel, max_jerk],\n [1], target_pos_vel)\n state = 0\n while state == 0:\n state, pos_vel_accel = sim.simRMLStep(rml_handle, dt, 1)\n if state >= 0:\n pos = pos_vel_accel[0]\n for i in range(len(lengths)-1):\n if lengths[i] <= pos <= lengths[i + 1]:\n t = (pos - lengths[i]) / (lengths[i + 1] - lengths[i])\n # For each joint\n offset = len(self._arm.joints) * i\n p1 = self._path_points[\n offset:offset + self._num_joints]\n offset = len(self._arm.joints) * (i + 1)\n p2 = self._path_points[\n offset:offset + self._num_joints]\n dx = p2 - p1\n qs = p1 + dx * t\n dq = qs - previous_q\n previous_q = qs\n r = np.abs(dq / dt) / limits\n m = np.max(r)\n if m > rMax:\n rMax = m\n break\n sim.simRMLRemove(rml_handle)\n if rMax > 1.001:\n vel_correction = vel_correction / rMax\n else:\n break\n pos_vel_accel = [0, 0, 0]\n rml_handle = sim.simRMLPos(\n 1, 0.0001, -1, pos_vel_accel,\n [max_vel*vel_correction, max_accel, max_jerk], [1], target_pos_vel)\n return rml_handle\n\n def _step_motion(self) -> int:\n dt = sim.simGetSimulationTimeStep()\n lengths = self._get_path_point_lengths()\n state, posVelAccel = sim.simRMLStep(self._rml_handle, dt, 1)\n if state >= 0:\n pos = posVelAccel[0]\n for i in range(len(lengths) - 1):\n if lengths[i] <= pos <= lengths[i + 1]:\n t = (pos - lengths[i]) / (lengths[i + 1] - lengths[i])\n # For each joint\n offset = len(self._arm.joints) * 
i\n p1 = self._path_points[\n offset:offset + len(self._arm.joints)]\n offset = self._arm._num_joints * (i + 1)\n p2 = self._path_points[\n offset:offset + len(self._arm.joints)]\n dx = p2 - p1\n qs = p1 + dx * t\n self._arm.set_joint_target_positions(qs)\n break\n if state == 1:\n sim.simRMLRemove(self._rml_handle)\n return state\n\n def _get_path_point_lengths(self) -> List[float]:\n path_points = self._path_points\n prev_points = path_points[0:len(self._arm.joints)]\n dists = [0]\n d = 0\n for i in range(len(self._arm.joints), len(self._path_points),\n len(self._arm.joints)):\n points = path_points[i:i + len(self._arm.joints)]\n d += np.sqrt(np.sum(np.square(prev_points - points)))\n dists.append(d)\n prev_points = points\n return dists\n"
]
| [
[
"numpy.square",
"numpy.array",
"numpy.max",
"numpy.abs"
]
]
|
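Because `ArmConfigurationPath.step()` above only advances the Reflexxes trajectory and explicitly leaves physics stepping to the user, a path is normally consumed from a simulation loop. A typical loop in the style of PyRep's examples (the scene file name and target pose below are placeholders):

```python
# Typical driving loop for an ArmConfigurationPath, in the style of PyRep's
# examples. The scene file name and target pose are placeholders.
from pyrep import PyRep
from pyrep.robots.arms.panda import Panda

pr = PyRep()
pr.launch('scene_with_panda.ttt', headless=True)  # placeholder scene file
pr.start()
arm = Panda()

path = arm.get_path(position=[0.4, 0.1, 0.3],     # returns an ArmConfigurationPath
                    euler=[0.0, 3.1416, 0.0])
path.visualize()                                   # optional: draw the path

done = False
while not done:
    done = path.step()  # advance the Reflexxes interpolation one control step
    pr.step()           # step the physics engine -- path.step() does not do this

path.clear_visualization()
pr.stop()
pr.shutdown()
```

Note that `step()` raises a `RuntimeError` once the path has completed; call `set_to_start()` first if the same path object is to be replayed.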
caglar/GroundHogP | [
"03182d3041eee0d18ee50845efc842f60e346d48"
]
| [
"groundhog/layers/rec_layers.py"
]
| [
"\"\"\"\nRecurrent layers.\n\n\nTODO: write more documentation\n\"\"\"\n__docformat__ = 'restructedtext en'\n__authors__ = (\"Razvan Pascanu \"\n \"KyungHyun Cho \"\n \"Caglar Gulcehre \")\n__contact__ = \"Razvan Pascanu <r.pascanu@gmail>\"\n\nimport numpy\nimport copy\nimport theano\nimport theano.tensor as TT\n# Nicer interface of scan\nfrom theano.sandbox.scan import scan\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom pylearn2.utils import block_gradient\n\nfrom groundhog import utils\nfrom groundhog.utils import sample_weights, \\\n sample_weights_classic,\\\n sample_weights_orth, \\\n init_bias, \\\n constant_shape, \\\n sample_zeros\nfrom basic import Layer\n\nclass RecurrentMultiLayer(Layer):\n \"\"\"\n Constructs a recurrent layer whose transition from h_tm1 to h_t is given\n by an MLP or logistic regression. In our ICLR submission this is a\n DT-RNN model.\n \"\"\"\n def __init__(self,\n rng,\n n_hids=[500,500],\n activation = [TT.tanh, TT.tanh],\n scale=.01,\n sparsity = -1,\n activ_noise=0.,\n weight_noise=False,\n dropout = 1.,\n init_fn='sample_weights',\n bias_fn='init_bias',\n bias_scale = 0.,\n grad_scale = 1.,\n profile = 0,\n name=None):\n \"\"\"\n :type rng: numpy random generator\n :param rng: numpy random generator\n\n :type n_in: int\n :param n_in: number of inputs units\n\n :type n_hids: list of ints\n :param n_hids: Number of hidden units on each layer of the MLP\n\n :type activation: string/function or list of\n :param activation: Activation function for the embedding layers. If\n a list it needs to have a value for each layer. If not, the same\n activation will be applied to all layers\n\n :type scale: float or list of\n :param scale: depending on the initialization function, it can be\n the standard deviation of the Gaussian from which the weights\n are sampled or the largest singular value. If a single value it\n will be used for each layer, otherwise it has to have one value\n for each layer\n\n :type sparsity: int or list of\n :param sparsity: if a single value, it will be used for each layer,\n otherwise it has to be a list with as many values as layers. If\n negative, it means the weight matrix is dense. Otherwise it\n means this many randomly selected input units are connected to\n an output unit\n\n\n :type weight_noise: bool\n :param weight_noise: If true, the model is used with weight noise\n (and the right shared variable are constructed, to keep track of the\n noise)\n\n :type dropout: float\n :param dropout: the probability with which hidden units are dropped\n from the hidden layer. If set to 1, dropout is not used\n\n :type init_fn: string or function\n :param init_fn: function used to initialize the weights of the\n layer. We recommend using either `sample_weights_classic` or\n `sample_weights` defined in the utils\n\n :type bias_fn: string or function\n :param bias_fn: function used to initialize the biases. We recommend\n using `init_bias` defined in the utils\n\n :type bias_scale: float\n :param bias_scale: argument passed to `bias_fn`, depicting the scale\n of the initial bias\n\n :type grad_scale: float or theano scalar\n :param grad_scale: factor with which the gradients with respect to\n the parameters of this layer are scaled. It is used for\n differentiating between the different parameters of a model.\n\n :type name: string\n :param name: name of the layer (used to name parameters). 
NB: in\n this library names are very important because certain parts of the\n code relies on name to disambiguate between variables, therefore\n each layer should have a unique name.\n\n \"\"\"\n self.grad_scale = grad_scale\n if type(n_hids) not in (list, tuple):\n n_hids = [n_hids]\n n_layers = len(n_hids)\n if type(scale) not in (list, tuple):\n scale = [scale] * n_layers\n if type(sparsity) not in (list, tuple):\n sparsity = [sparsity] * n_layers\n for idx, sp in enumerate(sparsity):\n if sp < 0: sparsity[idx] = n_hids[idx]\n if type(activation) not in (list, tuple):\n activation = [activation] * n_layers\n if type(bias_scale) not in (list, tuple):\n bias_scale = [bias_scale] * (n_layers-1)\n if type(bias_fn) not in (list, tuple):\n bias_fn = [bias_fn] * (n_layers-1)\n if type(init_fn) not in (list, tuple):\n init_fn = [init_fn] * n_layers\n\n for dx in xrange(n_layers):\n if dx < n_layers-1:\n if type(bias_fn[dx]) is str or type(bias_fn[dx]) is unicode:\n bias_fn[dx] = eval(bias_fn[dx])\n if type(init_fn[dx]) is str or type(init_fn[dx]) is unicode:\n init_fn[dx] = eval(init_fn[dx])\n if type(activation[dx]) is str or type(activation[dx]) is unicode:\n activation[dx] = eval(activation[dx])\n self.scale = scale\n self.n_layers = n_layers\n self.sparsity = sparsity\n self.activation = activation\n self.n_hids = n_hids\n self.bias_scale = bias_scale\n self.bias_fn = bias_fn\n self.init_fn = init_fn\n self.weight_noise = weight_noise\n self.activ_noise = activ_noise\n self.profile = profile\n self.dropout = dropout\n assert rng is not None, \"random number generator should not be empty!\"\n super(RecurrentMultiLayer, self).__init__(n_hids[0],\n n_hids[-1],\n rng,\n name)\n\n self.trng = RandomStreams(self.rng.randint(int(1e6)))\n self.params = []\n self._init_params()\n\n def _init_params(self):\n self.W_hhs = []\n self.b_hhs = []\n for dx in xrange(self.n_layers):\n W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n rng=self.rng)\n self.W_hhs.append(theano.shared(value=W_hh, name=\"W%d_%s\" %\n (dx,self.name)))\n if dx > 0:\n self.b_hhs.append(theano.shared(\n self.bias_fn[dx-1](self.n_hids[dx],\n self.bias_scale[dx-1],\n self.rng),\n name='b%d_%s' %(dx, self.name)))\n self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]\n self.params_grad_scale = [self.grad_scale for x in self.params]\n if self.weight_noise:\n self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]\n self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]\n self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]\n\n\n def step_fprop(self,\n state_below,\n mask=None,\n dpmask=None,\n state_before=None,\n init_state=None,\n use_noise=True,\n no_noise_bias=False):\n \"\"\"\n Constructs the computational graph of a single step of the recurrent\n layer.\n\n :type state_below: theano variable\n :param state_below: the input to the layer\n\n :type mask: None or theano variable\n :param mask: mask describing the length of each sequence in a\n minibatch\n\n :type state_before: theano variable\n :param state_before: the previous value of the hidden state of the\n layer\n\n :type use_noise: bool\n :param use_noise: flag saying if weight noise should be used in\n computing the output of this layer\n\n :type no_noise_bias: bool\n :param no_noise_bias: flag saying if weight noise 
should be added to\n the bias as well\n \"\"\"\n rval = []\n if self.weight_noise and use_noise and self.noise_params:\n W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]\n if not no_noise_bias:\n b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hss)]\n else:\n b_hhs = self.b_hhs\n else:\n W_hhs = self.W_hhs\n b_hhs = self.b_hhs\n preactiv = TT.dot(state_before, W_hhs[0]) +state_below\n h = self.activation[0](preactiv)\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,:h.shape[1]]\n dpidx = h.shape[1]\n else:\n h = h * dpmask[:h.shape[0]]\n dpidx = h.shape[0]\n else:\n h = h * self.dropout\n\n rval +=[h]\n for dx in xrange(1, self.n_layers):\n preactiv = TT.dot(h, W_hhs[dx]) + b_hhs[dx-1]\n h = self.activation[dx](preactiv)\n\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,dpidx:dpidx+h.shape[1]]\n dpidx = dpidx + h.shape[1]\n else:\n h = h * dpmask[dpidx:dpidx+h.shape[0]]\n dpidx = dpidx + h.shape[0]\n else:\n h = h * self.dropout\n rval += [h]\n if mask is not None:\n if h.ndim ==2 and mask.ndim==1:\n mask = mask.dimshuffle(0,'x')\n h = mask * h + (1-mask) * state_before\n rval[-1] = h\n return rval\n\n def fprop(self,\n state_below,\n mask=None,\n init_state=None,\n n_steps=None,\n batch_size=None,\n use_noise=True,\n truncate_gradient=-1,\n no_noise_bias = False):\n \"\"\"\n Evaluates the forward through a recurrent layer\n\n :type state_below: theano variable\n :param state_below: the input of the recurrent layer\n\n :type mask: None or theano variable\n :param mask: mask describing the length of each sequence in a\n minibatch\n\n :type init_state: theano variable or None\n :param init_state: initial state for the hidden layer\n\n :type n_steps: None or int or theano scalar\n :param n_steps: Number of steps the recurrent netowrk does\n\n :type batch_size: int\n :param batch_size: the size of the minibatch over which scan runs\n\n :type use_noise: bool\n :param use_noise: flag saying if weight noise should be used in\n computing the output of this layer\n\n :type truncate_gradient: int\n :param truncate_gradient: If negative, no truncation is used,\n otherwise truncated BPTT is used, where you go backwards only this\n amount of steps\n\n :type no_noise_bias: bool\n :param no_noise_bias: flag saying if weight noise should be added to\n the bias as well\n \"\"\"\n\n\n if theano.config.floatX=='float32':\n floatX = numpy.float32\n else:\n floatX = numpy.float64\n if n_steps is None:\n n_steps = state_below.shape[0]\n if batch_size and batch_size != 1:\n n_steps = n_steps / batch_size\n if batch_size is None and state_below.ndim == 3:\n batch_size = state_below.shape[1]\n if state_below.ndim == 2 and \\\n (not isinstance(batch_size,int) or batch_size > 1):\n state_below = state_below.reshape((n_steps, batch_size, self.n_in))\n\n\n if not init_state:\n if not isinstance(batch_size, int) or batch_size != 1:\n init_state = TT.alloc(floatX(0), batch_size, self.n_hids[0])\n else:\n init_state = TT.alloc(floatX(0), self.n_hids[0])\n\n if mask:\n inps = [state_below, mask]\n fn = lambda x,y,z : self.step_fprop(x,y,None, z, use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n inps = [state_below]\n fn = lambda tx, ty: self.step_fprop(tx, None, None, ty,\n use_noise=use_noise,\n 
no_noise_bias=no_noise_bias)\n\n if self.dropout < 1. and use_noise:\n # build dropout mask outside scan\n allhid = numpy.sum(self.n_hids)\n shape = state_below.shape\n if state_below.ndim == 3:\n alldpmask = self.trng.binomial(\n (n_steps, batch_size, allhid),\n n = 1, p = self.dropout, dtype=state_below.dtype)\n else:\n alldpmask = self.trng.binomial(\n (n_steps, allhid),\n n = 1, p = self.dropout, dtype=state_below.dtype)\n inps.append(alldpmask)\n if mask:\n fn = lambda x,y,z,u : self.step_fprop(x,y,z,u,use_noise=use_noise)\n else:\n fn = lambda tx, ty, tu: self.step_fprop(tx,None,ty,tu,\n use_noise=use_noise)\n\n rval, updates = theano.scan(fn,\n sequences = inps,\n outputs_info = [None]*(self.n_layers-1) +\n [init_state],\n name='layer_%s'%self.name,\n profile=self.profile,\n truncate_gradient = truncate_gradient,\n n_steps = n_steps)\n if not isinstance(rval,(list, tuple)):\n rval = [rval]\n new_h = rval[-1]\n self.out = rval[-1]\n self.rval = rval\n self.updates =updates\n\n return self.out\n\n\nclass RecurrentMultiLayerInp(RecurrentMultiLayer):\n \"\"\"\n Similar to the RecurrentMultiLayer, with the exception that the input is\n fed into the top layer of the MLP (rather than being an input to the\n MLP).\n \"\"\"\n def _init_params(self):\n self.W_hhs = []\n self.b_hhs = []\n for dx in xrange(self.n_layers):\n W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n rng=self.rng)\n self.W_hhs.append(theano.shared(value=W_hh, name=\"W%d_%s\" %\n (dx,self.name)))\n if dx < self.n_layers-1:\n self.b_hhs.append(theano.shared(\n self.bias_fn[dx](self.n_hids[dx],\n self.bias_scale[dx],\n self.rng),\n name='b%d_%s' %(dx, self.name)))\n self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]\n self.params_grad_scale = [self.grad_scale for x in self.params]\n self.restricted_params = [x for x in self.params]\n if self.weight_noise:\n self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]\n self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]\n self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]\n\n\n def step_fprop(self,\n state_below,\n mask=None,\n dpmask=None,\n state_before=None,\n no_noise_bias=False,\n use_noise=True):\n \"\"\"\n See parent class\n \"\"\"\n rval = []\n if self.weight_noise and use_noise and self.noise_params:\n W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hss)]\n if not no_noise_bias:\n b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]\n else:\n b_hhs = self.b_hhs\n else:\n W_hhs = self.W_hhs\n b_hhs = self.b_hhs\n\n h = self.activation[0](TT.dot(state_before,\n W_hhs[0])+b_hhs[0])\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,:h.shape[1]]\n dpidx = h.shape[1]\n else:\n h = h * dpmask[:h.shape[0]]\n dpidx = h.shape[0]\n else:\n h = h * self.dropout\n\n rval += [h]\n for dx in xrange(1, self.n_layers-1):\n h = self.activation[dx](TT.dot(h,\n W_hhs[dx])+b_hhs[dx])\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,dpidx:dpidx+h.shape[1]]\n dpidx = dpidx + h.shape[1]\n else:\n h = h * dpmask[dpidx:dpidx+h.shape[0]]\n 
dpidx = dpidx + h.shape[0]\n else:\n h = h * self.dropout\n rval += [h]\n h = self.activation[-1](TT.dot(h, W_hhs[-1]) + state_below)\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,dpidx:dpidx+h.shape[1]]\n dpidx = dpidx + h.shape[1]\n else:\n h = h * dpmask[dpidx:dpidx+h.shape[0]]\n dpidx = dpidx + h.shape[0]\n else:\n h = h * self.dropout\n rval += [h]\n if mask is not None:\n if h.ndim ==2 and mask.ndim==1:\n mask = mask.dimshuffle(0,'x')\n h = mask * h + (1-mask) * state_before\n rval[-1] = h\n return rval\n\nclass RecurrentMultiLayerShortPath(RecurrentMultiLayer):\n \"\"\"\n A similar layer to RecurrentMultiLayer (the DT-RNN), with the difference\n that we have shortcut connections in the MLP representing the transition\n from previous hidden state to the next\n \"\"\"\n def _init_params(self):\n self.W_hhs = []\n self.b_hhs = []\n self.W_shortp = []\n for dx in xrange(self.n_layers):\n W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n rng=self.rng)\n self.W_hhs.append(theano.shared(value=W_hh, name=\"W%d_%s\" %\n (dx,self.name)))\n\n if dx > 0:\n W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n rng=self.rng)\n self.W_shortp.append(theano.shared(value=W_shp,\n name='W_s%d_%s'%(dx,self.name)))\n self.b_hhs.append(theano.shared(\n self.bias_fn[dx-1](self.n_hids[dx],\n self.bias_scale[dx-1],\n self.rng),\n name='b%d_%s' %(dx, self.name)))\n self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\\\n [x for x in self.W_shortp]\n self.params_grad_scale = [self.grad_scale for x in self.params]\n self.restricted_params = [x for x in self.params]\n if self.weight_noise:\n self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]\n self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]\n self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]\n\n self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]\n\n\n\n def step_fprop(self,\n state_below,\n mask=None,\n dpmask=None,\n state_before=None,\n no_noise_bias=False,\n use_noise=True):\n \"\"\"\n See parent class\n \"\"\"\n rval = []\n if self.weight_noise and use_noise and self.noise_params:\n W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]\n if not no_noise_bias:\n b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]\n else:\n b_hhs = self.b_hhs\n W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]\n else:\n W_hhs = self.W_hhs\n b_hhs = self.b_hhs\n W_shp = self.W_shortp\n h = self.activation[0](TT.dot(state_before,\n W_hhs[0])+state_below)\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,:h.shape[1]]\n dpidx = h.shape[1]\n else:\n h = h * dpmask[:h.shape[0]]\n dpidx = h.shape[0]\n else:\n h = h * self.dropout\n rval += [h]\n for dx in xrange(1, self.n_layers):\n h = self.activation[dx](TT.dot(h,\n W_hhs[dx])+\n TT.dot(state_before,\n W_shp[dx-1])+b_hhs[dx-1])\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, 
std=self.activ_noise, dtype=h.dtype)\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,dpidx:dpidx+h.shape[1]]\n dpidx = dpidx + h.shape[1]\n else:\n h = h * dpmask[dpidx:dpidx+h.shape[0]]\n dpidx = dpidx + h.shape[0]\n else:\n h = h * self.dropout\n rval += [h]\n\n if mask is not None:\n if h.ndim ==2 and mask.ndim==1:\n mask = mask.dimshuffle(0,'x')\n h = mask * h + (1-mask) * state_before\n rval[-1] = h\n return rval\n\nclass RecurrentMultiLayerShortPathInp(RecurrentMultiLayer):\n \"\"\"\n Similar to the RecurrentMultiLayerShortPath class, just that the input\n is fed into the last layer of the MLP (similar to\n RecurrentMultiLayerInp).\n \"\"\"\n\n def _init_params(self):\n self.W_hhs = []\n self.b_hhs = []\n self.W_shortp = []\n for dx in xrange(self.n_layers):\n W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n rng=self.rng)\n self.W_hhs.append(theano.shared(value=W_hh, name=\"W%d_%s\" %\n (dx,self.name)))\n\n if dx > 0:\n W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n rng=self.rng)\n self.W_shortp.append(theano.shared(value=W_shp,\n name='W_s%d_%s'%(dx,self.name)))\n if dx < self.n_layers-1:\n self.b_hhs.append(theano.shared(\n self.bias_fn[dx](self.n_hids[dx],\n self.bias_scale[dx],\n self.rng),\n name='b%d_%s' %(dx, self.name)))\n self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\\\n [x for x in self.W_shortp]\n self.restricted_params = [x for x in self.params]\n self.params_grad_scale = [self.grad_scale for x in self.params]\n if self.weight_noise:\n self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]\n self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]\n self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]\n\n self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]\n\n\n\n def step_fprop(self,\n state_below,\n mask=None,\n dpmask=None,\n state_before=None,\n no_noise_bias=False,\n use_noise=True):\n \"\"\"\n See parent class\n \"\"\"\n rval = []\n if self.weight_noise and use_noise and self.noise_params:\n W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]\n if not no_noise_bias:\n b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]\n else:\n b_hhs = self.b_hhs\n W_shp = [(x+y) for x, y in zip(self.W_shortp, self.nW_shortp)]\n else:\n W_hhs = self.W_hhs\n b_hhs = self.b_hhs\n W_shp = self.W_shortp\n h = self.activation[0](TT.dot(state_before,\n W_hhs[0])+b_hhs[0])\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,:h.shape[1]]\n dpidx = h.shape[1]\n else:\n h = h * dpmask[:h.shape[0]]\n dpidx = h.shape[0]\n else:\n h = h * self.dropout\n rval += [h]\n for dx in xrange(1, self.n_layers-1):\n h = self.activation[dx](TT.dot(h,\n W_hhs[dx])+\n TT.dot(state_before,\n W_shp[dx-1])+b_hhs[dx])\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,dpidx:dpidx+h.shape[1]]\n dpidx = dpidx + h.shape[1]\n else:\n h = h * dpmask[dpidx:dpidx+h.shape[0]]\n dpidx = dpidx + 
h.shape[0]\n else:\n h = h * self.dropout\n rval += [h]\n\n h = self.activation[-1](TT.dot(h, W_hhs[-1]) +\n TT.dot(state_before, W_shp[-1])+state_below)\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,:h.shape[1]]\n dpidx = h.shape[1]\n else:\n h = h * dpmask[:h.shape[0]]\n dpidx = h.shape[0]\n else:\n h = h * self.dropout\n\n rval +=[h]\n if mask is not None:\n if h.ndim ==2 and mask.ndim==1:\n mask = mask.dimshuffle(0,'x')\n h = mask * h + (1-mask) * state_before\n rval += [h]\n return rval\n\nclass RecurrentMultiLayerShortPathInpAll(RecurrentMultiLayer):\n \"\"\"\n Similar to RecurrentMultiLayerShortPathInp class, just that the input is\n fed to all layers of the MLP depicting the deep transition between h_tm1\n to h_t.\n \"\"\"\n def _init_params(self):\n self.W_hhs = []\n self.W_shortp = []\n for dx in xrange(self.n_layers):\n W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n rng=self.rng)\n self.W_hhs.append(theano.shared(value=W_hh, name=\"W%d_%s\" %\n (dx,self.name)))\n\n if dx > 0:\n W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n rng=self.rng)\n self.W_shortp.append(theano.shared(value=W_shp,\n name='W_s%d_%s'%(dx,self.name)))\n self.params = [x for x in self.W_hhs] +\\\n [x for x in self.W_shortp]\n\n self.params_grad_scale = [self.grad_scale for x in self.params]\n self.restricted_params = [x for x in self.params]\n\n if self.weight_noise:\n self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]\n self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]\n\n self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nW_shortp]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]\n\n\n def step_fprop(self,\n state_below,\n mask=None,\n dpmask=None,\n state_before=None,\n no_noise_bias=False,\n use_noise=True):\n \"\"\"\n See parent class\n \"\"\"\n rval = []\n if self.weight_noise and use_noise and self.noise_params:\n W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]\n W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]\n else:\n W_hhs = self.W_hhs\n W_shp = self.W_shortp\n def slice_state_below(dx, sb = state_below):\n st = 0\n for p in xrange(dx):\n st += self.n_hids[p]\n ed = st + self.n_hids[dx]\n if sb.ndim == 1:\n return sb[st:ed]\n else:\n return sb[:,st:ed]\n\n\n h = self.activation[0](TT.dot(state_before, W_hhs[0]) + slice_state_below(0))\n\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,:h.shape[1]]\n dpidx = h.shape[1]\n else:\n h = h * dpmask[:h.shape[0]]\n dpidx = h.shape[0]\n else:\n h = h * self.dropout\n\n rval += [h]\n for dx in xrange(1, self.n_layers):\n h = self.activation[dx](TT.dot(h, W_hhs[dx]) +\n TT.dot(state_before, W_shp[dx-1]) +\n slice_state_below(dx))\n\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if self.dropout < 1.:\n if use_noise:\n if h.ndim == 2:\n h = h * dpmask[:,dpidx:dpidx+h.shape[1]]\n dpidx = dpidx + h.shape[1]\n else:\n h = h * dpmask[dpidx:dpidx+h.shape[0]]\n dpidx = dpidx + h.shape[0]\n else:\n h = h * 
self.dropout\n rval += [h]\n\n if mask is not None:\n if h.ndim ==2 and mask.ndim==1:\n mask = mask.dimshuffle(0,'x')\n h = mask * h + (1-mask) * state_before\n rval[-1] = h\n return rval\n\nclass RecurrentLayer(Layer):\n \"\"\"\n Standard recurrent layer with gates.\n See arXiv verion of our paper.\n \"\"\"\n def __init__(self, rng,\n n_hids=500,\n scale=.01,\n sparsity = -1,\n activation = TT.tanh,\n activ_noise=0.,\n weight_noise=False,\n bias_fn='init_bias',\n bias_scale = 0.,\n dropout = 1.,\n init_fn='sample_weights',\n kind_reg = None,\n grad_scale = 1.,\n profile = 0,\n gating = False,\n reseting = False,\n gater_activation = TT.nnet.sigmoid,\n reseter_activation = TT.nnet.sigmoid,\n name=None):\n \"\"\"\n :type rng: numpy random generator\n :param rng: numpy random generator\n\n :type n_in: int\n :param n_in: number of inputs units\n\n :type n_hids: int\n :param n_hids: Number of hidden units on each layer of the MLP\n\n :type activation: string/function or list of\n :param activation: Activation function for the embedding layers. If\n a list it needs to have a value for each layer. If not, the same\n activation will be applied to all layers\n\n :type scale: float or list of\n :param scale: depending on the initialization function, it can be\n the standard deviation of the Gaussian from which the weights\n are sampled or the largest singular value. If a single value it\n will be used for each layer, otherwise it has to have one value\n for each layer\n\n :type sparsity: int or list of\n :param sparsity: if a single value, it will be used for each layer,\n otherwise it has to be a list with as many values as layers. If\n negative, it means the weight matrix is dense. Otherwise it\n means this many randomly selected input units are connected to\n an output unit\n\n\n :type weight_noise: bool\n :param weight_noise: If true, the model is used with weight noise\n (and the right shared variable are constructed, to keep track of the\n noise)\n\n :type dropout: float\n :param dropout: the probability with which hidden units are dropped\n from the hidden layer. If set to 1, dropout is not used\n\n :type init_fn: string or function\n :param init_fn: function used to initialize the weights of the\n layer. We recommend using either `sample_weights_classic` or\n `sample_weights` defined in the utils\n\n :type bias_fn: string or function\n :param bias_fn: function used to initialize the biases. We recommend\n using `init_bias` defined in the utils\n\n :type bias_scale: float\n :param bias_scale: argument passed to `bias_fn`, depicting the scale\n of the initial bias\n\n :type grad_scale: float or theano scalar\n :param grad_scale: factor with which the gradients with respect to\n the parameters of this layer are scaled. It is used for\n differentiating between the different parameters of a model.\n\n :type gating: bool\n :param gating: If true, an update gate is used\n\n :type reseting: bool\n :param reseting: If true, a reset gate is used\n\n :type gater_activation: string or function\n :param name: The activation function of the update gate\n\n :type reseter_activation: string or function\n :param name: The activation function of the reset gate\n\n :type name: string\n :param name: name of the layer (used to name parameters). 
NB: in\n this library names are very important because certain parts of the\n code relies on name to disambiguate between variables, therefore\n each layer should have a unique name.\n\n \"\"\"\n self.grad_scale = grad_scale\n\n if type(init_fn) is str or type(init_fn) is unicode:\n init_fn = eval(init_fn)\n if type(bias_fn) is str or type(bias_fn) is unicode:\n bias_fn = eval(bias_fn)\n if type(activation) is str or type(activation) is unicode:\n activation = eval(activation)\n if type(gater_activation) is str or type(gater_activation) is unicode:\n gater_activation = eval(gater_activation)\n if type(reseter_activation) is str or type(reseter_activation) is unicode:\n reseter_activation = eval(reseter_activation)\n\n self.scale = scale\n self.sparsity = sparsity\n self.activation = activation\n self.n_hids = n_hids\n self.bias_scale = bias_scale\n self.bias_fn = bias_fn\n self.init_fn = init_fn\n self.weight_noise = weight_noise\n self.activ_noise = activ_noise\n self.profile = profile\n self.dropout = dropout\n self.gating = gating\n self.reseting = reseting\n self.gater_activation = gater_activation\n self.reseter_activation = reseter_activation\n\n assert rng is not None, \"random number generator should not be empty!\"\n\n super(RecurrentLayer, self).__init__(self.n_hids,\n self.n_hids, rng, name)\n\n self.trng = RandomStreams(self.rng.randint(int(1e6)))\n self.params = []\n self._init_params()\n\n def _init_params(self):\n self.W_hh = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng=self.rng),\n name=\"W_%s\"%self.name)\n self.params = [self.W_hh]\n if self.gating:\n self.G_hh = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng=self.rng),\n name=\"G_%s\"%self.name)\n self.params.append(self.G_hh)\n if self.reseting:\n self.R_hh = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng=self.rng),\n name=\"R_%s\"%self.name)\n self.params.append(self.R_hh)\n self.params_grad_scale = [self.grad_scale for x in self.params]\n self.restricted_params = [x for x in self.params]\n if self.weight_noise:\n self.nW_hh = theano.shared(self.W_hh.get_value()*0, name='noise_'+self.W_hh.name)\n self.nG_hh = theano.shared(self.G_hh.get_value()*0, name='noise_'+self.G_hh.name)\n self.noise_params = [self.nW_hh,self.nG_hh]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]\n\n def step_fprop(self,\n state_below,\n mask = None,\n state_before = None,\n gater_below = None,\n reseter_below = None,\n use_noise=True,\n no_noise_bias = False):\n \"\"\"\n Constructs the computational graph of this layer.\n\n :type state_below: theano variable\n :param state_below: the input to the layer\n\n :type mask: None or theano variable\n :param mask: mask describing the length of each sequence in a\n minibatch\n\n :type state_before: theano variable\n :param state_before: the previous value of the hidden state of the\n layer\n\n :type gater_below: theano variable\n :param gater_below: the input to the update gate\n\n :type reseter_below: theano variable\n :param reseter_below: the input to the reset gate\n\n :type use_noise: bool\n :param use_noise: flag saying if weight noise should be used in\n computing the output of this layer\n\n :type no_noise_bias: bool\n :param no_noise_bias: flag saying if weight noise should be added to\n the bias as well\n \"\"\"\n\n rval = []\n if self.weight_noise and use_noise and self.noise_params:\n W_hh = 
self.W_hh + self.nW_hh\n if self.gating:\n G_hh = self.G_hh + self.nG_hh\n if self.reseting:\n R_hh = self.R_hh + self.nR_hh\n else:\n W_hh = self.W_hh\n if self.gating:\n G_hh = self.G_hh\n if self.reseting:\n R_hh = self.R_hh\n\n # Reset gate:\n # optionally reset the hidden state.\n if self.reseting and reseter_below:\n reseter = self.reseter_activation(TT.dot(state_before, R_hh) +\n reseter_below)\n reseted_state_before = reseter * state_before\n else:\n reseted_state_before = state_before\n\n # Feed the input to obtain potential new state.\n preactiv = TT.dot(reseted_state_before, W_hh) + state_below\n h = self.activation(preactiv)\n\n # Update gate:\n # optionally reject the potential new state and use the new one.\n if self.gating and gater_below:\n gater = self.gater_activation(TT.dot(state_before, G_hh) +\n gater_below)\n h = gater * h + (1-gater) * state_before\n\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if mask is not None:\n if h.ndim ==2 and mask.ndim==1:\n mask = mask.dimshuffle(0,'x')\n h = mask * h + (1-mask) * state_before\n return h\n\n def fprop(self,\n state_below,\n mask=None,\n init_state=None,\n gater_below=None,\n reseter_below=None,\n nsteps=None,\n batch_size=None,\n use_noise=True,\n truncate_gradient=-1,\n no_noise_bias = False\n ):\n\n if theano.config.floatX=='float32':\n floatX = numpy.float32\n else:\n floatX = numpy.float64\n if nsteps is None:\n nsteps = state_below.shape[0]\n if batch_size and batch_size != 1:\n nsteps = nsteps / batch_size\n if batch_size is None and state_below.ndim == 3:\n batch_size = state_below.shape[1]\n if state_below.ndim == 2 and \\\n (not isinstance(batch_size,int) or batch_size > 1):\n state_below = state_below.reshape((nsteps, batch_size, self.n_in))\n if gater_below:\n gater_below = gater_below.reshape((nsteps, batch_size, self.n_in))\n if reseter_below:\n reseter_below = reseter_below.reshape((nsteps, batch_size, self.n_in))\n\n if not init_state:\n if not isinstance(batch_size, int) or batch_size != 1:\n init_state = TT.alloc(floatX(0), batch_size, self.n_hids)\n else:\n init_state = TT.alloc(floatX(0), self.n_hids)\n\n # FIXME: Find a way to clean this up\n if self.reseting and reseter_below:\n if self.gating and gater_below:\n if mask:\n inps = [state_below, mask, gater_below, reseter_below]\n fn = lambda x,y,g,r,z : self.step_fprop(x,y,z, gater_below=g, reseter_below=r, use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n inps = [state_below, gater_below, reseter_below]\n fn = lambda tx, tg,tr, ty: self.step_fprop(tx, None, ty, gater_below=tg,\n reseter_below=tr,\n use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n if mask:\n inps = [state_below, mask, reseter_below]\n fn = lambda x,y,r,z : self.step_fprop(x,y,z, use_noise=use_noise,\n reseter_below=r,\n no_noise_bias=no_noise_bias)\n else:\n inps = [state_below, reseter_below]\n fn = lambda tx,tr,ty: self.step_fprop(tx, None, ty,\n reseter_below=tr,\n use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n if self.gating and gater_below:\n if mask:\n inps = [state_below, mask, gater_below]\n fn = lambda x,y,g,z : self.step_fprop(x,y,z, gater_below=g, use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n inps = [state_below, gater_below]\n fn = lambda tx, tg, ty: self.step_fprop(tx, None, ty, gater_below=tg,\n use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n if mask:\n inps = [state_below, mask]\n fn = lambda x,y,z : self.step_fprop(x,y,z, 
use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n inps = [state_below]\n fn = lambda tx, ty: self.step_fprop(tx, None, ty,\n use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n\n rval, updates = theano.scan(fn,\n sequences = inps,\n outputs_info = [init_state],\n name='layer_%s'%self.name,\n profile=self.profile,\n truncate_gradient = truncate_gradient,\n n_steps = nsteps)\n new_h = rval\n self.out = rval\n self.rval = rval\n self.updates =updates\n\n return self.out\n\n\nclass GatedMemoryLayer(Layer):\n \"\"\"\n Standard recurrent layer with gates.\n See arXiv verion of our paper.\n \"\"\"\n def __init__(self, rng,\n n_hids=500,\n scale=.01,\n sparsity = -1,\n activation = TT.tanh,\n activ_noise=0.,\n weight_noise=False,\n bias_fn='init_bias',\n bias_scale = 0.,\n dropout = 1.,\n init_fn='sample_weights',\n kind_reg = None,\n grad_scale = 1.,\n top_k = 10,\n memory_man_activation = TT.tanh,\n memory_bank_sizes = [],\n profile = 0,\n reseting = False,\n gater_activation = TT.nnet.sigmoid,\n reseter_activation = TT.nnet.sigmoid,\n name=None):\n \"\"\"\n :type rng: numpy random generator\n :param rng: numpy random generator\n\n :type n_in: int\n :param n_in: number of inputs units\n\n :type n_hids: int\n :param n_hids: Number of hidden units on each layer of the MLP\n\n :type activation: string/function or list of\n :param activation: Activation function for the embedding layers. If\n a list it needs to have a value for each layer. If not, the same\n activation will be applied to all layers\n\n :type scale: float or list of\n :param scale: depending on the initialization function, it can be\n the standard deviation of the Gaussian from which the weights\n are sampled or the largest singular value. If a single value it\n will be used for each layer, otherwise it has to have one value\n for each layer\n\n :type sparsity: int or list of\n :param sparsity: if a single value, it will be used for each layer,\n otherwise it has to be a list with as many values as layers. If\n negative, it means the weight matrix is dense. Otherwise it\n means this many randomly selected input units are connected to\n an output unit\n\n\n :type weight_noise: bool\n :param weight_noise: If true, the model is used with weight noise\n (and the right shared variable are constructed, to keep track of the\n noise)\n\n :type dropout: float\n :param dropout: the probability with which hidden units are dropped\n from the hidden layer. If set to 1, dropout is not used\n\n :type init_fn: string or function\n :param init_fn: function used to initialize the weights of the\n layer. We recommend using either `sample_weights_classic` or\n `sample_weights` defined in the utils\n\n :type bias_fn: string or function\n :param bias_fn: function used to initialize the biases. We recommend\n using `init_bias` defined in the utils\n\n :type bias_scale: float\n :param bias_scale: argument passed to `bias_fn`, depicting the scale\n of the initial bias\n\n :type grad_scale: float or theano scalar\n :param grad_scale: factor with which the gradients with respect to\n the parameters of this layer are scaled. 
It is used for\n differentiating between the different parameters of a model.\n\n :type reseting: bool\n :param reseting: If true, a reset gate is used\n\n :type gater_activation: string or function\n :param name: The activation function of the update gate\n\n :type reseter_activation: string or function\n :param name: The activation function of the reset gate\n\n :type name: string\n :param name: name of the layer (used to name parameters). NB: in\n this library names are very important because certain parts of the\n code relies on name to disambiguate between variables, therefore\n each layer should have a unique name.\n\n \"\"\"\n\n self.grad_scale = grad_scale\n\n if type(top_k) is str or type(top_k) is unicode:\n top_k = eval(top_k)\n\n if type(init_fn) is str or type(init_fn) is unicode:\n init_fn = eval(init_fn)\n\n if type(bias_fn) is str or type(bias_fn) is unicode:\n bias_fn = eval(bias_fn)\n\n if type(activation) is str or type(activation) is unicode:\n activation = eval(activation)\n\n if type(gater_activation) is str or type(gater_activation) is unicode:\n gater_activation = eval(gater_activation)\n\n if type(reseter_activation) is str or type(reseter_activation) is unicode:\n reseter_activation = eval(reseter_activation)\n\n if type(memory_bank_sizes) is str or type(memory_bank_sizes) is unicode:\n memory_bank_sizes = eval(memory_bank_sizes)\n\n if type(memory_man_activation) is str or type(memory_man_activation) is unicode:\n memory_man_activation = eval(memory_man_activation)\n\n\n assert len(memory_bank_sizes) > 0, \"Size of the memory bank should be greater than 0.\"\n\n self.scale = scale\n self.top_k = top_k\n\n self.sparsity = sparsity\n self.activation = activation\n self.n_hids = n_hids\n self.bias_scale = bias_scale\n self.bias_fn = bias_fn\n self.init_fn = init_fn\n self.weight_noise = weight_noise\n self.activ_noise = activ_noise\n self.profile = profile\n self.dropout = dropout\n self.reseting = reseting\n self.gater_activation = gater_activation\n self.reseter_activation = reseter_activation\n self.memory_bank_sizes = memory_bank_sizes\n self.memory_man_activation = memory_man_activation\n\n assert rng is not None, \"random number generator should not be empty!\"\n\n super(GatedMemoryLayer, self).__init__(self.n_hids,\n self.n_hids,\n rng, name)\n\n self.trng = RandomStreams(self.rng.randint(int(1e6)))\n self.params = []\n self._init_params()\n\n def _init_params(self):\n self.W_hh = theano.shared(self.init_fn(self.memory_bank_sizes[1],\n self.n_hids,\n self.sparsity,\n self.scale,\n rng = self.rng),\n name = \"W_%s\"%self.name)\n\n self.M = theano.shared(\n sample_weights_classic(self.memory_bank_sizes[0],\n self.memory_bank_sizes[1],\n self.sparsity,\n self.scale,\n rng = self.rng),\n name = \"M_%s\" % self.name)\n\n self.W_ah = theano.shared(\n sample_weights_classic(self.n_hids,\n self.memory_bank_sizes[0],\n self.sparsity,\n self.scale,\n rng = self.rng),\n name = \"W_ah_%s\" % self.name)\n\n self.params = [self.W_hh, self.W_ah, self.M]\n\n self.G_hh = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng = self.rng),\n name = \"G_%s\"%self.name)\n\n self.params.append(self.G_hh)\n self.b_hh = theano.shared(numpy.zeros((self.n_hids),\n dtype = \"float32\"),\n name = \"b_hh_%s\" % self.name)\n\n self.params.append(self.b_hh)\n\n if self.reseting:\n self.R_hh = theano.shared(\n self.init_fn(self.n_hids,\n self.memory_bank_sizes[1],\n self.sparsity,\n self.scale,\n rng = self.rng),\n name = \"R_%s\"%self.name)\n\n 
self.params.append(self.R_hh)\n\n self.params_grad_scale = [self.grad_scale for x in self.params]\n self.restricted_params = [x for x in self.params]\n\n if self.weight_noise:\n self.nW_hh = theano.shared(self.W_hh.get_value()*0,\n name = 'noise_'+self.W_hh.name)\n\n self.nG_hh = theano.shared(self.G_hh.get_value()*0,\n name = 'noise_'+self.G_hh.name)\n\n self.noise_params = [self.nW_hh,self.nG_hh]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]\n\n def step_fprop_orig(self,\n state_below,\n state_at_below=None,\n mask = None,\n state_before = None,\n gater_below = None,\n reseter_below = None,\n use_noise=True,\n no_noise_bias = False):\n \"\"\"\n Constructs the computational graph of this layer.\n\n :type state_below: theano variable\n :param state_below: the input to the layer\n\n :type mask: None or theano variable\n :param mask: mask describing the length of each sequence in a\n minibatch\n\n :type state_before: theano variable\n :param state_before: the previous value of the hidden state of the\n layer\n\n :type gater_below: theano variable\n :param gater_below: the input to the update gate\n\n :type reseter_below: theano variable\n :param reseter_below: the input to the reset gate\n\n :type use_noise: bool\n :param use_noise: flag saying if weight noise should be used in\n computing the output of this layer\n\n :type no_noise_bias: bool\n :param no_noise_bias: flag saying if weight noise should be added to\n the bias as well\n \"\"\"\n\n rval = []\n if self.weight_noise and use_noise and self.noise_params:\n W_hh = self.W_hh + self.nW_hh\n G_hh = self.G_hh + self.nG_hh\n W_ah = self.W_ah\n\n if self.reseting:\n R_hh = self.R_hh + self.nR_hh\n else:\n W_hh = self.W_hh\n G_hh = self.G_hh\n W_ah = self.W_ah\n\n if self.reseting:\n R_hh = self.R_hh\n\n ##men_man is the memory manager, it chooses to which locations\n ##in the memory to look at.\n mem_man = self.memory_man_activation(TT.dot(state_before, W_ah) + state_below)\n\n if mem_man.ndim > 1:\n if self.M.ndim == 2:\n M = self.M.dimshuffle('x', 0, 1)\n mem_man = mem_man.dimshuffle(0, 1, 'x')\n mem_t = (mem_man * M).sum(1)\n else:\n mem_man = mem_man.dimshuffle(0, 'x')\n mem_t = (mem_man * self.M).sum(0)\n\n # Reset gate:\n # optionally reset the hidden state.\n\n if self.reseting and reseter_below:\n reseter = self.reseter_activation(TT.dot(state_before, R_hh) +\n reseter_below)\n reseted_state_before = reseter * mem_t + state_before\n else:\n reseted_state_before = mem_t\n\n # Feed the input to obtain potential new state.\n preactiv = TT.dot(reseted_state_before, W_hh) + self.b_hh #+ state_below\n h = self.activation(preactiv)\n\n # Update gate:\n # optionally reject the potential new state and use the new one.\n gater = self.gater_activation(TT.dot(state_before, G_hh) + gater_below)\n h = gater * h + (1 - gater) * state_before\n\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n\n if mask is not None:\n if h.ndim ==2 and mask.ndim==1:\n mask = mask.dimshuffle(0,'x')\n h = mask * h + (1-mask) * state_before\n\n return h\n\n def step_fprop(self,\n state_below,\n state_at_below=None,\n mask = None,\n state_before = None,\n gater_below = None,\n reseter_below = None,\n use_noise=True,\n no_noise_bias = False):\n \"\"\"\n Constructs the computational graph of this layer.\n\n :type state_below: theano variable\n :param state_below: the input to the layer\n\n :type mask: None or theano variable\n :param mask: mask 
describing the length of each sequence in a\n minibatch\n\n :type state_before: theano variable\n :param state_before: the previous value of the hidden state of the\n layer\n\n :type gater_below: theano variable\n :param gater_below: the input to the update gate\n\n :type reseter_below: theano variable\n :param reseter_below: the input to the reset gate\n\n :type use_noise: bool\n :param use_noise: flag saying if weight noise should be used in\n computing the output of this layer\n\n :type no_noise_bias: bool\n :param no_noise_bias: flag saying if weight noise should be added to\n the bias as well\n \"\"\"\n\n rval = []\n if self.weight_noise and use_noise and self.noise_params:\n W_hh = self.W_hh + self.nW_hh\n G_hh = self.G_hh + self.nG_hh\n W_ah = self.W_ah\n M = self.M\n if self.reseting:\n R_hh = self.R_hh + self.nR_hh\n else:\n W_hh = self.W_hh\n G_hh = self.G_hh\n W_ah = self.W_ah\n M = self.M\n\n if self.reseting:\n R_hh = self.R_hh\n\n ##men_man is the memory manager, it chooses to which locations\n ##in the memory to look at.\n mem_man = self.memory_man_activation(TT.dot(state_before, W_ah) + state_below)\n\n if mem_man.ndim > 1:\n if self.M.ndim == 2:\n M = M.dimshuffle('x', 0, 1)\n\n mem_sorted = block_gradient(mem_man.sort(1))\n mem_sorted = mem_sorted[:, -(self.top_k + 1)][:, None]\n k_mem_man = TT.switch(mem_man > mem_sorted, mem_man, 0)\n\n k_mem_man = k_mem_man.dimshuffle(0, 1, 'x')\n mem_t = (k_mem_man * M).sum(1)\n else:\n top_k = mem_man.argsort(0)[:self.top_k]\n top_k = block_gradient(top_k)\n\n mem_man = mem_man.dimshuffle(0, 'x')\n mem_t = (mem_man * M)[top_k].sum(0)\n\n # Reset gate:\n # optionally reset the hidden state.\n if self.reseting and reseter_below:\n reseter = self.reseter_activation(TT.dot(state_before, R_hh) +\n reseter_below)\n reseted_state_before = reseter * mem_t\n else:\n reseted_state_before = mem_t\n\n reseted_state_before = reseted_state_before + state_before\n\n # Feed the input to obtain potential new state.\n preactiv = TT.dot(reseted_state_before, W_hh) + self.b_hh #+ state_below\n h = self.activation(preactiv)\n\n # Update gate:\n # optionally reject the potential new state and use the new one.\n gater = self.gater_activation(TT.dot(state_before, G_hh) + gater_below)\n h = gater * h + (1 - gater) * state_before\n\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n\n if mask is not None:\n if h.ndim ==2 and mask.ndim==1:\n mask = mask.dimshuffle(0,'x')\n h = mask * h + (1 - mask) * state_before\n\n return h\n\n def fprop(self,\n state_below,\n state_at_below=None,\n mask=None,\n init_state=None,\n gater_below=None,\n reseter_below=None,\n nsteps=None,\n batch_size=None,\n use_noise=True,\n truncate_gradient=-1,\n no_noise_bias = False):\n\n if theano.config.floatX=='float32':\n floatX = numpy.float32\n else:\n floatX = numpy.float64\n\n if nsteps is None:\n nsteps = state_below.shape[0]\n if batch_size and batch_size != 1:\n nsteps = nsteps / batch_size\n\n if batch_size is None and state_below.ndim == 3:\n batch_size = state_below.shape[1]\n\n if state_below.ndim == 2 and \\\n (not isinstance(batch_size,int) or batch_size > 1):\n state_below = state_below.reshape((nsteps, batch_size, self.memory_bank_sizes[0]))\n if gater_below:\n gater_below = gater_below.reshape((nsteps, batch_size, self.n_in))\n if reseter_below:\n reseter_below = reseter_below.reshape((nsteps, batch_size,\n self.memory_bank_sizes[1]))\n\n if not init_state:\n if not isinstance(batch_size, int) or 
batch_size != 1:\n init_state = TT.alloc(floatX(0), batch_size, self.n_hids)\n else:\n init_state = TT.alloc(floatX(0), self.n_hids)\n\n # FIXME: Find a way to clean this up\n if self.reseting and reseter_below:\n if gater_below:\n if mask:\n inps = [state_below, mask, gater_below, reseter_below]\n fn = lambda s, y, g, r, z : self.step_fprop(x, s, y, z,\n gater_below=g,\n reseter_below=r,\n use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n inps = [state_below, gater_below, reseter_below]\n fn = lambda tx, tg, tr, ty: self.step_fprop(tx, None, None, ty,\n gater_below=tg,\n reseter_below=tr,\n use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n if mask:\n inps = [state_below, mask, reseter_below]\n fn = lambda x, y, r, z : self.step_fprop(x,y,z, use_noise=use_noise,\n reseter_below=r,\n no_noise_bias=no_noise_bias)\n else:\n inps = [reseter_below]\n fn = lambda tx, tr, ty: self.step_fprop(tx, None, ty,\n reseter_below=tr,\n use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n if gater_below:\n if mask:\n inps = [state_below, mask, gater_below]\n fn = lambda x, y, g, z : self.step_fprop(x, None, y, z,\n gater_below=g, use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n inps = [state_below, gater_below]\n fn = lambda tx, tg, ty: self.step_fprop(tx, None,\n None, ty, gater_below=tg,\n use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n if mask:\n inps = [state_below, state_at_below, mask]\n fn = lambda x, s, y, z : self.step_fprop(x, s, y, z, use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n inps = [state_below, state_at_below]\n fn = lambda tx, ts, ty: self.step_fprop(tx, ts, None, ty,\n use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n\n rval, updates = theano.scan(fn,\n sequences = inps,\n outputs_info = [init_state],\n name='layer_%s'%self.name,\n profile=self.profile,\n truncate_gradient = truncate_gradient,\n n_steps = nsteps)\n new_h = rval\n self.out = rval\n self.rval = rval\n self.updates =updates\n\n return self.out\n\n\nclass LSTMLayer(Layer):\n \"\"\"\n Standard LSTM Layer\n \"\"\"\n def __init__(self, rng,\n n_hids=500,\n scale=.01,\n sparsity = -1,\n activation = TT.tanh,\n activ_noise=0.,\n weight_noise=False,\n bias_fn='init_bias',\n bias_scale = 0.,\n dropout = 1.,\n init_fn='sample_weights',\n kind_reg = None,\n grad_scale = 1.,\n profile = 0,\n name=None,\n **kwargs):\n \"\"\"\n :type rng: numpy random generator\n :param rng: numpy random generator\n\n :type n_in: int\n :param n_in: number of inputs units\n\n :type n_hids: int\n :param n_hids: Number of hidden units on each layer of the MLP\n\n :type activation: string/function or list of\n :param activation: Activation function for the embedding layers. If\n a list it needs to have a value for each layer. If not, the same\n activation will be applied to all layers\n\n :type scale: float or list of\n :param scale: depending on the initialization function, it can be\n the standard deviation of the Gaussian from which the weights\n are sampled or the largest singular value. If a single value it\n will be used for each layer, otherwise it has to have one value\n for each layer\n\n :type sparsity: int or list of\n :param sparsity: if a single value, it will be used for each layer,\n otherwise it has to be a list with as many values as layers. If\n negative, it means the weight matrix is dense. 
Otherwise it\n means this many randomly selected input units are connected to\n an output unit\n\n :type weight_noise: bool\n :param weight_noise: If true, the model is used with weight noise\n (and the right shared variable are constructed, to keep track of the\n noise)\n\n :type dropout: float\n :param dropout: the probability with which hidden units are dropped\n from the hidden layer. If set to 1, dropout is not used\n\n :type init_fn: string or function\n :param init_fn: function used to initialize the weights of the\n layer. We recommend using either `sample_weights_classic` or\n `sample_weights` defined in the utils\n\n :type bias_fn: string or function\n :param bias_fn: function used to initialize the biases. We recommend\n using `init_bias` defined in the utils\n\n :type bias_scale: float\n :param bias_scale: argument passed to `bias_fn`, depicting the scale\n of the initial bias\n\n :type grad_scale: float or theano scalar\n :param grad_scale: factor with which the gradients with respect to\n the parameters of this layer are scaled. It is used for\n differentiating between the different parameters of a model.\n\n :type name: string\n :param name: name of the layer (used to name parameters). NB: in\n this library names are very important because certain parts of the\n code relies on name to disambiguate between variables, therefore\n each layer should have a unique name.\n \"\"\"\n self.grad_scale = grad_scale\n\n if type(init_fn) is str or type(init_fn) is unicode:\n init_fn = eval(init_fn)\n if type(bias_fn) is str or type(bias_fn) is unicode:\n bias_fn = eval(bias_fn)\n if type(activation) is str or type(activation) is unicode:\n activation = eval(activation)\n\n self.scale = scale\n self.sparsity = sparsity\n self.activation = activation\n self.n_hids = n_hids\n self.bias_scale = bias_scale\n self.bias_fn = bias_fn\n self.init_fn = init_fn\n self.weight_noise = weight_noise\n self.activ_noise = activ_noise\n self.profile = profile\n self.dropout = dropout\n\n assert rng is not None, \"random number generator should not be empty!\"\n\n super(LSTMLayer, self).__init__(self.n_hids,\n self.n_hids, rng, name)\n\n self.trng = RandomStreams(self.rng.randint(int(1e6)))\n self.params = []\n self._init_params()\n\n def _init_params(self):\n self.W_hi = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng=self.rng),\n name=\"Whi_%s\"%self.name)\n self.params = [self.W_hi]\n self.W_ci = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng=self.rng),\n name=\"Wci_%s\"%self.name)\n self.params += [self.W_ci]\n self.W_hf = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng=self.rng),\n name=\"Whf_%s\"%self.name)\n self.params += [self.W_hf]\n self.W_cf = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng=self.rng),\n name=\"Wcf_%s\"%self.name)\n self.params += [self.W_cf]\n self.W_hc = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng=self.rng),\n name=\"Wcf_%s\"%self.name)\n self.params += [self.W_hc]\n self.W_ho = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng=self.rng),\n name=\"Wcf_%s\"%self.name)\n self.params += [self.W_ho]\n self.W_co = theano.shared(\n self.init_fn(self.n_hids,\n self.n_hids,\n self.sparsity,\n self.scale,\n rng=self.rng),\n name=\"Wcf_%s\"%self.name)\n self.params += [self.W_co]\n\n self.params_grad_scale 
= [self.grad_scale for x in self.params]\n self.restricted_params = [x for x in self.params]\n if self.weight_noise:\n self.noise_params = [theano.shared(p.get_value()*0, name='noise_'+p.name) for p in self.params]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]\n\n def _get_slice_below(self, state_below, to='cell'):\n if to == 'cell':\n offset = 0\n elif to == 'input':\n offset = 1 * self.n_hids\n elif to == 'output':\n offset = 2 * self.n_hids\n elif to == 'forget':\n offset = 3 * self.n_hids\n else:\n raise Warning('Unknown gate/cell types')\n\n if state_below.ndim == 3:\n return state_below[:,:,offset:offset+self.n_hids]\n if state_below.ndim == 2:\n return state_below[:,offset:offset+self.n_hids]\n return state_below[offset:offset+self.n_hids]\n\n def _get_slice_before(self, state_before, fr='cell'):\n if fr == 'cell':\n offset = self.n_hids\n elif fr == 'hidden':\n offset = 0\n else:\n raise Warning('Unknown cell/gate types')\n\n if state_before.ndim == 2:\n return state_before[:,offset:offset+self.n_hids]\n return state_before[offset:offset+self.n_hids]\n\n def step_fprop(self,\n state_below,\n mask = None,\n state_before = None,\n use_noise=True,\n no_noise_bias = False,\n **kwargs):\n \"\"\"\n Constructs the computational graph of this layer.\n\n :type state_below: theano variable\n :param state_below: the input to the layer\n\n :type mask: None or theano variable\n :param mask: mask describing the length of each sequence in a\n minibatch\n\n :type state_before: theano variable\n :param state_before: the previous value of the hidden state of the\n layer\n\n :type use_noise: bool\n :param use_noise: flag saying if weight noise should be used in\n computing the output of this layer\n\n :type no_noise_bias: bool\n :param no_noise_bias: flag saying if weight noise should be added to\n the bias as well\n \"\"\"\n\n rval = []\n if self.weight_noise and use_noise and self.noise_params:\n W_hi = self.W_hi + self.nW_hi\n W_ci = self.W_ci + self.nW_ci\n W_hf = self.W_hf + self.nW_hf\n W_cf = self.W_cf + self.nW_cf\n W_hc = self.W_hc + self.nW_hc\n W_ho = self.W_ho + self.nW_ho\n W_co = self.W_co + self.nW_co\n else:\n W_hi = self.W_hi\n W_ci = self.W_ci\n W_hf = self.W_hf\n W_cf = self.W_cf\n W_hc = self.W_hc\n W_ho = self.W_ho\n W_co = self.W_co\n\n # input gate\n ig = TT.nnet.sigmoid(self._get_slice_below(state_below,'input') +\n TT.dot(self._get_slice_before(state_before,'hidden'), W_hi) +\n TT.dot(self._get_slice_before(state_before,'cell'), W_ci))\n\n # forget gate\n fg = TT.nnet.sigmoid(self._get_slice_below(state_below,'forget') +\n TT.dot(self._get_slice_before(state_before,'hidden'), W_hf) +\n TT.dot(self._get_slice_before(state_before,'cell'), W_cf))\n\n # cell\n cc = fg * self._get_slice_before(state_before,'cell') + \\\n ig * self.activation(self._get_slice_below(state_below,'cell') +\n TT.dot(self._get_slice_before(state_before,'hidden'), W_hc))\n\n # output gate\n og = TT.nnet.sigmoid(self._get_slice_below(state_below,'output') +\n TT.dot(self._get_slice_before(state_before,'hidden'), W_ho) +\n TT.dot(cc, W_co))\n\n # hidden state\n hh = og * self.activation(cc)\n\n if hh.ndim == 2:\n h = TT.concatenate([hh, cc], axis=1)\n else:\n h = TT.concatenate([hh, cc], axis=0)\n if self.activ_noise and use_noise:\n h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)\n if mask is not None:\n if h.ndim ==2 and mask.ndim==1:\n mask = mask.dimshuffle(0,'x')\n h = mask * h + (1-mask) * state_before\n return h\n\n def 
fprop(self,\n state_below,\n mask=None,\n init_state=None,\n nsteps=None,\n batch_size=None,\n use_noise=True,\n truncate_gradient=-1,\n no_noise_bias = False,\n **kwargs\n ):\n\n if theano.config.floatX=='float32':\n floatX = numpy.float32\n else:\n floatX = numpy.float64\n if nsteps is None:\n nsteps = state_below.shape[0]\n if batch_size and batch_size != 1:\n nsteps = nsteps / batch_size\n if batch_size is None and state_below.ndim == 3:\n batch_size = state_below.shape[1]\n if state_below.ndim == 2 and \\\n (not isinstance(batch_size,int) or batch_size > 1):\n state_below = state_below.reshape((nsteps, batch_size, state_below.shape[-1]))\n\n if not init_state:\n if not isinstance(batch_size, int) or batch_size != 1:\n init_state = TT.alloc(floatX(0), batch_size, self.n_hids * 2)\n else:\n init_state = TT.alloc(floatX(0), self.n_hids * 2)\n\n if mask:\n inps = [state_below, mask]\n fn = lambda x,y,z : self.step_fprop(x,y,z, use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n else:\n inps = [state_below]\n fn = lambda tx, ty: self.step_fprop(tx, None, ty,\n use_noise=use_noise,\n no_noise_bias=no_noise_bias)\n\n rval, updates = theano.scan(fn,\n sequences = inps,\n outputs_info = [init_state],\n name='layer_%s'%self.name,\n profile=self.profile,\n truncate_gradient = truncate_gradient,\n n_steps = nsteps)\n new_h = rval\n self.out = rval\n self.rval = rval\n self.updates = updates\n\n return self.out\n\n\n"
]
| [
[
"numpy.sum",
"numpy.zeros"
]
]
|
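
Editor's note (not part of the dataset rows): the row above dumps GroundHog-style Theano layers whose core is the gated update in `RecurrentLayer.step_fprop` — a reset gate damps the previous hidden state before the candidate is computed, and an update gate interpolates between the candidate and the previous state. Below is a minimal NumPy re-statement of that update for readers skimming the escaped source. It is a hedged sketch, not the original implementation: the activations mirror the class defaults (`sigmoid` for the gater/reseter, `tanh` for the state), and `state_below`/`gater_below`/`reseter_below` are assumed to be the precomputed input projections, as they are in the original `fprop`.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gated_step(state_below, gater_below, reseter_below, h_prev,
               W_hh, G_hh, R_hh):
    # Reset gate: optionally damp the previous hidden state.
    r = sigmoid(h_prev @ R_hh + reseter_below)
    # Candidate state from the (reset) previous state plus the input projection.
    h_tilde = np.tanh((r * h_prev) @ W_hh + state_below)
    # Update gate: interpolate between the candidate and the previous state.
    g = sigmoid(h_prev @ G_hh + gater_below)
    return g * h_tilde + (1.0 - g) * h_prev

# Tiny smoke test with random projections (shapes are illustrative only).
rng = np.random.default_rng(0)
n = 4
h = np.zeros(n)
W_hh, G_hh, R_hh = (rng.standard_normal((n, n)) * 0.1 for _ in range(3))
x = g_in = r_in = rng.standard_normal(n) * 0.1
h = gated_step(x, g_in, r_in, h, W_hh, G_hh, R_hh)

This is the same GRU-like interpolation the dumped code performs (`h = gater * h + (1 - gater) * state_before`); masking, weight noise, and dropout from the original are deliberately omitted here.
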
ThorstenGroh/Qcodes_contrib_drivers | [
"97e05f8f5d8762953ee9db9bc461d0814eef657d"
]
| [
"qcodes_contrib_drivers/drivers/Spectrum/M4i.py"
]
| [
"# **************************************************************************\n#\n# Driver file for M4i.44x-x8\n#\n# **************************************************************************\n#\n# QuTech\n#\n# Written by: Luka Bavdaz, Marco Tagliaferri, Pieter Eendebak\n# Also see: http://spectrum-instrumentation.com/en/m4i-platform-overview\n#\n\n# %%\nimport os\nimport sys\nimport logging\nimport numpy as np\nimport ctypes as ct\nfrom functools import partial\nfrom typing import Union, Type\n\nfrom qcodes.utils.validators import Enum, Numbers, Anything, Ints\nfrom qcodes.instrument.base import Instrument\n\nlog = logging.getLogger(__name__)\n\ntry:\n # add the location of the pyspcm header file manually\n header_dir = os.path.split(__file__)[0]\n\n if not header_dir in sys.path:\n log.info('M4i: adding header_dir %s to sys.path' % header_dir)\n sys.path.append(header_dir)\n import pyspcm\nexcept (ImportError, OSError) as ex:\n info_str = 'to use the M4i driver install the pyspcm module and the M4i libs'\n log.exception(info_str)\n raise ImportError(info_str)\n\n# %% Helper functions\n\n\ndef szTypeToName(lCardType):\n \"\"\" Convert card type to string\n\n This function is taken from an example provided by Spectrum GmbH\n \"\"\"\n sName = ''\n lVersion = (lCardType & pyspcm.TYP_VERSIONMASK)\n if (lCardType & pyspcm.TYP_SERIESMASK) == pyspcm.TYP_M2ISERIES:\n sName = 'M2i.%04x' % lVersion\n elif (lCardType & pyspcm.TYP_SERIESMASK) == pyspcm.TYP_M2IEXPSERIES:\n sName = 'M2i.%04x-Exp' % lVersion\n elif (lCardType & pyspcm.TYP_SERIESMASK) == pyspcm.TYP_M3ISERIES:\n sName = 'M3i.%04x' % lVersion\n elif (lCardType & pyspcm.TYP_SERIESMASK) == pyspcm.TYP_M3IEXPSERIES:\n sName = 'M3i.%04x-Exp' % lVersion\n elif (lCardType & pyspcm.TYP_SERIESMASK) == pyspcm.TYP_M4IEXPSERIES:\n sName = 'M4i.%04x-x8' % lVersion\n else:\n sName = 'unknown type'\n return sName\n\n# %% Main driver class\n\n\nclass M4i(Instrument):\n\n _NO_HF_MODE = -1\n\n def __init__(self, name, cardid='spcm0', **kwargs):\n \"\"\" Driver for the Spectrum M4i.44xx-x8 cards.\n\n For more information see: http://spectrum-instrumentation.com/en/m4i-platform-overview\n\n Example:\n\n Example usage for acquisition with channel 2 using an external trigger\n that triggers multiple times with trigger mode HIGH::\n\n m4 = M4i(name='M4i', server_name=None)\n m4.enable_channels(pyspcm.CHANNEL2)\n m4.set_channel_settings(2,mV_range, input_path, termination, coupling, compensation)\n m4.set_ext0_OR_trigger_settings(pyspcm.SPC_TM_HIGH,termination,coupling,level0)\n calc = m4.multiple_trigger_acquisition(mV_range,memsize,seg_size,posttrigger_size)\n\n Note:\n Error generated by the card can be retrieved with the method :func:`get_error_info32bit`. 
The card can be\n reset with :func:`reset`.\n Sometimes when an error occurs (including validation errors) the python console needs to be restarted\n\n\n \"\"\"\n super().__init__(name, **kwargs)\n\n self.hCard = pyspcm.spcm_hOpen(cardid)\n if self.hCard is None:\n logging.warning(\"M4i: no card found\\n\")\n\n # add parameters for getting\n self.add_parameter('card_id',\n label='card id',\n get_cmd=None, set_cmd=None,\n initial_value=cardid,\n vals=Anything(),\n docstring='The card ID')\n self.add_parameter('max_sample_rate',\n label='max sample rate',\n unit='Hz',\n get_cmd=self.get_max_sample_rate,\n docstring='The maximumum sample rate')\n self.add_parameter('memory',\n label='memory',\n unit='bytes',\n get_cmd=self.get_card_memory,\n docstring='Amount of memory on card')\n self.add_parameter('resolution',\n label='resolution',\n unit='bits',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_MIINST_BITSPERSAMPLE),\n docstring='Resolution of the card')\n self.add_parameter('pcidate',\n label='pcidate',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_PCIDATE),\n docstring='The PCI date')\n self.add_parameter('serial_number',\n label='serial number',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_PCISERIALNO),\n docstring='The serial number of the board')\n self.add_parameter('channel_count',\n label='channel count',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_CHCOUNT),\n docstring='Return number of enabled channels')\n self.add_parameter('input_path_count',\n label='input path count',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_READAIPATHCOUNT),\n docstring='Return number of analog input paths')\n self.add_parameter('input_ranges_count',\n label='input ranges count',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_READIRCOUNT),\n docstring='Return number of input ranges for the current input path')\n self.add_parameter('input_path_features',\n label='input path features',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_READAIFEATURES),\n docstring='Return a bitmap of features for current input path')\n self.add_parameter('available_card_modes',\n label='available card modes',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_AVAILCARDMODES),\n docstring='Return a bitmap of available card modes')\n self.add_parameter('card_status',\n label='card status',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_M2STATUS),\n docstring='Return a bitmap for the status information')\n self.add_parameter('read_range_min_0',\n label='read range min 0', unit='mV',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_READRANGEMIN0),\n docstring='Return the lower border of input range 0')\n\n # buffer handling\n self.add_parameter('user_available_length',\n label='user available length',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_DATA_AVAIL_USER_LEN),\n docstring='returns the number of currently to the user available bytes inside a sample data transfer')\n self.add_parameter('user_available_position',\n label='user available position',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_DATA_AVAIL_USER_POS),\n docstring='returns the position as byte index where the currently available data samles start')\n self.add_parameter('buffer_fill_size',\n label='buffer fill size',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_FILLSIZEPROMILLE),\n docstring='returns the current fill size of the on-board memory (FIFO buffer) in promille (1/1000)')\n\n # triggering\n self.add_parameter('available_trigger_or_mask',\n label='available trigger or mask',\n get_cmd=partial(self._param32bit,\n 
pyspcm.SPC_TRIG_AVAILORMASK),\n docstring='bitmask, in which all bits of sources for the OR mask are set, if available')\n self.add_parameter('available_channel_or_mask',\n label='available channel or mask',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_CH_AVAILORMASK0),\n docstring='bitmask, in which all bits of sources/channels (0-31) for the OR mask are set, if available')\n self.add_parameter('available_trigger_and_mask',\n label='available trigger and mask',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_AVAILANDMASK),\n docstring='bitmask, in which all bits of sources for the AND mask are set, if available')\n self.add_parameter('available_channel_and_mask',\n label='available channel and mask',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_CH_AVAILANDMASK0),\n docstring='bitmask, in which all bits of sources/channels (0-31) for the AND mask are set, if available')\n self.add_parameter('available_trigger_delay',\n label='available trigger delay',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_AVAILDELAY),\n docstring='contains the maximum available delay as decimal integer value')\n self.add_parameter('available_external_trigger_modes',\n label='available external trigger modes',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_EXT0_AVAILMODES),\n docstring='bitmask showing all available trigger modes for external 0 (main analog trigger input)')\n self.add_parameter('external_trigger_min_level',\n label='external trigger min level',\n unit='mV',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_EXT_AVAIL0_MIN),\n docstring='returns the minimum trigger level')\n self.add_parameter('external_trigger_max_level',\n label='external trigger max level',\n unit='mV',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_EXT_AVAIL0_MAX),\n docstring='returns the maximum trigger level')\n self.add_parameter('external_trigger_level_step_size',\n label='external trigger level step size',\n unit='mV',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_EXT_AVAIL0_STEP),\n docstring='returns the step size of the trigger level')\n self.add_parameter('available_channel_trigger_modes',\n label='available channel trigger modes',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_CH_AVAILMODES),\n docstring='bitmask, in which all bits of the modes for the channel trigger are set')\n self.add_parameter('trigger_counter',\n label='trigger counter',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIGGERCOUNTER),\n docstring='returns the number of triger events since acquisition start')\n # data per sample\n self.add_parameter('bytes_per_sample',\n label='bytes per sample',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_MIINST_BYTESPERSAMPLE),\n docstring='returns the number of bytes per sample')\n self.add_parameter('bits_per_sample',\n label='bits per sample',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_MIINST_BITSPERSAMPLE),\n docstring='returns the number of bits per sample')\n\n # available clock modes\n self.add_parameter('available_clock_modes',\n label='available clock modes',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_AVAILCLOCKMODES),\n docstring='returns a bitmask in which the bits of the clock modes are set, if available')\n\n # converting ADC samples to voltage values\n self.add_parameter('ADC_to_voltage',\n label='ADC to voltage',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_MIINST_MAXADCVALUE),\n docstring='contains the decimal code (in LSB) of the ADC full scale value')\n\n self.add_parameter('box_averages',\n label='number 
samples in box averaging',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_BOX_AVERAGES),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_BOX_AVERAGES),\n vals=Enum(2, 4, 8, 16, 32, 64, 128, 256),\n docstring='Defines the number of successive samples per channel that are summed together')\n\n self.add_parameter('oversampling_factor',\n label='oversampling factor',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_OVERSAMPLINGFACTOR),\n docstring='Reads the oversampling factor')\n\n # add parameters for setting and getting (read/write direction\n # registers)\n\n self.add_parameter('enable_channels',\n label='Channels enabled',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_CHENABLE),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_CHENABLE),\n vals=Enum(1, 2, 4, 8, 3, 5, 9, 6, 10, 12, 15),\n docstring='Set and get enabled channels')\n\n # analog input path functions\n # TODO: change Enum validator to set_parser for the numbered functions\n # if we want string inputs\n\n self.add_parameter('read_input_path',\n label='read input path',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_READAIPATH),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_READAIPATH),\n vals=Enum(0, 1, 2, 3),\n docstring='Select the input path which is used to read out the features')\n\n for i in [0, 1, 2, 3]:\n self.add_parameter('input_path_{}'.format(i),\n label='input path {}'.format(i),\n get_cmd=partial(self._param32bit, getattr(\n pyspcm, 'SPC_PATH{}'.format(i))),\n set_cmd=partial(self._set_param32bit, getattr(\n pyspcm, 'SPC_PATH{}'.format(i))),\n vals=Enum(0, 1),\n docstring='Set and get analog input path for channel {}'.format(i))\n\n # channel range functions\n # TODO: check the input path to set the right validator (either by\n # directly calling input_path_x() or by storing a variable)\n self.add_parameter('range_channel_{}'.format(i),\n label='range channel {}'.format(i),\n get_cmd=partial(self._param32bit, getattr(\n pyspcm, 'SPC_AMP{}'.format(i))),\n set_cmd=partial(self._set_param32bit, getattr(\n pyspcm, 'SPC_AMP{}'.format(i))),\n vals=Enum(200, 500, 1000, 2000,\n 2500, 5000, 10000),\n unit='mV',\n docstring='Set and get input range of channel {} (in mV)'.format(i))\n\n # input termination functions\n self.add_parameter('termination_{}'.format(i),\n label='termination {}'.format(i),\n get_cmd=partial(self._param32bit, getattr(\n pyspcm, 'SPC_50OHM{}'.format(i))),\n set_cmd=partial(self._set_param32bit, getattr(\n pyspcm, 'SPC_50OHM{}'.format(i))),\n vals=Enum(0, 1),\n docstring='if 1 sets termination to 50 Ohm, otherwise 1 MOhm for channel {}'.format(i))\n\n # input coupling\n ACDC_coupling_docstring = f'if 1 sets the AC coupling, otherwise sets the DC coupling for channel {i}'\n ACDC_coupling_docstring += '\\nThe AC coupling only works if the card is in HF mode.'\n self.add_parameter('ACDC_coupling_{}'.format(i),\n label='ACDC coupling {}'.format(i),\n get_cmd=partial(self._param32bit, getattr(\n pyspcm, 'SPC_ACDC{}'.format(i))),\n set_cmd=partial(self._set_param32bit, getattr(\n pyspcm, 'SPC_ACDC{}'.format(i))),\n vals=Enum(0, 1),\n docstring=ACDC_coupling_docstring)\n\n # AC/DC offset compensation\n self.add_parameter('ACDC_offs_compensation_{}'.format(i),\n label='ACDC offs compensation {}'.format(i),\n get_cmd=partial(self._get_compensation, i),\n set_cmd=partial(self._set_compensation, i),\n vals=Enum(0, 1, M4i._NO_HF_MODE),\n docstring=f'if 1 enables compensation, if 0 disables compensation for channel {i}. 
Value {M4i._NO_HF_MODE} means the card is not in HF mode')\n\n # anti aliasing filter (Bandwidth limit)\n self.add_parameter('anti_aliasing_filter_{}'.format(i),\n label='anti aliasing filter {}'.format(i),\n get_cmd=partial(self._param32bit, getattr(\n pyspcm, 'SPC_FILTER{}'.format(i))),\n set_cmd=partial(self._set_param32bit, getattr(\n pyspcm, 'SPC_FILTER{}'.format(i))),\n vals=Enum(0, 1),\n docstring='if 1 selects bandwidth limit, if 0 sets to full bandwidth for channel {}'.format(i))\n\n self.add_parameter('channel_{}'.format(i),\n label='channel {}'.format(i),\n unit='a.u.',\n get_cmd=partial(self._read_channel, i))\n\n # acquisition modes\n # TODO: If required, the other acquisition modes can be added to the\n # validator\n self.add_parameter('card_mode',\n label='card mode',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_CARDMODE),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_CARDMODE),\n vals=Enum(pyspcm.SPC_REC_STD_SINGLE, pyspcm.SPC_REC_STD_MULTI, pyspcm.SPC_REC_STD_GATE, pyspcm.SPC_REC_STD_ABA,\n pyspcm.SPC_REC_FIFO_SINGLE, pyspcm.SPC_REC_FIFO_MULTI, pyspcm.SPC_REC_FIFO_GATE,\n pyspcm.SPC_REC_FIFO_ABA, pyspcm.SPC_REC_STD_AVERAGE, pyspcm.SPC_REC_STD_BOXCAR),\n docstring='defines the used operating mode')\n\n # wait command\n self.add_parameter('timeout',\n label='timeout',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TIMEOUT),\n unit='ms',\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_TIMEOUT),\n docstring='defines the timeout for wait commands (in ms)')\n\n # Single acquisition mode memory, pre- and posttrigger (pretrigger = memory size - posttrigger)\n # TODO: improve the validators to make them take into account the\n # current state of the instrument\n self.add_parameter('data_memory_size',\n label='data memory size',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_MEMSIZE),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_MEMSIZE),\n vals=Numbers(min_value=16),\n docstring='sets the memory size in samples per channel')\n self.add_parameter('posttrigger_memory_size',\n label='posttrigger memory size',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_POSTTRIGGER),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_POSTTRIGGER),\n docstring='sets the number of samples to be recorded after trigger event')\n\n # FIFO single acquisition length and pretrigger\n self.add_parameter('pretrigger_memory_size',\n label='pretrigger memory size',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_PRETRIGGER),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_PRETRIGGER),\n docstring='sets the number of samples to be recorded before trigger event')\n self.add_parameter('segment_size',\n label='segment size',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_SEGMENTSIZE),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_SEGMENTSIZE),\n docstring='length of segments to acquire')\n self.add_parameter('total_segments',\n label='total segments',\n get_cmd=partial(self._param32bit, pyspcm.SPC_LOOPS),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_LOOPS),\n docstring='number of segments to acquire in total. 
Setting 0 makes it run until stopped by user')\n\n # clock generation\n self.add_parameter('clock_mode',\n label='clock mode',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_CLOCKMODE),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_CLOCKMODE),\n vals=Enum(pyspcm.SPC_CM_INTPLL, pyspcm.SPC_CM_QUARTZ2,\n pyspcm.SPC_CM_EXTREFCLOCK, pyspcm.SPC_CM_PXIREFCLOCK),\n docstring='defines the used clock mode or reads out the actual selected one')\n self.add_parameter('reference_clock',\n label='frequency of external reference clock', unit='Hz',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_REFERENCECLOCK),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_REFERENCECLOCK),\n vals=Ints(),\n docstring='defines the frequency of the external reference clock')\n\n self.add_parameter('sample_rate',\n label='sample rate',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_SAMPLERATE),\n unit='Hz',\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_SAMPLERATE),\n docstring='write the sample rate for internal sample generation or read rate nearest to desired. This sample rate is rounded to an integer number.')\n\n self.add_parameter('exact_sample_rate',\n label='sample rate',\n get_cmd=self._exact_sample_rate,\n unit='Hz',\n docstring='return the exact sampling rate in Hz. This is an integer divisor of the maximum sample rate')\n\n self.add_parameter('special_clock',\n label='special clock',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_SPECIALCLOCK),\n unit='Hz',\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_SPECIALCLOCK),\n docstring='Activate/Deactivate the special clock mode (lower and more sampling clock rates)')\n\n # triggering\n self.add_parameter('trigger_or_mask',\n label='trigger or mask',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_ORMASK),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_TRIG_ORMASK),\n vals=Enum(pyspcm.SPC_TMASK_NONE, pyspcm.SPC_TMASK_SOFTWARE,\n pyspcm.SPC_TMASK_EXT0, pyspcm.SPC_TMASK_EXT1),\n docstring='defines the events included within the trigger OR mask card')\n self.add_parameter('channel_or_mask',\n label='channel or mask',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_CH_ORMASK0),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_TRIG_CH_ORMASK0),\n docstring='includes the channels (0-31) within the channel trigger OR mask of the card')\n self.add_parameter('trigger_and_mask',\n label='trigger and mask',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_ANDMASK),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_TRIG_ANDMASK),\n vals=Enum(pyspcm.SPC_TMASK_NONE,\n pyspcm.SPC_TMASK_EXT0, pyspcm.SPC_TMASK_EXT1),\n docstring='defines the events included within the trigger AND mask card')\n self.add_parameter('channel_and_mask',\n label='channel and mask',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_CH_ANDMASK0),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_TRIG_CH_ANDMASK0),\n docstring='includes the channels (0-31) within the channel trigger AND mask of the card')\n self.add_parameter('trigger_delay',\n label='trigger delay',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_DELAY),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_TRIG_DELAY),\n docstring='defines the delay for the detected trigger events')\n self.add_parameter('external_trigger_mode',\n label='external trigger mode',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_EXT0_MODE),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_TRIG_EXT0_MODE),\n docstring='defines the external trigger mode for the external SMA connector 
trigger input')\n self.add_parameter('external_trigger_termination',\n label='external trigger termination',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_TERM),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_TRIG_TERM),\n vals=Enum(0, 1),\n docstring='A 1 sets the 50 Ohm termination, a 0 sets high impedance termination')\n self.add_parameter('external_trigger_input_coupling',\n label='external trigger input coupling',\n get_cmd=partial(self._param32bit,\n pyspcm.SPC_TRIG_EXT0_ACDC),\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_TRIG_EXT0_ACDC),\n vals=Enum(0, 1),\n docstring='A 1 sets the AC coupling for the external trigger, a 0 sets DC')\n\n for l in [0, 1]:\n self.add_parameter('external_trigger_level_{}'.format(l),\n label='external trigger level {}'.format(l),\n get_cmd=partial(self._param32bit, getattr(\n pyspcm, 'SPC_TRIG_EXT0_LEVEL{}'.format(l))),\n set_cmd=partial(self._set_param32bit, getattr(\n pyspcm, 'SPC_TRIG_EXT0_LEVEL{}'.format(l))),\n unit='mV',\n docstring='trigger level {} for external trigger'.format(l))\n\n for i in [0, 1, 2, 3]:\n self.add_parameter('trigger_mode_channel_{}'.format(i),\n label='trigger mode channel {}'.format(i),\n get_cmd=partial(self._param32bit, getattr(\n pyspcm, 'SPC_TRIG_CH{}_MODE'.format(i))),\n set_cmd=partial(self._set_param32bit, getattr(\n pyspcm, 'SPC_TRIG_CH{}_MODE'.format(i))),\n docstring='sets the trigger mode for channel {}'.format(i))\n for l in [0, 1]:\n self.add_parameter('trigger_channel_{}_level_{}'.format(i, l),\n label='trigger channel {} level {}'.format(\n i, l),\n get_cmd=partial(self._param32bit, getattr(\n pyspcm, 'SPC_TRIG_CH{}_LEVEL{}'.format(i, l))),\n set_cmd=partial(self._set_param32bit, getattr(\n pyspcm, 'SPC_TRIG_CH{}_LEVEL{}'.format(i, l))),\n docstring='trigger level {} channel {}'.format(l, i))\n\n # add parameters for setting (write only registers)\n\n # Buffer handling\n self.add_parameter('card_available_length',\n label='card available length',\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_DATA_AVAIL_CARD_LEN),\n docstring='writes the number of bytes that the card can now use for sample data transfer again')\n\n # General\n self.add_parameter('general_command',\n label='general command',\n set_cmd=partial(self._set_param32bit,\n pyspcm.SPC_M2CMD),\n docstring='executes a command for the card or data transfer')\n\n # memsize used for simple channel read-out\n self._channel_memsize = 2**12\n\n # checks if requirements for the compensation get and set functions are met\n def _get_compensation(self, i):\n # if HF enabled\n if(getattr(self, 'input_path_{}'.format(i))() == 1):\n return self._param32bit(getattr(pyspcm, 'SPC_ACDC_OFFS_COMPENSATION{}'.format(i)))\n else:\n logging.info(\"M4i: HF path not set, ACDC offset compensation parameter will be ignored by the M4i card\\n\")\n return M4i._NO_HF_MODE\n\n def _set_compensation(self, i, value):\n # if HF enabled\n if(getattr(self, 'input_path_{}'.format(i))() == 1):\n self._set_param32bit(\n getattr(pyspcm, 'SPC_ACDC_OFFS_COMPENSATION{}'.format(i)), value)\n else:\n logging.warning(\"M4i: HF path not set, ignoring ACDC offset compensation set\\n\")\n\n def active_channels(self):\n \"\"\" Return a list with the indices of the active channels \"\"\"\n x = bin(self.enable_channels())[2:][::-1]\n return [i for i in range(len(x)) if int(x[i])]\n\n def get_idn(self):\n return dict(zip(('vendor', 'model', 'serial', 'firmware'), ('Spectrum_GMBH', szTypeToName(self.get_card_type()), self.serial_number(), ' ')))\n\n def reset(self):\n \"\"\" 
Reset the card\n\n The pyspcm.M2CMD_CARD_RESET command is executed.\n \"\"\"\n self.general_command(pyspcm.M2CMD_CARD_RESET)\n\n def convert_to_voltage(self, data, input_range):\n \"\"\"convert an array of numbers to an array of voltages.\"\"\"\n resolution = self.ADC_to_voltage()\n return data * input_range / resolution\n\n def initialize_channels(self, channels=None, mV_range=1000, input_path=0,\n termination=0, coupling=0, compensation=None,\n memsize=2**12, pretrigger_memsize=16):\n \"\"\" Setup channels of the digitizer for simple readout using Parameters\n\n The channels can be read out using the Parameters `channel_0`,\n `channel_1`, ...\n\n Args:\n channels (list): list of channels to setup. mV_range, input_path,\n termination, coupling, compensation. Passed to the\n set_channel_settings function\n memsize (int): memory size to use for simple channel readout\n pretrigger_memsize (int): Pretrigger memory size to use.\n The default value used is 16, which is the smallest value\n possible.\n \"\"\"\n allchannels = 0\n self._channel_memsize = memsize\n self._channel_pretrigger_memsize = pretrigger_memsize\n self.data_memory_size(memsize)\n if channels is None:\n channels = range(4)\n for ch in channels:\n self.set_channel_settings(ch, mV_range, input_path=input_path,\n termination=termination, coupling=coupling, compensation=compensation)\n allchannels = allchannels + getattr(pyspcm, 'CHANNEL%d' % ch)\n\n self.enable_channels(allchannels)\n\n def _channel_mask(self, channels=range(4)):\n \"\"\" Return mask for specified channels\n\n Args:\n channels (list): list of channel indices\n Returns:\n cx (int): channel mask\n \"\"\"\n cx = 0\n for c in channels:\n cx += getattr(pyspcm, 'CHANNEL{}'.format(c))\n return cx\n\n def _read_channel(self, channel, memsize=None):\n \"\"\" Helper function to read out a channel\n\n Before a channel is measured all channels are enabled to ensure we can\n read out channels without the overhead of changing channels.\n \"\"\"\n if memsize is None:\n memsize = self._channel_memsize\n posttrigger_size = memsize - self._channel_pretrigger_memsize\n mV_range = getattr(self, 'range_channel_%d' % channel).get()\n cx = self._channel_mask()\n self.enable_channels(cx)\n data = self.single_software_trigger_acquisition(\n mV_range, memsize, posttrigger_size)\n active = self.active_channels()\n data = data.reshape((-1, len(active)))\n value = np.mean(data[:, channel])\n return value\n\n def set_channel_settings(self, channel_index, mV_range, input_path, termination, coupling, compensation=None):\n \"\"\" Update settings of the specified channel\n\n Args:\n channel_index (int): channel to update\n mV_range (float): measurement range for the channel\n input_path (int): input path\n termination (None or int): If None, then do not update the\n termination\n coupling (None or int): Set the ACDC_coupling.If None, then do not\n update the coupling\n compensation (None or int): If None, then do not update the\n compensation\n \"\"\"\n # initialize\n getattr(self, 'input_path_{}'.format(channel_index))(\n input_path) # 0: 1 MOhm\n if termination is not None:\n getattr(self, 'termination_{}'.format(\n channel_index))(termination) # 0: DC\n if coupling is not None:\n getattr(self, 'ACDC_coupling_{}'.format(\n channel_index))(coupling) # 0: DC\n getattr(self, 'range_channel_{}'.format(channel_index))(\n mV_range) # note: set after voltage range\n # can only be used with DC coupling and 50 Ohm path (hf)\n if compensation is not None:\n getattr(self, 
'ACDC_offs_compensation_{}'.format(\n channel_index))(compensation)\n\n def set_ext0_OR_trigger_settings(self, trig_mode, termination, coupling, level0, level1=None):\n\n self.channel_or_mask(0)\n self.external_trigger_mode(trig_mode) # trigger mode\n self.trigger_or_mask(pyspcm.SPC_TMASK_EXT0) # external trigger\n self.external_trigger_termination(termination) # 1: 50 Ohm\n self.external_trigger_input_coupling(coupling) # 0: DC\n self.external_trigger_level_0(level0) # mV\n if(level1 != None):\n self.external_trigger_level_1(level1) # mV\n\n # Note: the levels need to be set in bits, not voltages! (between -8191 to\n # 8191 for 14 bits)\n def set_channel_OR_trigger_settings(self, i, trig_mode, bitlevel0, bitlevel1=None):\n \"\"\"When a channel is used for triggering it must be enabled during the\n acquisition.\"\"\"\n self.trigger_or_mask(0)\n self.channel_or_mask(getattr(pyspcm, 'SPC_TMASK0_CH{}'.format(i)))\n getattr(self, 'trigger_channel_{}_level_0'.format(i))(bitlevel0)\n if(bitlevel1 != None):\n getattr(self, 'trigger_channel_{}_level_1'.format(i))(bitlevel1)\n getattr(self, 'trigger_mode_channel_{}'.format(i))(\n trig_mode) # trigger mode\n\n def _stop_acquisition(self):\n\n # close acquisition\n self.general_command(pyspcm.M2CMD_DATA_STOPDMA)\n\n # invalidate buffer\n self._invalidate_buf(pyspcm.SPCM_BUF_DATA)\n\n self.general_command(pyspcm.M2CMD_CARD_STOP)\n\n # TODO: if multiple channels are used at the same time, the voltage conversion needs to be updated\n # TODO: the data also needs to be organized nicely (currently it\n # interleaves the data)\n def multiple_trigger_acquisition(self, mV_range, memsize, seg_size, posttrigger_size):\n \"\"\" Acquire traces with the SPC_REC_STD_MULTI mode\n\n This method does not update the triggering properties.\n\n Args:\n mV_range (float): Input range used for coversion to voltage\n memsize (int): Size of total buffer to acquire\n seg_size (int): Size of segments to record\n posttrigger_size (int): Size of the if post trigger buffer\n Returns:\n Array with measured voltages\n\n \"\"\"\n self.card_mode(pyspcm.SPC_REC_STD_MULTI) # multi\n\n self.data_memory_size(memsize)\n self.segment_size(seg_size)\n self.posttrigger_memory_size(posttrigger_size)\n numch = self._num_channels()\n\n self.general_command(pyspcm.M2CMD_CARD_START | pyspcm.M2CMD_CARD_ENABLETRIGGER)\n self.wait_ready()\n\n # convert transfer data to numpy array\n output = self._transfer_buffer_numpy(\n memsize, numch, bytes_per_sample=2)\n\n self._stop_acquisition()\n\n voltages = self.convert_to_voltage(output, mV_range / 1000)\n\n return voltages\n\n def start_acquisition(self, mV_range, memsize, posttrigger_size=None, verbose=0):\n \"\"\" Start data acquisition of a single data trace\n\n The resulting data can be acquired with the function retrieve_data.\n\n Args:\n mV_range (float): range in mV\n memsize (int): size of data trace\n posttrigger_size (int or None): size of data trace after triggering\n Returns:\n trace as a dict. 
Data concerning the trace\n \"\"\"\n self.card_mode(pyspcm.SPC_REC_STD_SINGLE) # single\n\n self.data_memory_size(memsize)\n if posttrigger_size is None:\n posttrigger_size = memsize - 16\n self.posttrigger_memory_size(posttrigger_size)\n numch = self._num_channels()\n\n # start/enable trigger/wait ready\n self.trigger_or_mask(pyspcm.SPC_TMASK_SOFTWARE) # software trigger\n self.general_command(pyspcm.M2CMD_CARD_START |\n pyspcm.M2CMD_CARD_ENABLETRIGGER)\n\n return {'memsize': memsize, 'numch': numch, 'mV_range': mV_range}\n\n def _transfer_buffer_numpy(self, memsize: int, numch: int, bytes_per_sample=2) -> np.ndarray:\n \"\"\" Transfer buffer to numpy array\n\n Args:\n memsize (int): number of samples to transfer\n numch (int): number of channels\n bytes_per_sample (int): specifies the datatype. 2 for int16, 4 for int32\n Returns:\n array: transfered data\n\n \"\"\"\n # setup software buffer\n sample_ctype: Union[Type[ct.c_int16], Type[ct.c_int32]]\n if bytes_per_sample == 2:\n sample_ctype = ct.c_int16\n elif bytes_per_sample == 4:\n sample_ctype = ct.c_int32\n else:\n raise ValueError('bytes_per_sample should be 2 or 4')\n\n ctype_buffer_type = sample_ctype * memsize * numch\n data_buffer = (ctype_buffer_type)()\n data_pointer = ct.cast(data_buffer, ct.c_void_p)\n\n # data acquisition\n self._def_transfer64bit(\n pyspcm.SPCM_BUF_DATA, pyspcm.SPCM_DIR_CARDTOPC, 0, data_pointer, 0, bytes_per_sample * memsize * numch)\n self.general_command(pyspcm.M2CMD_DATA_STARTDMA |\n pyspcm.M2CMD_DATA_WAITDMA)\n\n # convert buffer to numpy array\n output = np.frombuffer(data_buffer, dtype=sample_ctype)\n\n return output\n\n def retrieve_data(self, trace):\n \"\"\" Retrieve data from the digitizer\n\n The data acquisition must have been started by start_acquisition.\n\n Args:\n\n\n Returns:\n voltages (array)\n \"\"\"\n\n memsize = trace['memsize']\n numch = trace['numch']\n mV_range = trace['mV_range']\n\n self.wait_ready()\n\n output = self._transfer_buffer_numpy(memsize, numch)\n self._stop_acquisition()\n\n voltages = self.convert_to_voltage(output, mV_range / 1000)\n\n return voltages\n\n def single_trigger_acquisition(self, mV_range, memsize, posttrigger_size):\n \"\"\" Acquire traces with the SPC_REC_STD_SINGLE mode\n\n This method does not update the triggering properties.\n\n Args:\n mV_range (float): Input range used for coversion to voltage\n memsize (int): Size of total buffer to acquire\n posttrigger_size (int): Size of the if post trigger buffer\n Returns:\n Array with measured voltages\n \"\"\"\n self.card_mode(pyspcm.SPC_REC_STD_SINGLE)\n\n # set memsize and posttrigger\n self.data_memory_size(memsize)\n self.posttrigger_memory_size(posttrigger_size)\n numch = self._num_channels()\n\n self.general_command(pyspcm.M2CMD_CARD_START | pyspcm.M2CMD_CARD_ENABLETRIGGER)\n self.wait_ready()\n\n output = self._transfer_buffer_numpy(memsize, numch)\n self._stop_acquisition()\n\n voltages = self.convert_to_voltage(output, mV_range / 1000)\n\n return voltages\n\n def gated_trigger_acquisition(self, mV_range, memsize, pretrigger_size, posttrigger_size):\n \"\"\"doesn't work completely as expected, it triggers even when the\n trigger level is set outside of the signal range it also seems to\n additionally acquire some wrong parts of the wave, but this also exists\n in SBench6, so it is not a problem caused by this code.\"\"\"\n\n self.card_mode(pyspcm.SPC_REC_STD_GATE) # gated\n\n # set memsize and posttrigger\n self.data_memory_size(memsize)\n self.pretrigger_memory_size(pretrigger_size)\n 
self.posttrigger_memory_size(posttrigger_size)\n numch = self._num_channels()\n\n self.general_command(pyspcm.M2CMD_CARD_START | pyspcm.M2CMD_CARD_ENABLETRIGGER )\n self.wait_ready()\n\n output = self._transfer_buffer_numpy(memsize, numch)\n\n self._stop_acquisition()\n\n voltages = self.convert_to_voltage(output, mV_range / 1000)\n\n return voltages\n\n def single_software_trigger_acquisition_boxcar(self, mV_range, memsize, posttrigger_size):\n \"\"\" Acquire a single data trace with boxcar averaging\n\n Args:\n mV_range (float): range in mV\n memsize (int): size of data trace\n posttrigger_size (int): size of data trace after triggering\n Returns:\n voltages (array)\n \"\"\"\n self.card_mode(pyspcm.SPC_REC_STD_BOXCAR) # single\n\n self.segment_size(memsize)\n self.posttrigger_memory_size(posttrigger_size)\n self.data_memory_size(memsize * self.box_averages())\n numch = self._num_channels()\n\n self.trigger_or_mask(pyspcm.SPC_TMASK_SOFTWARE)\n self.general_command(pyspcm.M2CMD_CARD_START | pyspcm.M2CMD_CARD_ENABLETRIGGER)\n self.wait_ready()\n\n output = self._transfer_buffer_numpy(\n memsize, numch, bytes_per_sample=4)\n self._stop_acquisition()\n\n voltages = self.convert_to_voltage(\n output, mV_range / 1000) / self.box_averages()\n\n return voltages\n\n def single_software_trigger_acquisition(self, mV_range, memsize, posttrigger_size):\n \"\"\" Acquire a single data trace\n\n Args:\n mV_range (float): range in mV\n memsize (int): size of data trace\n posttrigger_size (int): size of data trace after triggering\n Returns:\n voltages (array)\n \"\"\"\n self.card_mode(pyspcm.SPC_REC_STD_SINGLE) # single\n\n self.data_memory_size(memsize)\n self.posttrigger_memory_size(posttrigger_size)\n numch = self._num_channels()\n\n # start/enable trigger/wait ready\n self.trigger_or_mask(pyspcm.SPC_TMASK_SOFTWARE) # software trigger\n self.general_command(pyspcm.M2CMD_CARD_START | pyspcm.M2CMD_CARD_ENABLETRIGGER)\n self.wait_ready()\n\n output = self._transfer_buffer_numpy(memsize, numch)\n self._stop_acquisition()\n\n voltages = self.convert_to_voltage(output, mV_range / 1000)\n\n return voltages\n\n def _check_buffers(self):\n \"\"\" Check validity of buffers\n\n See: manual section \"Limits of pre trigger, post trigger, memory size\"\n \"\"\"\n\n pretrigger = self.data_memory_size() - self.posttrigger_memory_size()\n if pretrigger > 2**13:\n raise Exception('value of SPC_PRETRIGGER is invalid')\n\n def _num_channels(self):\n \"\"\" Return number of channels that is enabled \"\"\"\n return bin(self.enable_channels()).count(\"1\")\n\n def wait_ready(self) -> int:\n \"\"\" Wait for the M4i card to be ready using M2CMD_CARD_WAITREADY\n Returns:\n Return code of the M4i general command used to wait for the card to be ready\n \"\"\"\n command_result = pyspcm.spcm_dwSetParam_i32(self.hCard, pyspcm.SPC_M2CMD, int(pyspcm.M2CMD_CARD_WAITREADY))\n return command_result\n \n def blockavg_hardware_trigger_acquisition(self, mV_range, nr_averages=10,\n verbose=0, post_trigger=None):\n \"\"\" Acquire data using block averaging and hardware triggering\n\n To read out multiple channels, use `initialize_channels`. This methods updates\n the external_trigger_mode and trigger_or_mask parameters.\n\n Args:\n mV_range (float)\n nr_averages (int): number of averages to take\n verbose (int): output level\n post_trigger (None or int): optional size of post_trigger buffer\n Returns:\n An array of voltages. 
If multiple channels are read,\n then the data is interleaved\n \"\"\"\n # self.available_card_modes()\n memsize = self.data_memory_size()\n self.segment_size(memsize)\n\n if post_trigger is None:\n pre_trigger = min(2**13, 16 * int((memsize / 2) // 16))\n post_trigger = memsize - pre_trigger\n else:\n pre_trigger = memsize - post_trigger\n self.posttrigger_memory_size(post_trigger)\n self.pretrigger_memory_size(pre_trigger)\n\n self._check_buffers()\n\n if verbose:\n print('blockavg_hardware_trigger_acquisition: errors %s' %\n (self.get_error_info32bit(), ))\n print('blockavg_hardware_trigger_acquisition: card_status %s' %\n (self.card_status(), ))\n\n if nr_averages == 1:\n # special case since SPC_AVERAGES cannot handle 1\n if verbose:\n print(\n 'blockavg_hardware_trigger_acquisition: pass to single_trigger_acquisition')\n return self.single_trigger_acquisition(mV_range=mV_range, memsize=memsize, posttrigger_size=post_trigger)\n\n self.card_mode(pyspcm.SPC_REC_STD_AVERAGE)\n self._set_param32bit(pyspcm.SPC_AVERAGES, nr_averages)\n numch = self._num_channels()\n\n self.external_trigger_mode(pyspcm.SPC_TM_POS)\n self.trigger_or_mask(pyspcm.SPC_TMASK_EXT0)\n self.general_command(pyspcm.M2CMD_CARD_START | pyspcm.M2CMD_CARD_ENABLETRIGGER)\n self.wait_ready()\n\n output = self._transfer_buffer_numpy(memsize, numch, bytes_per_sample=4) / nr_averages\n\n self._stop_acquisition()\n\n voltages = self.convert_to_voltage(output, mV_range / 1000)\n\n return voltages\n\n def close(self):\n \"\"\"Close handle to the card.\"\"\"\n if self.hCard is not None:\n pyspcm.spcm_vClose(self.hCard)\n self.hCard = None\n super().close()\n\n def get_card_type(self, verbose=0):\n \"\"\"Read card type.\"\"\"\n # read type, function and sn and check for D/A card\n lCardType = pyspcm.int32(0)\n pyspcm.spcm_dwGetParam_i32(\n self.hCard, pyspcm.SPC_PCITYP, pyspcm.byref(lCardType))\n if verbose:\n print('card_type: %s' % szTypeToName(lCardType.value))\n return (lCardType.value)\n\n # only works if the error was not caused by running the entire program\n # (and therefore making a new M4i object)\n def get_error_info32bit(self, verbose=False):\n \"\"\"Read an error from the error register.\n\n Args:\n verbose (bool): If True then print the error message to stdout\n Returns:\n errorreg (int)\n errorvalue (int)\n \"\"\"\n dwErrorReg = pyspcm.uint32(0)\n lErrorValue = pyspcm.int32(0)\n\n if verbose:\n buffer = (ct.c_uint8 * pyspcm.ERRORTEXTLEN)()\n pyspcm.spcm_dwGetErrorInfo_i32(self.hCard, pyspcm.byref(\n dwErrorReg), pyspcm.byref(lErrorValue), buffer)\n bb = (bytearray(buffer)).decode().strip('\\x00')\n print('get_error_info32bit: %d %d: %s' %\n (dwErrorReg.value, lErrorValue.value, bb))\n else:\n pyspcm.spcm_dwGetErrorInfo_i32(self.hCard, pyspcm.byref(\n dwErrorReg), pyspcm.byref(lErrorValue), None)\n return (dwErrorReg.value, lErrorValue.value)\n\n def _param64bit(self, param):\n \"\"\"Read a 64-bit parameter from the device.\"\"\"\n data = pyspcm.int64(0)\n pyspcm.spcm_dwGetParam_i64(self.hCard, param, pyspcm.byref(data))\n return (data.value)\n\n def _param32bit(self, param):\n \"\"\"Read a 32-bit parameter from the device.\"\"\"\n data = pyspcm.int32(0)\n pyspcm.spcm_dwGetParam_i32(self.hCard, param, pyspcm.byref(data))\n return (data.value)\n\n def _set_param32bit(self, param, value):\n \"\"\" Set a 32-bit parameter on the device.\"\"\"\n value = int(value) # convert floating point to int if necessary\n pyspcm.spcm_dwSetParam_i32(self.hCard, param, value)\n\n def _invalidate_buf(self, buf_type):\n \"\"\"Invalidate 
device buffer.\"\"\"\n pyspcm.spcm_dwInvalidateBuf(self.hCard, buf_type)\n\n def _def_transfer64bit(self, buffer_type, direction, bytes_till_event, data_pointer, offset, buffer_length):\n \"\"\"Define a 64-bit transer between the device and the computer.\"\"\"\n pyspcm.spcm_dwDefTransfer_i64(\n self.hCard, buffer_type, direction, bytes_till_event, data_pointer, offset, buffer_length)\n\n def _exact_sample_rate(self):\n \"\"\" Return exact sampling rate as a floating point number \"\"\"\n sample_rate_hz = self.sample_rate()\n max_sample_rate = self.get_max_sample_rate()\n factor = int(np.round(max_sample_rate/sample_rate_hz))\n return max_sample_rate/factor\n\n def get_max_sample_rate(self, verbose=0):\n \"\"\"Return max sample rate.\"\"\"\n # read type, function and sn and check for D/A card\n value = self._param32bit(pyspcm.SPC_PCISAMPLERATE)\n if verbose:\n print('max_sample_rate: %s' % (value))\n return value\n\n def get_card_memory(self, verbose=0):\n data = pyspcm.int64(0)\n pyspcm.spcm_dwGetParam_i64(\n self.hCard, pyspcm.SPC_PCIMEMSIZE, pyspcm.byref(data))\n if verbose:\n print('card_memory: %s' % (data.value))\n return (data.value)\n"
]
| [
[
"numpy.round",
"numpy.mean",
"numpy.frombuffer"
]
]
|
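The M4i driver above wraps Spectrum register reads/writes in QCoDeS parameters, so a measurement reduces to configuring channels, picking a sample rate, and calling one of the acquisition helpers. A minimal usage sketch, assuming `digitizer` is an instance of the M4i class shown in the record, attached to a physical Spectrum card (not runnable without the hardware and the pyspcm bindings):

    # Sketch only: `digitizer` is assumed to be an M4i instance as defined above.
    digitizer.initialize_channels(channels=[0], mV_range=1000, memsize=2**12)
    digitizer.sample_rate(int(100e6))        # request 100 MS/s; the card rounds it
    print(digitizer.exact_sample_rate())     # integer divisor of the max sample rate
    # Software-triggered single shot: returns an array of voltages.
    voltages = digitizer.single_software_trigger_acquisition(
        mV_range=1000, memsize=2**12, posttrigger_size=2**12 - 16)
    digitizer.close()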
wimpykid26/Evolutionary-Classification | [
"0a78cbebc252c0a13703aee20dac9fa234f07b08"
]
| [
"PCA/dermatology-pca.py"
]
| [
"import pandas as pd\nimport plotly.plotly as py\nfrom plotly.graph_objs import *\nimport plotly\nimport numpy as np\nplotly.tools.set_credentials_file(username='iwayankit', api_key='9syhwIKBYVUPY7uX20I9')\nimport plotly.tools as tls\n\ndf = pd.read_csv(\n filepath_or_buffer='https://archive.ics.uci.edu/ml/machine-learning-databases/dermatology/dermatology.data',\n header=None,\n sep=',',\n error_bad_lines = False)\n\ndf.colums=['erythema','scaling','definite_borders','itching','koebner_phenomenon','polygonal_papules','follicular_papules','oral_mucosal_involvement','knee_and_elbow_involvement','scalp_involvement','family_history','melanin_incontinence','eosinophils_in_the_infiltrate', 'd', 'e','f','g', 'h', 'i','j','k', 'l', 'm','n','o', 'p', 'q','r','s', 't','u','v','w', 'Age', 'class']\ndf.dropna(how=\"all\", inplace=True) # drops the empty line at file-end\n#cols = df.columns.tolist()\n#cols = cols[-1:] + cols[:-1]\n#df = df[cols]\ndf.replace('?',int('-9999'),inplace=True)\n\n# split data table into data X and class labels y\n\nX = df.ix[:,0:34].values\ny = df.ix[:,34].values\n\nfrom sklearn.preprocessing import StandardScaler\nX_std = StandardScaler().fit_transform(X)\n\n# plotting histograms\n\ntraces = []\n\nlegend = {0:False, 1:False, 2:False, 3:False ,4:False ,5:False ,6:True }\n\ncolors = {1 : 'rgb(31, 119, 180)',\n 2 : 'rgb(255, 127, 14)',\n 3 : 'rgb(44, 160, 44)',\n 4 : 'rgb(31, 221, 180)',\n 5 : 'rgb(255, 160, 14)',\n 6 : 'rgb(44, 127, 44)'}\n\nfor col in range(7):\n for key in colors:\n traces.append(Histogram(x=X[y==key, col],\n opacity=0.75,\n xaxis='x%s' %(col+1),\n marker=Marker(color=colors[key]),\n name=key,\n showlegend=legend[col]))\n\ndata = Data(traces)\n#print data\n\nmean_vec = np.mean(X_std, axis=0)\ncov_mat = (X_std - mean_vec).T.dot((X_std - mean_vec)) / (X_std.shape[0]-1)\nprint('Covariance matrix \\n%s' %cov_mat)\nprint('NumPy covariance matrix: \\n%s' %np.cov(X_std.T))\n\ncov_mat = np.cov(X_std.T)\n\neig_vals, eig_vecs = np.linalg.eig(cov_mat)\n\nprint('Eigenvectors \\n%s' %eig_vecs)\nprint('\\nEigenvalues \\n%s' %eig_vals)\n\n\ncor_mat1 = np.corrcoef(X_std.T)\n\neig_vals, eig_vecs = np.linalg.eig(cor_mat1)\n\nprint('Eigenvectors \\n%s' %eig_vecs)\nprint('\\nEigenvalues \\n%s' %eig_vals)\n\n#cor_mat2 = np.corrcoef(X.T)\n\n#eig_vals, eig_vecs = np.linalg.eig(cor_mat2)\n\n#print('Eigenvectors \\n%s' %eig_vecs)\n#print('\\nEigenvalues \\n%s' %eig_vals)\n\nu,s,v = np.linalg.svd(X_std.T)\nu\nfor ev in eig_vecs:\n np.testing.assert_array_almost_equal(1.0, np.linalg.norm(ev))\nprint('Everything ok!')\n\n# Make a list of (eigenvalue, eigenvector) tuples\neig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]\n\n# Sort the (eigenvalue, eigenvector) tuples from high to low\neig_pairs.sort()\neig_pairs.reverse()\n\n# Visually confirm that the list is correctly sorted by decreasing eigenvalues\nprint('Eigenvalues in descending order:')\nfor i in eig_pairs:\n print(i[0])\n\ntot = sum(eig_vals)\nvar_exp = [(i / tot)*100 for i in sorted(eig_vals, reverse=True)]\ncum_var_exp = np.cumsum(var_exp)\n\ntrace1 = Bar(\n x=['PC %s' %i for i in range(1,35)],\n y=var_exp,\n showlegend=False)\n\ntrace2 = Scatter(\n x=['PC %s' %i for i in range(1,35)],\n y=cum_var_exp,\n name='cumulative explained variance')\n\ndata = Data([trace1, trace2])\n\nlayout=Layout(\n yaxis=YAxis(title='Explained variance in percent'),\n title='Explained variance by different principal components')\n\nfig = Figure(data=data, layout=layout)\npy.plot(fig)\n\n\nmatrix_w = 
np.hstack((eig_pairs[0][1].reshape(35,1),\n eig_pairs[1][1].reshape(35,1),\n eig_pairs[2][1].reshape(35,1),\n eig_pairs[3][1].reshape(35,1),\n eig_pairs[4][1].reshape(35,1),\n eig_pairs[5][1].reshape(35,1)))\n\nprint('Matrix W:\\n', matrix_w)\n\nY = X_std.dot(matrix_w)\n\ntraces = []\n\nfor name in ('psoriasis', 'seboreic-dermatitis', 'lichen-planus', 'pityriasis-rosea', 'cronic-dermatitis', 'pityriasis-rubra-pilaris'):\n\n trace = Scatter(\n x=Y[y==name,0],\n y=Y[y==name,1],\n mode='markers',\n name=name,\n marker=Marker(\n size=12,\n line=Line(\n color='rgba(217, 217, 217, 0.14)',\n width=0.5),\n opacity=0.8))\n traces.append(trace)\n\n\ndata = Data(traces)\nlayout = Layout(showlegend=True,\n scene=Scene(xaxis=XAxis(title='PC1'),\n yaxis=YAxis(title='PC2'),))\n\nfig = Figure(data=data, layout=layout)\npy.plot(fig)\n\n\nlayout = Layout(showlegend = True,\n barmode='overlay',\n xaxis=XAxis(domain=[0, .03], title='erythema (cm)'),\n xaxis2=XAxis(domain=[.03, .06], title='scaling (cm)'),\n xaxis3=XAxis(domain=[.09, .12], title='definite_borders (cm)'),\n xaxis4=XAxis(domain=[.12, .15], title='itching (cm)'),\n xaxis5=XAxis(domain=[.18, .21], title='koebner_phenomenon (cm)'),\n xaxis6=XAxis(domain=[.21, .24], title='polygonal_papules (cm)'),\n xaxis7=XAxis(domain=[.24, .27], title='follicular_papules (cm)'),\n xaxis8=XAxis(domain=[.3, .33], title='oral_mucosal_involvement (cm)'),\n xaxis9=XAxis(domain=[.33, .36], title='knee_and_elbow_involvement (cm)'),\n xaxis10=XAxis(domain=[.36, .39], title='scalp_involvement (cm)'),\n xaxis11=XAxis(domain=[.42, .45], title='family_history (cm)'),\n xaxis12=XAxis(domain=[.45, .48], title='melanin_incontinence (cm)'),\n xaxis13=XAxis(domain=[.48, .51], title='eosinophils_in_the_infiltrate (cm)'),\n xaxis14=XAxis(domain=[.51, .54], title='d (cm)'),\n xaxis15=XAxis(domain=[.54, .57], title='e (cm)'),\n xaxis16=XAxis(domain=[.57, .6], title='f (cm)'),\n xaxis17=XAxis(domain=[.6, .63], title='g (cm)'),\n xaxis18=XAxis(domain=[.63, .66], title='h (cm)'),\n xaxis19=XAxis(domain=[.66, .69], title='i (cm)'),\n xaxis20=XAxis(domain=[.69, .72], title='j (cm)'),\n xaxis21=XAxis(domain=[.72, .75], title='k (cm)'),\n xaxis22=XAxis(domain=[.75, .78], title='l (cm)'),\n xaxis23=XAxis(domain=[.78, .81], title='m (cm)'),\n xaxis24=XAxis(domain=[.81, .84], title='n (cm)'),\n xaxis25=XAxis(domain=[.84, .87], title='o (cm)'),\n xaxis26=XAxis(domain=[.87, .90], title='p (cm)'),\n xaxis27=XAxis(domain=[.90, .93], title='q (cm)'),\n xaxis28=XAxis(domain=[.93, .96], title='r (cm)'),\n xaxis29=XAxis(domain=[.96, .99], title='s (cm)'),\n xaxis30=XAxis(domain=[.99, 1.02], title='t (cm)'),\n xaxis31=XAxis(domain=[1.02, 1.05], title='u (cm)'),\n xaxis32=XAxis(domain=[1.05, 1.08], title='v (cm)'),\n xaxis33=XAxis(domain=[1.08, 1.11], title='w (cm)'),\n xaxis34=XAxis(domain=[1.11, 1.14], title='Age (cm)'),\n yaxis=YAxis(title='count'),\n title='Distribution of the dermatology attributes')\n\nfig = Figure(data=data, layout=layout)\n#py.plot(fig)\n\n\n\ndf.tail()\n"
]
| [
[
"numpy.linalg.norm",
"numpy.cov",
"sklearn.preprocessing.StandardScaler",
"numpy.mean",
"numpy.linalg.eig",
"numpy.linalg.svd",
"numpy.cumsum",
"numpy.abs",
"numpy.corrcoef",
"pandas.read_csv"
]
]
|
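The dermatology script above implements PCA by hand: standardize, form a covariance (or correlation) matrix, eigendecompose, then rank components by explained variance. A self-contained sketch of that recipe on synthetic data (the shapes are placeholders, not the 34 dermatology attributes):

    # Minimal, self-contained sketch of the manual-PCA recipe used above.
    import numpy as np
    from sklearn.preprocessing import StandardScaler

    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 5))              # stand-in for the feature matrix
    X_std = StandardScaler().fit_transform(X)

    cov_mat = np.cov(X_std.T)                  # same as the script's covariance step
    eig_vals, eig_vecs = np.linalg.eig(cov_mat)

    order = np.argsort(eig_vals)[::-1]         # rank components by eigenvalue
    var_exp = 100 * eig_vals[order] / eig_vals.sum()
    print(np.cumsum(var_exp))                  # cumulative explained variance (%)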
harupy/nyaggle | [
"132a93079e364d60b5598de77ab636a603ec06a4"
]
| [
"nyaggle/feature/nlp/bert.py"
]
| [
"from typing import Any, Callable, List, Optional, Union\nimport transformers\n\nimport numpy as np\nimport pandas as pd\nfrom category_encoders.utils import convert_input\nfrom sklearn.decomposition import TruncatedSVD\nfrom tqdm import tqdm\n\nfrom nyaggle.environment import requires_torch\nfrom nyaggle.feature.base import BaseFeaturizer\n\n\nclass BertSentenceVectorizer(BaseFeaturizer):\n \"\"\"Sentence Vectorizer using BERT pretrained model.\n\n Extract fixed-length feature vector from English/Japanese variable-length sentence using BERT.\n\n Args:\n lang:\n Type of language. If set to \"jp\", Japanese BERT model is used (you need to install MeCab).\n n_components:\n Number of components in SVD. If `None`, SVD is not applied.\n text_columns:\n List of processing columns. If `None`, all object columns are regarded as text column.\n pooling_strategy:\n The pooling algorithm for generating fixed length encoding vector. 'reduce_mean' and 'reduce_max' use\n average pooling and max pooling respectively to reduce vector from (num-words, emb-dim) to (emb_dim).\n 'reduce_mean_max' performs 'reduce_mean' and 'reduce_max' separately and concat them.\n 'cls_token' takes the first element (i.e. [CLS]).\n use_cuda:\n If `True`, inference is performed on GPU.\n tokenizer:\n The custom tokenizer used instead of default tokenizer\n model:\n The custom pretrained model used instead of default BERT model\n return_same_type:\n If True, `transform` and `fit_transform` return the same type as X.\n If False, these APIs always return a numpy array, similar to sklearn's API.\n column_format:\n Name of transformed columns (used if returning type is pd.DataFrame)\n \"\"\"\n\n def __init__(self, lang: str = 'en', n_components: Optional[int] = None,\n text_columns: List[str] = None, pooling_strategy: str = 'reduce_mean',\n use_cuda: bool = False, tokenizer: transformers.PreTrainedTokenizer = None,\n model=None, return_same_type: bool = True, column_format: str = '{col}_{idx}'):\n if tokenizer is not None:\n assert model is not None\n self.tokenizer = tokenizer\n self.model = model\n if lang == 'en':\n self.tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-uncased')\n self.model = transformers.BertModel.from_pretrained('bert-base-uncased')\n elif lang == 'jp':\n self.tokenizer = transformers.BertJapaneseTokenizer.from_pretrained('bert-base-japanese-whole-word-masking')\n self.model = transformers.BertModel.from_pretrained('bert-base-japanese-whole-word-masking')\n else:\n raise ValueError('Specified language type () is invalid.'.format(lang))\n\n self.lang = lang\n self.n_components = n_components\n self.text_columns = text_columns\n self.pooling_strategy = pooling_strategy\n self.use_cuda = use_cuda\n self.return_same_type = return_same_type\n self.svd = {}\n self.column_format = column_format\n\n def _process_text(self, text: str) -> np.ndarray:\n requires_torch()\n import torch\n\n tokens_tensor = torch.tensor([self.tokenizer.encode(text, add_special_tokens=True)])\n if self.use_cuda:\n tokens_tensor = tokens_tensor.to('cuda')\n self.model.to('cuda')\n\n self.model.eval()\n with torch.no_grad():\n all_encoder_layers, _ = self.model(tokens_tensor)\n\n embedding = all_encoder_layers.cpu().numpy()[0]\n if self.pooling_strategy == 'reduce_mean':\n return np.mean(embedding, axis=0)\n elif self.pooling_strategy == 'reduce_max':\n return np.max(embedding, axis=0)\n elif self.pooling_strategy == 'reduce_mean_max':\n return np.r_[np.max(embedding, axis=0), np.mean(embedding, axis=0)]\n elif 
self.pooling_strategy == 'cls_token':\n return embedding[0]\n else:\n raise ValueError(\"specify valid pooling_strategy: {reduce_mean, reduce_max, reduce_mean_max, cls_token}\")\n\n def _fit_one(self, col: str, emb: np.ndarray):\n if not self.n_components or self.n_components >= emb.shape[1]:\n return emb\n self.svd[col] = TruncatedSVD(n_components=self.n_components, algorithm='arpack', random_state=0)\n return self.svd[col].fit(emb)\n\n def _transform_one(self, col: str, emb: np.ndarray):\n if not self.n_components or self.n_components >= emb.shape[1]:\n return emb\n return self.svd[col].transform(emb)\n\n def _fit_transform_one(self, col: str, emb: np.ndarray):\n if not self.n_components or self.n_components >= emb.shape[1]:\n return emb\n self.svd[col] = TruncatedSVD(n_components=self.n_components, algorithm='arpack', random_state=0)\n return self.svd[col].fit_transform(emb)\n\n def _process(self, X: pd.DataFrame, func: Callable[[str, np.ndarray], Any]):\n is_pandas = isinstance(X, pd.DataFrame)\n X = convert_input(X)\n\n tqdm.pandas()\n columns = self.text_columns or [c for c in X.columns if X[c].dtype == np.object]\n non_text_columns = [c for c in X.columns if c not in columns]\n\n column_names = []\n processed = []\n for c in columns:\n emb = np.vstack(X[c].progress_apply(lambda x: self._process_text(x)))\n emb = func(c, emb)\n processed.append(emb)\n column_names += [self.column_format.format(col=c, idx=i) for i in range(emb.shape[1])]\n\n processed_df = pd.DataFrame(np.hstack(processed), columns=column_names)\n\n if non_text_columns:\n X_ = X[non_text_columns].copy()\n X_ = pd.concat([X_, processed_df], axis=1)\n else:\n X_ = processed_df\n\n return X_ if self.return_same_type and is_pandas else X_.values\n\n def fit(self, X: Union[pd.DataFrame, np.ndarray], y=None):\n \"\"\"\n Fit SVD model on training data X.\n\n Args:\n X:\n Data\n y:\n Ignored\n \"\"\"\n self._process(X, self._fit_one)\n return self\n\n def transform(self, X: Union[pd.DataFrame, np.ndarray], y=None):\n \"\"\"\n Perform feature extraction and dimensionality reduction using\n BERT pre-trained model and trained SVD model.\n\n Args:\n X:\n Data\n y:\n Ignored\n \"\"\"\n return self._process(X, self._transform_one)\n\n def fit_transform(self, X: Union[pd.DataFrame, np.ndarray], y=None, **fit_params):\n \"\"\"\n Fit SVD model on training data X and perform feature extraction and dimensionality reduction using\n BERT pre-trained model and trained SVD model.\n\n Args:\n X:\n Data\n y:\n Ignored\n \"\"\"\n return self._process(X, self._fit_transform_one)\n"
]
| [
[
"numpy.max",
"torch.no_grad",
"numpy.hstack",
"numpy.mean",
"pandas.concat",
"sklearn.decomposition.TruncatedSVD"
]
]
|
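BertSentenceVectorizer above turns each text column into fixed-length BERT features, optionally compressed with TruncatedSVD. A hypothetical usage sketch; the import path is assumed from the file path in the record, and the first call downloads the pretrained weights (needs torch, transformers and network access):

    # Hypothetical usage of the BertSentenceVectorizer shown above.
    import pandas as pd
    from nyaggle.feature.nlp.bert import BertSentenceVectorizer  # assumed path

    df = pd.DataFrame({'text': ['a short sentence', 'another sentence']})
    bv = BertSentenceVectorizer(lang='en', n_components=None,
                                pooling_strategy='reduce_mean')
    features = bv.fit_transform(df)   # one row of BERT features (768-dim for
                                      # bert-base) per input text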
ktlichkid/diy-gym | [
"8783f15e2cb203829f0f1e1eac06c3310065e7f9",
"8783f15e2cb203829f0f1e1eac06c3310065e7f9"
]
| [
"diy_gym/tests/test_environment.py",
"examples/r2d2_maze/generate_maze.py"
]
| [
"import unittest\nimport os\nimport numpy as np\nfrom diy_gym import DIYGym\n\n\nclass TestEnvironment(unittest.TestCase):\n def setUp(self):\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'basic_env.yaml')\n self.env = DIYGym(config_file)\n\n def test_load_environment(self):\n self.assertTrue('plane' in self.env.models)\n self.assertTrue('red_marble' in self.env.models)\n self.assertTrue('green_marble' in self.env.models)\n self.assertTrue('blue_marble' in self.env.models)\n\n def test_spaces(self):\n self.assertTrue('force' in self.env.action_space['blue_marble'].spaces)\n self.assertTrue('camera' in self.env.observation_space['basic_env'].spaces)\n self.assertTrue('pose' in self.env.observation_space['green_marble'].spaces)\n\n def test_episode(self):\n observation = self.env.reset()\n initial_position = observation['green_marble']['pose']['position']\n\n # try to run the blue marble into the other two\n for _ in range(500):\n observation, _, _, _ = self.env.step({'blue_marble': {'force': [0, -100, 0]}})\n\n final_position = observation['green_marble']['pose']['position']\n\n # check that the green marble has moved\n self.assertNotAlmostEqual(np.linalg.norm(initial_position), np.linalg.norm(final_position), places=0)\n\n observation = self.env.reset()\n reset_position = observation['green_marble']['pose']['position']\n\n # check that the green marble has been reset back to its starting position\n self.assertAlmostEqual(np.linalg.norm(initial_position), np.linalg.norm(reset_position), places=1)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"import yaml\nimport os\nimport numpy as np\nimport math as m\nimport random\nimport argparse\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Generate a maze for an R2D2 to drive around in')\n parser.add_argument('--maze_size', default=10, type=int, help='the dimensions of the maze to be generated')\n parser.add_argument('--use_gripper_camera',\n type=lambda x: bool(strtobool(x)),\n default=False,\n help='whether to attach a camera to the R2D2s gripper')\n args = parser.parse_args()\n\n # Make a dictionary containing the usual configs along with an R2D2 for us to drive around the maze\n env_config = {\n 'camera_yaw': 0,\n 'camera_distance': 12,\n 'r2d2': {\n 'model': 'r2d2.urdf',\n 'xyz': [1.5 - args.maze_size / 2, -0.5 - args.maze_size / 2, 0.5],\n 'mass': 50,\n 'wheel_driver': {\n 'addon':\n 'joint_controller',\n 'joints': [\n 'left_front_wheel_joint', 'left_back_wheel_joint', 'right_front_wheel_joint',\n 'right_back_wheel_joint'\n ],\n 'control_mode':\n 'velocity'\n },\n 'respawn': {\n 'addon': 'respawn'\n }\n },\n 'plane': {\n 'model': 'grass/plane.urdf'\n }\n }\n\n # Optionally we can also attach a camera to the gripper of the R2D2 to see what it sees as it drives around\n if args.use_gripper_camera:\n env_config['r2d2']['gripper_camera'] = {\n 'addon': 'camera',\n 'frame': 'left_gripper_joint',\n 'xyz': [0, 0, -0.1],\n 'rpy': [m.pi / 2, 0, -m.pi / 2],\n 'resolution': [100, 100]\n }\n\n wall_config = {'model': 'wall/wall.urdf', 'use_fixed_base': True, 'scale': 0.01}\n\n # Now we actually generate a maze. This is done using a random Depth First Search\n # source: https://en.wikipedia.org/wiki/Maze_generation_algorithm#Depth-first_search\n visited = np.zeros(shape=(args.maze_size, args.maze_size), dtype=np.bool)\n offsets = [np.array(offset) for offset in [(0, 1), (1, 0), (0, -1), (-1, 0)]]\n\n path = [(0, 1)]\n maze = {path[-1]: [(-1, 1)]}\n\n while len(path):\n unvisited_neighbours = [\n tuple(path[-1] + offset) for offset in offsets\n if not tuple((path[-1] + offset).clip(0, args.maze_size - 1)) in maze\n ]\n\n if len(unvisited_neighbours):\n next_cell = random.choice(unvisited_neighbours)\n maze[next_cell] = [path[-1]]\n maze[path[-1]].append(next_cell)\n path.append(next_cell)\n else:\n path.pop()\n\n maze[(args.maze_size - 1, args.maze_size - 2)].append((args.maze_size, args.maze_size - 2))\n\n # The maze is now expressed as a bunch of cells with a list of neighbours, we'll now just create walls to\n # separate adjacent cells that aren't neighbours\n walls = []\n for cell, edges in maze.items():\n for i, offset in enumerate(offsets):\n if tuple(np.add(cell, offset)) in edges:\n continue\n\n coords = offset * 0.5 + cell - args.maze_size / 2 + 0.5\n walls.append((float(coords[1]), float(coords[0]), float(m.pi / 2 * abs(offset[1]))))\n\n # now remove all the duplicate walls by converting to a dictionary and back again (this feels a bit hacky but google tells me it's ok)\n walls = list(dict.fromkeys(walls))\n\n for i, (x, y, theta) in enumerate(walls):\n new_wall = wall_config.copy()\n new_wall['xyz'] = [x, y, 0.0]\n new_wall['rpy'] = [m.pi / 2, 0, theta]\n env_config['wall_%d' % i] = new_wall\n\n # The environment is now completely described by env_config, we'll dump this to a yaml file so we\n # can pass it to DIYGym later\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'r2d2_maze.yaml')\n\n with open(config_file, 'w') as f:\n f.write(yaml.dump(env_config))\n"
]
| [
[
"numpy.linalg.norm"
],
[
"numpy.array",
"numpy.add",
"numpy.zeros"
]
]
|
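generate_maze.py above carves the maze with a randomized depth-first search, using a clip trick to reject out-of-bounds neighbours (a neighbour outside the grid clips back onto an already-visited cell and is filtered out). A self-contained sketch of just that walk, with the entrance/exit handling and wall placement omitted:

    # Self-contained sketch of the random DFS maze walk used above.
    import random

    import numpy as np

    size = 5
    offsets = [np.array(o) for o in [(0, 1), (1, 0), (0, -1), (-1, 0)]]
    path = [(0, 0)]
    maze = {path[-1]: []}            # cell -> list of connected neighbours

    while len(path):
        # out-of-bounds neighbours clip onto a visited cell, so they drop out
        unvisited = [tuple(path[-1] + off) for off in offsets
                     if tuple((path[-1] + off).clip(0, size - 1)) not in maze]
        if unvisited:
            next_cell = random.choice(unvisited)
            maze[next_cell] = [path[-1]]
            maze[path[-1]].append(next_cell)
            path.append(next_cell)
        else:
            path.pop()

    print(len(maze), 'cells carved')  # -> 25: every cell visited exactly once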
philipperemy/tensorflow-grid-lstm | [
"9983c474ae696e3f4808c0927e3b6c7d17882e42"
]
| [
"utils.py"
]
| [
"import codecs\nimport collections\nimport os\nimport pickle\n\nimport numpy as np\n\n\nclass TextLoader(object):\n def __init__(self, data_dir, batch_size, seq_length):\n self.data_dir = data_dir\n self.batch_size = batch_size\n self.seq_length = seq_length\n\n input_file = os.path.join(data_dir, \"input.txt\")\n vocab_file = os.path.join(data_dir, \"vocab.pkl\")\n tensor_file = os.path.join(data_dir, \"data.npy\")\n\n if not (os.path.exists(vocab_file) and os.path.exists(tensor_file)):\n print(\"reading text file\")\n self.preprocess(input_file, vocab_file, tensor_file)\n else:\n print(\"loading preprocessed files\")\n self.load_preprocessed(vocab_file, tensor_file)\n self.create_batches()\n self.reset_batch_pointer()\n\n def preprocess(self, input_file, vocab_file, tensor_file):\n with codecs.open(input_file, \"r\") as f:\n data = f.read()\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: -x[1])\n self.chars, _ = list(zip(*count_pairs))\n self.vocab_size = len(self.chars)\n self.vocab = dict(zip(self.chars, range(len(self.chars))))\n with open(vocab_file, 'wb') as f:\n pickle.dump(self.chars, f)\n self.tensor = np.array(list(map(self.vocab.get, data)))\n np.save(tensor_file, self.tensor)\n\n def load_preprocessed(self, vocab_file, tensor_file):\n with open(vocab_file, 'rb') as f:\n self.chars = pickle.load(f)\n self.vocab_size = len(self.chars)\n self.vocab = dict(zip(self.chars, range(len(self.chars))))\n self.tensor = np.load(tensor_file)\n self.num_batches = self.tensor.size // (self.batch_size * self.seq_length)\n\n def create_batches(self):\n self.num_batches = self.tensor.size // (self.batch_size * self.seq_length)\n self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]\n xdata = self.tensor\n ydata = np.copy(self.tensor)\n ydata[:-1] = xdata[1:]\n ydata[-1] = xdata[0]\n self.x_batches = np.split(xdata.reshape(self.batch_size, -1), self.num_batches, 1)\n self.y_batches = np.split(ydata.reshape(self.batch_size, -1), self.num_batches, 1)\n\n validation_batches = int(self.num_batches * .2)\n self.val_batches = zip(self.x_batches[-validation_batches:], self.y_batches[-validation_batches:])\n self.x_batches = self.x_batches[:-validation_batches]\n self.y_batches = self.y_batches[:-validation_batches]\n self.num_batches -= validation_batches\n\n def next_batch(self):\n x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]\n self.pointer += 1\n return x, y\n\n def reset_batch_pointer(self):\n self.pointer = 0\n\n\ndef visualize_result():\n import pandas as pd\n import matplotlib.pyplot as plt\n\n # These are the \"Tableau 20\" colors as RGB.\n tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),\n (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),\n (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),\n (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),\n (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]\n\n # Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.\n for i in range(len(tableau20)):\n r, g, b = tableau20[i]\n tableau20[i] = (r / 255., g / 255., b / 255.)\n\n files = [('GridGRU, 3 layers', 'save_gridgru3layers/log.csv'),\n # ('GridGRU, 6 layers', 'save_gridgru6layers/log.csv'),\n ('GridLSTM, 3 layers', 'save_gridlstm3layers/log.csv'),\n ('GridLSTM, 6 layers', 'save_gridlstm6layers/log.csv'),\n ('Stacked GRU, 3 layers', 'save_gru3layers/log.csv'),\n # ('Stacked GRU, 6 layers', 
'save_gru6layers/log.csv'),\n ('Stacked LSTM, 3 layers', 'save_lstm3layers/log.csv'),\n ('Stacked LSTM, 6 layers', 'save_lstm6layers/log.csv'),\n ('Stacked RNN, 3 layers', 'save_rnn3layers/log.csv'),\n ('Stacked RNN, 6 layers', 'save_rnn6layers/log.csv')]\n for i, (k, v) in enumerate(files):\n train_loss = pd.read_csv('./save/tinyshakespeare/{}'.format(v)).groupby('epoch').mean()['train_loss']\n plt.plot(train_loss.index.tolist(), train_loss.tolist(), label=k, lw=2, color=tableau20[i * 2])\n plt.legend()\n plt.xlabel('Epochs')\n plt.ylabel('Average training loss')\n plt.show()\n"
]
| [
[
"matplotlib.pyplot.xlabel",
"numpy.copy",
"matplotlib.pyplot.legend",
"numpy.load",
"numpy.save",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
]
|
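TextLoader.create_batches above builds next-character targets by shifting the input left by one and wrapping the last element, then splitting into (batch_size, seq_length) blocks. A tiny self-contained sketch of that shift-and-split:

    # Self-contained sketch of the next-character batching used by TextLoader.
    import numpy as np

    batch_size, seq_length, num_batches = 2, 4, 3
    tensor = np.arange(batch_size * seq_length * num_batches)

    xdata = tensor
    ydata = np.copy(tensor)
    ydata[:-1] = xdata[1:]   # target at step t is the input at step t + 1
    ydata[-1] = xdata[0]     # wrap around at the very end

    x_batches = np.split(xdata.reshape(batch_size, -1), num_batches, 1)
    y_batches = np.split(ydata.reshape(batch_size, -1), num_batches, 1)
    print(x_batches[0])      # first batch: rows 0..3 and 12..15 of the input
    print(y_batches[0])      # same rows shifted by one: 1..4 and 13..16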
HubertBalcerzak/Kratos | [
"0bac5e132d02061680fc90f1e52d4930b5ed7fa3"
]
| [
"applications/CoSimulationApplication/tests/test_convergence_accelerators.py"
]
| [
"from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7\n\nimport KratosMultiphysics as KM\nimport KratosMultiphysics.KratosUnittest as KratosUnittest\n\nfrom KratosMultiphysics.CoSimulationApplication.coupling_interface_data import CouplingInterfaceData\nfrom KratosMultiphysics.CoSimulationApplication.convergence_accelerators.convergence_accelerator_wrapper import ConvergenceAcceleratorWrapper\nfrom testing_utilities import DummySolverWrapper\n\nfrom unittest.mock import Mock\nimport numpy as np\nfrom random import uniform\n\nif KM.IsDistributedRun():\n import KratosMultiphysics.mpi as KratosMPI\n\nclass TestConvergenceAcceleratorWrapper(KratosUnittest.TestCase):\n\n def setUp(self):\n self.model = KM.Model()\n self.model_part = self.model.CreateModelPart(\"default\")\n self.model_part.AddNodalSolutionStepVariable(KM.PRESSURE)\n self.model_part.AddNodalSolutionStepVariable(KM.PARTITION_INDEX)\n self.dimension = 3\n self.model_part.ProcessInfo[KM.DOMAIN_SIZE] = self.dimension\n\n self.my_pid = KM.DataCommunicator.GetDefault().Rank()\n self.num_nodes = self.my_pid % 5 + 3 # num_nodes in range (3 ... 7)\n if self.my_pid == 4:\n self.num_nodes = 0 # in order to emulate one partition not having local nodes\n\n for i in range(self.num_nodes):\n node = self.model_part.CreateNewNode(i, 0.1*i, 0.0, 0.0) # this creates the same coords in different ranks, which does not matter for this test\n\n node.SetSolutionStepValue(KM.PARTITION_INDEX, self.my_pid)\n node.SetSolutionStepValue(KM.PRESSURE, uniform(-10, 50))\n\n if KM.IsDistributedRun():\n KratosMPI.ParallelFillCommunicator(self.model_part).Execute()\n\n data_settings = KM.Parameters(\"\"\"{\n \"model_part_name\" : \"default\",\n \"variable_name\" : \"PRESSURE\"\n }\"\"\")\n self.interface_data = CouplingInterfaceData(data_settings, self.model)\n self.interface_data.Initialize()\n\n self.dummy_solver_wrapper = DummySolverWrapper({\"data_4_testing\" : self.interface_data})\n\n def test_accelerator_without_support_for_distributed_data(self):\n conv_acc_settings = KM.Parameters(\"\"\"{\n \"type\" : \"constant_relaxation\",\n \"data_name\" : \"data_4_testing\"\n }\"\"\")\n conv_acc_wrapper = ConvergenceAcceleratorWrapper(conv_acc_settings, self.dummy_solver_wrapper)\n\n exp_inp = self.interface_data.GetData()\n update_solution_return_value = [uniform(-10, 50) for _ in range(self.num_nodes)]\n\n global_update_solution_return_value = np.array(np.concatenate(KM.DataCommunicator.GetDefault().GathervDoubles(update_solution_return_value, 0)))\n\n conv_acc_mock = Mock()\n\n attrs = {\n 'SupportsDistributedData.return_value': False,\n 'UpdateSolution.return_value' : global_update_solution_return_value\n }\n conv_acc_mock.configure_mock(**attrs)\n\n conv_acc_wrapper.conv_acc = conv_acc_mock\n\n conv_acc_wrapper.InitializeSolutionStep()\n\n self.assertEqual(conv_acc_mock.SupportsDistributedData.call_count, 1)\n self.assertEqual(conv_acc_wrapper.gather_scatter_required, self.interface_data.IsDistributed()) # gather-scatter is only required in case of distributed data\n self.assertEqual(conv_acc_wrapper.executing_rank, self.my_pid == 0)\n\n conv_acc_wrapper.InitializeNonLinearIteration()\n\n # setting new solution for computing the residual\n rand_data = [uniform(-10, 50) for _ in range(self.num_nodes)]\n self.interface_data.SetData(rand_data)\n exp_res = rand_data - exp_inp\n\n conv_acc_wrapper.ComputeAndApplyUpdate()\n\n self.assertEqual(conv_acc_mock.UpdateSolution.call_count, 
int(self.my_pid == 0)) # only one rank calls \"UpdateSolution\"\n global_exp_res = np.array(np.concatenate(KM.DataCommunicator.GetDefault().GathervDoubles(exp_res, 0)))\n global_exp_inp = np.array(np.concatenate(KM.DataCommunicator.GetDefault().GathervDoubles(exp_inp, 0)))\n if self.my_pid == 0:\n # numpy arrays cannot be compared using the mock-functions, hence using the numpy functions\n np.testing.assert_array_equal(global_exp_res, conv_acc_mock.UpdateSolution.call_args[0][0])\n np.testing.assert_array_equal(global_exp_inp, conv_acc_mock.UpdateSolution.call_args[0][1])\n\n np.testing.assert_array_equal(exp_inp + update_solution_return_value, self.interface_data.GetData())\n\n\n def test_accelerator_with_support_for_distributed_data(self):\n conv_acc_settings = KM.Parameters(\"\"\"{\n \"type\" : \"constant_relaxation\",\n \"data_name\" : \"data_4_testing\"\n }\"\"\")\n conv_acc_wrapper = ConvergenceAcceleratorWrapper(conv_acc_settings, self.dummy_solver_wrapper)\n\n exp_inp = self.interface_data.GetData()\n update_solution_return_value = [uniform(-10, 50) for _ in range(self.num_nodes)]\n\n conv_acc_mock = Mock()\n\n attrs = {\n 'SupportsDistributedData.return_value': True,\n 'UpdateSolution.return_value' : update_solution_return_value\n }\n conv_acc_mock.configure_mock(**attrs)\n\n conv_acc_wrapper.conv_acc = conv_acc_mock\n\n conv_acc_wrapper.InitializeSolutionStep()\n\n self.assertEqual(conv_acc_mock.SupportsDistributedData.call_count, 1)\n self.assertFalse(conv_acc_wrapper.gather_scatter_required)\n self.assertTrue(conv_acc_wrapper.executing_rank)\n\n conv_acc_wrapper.InitializeNonLinearIteration()\n\n # setting new solution for computing the residual\n rand_data = [uniform(-10, 50) for _ in range(self.num_nodes)]\n self.interface_data.SetData(rand_data)\n exp_res = rand_data - exp_inp\n\n conv_acc_wrapper.ComputeAndApplyUpdate()\n\n self.assertEqual(conv_acc_mock.UpdateSolution.call_count, 1)\n\n # numpy arrays cannot be compared using the mock-functions, hence using the numpy functions\n np.testing.assert_array_equal(exp_res, conv_acc_mock.UpdateSolution.call_args[0][0])\n np.testing.assert_array_equal(exp_inp, conv_acc_mock.UpdateSolution.call_args[0][1])\n\n np.testing.assert_array_equal(exp_inp + update_solution_return_value, self.interface_data.GetData())\n\n\nif __name__ == '__main__':\n KratosUnittest.main()\n"
]
| [
[
"numpy.testing.assert_array_equal"
]
]
|
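The Kratos test above swaps the real convergence accelerator for a unittest.mock.Mock and wires up its return values through configure_mock, so the wrapper under test sees canned answers. A self-contained sketch of that pattern:

    # Self-contained sketch of the configure_mock pattern used in the test above.
    import numpy as np
    from unittest.mock import Mock

    conv_acc_mock = Mock()
    conv_acc_mock.configure_mock(**{
        'SupportsDistributedData.return_value': True,
        'UpdateSolution.return_value': np.zeros(3),
    })

    assert conv_acc_mock.SupportsDistributedData() is True
    np.testing.assert_array_equal(conv_acc_mock.UpdateSolution(), np.zeros(3))
    assert conv_acc_mock.UpdateSolution.call_count == 1   # calls are recorded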
radhe2205/summar | [
"2e2e63efd06c14acf275faf49a1eb69648a761e4"
]
| [
"src/preprocess.py"
]
| [
"import re\n\nimport numpy as np\nimport pandas as pd\nfrom nltk import WordNetLemmatizer\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\nfrom src.embeddings import load_vocab, load_embeddings\n\ndef find_all_num(data):\n all_ch_c = len(data)\n i = 0\n all_nums = set()\n while i < all_ch_c:\n if data[i].isnumeric():\n num = data[i]\n i+=1\n while data[i].isnumeric():\n num = num + data[i]\n i+=1\n all_nums.add(num)\n i+=1\n print(all_nums)\n\ndef clean_text(text):\n lemmatizer = WordNetLemmatizer()\n text = remove_mention_url(text=text)\n text = remove_entities(text=text)\n text = remove_hastags(text=text)\n text = lowercase(text=text)\n text = remove_non_ascii(text=text)\n text = add_space_latin(text=text)\n text = apostrophe_handling(text=text)\n text = add_space_punc(text=text)\n # text = remove_numbers(text=text)\n #text = remove_stop(text=text, stop=stop) # NOT NEEDED\n text = reduce_words(text=text)\n #text = stem_words(text=text, lemmatizer=lemmatizer)\n\n text = text.split()\n text = [w for w in text if w != '']\n text = ' '.join(text)\n return text\n\ndef stem_words(text, lemmatizer):\n if len(text) == 0:\n return text\n for word in text.split():\n text = text.replace(word, lemmatizer.lemmatize(word))\n return text\n\ndef remove_mention_url(text):\n text = re.sub('@[A-Za-z0-9_]+', '', text)\n text = re.sub('URL', '', text)\n return text\n\ndef remove_entities(text):\n text = text.replace('<', '')\n text = text.replace('>', '')\n text = text.replace('&', '')\n return text\n\ndef remove_hastags(text):\n text = re.sub('#[A-Za-z0-9_]+', '', text)\n return text\n\ndef lowercase(text):\n text = text.lower()\n return text\n\ndef remove_non_ascii(text):\n text = text.encode('ascii', 'ignore').decode('utf-8')\n return str(text)\n\ndef add_space_latin(text):\n text = re.sub('([.()!\"#$%&*+,-/:;<=>?@^_`{|}~])', '\\\\1', text)\n return text\n\ndef apostrophe_handling(text):\n contractions = {\n \"ain't\": \"am not\",\n \"aren't\": \"are not\",\n \"can't\": \"cannot\",\n \"can't've\": \"cannot have\",\n \"'cause\": \"because\",\n \"could've\": \"could have\",\n \"couldn't\": \"could not\",\n \"couldn't've\": \"could not have\",\n \"didn't\": \"did not\",\n \"doesn't\": \"does not\",\n \"don't\": \"do not\",\n \"hadn't\": \"had not\",\n \"hadn't've\": \"had not have\",\n \"hasn't\": \"has not\",\n \"haven't\": \"have not\",\n \"he'd\": \"he would\",\n \"he'd've\": \"he would have\",\n \"he'll\": \"he will\",\n \"he'll've\": \"he will have\",\n \"he's\": \"he is\",\n \"how'd\": \"how did\",\n \"how'd'y\": \"how do you\",\n \"how'll\": \"how will\",\n \"how's\": \"how is\",\n \"i'd\": \"i had\",\n \"i'd've\": \"i would have\",\n \"i'll\": \"i will\",\n \"i'll've\": \"i will have\",\n \"i'm\": \"i am\",\n \"i've\": \"i have\",\n \"isn't\": \"is not\",\n \"it'd\": \"it would\",\n \"it'd've\": \"it would have\",\n \"it'll\": \"it will\",\n \"it'll've\": \"it will have\",\n \"it's\": \"it is\",\n \"let's\": \"let us\",\n \"ma'am\": \"madam\",\n \"mayn't\": \"may not\",\n \"might've\": \"might have\",\n \"mightn't\": \"might not\",\n \"mightn't've\": \"might not have\",\n \"must've\": \"must have\",\n \"mustn't\": \"must not\",\n \"mustn't've\": \"must not have\",\n \"needn't\": \"need not\",\n \"needn't've\": \"need not have\",\n \"o'clock\": \"of the clock\",\n \"oughtn't\": \"ought not\",\n \"oughtn't've\": \"ought not have\",\n \"shan't\": \"shall not\",\n \"sha'n't\": \"shall not\",\n \"shan't've\": \"shall not have\",\n \"she'd\": \"she had\",\n 
\"she'd've\": \"she would have\",\n \"she'll\": \"she will\",\n \"she'll've\": \"she will have\",\n \"she's\": \"she is\",\n \"should've\": \"should have\",\n \"shouldn't\": \"should not\",\n \"shouldn't've\": \"should not have\",\n \"so've\": \"so have\",\n \"so's\": \"so is\",\n \"that'd\": \"that would\",\n \"that'd've\": \"that would have\",\n \"that's\": \"that is\",\n \"there'd\": \"there would\",\n \"there'd've\": \"there would have\",\n \"there's\": \"there is\",\n \"they'd\": \"they would\",\n \"they'd've\": \"they would have\",\n \"they'll\": \"they will\",\n \"they'll've\": \"they will have\",\n \"they're\": \"they are\",\n \"they've\": \"they have\",\n \"to've\": \"to have\",\n \"wasn't\": \"was not\",\n \"we'd\": \"we had\",\n \"we'd've\": \"we would have\",\n \"we'll\": \"we will\",\n \"we'll've\": \"we will have\",\n \"we're\": \"we are\",\n \"we've\": \"we have\",\n \"weren't\": \"were not\",\n \"what'll\": \"what will\",\n \"what'll've\": \"what will have\",\n \"what're\": \"what are\",\n \"what's\": \"what is\",\n \"what've\": \"what have\",\n \"when's\": \"when is\",\n \"when've\": \"when have\",\n \"where'd\": \"where did\",\n \"where's\": \"where is\",\n \"where've\": \"where have\",\n \"who'll\": \"who will\",\n \"who'll've\": \"who will have\",\n \"who's\": \"who is\",\n \"who've\": \"who have\",\n \"why's\": \"why is\",\n \"why've\": \"why have\",\n \"will've\": \"will have\",\n \"won't\": \"will not\",\n \"won't've\": \"will not have\",\n \"would've\": \"would have\",\n \"wouldn't\": \"would not\",\n \"wouldn't've\": \"would not have\",\n \"y'all\": \"you all\",\n \"y'all'd\": \"you all would\",\n \"y'all'd've\": \"you all would have\",\n \"y'all're\": \"you all are\",\n \"y'all've\": \"you all have\",\n \"you'd\": \"you had\",\n \"you'd've\": \"you would have\",\n \"you'll\": \"you will\",\n \"you'll've\": \"you will have\",\n \"you're\": \"you are\",\n \"you've\": \"you have\"\n }\n\n for word in text.split():\n if word in contractions:\n text = text.replace(word, contractions[word])\n return text\n\ndef add_space_punc(text):\n # pat = re.compile(r\"[()!?.,:;&@#*$%^+=-]\")\n pat = re.compile(r\"([\\[()!?.,:;&@#*$%><^\\\"\\'+=/\\\\\\-\\]])\")\n # text = re.sub('[()!?.,:;&@#*$%^+=-]', ' ', text)\n text = pat.sub(' \\\\1 ', text)\n return text\n\ndef remove_numbers(text):\n text = re.sub(\"\\d+\", '', text)\n return text\n\ndef remove_stop(text, stop):\n text = text.split()\n text = [w for w in text if w not in stop]\n text = ' '.join(text)\n return text\n\ndef reduce_words(text):\n def reduced_word(w):\n s = w[0]\n curr_char = w[0]\n curr_count = 1\n for c in w[1:]:\n if c == curr_char:\n curr_count += 1\n else:\n curr_char = c\n curr_count = 1\n\n if curr_count <= 2:\n s += c\n else:\n continue\n return s\n\n if len(text) == 0:\n return text\n text = reduced_word(w=text)\n return text\n\ndef read_wikihow_dataset(file_path):\n df = pd.read_csv(file_path)\n return df[\"text\"].values, df[\"headline\"].values\n\ndef save_cleaned_text(texts, summaries, file_path): # np_arrays\n cleaned_summaries = []\n cleaned_texts = []\n\n for i, (text, summary) in enumerate(zip(texts, summaries)):\n if type(text) == float or type(summary) == float:\n continue\n if i % 5000 == 0:\n print(f\"Cleaned {i}\")\n cleaned_summaries.append(clean_text(summary))\n cleaned_texts.append(clean_text(text))\n\n cleaned_frame = pd.DataFrame({\"text\": cleaned_texts, \"summary\": cleaned_summaries})\n if file_path is not None:\n cleaned_frame.to_csv(file_path, sep = \",\")\n return cleaned_texts, 
cleaned_summaries\n\ndef find_all_with_known_words(texts, summaries, wordtoidx):\n known_texts = []\n known_summaries = []\n for i, (text, summary) in enumerate(zip(texts, summaries)):\n if type(summary) == float or type(text) == float:\n continue\n not_found = False\n for word in text.split():\n if word not in wordtoidx:\n not_found = True\n break\n for word in summary.split():\n if word not in wordtoidx:\n not_found = True\n break\n if not_found:\n continue\n known_texts.append(text)\n known_summaries.append(summary)\n\n return known_texts, known_summaries\n\ndef save_known_text_summary(texts, summaries, wordtoidx, save_path):\n print(f\"Length before known word filter {len(texts)}\")\n\n known_texts, known_summaries = find_all_with_known_words(texts, summaries, wordtoidx)\n\n print(f\"Length After known word filter {len(known_summaries)}\")\n\n df = pd.DataFrame({\"text\": known_texts, \"summary\": known_summaries})\n if save_path is not None:\n df.to_csv(save_path, sep= \",\")\n return known_texts, known_summaries\n\ndef clean_wikihow():\n print(\"Reading started.\")\n texts, summaries = read_wikihow_dataset(\"data/wikihow.csv\")\n print(\"Reading complete.\")\n save_cleaned_text(texts, summaries, \"data/wikihow_clean.csv\")\n print(\"Cleaning complete.\")\n\ndef plot_word_count_stats(file_path):\n df = pd.read_csv(file_path)\n word_count = {}\n for text in df[\"text\"]:\n cnt = len(text.split())\n if cnt not in word_count:\n word_count[cnt] = 0\n\n word_count[cnt] += 1\n\n k = []\n v = []\n for cnt in word_count:\n k.append(cnt)\n v.append(word_count[cnt])\n\n plt.scatter(k, v, alpha=0.3)\n plt.legend()\n plt.show()\n\ndef clip_summary_word_count(file_path, word_count, target_file_path):\n df = pd.read_csv(file_path)\n summaries = []\n texts = []\n for text, summary in zip(df[\"text\"], df[\"summary\"]):\n if len(summary.split()) > word_count:\n continue\n summaries.append(summary)\n texts.append(text)\n\n print(f\"total exemplars after clipping: {len(texts)}\")\n pd.DataFrame({\"text\": texts, \"summary\": summaries}).to_csv(target_file_path, sep = \",\")\n\ndef add_start_end(file_path):\n df = pd.read_csv(file_path)\n texts = []\n summaries = []\n for text, summary in zip(df[\"text\"], df[\"summary\"]):\n if type(text) == float or type(summary) == float:\n continue\n summaries.append(\"<start> \" + summary + \" <end>\")\n texts.append(\"<start> \" + text + \" <end>\")\n pd.DataFrame({\"text\": texts, \"summary\": summaries}).to_csv(file_path, sep = \",\")\n\ndef all_known_count(emb_path, data_path):\n _, vocab = load_embeddings(emb_path, 50)\n df = pd.read_csv(data_path, sep=\",\")\n sum_count = 0\n for text, summary in zip(df[\"text\"], df[\"summary\"]):\n not_found = False\n if type(text) == float or type(summary) == float:\n continue\n for word in text.split():\n if word not in vocab:\n not_found = True\n if not not_found:\n for word in summary.split():\n if word not in vocab:\n not_found = True\n if not_found:\n sum_count += 1\n print(f\"Total known reviews: {sum_count}\")\n\ndef filter_with_word_count(texts, summaries, word_count_t, word_count_s):\n filtered_texts = []\n filtered_summaries = []\n for text, summary in zip(texts, summaries):\n ln = len(text.split())\n lns = len(text.split())\n if ln > word_count_t:\n continue\n if lns > word_count_s:\n continue\n filtered_summaries.append(summary)\n filtered_texts.append(text)\n return filtered_texts, filtered_summaries\n\ndef final_preprocessing():\n # clean data\n # find all with existing emb\n # add start end\n # save\n\n df = 
pd.read_csv(\"data/wikihow.csv\")\n print(\"CLEANING...\")\n texts, summaries = save_cleaned_text(df[\"text\"], df[\"headline\"], None)\n\n print(\"Filtering with word count\")\n texts, summaries = filter_with_word_count(texts, summaries, 500, 50)\n\n print(\"LOADING EMBEDDING...\")\n _, vocab = load_embeddings(\"data/embeddings/glove822/glove.6B.50d.txt\", 50)\n print(\"FINDING ALL KNOWNS...\")\n texts, summaries = save_known_text_summary(texts, summaries, vocab, save_path = None)\n\n df = pd.DataFrame({\"text\": texts, \"summary\": summaries})\n\n train_df, test_df = train_test_split(df, test_size=0.1, random_state=41)\n\n train_df.to_csv(\"data/wikihow_final_clean_known_50_train.csv\", sep = \",\")\n test_df.to_csv(\"data/wikihow_final_clean_known_50_test.csv\", sep = \",\")\n\n print(f\"total datapoints {len(texts)}\")\n add_start_end(\"data/wikihow_final_clean_known_50_train.csv\")\n add_start_end(\"data/wikihow_final_clean_known_50_test.csv\")\n print(\"DONE.\")\n\n# final_preprocessing()\n\n# all_known_count(\"data/embeddings/glove822/glove.6B.50d.txt\", \"data/wikihow_clean.csv\")\n# df = pd.read_csv(\"data/wikihow_final_clean_known.csv\")\n\ndef plot_word_count_distribution():\n text_lens = {i:0 for i in range(13000)}\n summary_lens = {i:0 for i in range(4000)}\n\n df = pd.read_csv(\"data/wikihow.csv\")\n print(len(df))\n # df = df[:1000]\n all_words = set()\n max_len_t = 0\n max_len_s = 0\n for text, summary in zip(df[\"text\"], df[\"headline\"]):\n if type(text) != str or type(summary) != str:\n continue\n\n words_t = text.split()\n words_s = summary.split()\n\n text_lens[len(words_t)] += 1\n summary_lens[len((words_s))] += 1\n\n for word in words_t:\n all_words.add(word)\n for word in words_s:\n all_words.add(word)\n if len(words_t) > max_len_t:\n max_len_t = len(words_t)\n if len(words_s) > max_len_s:\n max_len_s = len(words_s)\n\n del text_lens[0]\n del summary_lens[0]\n\n plt.plot(text_lens.values())\n plt.xlabel(\"Count words\")\n plt.ylabel(\"Number of data points\")\n plt.title(\"Text length distribution\")\n # plt.legend()\n plt.show()\n\n plt.plot(summary_lens.values())\n plt.xlabel(\"Count words\")\n plt.ylabel(\"Number of data points\")\n plt.title(\"Summary length distribution\")\n # plt.legend()\n plt.show()\n\n print(f\"Total distinct words {len(all_words)}\")\n print(f\"Max text length {max_len_t}\")\n print(f\"Max summary length {max_len_s}\")\n\ndef remove_non_string(texts, summaries):\n clean_texts = []\n clean_summaries = []\n\n for i, (text, summary) in enumerate(zip(texts, summaries)):\n if type(text) == float or pd.isnull(text) or text.isspace() or not text:\n continue\n clean_texts.append(text)\n clean_summaries.append(summary)\n\n return clean_texts, clean_summaries\n\ndef create_clean_data_sith():\n # clean data\n # find all with existing emb\n # add start end\n # save\n\n df = pd.read_csv(\"data/wikihow.csv\")\n print(\"Sith CLEANING...\")\n texts, summaries = save_cleaned_text(df[\"text\"], df[\"headline\"], None)\n summaries = [\" \" for _ in summaries]\n\n print(\"LOADING EMBEDDING...\")\n _, vocab = load_embeddings(\"data/embeddings/glove822/glove.6B.50d.txt\", 50)\n print(\"FINDING ALL KNOWNS...\")\n texts, summaries = save_known_text_summary(texts, summaries, vocab, save_path = None)\n\n texts, summaries = remove_non_string(texts, summaries)\n\n df = pd.DataFrame({\"text\": texts, \"summary\": summaries})\n\n train_df, test_df = train_test_split(df, test_size=0.1, random_state=41)\n\n 
train_df.to_csv(\"data/wikihow_final_clean_known_train_sith.csv\", sep = \",\")\n test_df.to_csv(\"data/wikihow_final_clean_known_test_sith.csv\", sep = \",\")\n\n # print(f\"total datapoints {len(texts)}\")\n # add_start_end(\"data/wikihow_final_clean_known_train_sith.csv\")\n # add_start_end(\"data/wikihow_final_clean_known_test_sith.csv\")\n print(\"DONE.\")\n\ncreate_clean_data_sith()\n\n# df = pd.read_csv(\"data/wikihow_known_500.csv\")\n# summ_cnt = {100: 0, 200:0, 300:0, 400:0, 500:0}\n# max_text_cnt = 0\n# all_words = set()\n# for text, summary in zip(df[\"text\"], df[\"summary\"]):\n# ln = len(summary.split())\n# txt_ln = len(text.split())\n# for word in summary.split():\n# all_words.add(word)\n# for word in text.split():\n# all_words.add(word)\n# if txt_ln > max_text_cnt:\n# max_text_cnt = txt_ln\n#\n# for ct in summ_cnt:\n# if ct > ln:\n# summ_cnt[ct] += 1\n#\n# print(f\"Total words: {len(all_words)}\")\n# print(summ_cnt)\n# print(max_text_cnt)\n#\n"
]
| [
[
"pandas.isnull",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.scatter",
"pandas.read_csv"
]
]
|
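Note on the record above: its apostrophe_handling applies str.replace over the whole string, which can fire inside longer tokens — when both tokens occur, expanding "he'd" also rewrites the tail of "she'd", bypassing the table's own "she'd": "she had" entry. A minimal token-wise variant avoids that; the two-entry table below is illustrative only, the record carries the full dictionary.

# Minimal sketch: token-wise contraction expansion (illustrative table only).
CONTRACTIONS = {"he's": "he is", "she's": "she is"}

def expand_contractions(text):
    # Rewrite whole tokens instead of calling str.replace on the full string,
    # so one key can never clobber the tail of another word.
    return ' '.join(CONTRACTIONS.get(tok, tok) for tok in text.split())

assert expand_contractions("she's sure he's late") == "she is sure he is late"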
xinjie0831/OpenRAM | [
"76e2ab88fe4097ffa51e0387ba72165bcda49e68"
]
| [
"compiler/gdsMill/gdsMill/vlsiLayout.py"
]
| [
"from .gdsPrimitives import *\nfrom datetime import *\n#from mpmath import matrix\n#from numpy import matrix\nimport numpy as np\n#import gdsPrimitives\nimport debug\n\nclass VlsiLayout:\n \"\"\"Class represent a hierarchical layout\"\"\"\n\n def __init__(self, name=None, units=(0.001,1e-9), libraryName = \"DEFAULT.DB\", gdsVersion=5):\n #keep a list of all the structures in this layout\n self.units = units\n #print(units)\n modDate = datetime.now()\n self.structures=dict()\n self.layerNumbersInUse = []\n self.debug = False\n if name:\n self.rootStructureName=name\n #create the ROOT structure\n self.structures[self.rootStructureName] = GdsStructure()\n self.structures[self.rootStructureName].name = name\n self.structures[self.rootStructureName].createDate = (modDate.year,\n modDate.month,\n modDate.day,\n modDate.hour,\n modDate.minute,\n modDate.second)\n self.structures[self.rootStructureName].modDate = (modDate.year,\n modDate.month,\n modDate.day,\n modDate.hour,\n modDate.minute,\n modDate.second)\n \n self.info = dict() #information gathered from the GDSII header\n self.info['units']=self.units\n self.info['dates']=(modDate.year,\n modDate.month,\n modDate.day,\n modDate.hour,\n modDate.minute,\n modDate.second,\n modDate.year,\n modDate.month,\n modDate.day,\n modDate.hour,\n modDate.minute,\n modDate.second)\n self.info['libraryName']=libraryName\n self.info['gdsVersion']=gdsVersion\n \n self.xyTree = [] #This will contain a list of all structure names\n #expanded to include srefs / arefs separately.\n #each structure will have an X,Y,offset, and rotate associated\n #with it. Populate via traverseTheHierarchy method.\n \n #temp variables used in delegate functions\n self.tempCoordinates=None\n self.tempPassFail = True\n\n # This is a dict indexed by the pin labels.\n # It contains a list of list of shapes, one for each occurance of the label.\n # Multiple labels may be disconnected.\n self.pins = {}\n\n def rotatedCoordinates(self,coordinatesToRotate,rotateAngle):\n #helper method to rotate a list of coordinates\n angle=math.radians(float(0))\n if(rotateAngle):\n angle = math.radians(float(rotateAngle))\n\n coordinatesRotate = [] #this will hold the rotated values \n for coordinate in coordinatesToRotate:\n # This is the CCW rotation matrix\n newX = coordinate[0]*math.cos(angle) - coordinate[1]*math.sin(angle)\n newY = coordinate[0]*math.sin(angle) + coordinate[1]*math.cos(angle)\n coordinatesRotate.extend((newX,newY))\n return coordinatesRotate\n \n def rename(self,newName):\n #make sure the newName is a multiple of 2 characters\n if(len(newName)%2 == 1):\n #pad with a zero\n newName = newName + '\\x00'\n #take the root structure and copy it to a new structure with the new name\n self.structures[newName] = self.structures[self.rootStructureName]\n self.structures[newName].name = newName\n #and delete the old root\n del self.structures[self.rootStructureName]\n self.rootStructureName = newName\n #repopulate the 2d map so drawing occurs correctly\n del self.xyTree[:]\n self.populateCoordinateMap()\n\n def newLayout(self,newName):\n #if (newName == \"\" | newName == 0):\n # print(\"ERROR: vlsiLayout.py:newLayout newName is null\")\n\n #make sure the newName is a multiple of 2 characters\n #if(len(newName)%2 == 1):\n #pad with a zero\n #newName = newName + '\\x00'\n #take the root structure and copy it to a new structure with the new name\n #self.structures[newName] = self.structures[self.rootStructureName]\n\n modDate = datetime.now()\n\n self.structures[newName] = GdsStructure()\n 
self.structures[newName].name = newName\n\n\n\n self.rootStructureName = newName\n\n self.rootStructureName=newName\n\n #create the ROOT structure\n self.structures[self.rootStructureName] = GdsStructure()\n #self.structures[self.rootStructureName].name = name\n self.structures[self.rootStructureName].createDate = (modDate.year,\n modDate.month,\n modDate.day,\n modDate.hour,\n modDate.minute,\n modDate.second)\n self.structures[self.rootStructureName].modDate = (modDate.year,\n modDate.month,\n modDate.day,\n modDate.hour,\n modDate.minute,\n modDate.second)\n \n \n #repopulate the 2d map so drawing occurs correctly\n self.prepareForWrite()\n\n def prepareForWrite(self):\n del self.xyTree[:]\n self.populateCoordinateMap()\n\n def deduceHierarchy(self):\n \"\"\" First, find the root of the tree.\n Then go through and get the name of every structure.\n Then, go through and find which structure is not\n contained by any other structure. this is the root.\"\"\"\n structureNames=[]\n for name in self.structures:\n structureNames.append(name)\n for name in self.structures:\n if(len(self.structures[name].srefs)>0): #does this structure reference any others?\n for sref in self.structures[name].srefs: #go through each reference\n if sref.sName in structureNames: #and compare to our list\n structureNames.remove(sref.sName)\n\n debug.check(len(structureNames)==1,\"Multiple possible root structures in the layout: {}\".format(str(structureNames)))\n self.rootStructureName = structureNames[0]\n\n def traverseTheHierarchy(self, startingStructureName=None, delegateFunction = None, \n transformPath = [], rotateAngle = 0, transFlags = [0,0,0], coordinates = (0,0)):\n #since this is a recursive function, must deal with the default\n #parameters explicitly \n if startingStructureName == None:\n startingStructureName = self.rootStructureName \n \n #set up the rotation matrix \n if(rotateAngle == None or rotateAngle == \"\"):\n angle = 0\n else:\n # MRG: Added negative to make CCW rotate 8/29/18\n angle = math.radians(float(rotateAngle))\n mRotate = np.array([[math.cos(angle),-math.sin(angle),0.0],\n [math.sin(angle),math.cos(angle),0.0],\n [0.0,0.0,1.0]])\n #set up the translation matrix\n translateX = float(coordinates[0])\n translateY = float(coordinates[1])\n mTranslate = np.array([[1.0,0.0,translateX],[0.0,1.0,translateY],[0.0,0.0,1.0]])\n #set up the scale matrix (handles mirror X)\n scaleX = 1.0\n if(transFlags[0]):\n scaleY = -1.0\n else:\n scaleY = 1.0\n mScale = np.array([[scaleX,0.0,0.0],[0.0,scaleY,0.0],[0.0,0.0,1.0]])\n #we need to keep track of all transforms in the hierarchy\n #when we add an element to the xy tree, we apply all transforms from the bottom up\n transformPath.append((mRotate,mScale,mTranslate))\n if delegateFunction != None:\n delegateFunction(startingStructureName, transformPath)\n #starting with a particular structure, we will recursively traverse the tree\n #********might have to set the recursion level deeper for big layouts!\n if(len(self.structures[startingStructureName].srefs)>0): #does this structure reference any others?\n #if so, go through each and call this function again\n #if not, return back to the caller (caller can be this function) \n for sref in self.structures[startingStructureName].srefs:\n #here, we are going to modify the sref coordinates based on the parent objects rotation \n self.traverseTheHierarchy(startingStructureName = sref.sName, \n delegateFunction = delegateFunction,\n transformPath = transformPath,\n rotateAngle = sref.rotateAngle,\n transFlags = 
sref.transFlags,\n coordinates = sref.coordinates)\n #MUST HANDLE AREFs HERE AS WELL\n #when we return, drop the last transform from the transformPath\n del transformPath[-1]\n return\n \n def initialize(self):\n self.deduceHierarchy()\n #self.traverseTheHierarchy()\n self.populateCoordinateMap()\n\n for layerNumber in self.layerNumbersInUse:\n self.processLabelPins(layerNumber)\n \n \n def populateCoordinateMap(self):\n def addToXyTree(startingStructureName = None,transformPath = None):\n uVector = np.array([[1.0],[0.0],[0.0]]) #start with normal basis vectors\n vVector = np.array([[0.0],[1.0],[0.0]])\n origin = np.array([[0.0],[0.0],[1.0]]) #and an origin (Z component is 1.0 to indicate position instead of vector)\n #make a copy of all the transforms and reverse it \n reverseTransformPath = transformPath[:]\n if len(reverseTransformPath) > 1:\n reverseTransformPath.reverse() \n #now go through each transform and apply them to our basis and origin in succession\n for transform in reverseTransformPath:\n origin = np.dot(transform[0], origin) #rotate\n uVector = np.dot(transform[0], uVector) #rotate\n vVector = np.dot(transform[0], vVector) #rotate\n origin = np.dot(transform[1], origin) #scale\n uVector = np.dot(transform[1], uVector) #scale\n vVector = np.dot(transform[1], vVector) #scale\n origin = np.dot(transform[2], origin) #translate\n #we don't need to do a translation on the basis vectors \n #uVector = transform[2] * uVector #translate\n #vVector = transform[2] * vVector #translate\n #populate the xyTree with each structureName and coordinate space\n self.xyTree.append((startingStructureName,origin,uVector,vVector))\n self.traverseTheHierarchy(delegateFunction = addToXyTree)\n \n def microns(self,userUnits):\n \"\"\"Utility function to convert user units to microns\"\"\"\n userUnit = self.units[1]/self.units[0]\n userUnitsPerMicron = userUnit / (userunit)\n layoutUnitsPerMicron = userUnitsPerMicron / self.units[0]\n return userUnits / layoutUnitsPerMicron\n \n def userUnits(self,microns):\n \"\"\"Utility function to convert microns to user units\"\"\"\n userUnit = self.units[1]/self.units[0]\n #userUnitsPerMicron = userUnit / 1e-6\n userUnitsPerMicron = userUnit / (userUnit)\n layoutUnitsPerMicron = userUnitsPerMicron / self.units[0]\n #print(\"userUnit:\",userUnit,\"userUnitsPerMicron\",userUnitsPerMicron,\"layoutUnitsPerMicron\",layoutUnitsPerMicron,[microns,microns*layoutUnitsPerMicron])\n return round(microns*layoutUnitsPerMicron,0)\n\n def changeRoot(self,newRoot, create=False):\n \"\"\"\n Method to change the root pointer to another layout.\n \"\"\"\n\n if self.debug:\n debug.info(0,\"DEBUG: GdsMill vlsiLayout: changeRoot: %s \"%newRoot)\n \n # Determine if newRoot exists\n # layoutToAdd (default) or nameOfLayout\n if (newRoot == 0 | ((newRoot not in self.structures) & ~create)):\n print(\"ERROR: vlsiLayout.changeRoot: Name of new root [%s] not found and create flag is false\"%newRoot)\n exit(1)\n else:\n if ((newRoot not in self.structures) & create):\n self.newLayout(newRoot)\n self.rootStructureName = newRoot\n\n\n \n def addInstance(self,layoutToAdd,nameOfLayout=0,offsetInMicrons=(0,0),mirror=None,rotate=None):\n \"\"\"\n Method to insert one layout into another at a particular offset.\n \"\"\"\n offsetInLayoutUnits = (self.userUnits(offsetInMicrons[0]),self.userUnits(offsetInMicrons[1]))\n if self.debug: \n debug.info(0,\"DEBUG: GdsMill vlsiLayout: addInstance: type {0}, nameOfLayout {1}\".format(type(layoutToAdd),nameOfLayout))\n debug.info(0,\"DEBUG: name={0} 
offset={1} mirror={2} rotate={3}\".format(layoutToAdd.rootStructureName,offsetInMicrons, mirror, rotate))\n\n\n\n # Determine if we are instantiating the root design of \n # layoutToAdd (default) or nameOfLayout\n if nameOfLayout == 0:\n StructureFound = True\n StructureName = layoutToAdd.rootStructureName\n else:\n StructureName = nameOfLayout #layoutToAdd\n StructureFound = False\n for structure in layoutToAdd.structures:\n if StructureName in structure: \n if self.debug:\n debug.info(1,\"DEBUG: Structure %s Found\"%StructureName)\n StructureFound = True\n\n debug.check(StructureFound,\"Could not find layout to instantiate {}\".format(StructureName))\n\n\n # If layoutToAdd is a unique object (not this), then copy hierarchy, \n # otherwise, if it is a text name of an internal structure, use it.\n\n if layoutToAdd != self:\n #first, we need to combine the structure dictionaries from both layouts\n for structure in layoutToAdd.structures:\n if structure not in self.structures:\n self.structures[structure]=layoutToAdd.structures[structure]\n #also combine the \"layers in use\" list\n for layerNumber in layoutToAdd.layerNumbersInUse:\n if layerNumber not in self.layerNumbersInUse:\n self.layerNumbersInUse.append(layerNumber)\n\n #add a reference to the new layout structure in this layout's root\n layoutToAddSref = GdsSref()\n layoutToAddSref.sName = StructureName\n layoutToAddSref.coordinates = offsetInLayoutUnits\n\n if mirror or rotate:\n \n layoutToAddSref.transFlags = [0,0,0]\n # transFlags = (mirror around x-axis, magnification, rotation)\n # If magnification or rotation is true, it is the flags are then\n # followed by an amount in the record\n if mirror==\"R90\":\n rotate = 90.0\n if mirror==\"R180\":\n rotate = 180.0\n if mirror==\"R270\":\n rotate = 270.0\n if rotate:\n #layoutToAddSref.transFlags[2] = 1\n layoutToAddSref.rotateAngle = rotate\n if mirror == \"x\" or mirror == \"MX\":\n layoutToAddSref.transFlags[0] = 1\n if mirror == \"y\" or mirror == \"MY\": #NOTE: \"MY\" option will override specified rotate angle\n layoutToAddSref.transFlags[0] = 1\n #layoutToAddSref.transFlags[2] = 1\n layoutToAddSref.rotateAngle = 180.0\n if mirror == \"xy\" or mirror == \"XY\": #NOTE: \"XY\" option will override specified rotate angle\n #layoutToAddSref.transFlags[2] = 1\n layoutToAddSref.rotateAngle = 180.0\n\n #add the sref to the root structure\n self.structures[self.rootStructureName].srefs.append(layoutToAddSref)\n \n def addBox(self,layerNumber=0, purposeNumber=None, offsetInMicrons=(0,0), width=1.0, height=1.0,center=False):\n \"\"\"\n Method to add a box to a layout\n \"\"\"\n offsetInLayoutUnits = (self.userUnits(offsetInMicrons[0]),self.userUnits(offsetInMicrons[1]))\n #print(\"addBox:offsetInLayoutUnits\",offsetInLayoutUnits)\n widthInLayoutUnits = self.userUnits(width)\n heightInLayoutUnits = self.userUnits(height)\n #print(\"offsetInLayoutUnits\",widthInLayoutUnits,\"heightInLayoutUnits\",heightInLayoutUnits)\n if not center:\n coordinates=[offsetInLayoutUnits,\n (offsetInLayoutUnits[0]+widthInLayoutUnits,offsetInLayoutUnits[1]),\n (offsetInLayoutUnits[0]+widthInLayoutUnits,offsetInLayoutUnits[1]+heightInLayoutUnits),\n (offsetInLayoutUnits[0],offsetInLayoutUnits[1]+heightInLayoutUnits),\n offsetInLayoutUnits]\n else:\n startPoint = (offsetInLayoutUnits[0]-widthInLayoutUnits/2.0, offsetInLayoutUnits[1]-heightInLayoutUnits/2.0) \n coordinates=[startPoint,\n (startPoint[0]+widthInLayoutUnits,startPoint[1]),\n (startPoint[0]+widthInLayoutUnits,startPoint[1]+heightInLayoutUnits),\n 
(startPoint[0],startPoint[1]+heightInLayoutUnits),\n startPoint]\n\n boundaryToAdd = GdsBoundary()\n boundaryToAdd.drawingLayer = layerNumber\n boundaryToAdd.dataType = 0\n boundaryToAdd.coordinates = coordinates\n boundaryToAdd.purposeLayer = purposeNumber\n #add the sref to the root structure\n self.structures[self.rootStructureName].boundaries.append(boundaryToAdd)\n \n def addPath(self, layerNumber=0, purposeNumber = None, coordinates=[(0,0)], width=1.0):\n \"\"\"\n Method to add a path to a layout\n \"\"\"\n widthInLayoutUnits = self.userUnits(width)\n layoutUnitCoordinates = []\n #first convert to proper units\n for coordinate in coordinates:\n cX = self.userUnits(coordinate[0])\n cY = self.userUnits(coordinate[1])\n layoutUnitCoordinates.append((cX,cY))\n pathToAdd = GdsPath()\n pathToAdd.drawingLayer=layerNumber\n pathToAdd.purposeLayer = purposeNumber\n pathToAdd.pathWidth=widthInLayoutUnits\n pathToAdd.coordinates=layoutUnitCoordinates\n #add the sref to the root structure\n self.structures[self.rootStructureName].paths.append(pathToAdd)\n \n def addText(self, text, layerNumber=0, purposeNumber = None, offsetInMicrons=(0,0), magnification=0.1, rotate = None):\n offsetInLayoutUnits = (self.userUnits(offsetInMicrons[0]),self.userUnits(offsetInMicrons[1]))\n textToAdd = GdsText()\n textToAdd.drawingLayer = layerNumber\n textToAdd.purposeLayer = purposeNumber\n textToAdd.dataType = 0\n textToAdd.coordinates = [offsetInLayoutUnits]\n textToAdd.transFlags = [0,0,0] \n if(len(text)%2 == 1):\n text = text + '\\x00'\n textToAdd.textString = text\n #textToAdd.transFlags[1] = 1\n textToAdd.magFactor = magnification\n if rotate:\n #textToAdd.transFlags[2] = 1\n textToAdd.rotateAngle = rotate\n #add the sref to the root structure\n self.structures[self.rootStructureName].texts.append(textToAdd)\n \n def isBounded(self,testPoint,startPoint,endPoint):\n #these arguments are touples of (x,y) coordinates\n if testPoint == None:\n return 0\n if(testPoint[0]<=max(endPoint[0],startPoint[0]) and \\\n testPoint[0]>=min(endPoint[0],startPoint[0]) and \\\n testPoint[1]<=max(endPoint[1],startPoint[1]) and \\\n testPoint[1]>=min(endPoint[1],startPoint[1])):\n return 1\n else:\n return 0\n \n def intersectionPoint(self,startPoint1,endPoint1,startPoint2,endPoint2):\n if((endPoint1[0]-startPoint1[0])!=0 and (endPoint2[0]-startPoint2[0])!=0):\n pSlope = (endPoint1[1]-startPoint1[1])/(endPoint1[0]-startPoint1[0])\n pIntercept = startPoint1[1]-pSlope*startPoint1[0]\n qSlope = (endPoint2[1]-startPoint2[1])/(endPoint2[0]-startPoint2[0])\n qIntercept = startPoint2[1]-qSlope*startPoint2[0]\n if(pSlope!=qSlope):\n newX=(qIntercept-pIntercept)/(pSlope-qSlope)\n newY=pSlope*newX+pIntercept\n else:\n #parallel lines can't intersect\n newX=None\n newY=None\n elif((endPoint1[0]-startPoint1[0])==0 and (endPoint2[0]-startPoint2[0])==0):\n #two vertical lines cannot intersect\n newX = None\n newY = None\n elif((endPoint1[0]-startPoint1[0])==0 and (endPoint2[0]-startPoint2[0])!=0):\n qSlope = (endPoint2[1]-startPoint2[1])/(endPoint2[0]-startPoint2[0])\n qIntercept = startPoint2[1]-qSlope*startPoint2[0] \n newX=endPoint1[0]\n newY=qSlope*newX+qIntercept\n elif((endPoint1[0]-startPoint1[0])!=0 and (endPoint2[0]-startPoint2[0])==0):\n pSlope = (endPoint1[1]-startPoint1[1])/(endPoint1[0]-startPoint1[0])\n pIntercept = startPoint1[1]-pSlope*startPoint1[0]\n newX=endPoint2[0]\n newY=pSlope*newX+pIntercept\n return (newX,newY)\n \n def isCollinear(self,testPoint,point1,point2):\n slope1 = 
(testPoint[1]-point1[1])/(testPoint[0]-point1[0])\n slope2 = (point2[1]-point1[1])/(point2[0]-point1[0])\n if slope1 == slope2:\n return True\n return False\n \n def doShapesIntersect(self,shape1Coordinates, shape2Coordinates):\n \"\"\"\n Utility function to determine if 2 arbitrary shapes intersect.\n We define intersection by taking pairs of points in each shape (assuming they are in order)\n and seeing if any of the lines formed by these pais intersect.\n \"\"\"\n for shape1Index in range(0,len(shape1Coordinates)-1):\n for shape2Index in range(0,len(shape2Coordinates)-1):\n startPoint1 = shape1Coordinates[shape1Index]\n endPoint1 = shape1Coordinates[shape1Index+1]\n startPoint2 = shape2Coordinates[shape2Index]\n endPoint2 = shape2Coordinates[shape2Index+1]\n intersect = self.intersectionPoint(startPoint1,endPoint1,startPoint2,endPoint2)\n if(self.isBounded(intersect,startPoint1,endPoint1) and self.isBounded(intersect,startPoint2,endPoint2)):\n return True #these shapes overlap!\n return False #these shapes are ok\n \n def isPointInsideOfBox(self,pointCoordinates,boxCoordinates):\n \"\"\"\n Check if a point is contained in the shape\n \"\"\"\n debug.check(len(boxCoordinates)==4,\"Invalid number of coordinates for box.\")\n leftBound = boxCoordinates[0][0]\n rightBound = boxCoordinates[0][0]\n topBound = boxCoordinates[0][1]\n bottomBound = boxCoordinates[0][1]\n for point in boxCoordinates:\n if point[0]<leftBound:\n leftBound = point[0]\n if point[0]>rightBound:\n rightBound = point[0]\n if point[1]<bottomBound:\n bottomBound = point[1]\n if point[1]>topBound:\n topBound = point[1]\n if(pointCoordinates[0]>rightBound or\n pointCoordinates[0]<leftBound or\n pointCoordinates[1]>topBound or\n pointCoordinates[1]<bottomBound):\n return False\n return True\n \n def isShapeInsideOfBox(self,shapeCoordinates, boxCoordinates):\n \"\"\"\n Go through every point in the shape to test if they are all inside the box.\n \"\"\"\n for point in shapeCoordinates:\n if not self.isPointInsideOfBox(point,boxCoordinates):\n return False\n return True\n \n \n def fillAreaDensity(self, layerToFill = 0, offsetInMicrons = (0,0), coverageWidth = 100.0, coverageHeight = 100.0, minSpacing = 0.22, blockSize = 1.0):\n effectiveBlock = blockSize+minSpacing\n widthInBlocks = int(coverageWidth/effectiveBlock)\n heightInBlocks = int(coverageHeight/effectiveBlock)\n passFailRecord = []\n\n print(\"Filling layer:\",layerToFill)\n def isThisBlockOk(startingStructureName,coordinates,rotateAngle=None):\n #go through every boundary and check\n for boundary in self.structures[startingStructureName].boundaries:\n #only test shapes on the same layer\n if(boundary.drawingLayer == layerToFill):\n #remap coordinates\n shiftedBoundaryCoordinates = []\n for shapeCoordinate in boundary.rotatedCoordinates(rotateAngle):\n shiftedBoundaryCoordinates.append((shapeCoordinate[0]+coordinates[0],shapeCoordinate[1]+coordinates[1]))\n joint = self.doShapesIntersect(self.tempCoordinates, shiftedBoundaryCoordinates)\n if joint:\n self.tempPassFail = False \n common = self.isShapeInsideOfBox(shiftedBoundaryCoordinates,self.tempCoordinates)\n if common:\n self.tempPassFail = False\n for path in self.structures[startingStructureName].paths:\n #only test shapes on the same layer\n if(path.drawingLayer == layerToFill):\n #remap coordinates\n shiftedBoundaryCoordinates = []\n for shapeCoordinate in path.equivalentBoundaryCoordinates(rotateAngle):\n shiftedBoundaryCoordinates.append((shapeCoordinate[0]+coordinates[0],shapeCoordinate[1]+coordinates[1]))\n 
joint = self.doShapesIntersect(self.tempCoordinates, shiftedBoundaryCoordinates)\n if joint:\n self.tempPassFail = False \n common = self.isShapeInsideOfBox(shiftedBoundaryCoordinates,self.tempCoordinates)\n if common:\n self.tempPassFail = False\n \n for yIndex in range(0,heightInBlocks):\n for xIndex in range(0,widthInBlocks):\n percentDone = (float((yIndex*heightInBlocks)+xIndex) / (heightInBlocks*widthInBlocks))*100\n blockX = (xIndex*effectiveBlock)+offsetInMicrons[0]\n blockY = (yIndex*effectiveBlock)+offsetInMicrons[1]\n self.tempCoordinates=[(self.userUnits(blockX-minSpacing),self.userUnits(blockY-minSpacing)),\n (self.userUnits(blockX-minSpacing),self.userUnits(blockY+effectiveBlock)),\n (self.userUnits(blockX+effectiveBlock),self.userUnits(blockY+effectiveBlock)),\n (self.userUnits(blockX+effectiveBlock),self.userUnits(blockY-minSpacing)),\n (self.userUnits(blockX-minSpacing),self.userUnits(blockY-minSpacing))]\n self.tempPassFail = True\n #go through the hierarchy and see if the block will fit\n self.traverseTheHierarchy(delegateFunction = isThisBlockOk)\n #if its bad, this global tempPassFail will be false\n #if true, we can add the block\n passFailRecord.append(self.tempPassFail)\n print(\"Percent Complete:\"+str(percentDone))\n\n \n passFailIndex=0\n for yIndex in range(0,heightInBlocks):\n for xIndex in range(0,widthInBlocks):\n blockX = (xIndex*effectiveBlock)+offsetInMicrons[0]\n blockY = (yIndex*effectiveBlock)+offsetInMicrons[1]\n if passFailRecord[passFailIndex]:\n self.addBox(layerToFill, (blockX,blockY), width=blockSize, height=blockSize)\n passFailIndex += 1\n print(\"Done\\n\\n\")\n\n def getLayoutBorder(self,borderlayer):\n cellSizeMicron=None\n for boundary in self.structures[self.rootStructureName].boundaries:\n if boundary.drawingLayer==borderlayer:\n if self.debug:\n debug.info(1,\"Find border \"+str(boundary.coordinates))\n left_bottom=boundary.coordinates[0]\n right_top=boundary.coordinates[2]\n cellSize=[right_top[0]-left_bottom[0],right_top[1]-left_bottom[1]]\n cellSizeMicron=[cellSize[0]*self.units[0],cellSize[1]*self.units[0]]\n if not(cellSizeMicron):\n print(\"Error: \"+str(self.rootStructureName)+\".cell_size information not found yet\")\n return cellSizeMicron\n\n def measureSize(self,startStructure):\n self.rootStructureName=startStructure\n self.populateCoordinateMap()\n cellBoundary = [None, None, None, None]\n for TreeUnit in self.xyTree:\n cellBoundary=self.measureSizeInStructure(TreeUnit,cellBoundary)\n cellSize=[cellBoundary[2]-cellBoundary[0],cellBoundary[3]-cellBoundary[1]]\n cellSizeMicron=[cellSize[0]*self.units[0],cellSize[1]*self.units[0]]\n return cellSizeMicron\n\n def measureBoundary(self,startStructure):\n self.rootStructureName=startStructure\n self.populateCoordinateMap()\n cellBoundary = [None, None, None, None]\n for TreeUnit in self.xyTree:\n cellBoundary=self.measureSizeInStructure(TreeUnit,cellBoundary)\n return [[self.units[0]*cellBoundary[0],self.units[0]*cellBoundary[1]],\n [self.units[0]*cellBoundary[2],self.units[0]*cellBoundary[3]]]\n \n def measureSizeInStructure(self,structure,cellBoundary):\n (structureName,structureOrigin,structureuVector,structurevVector)=structure\n for boundary in self.structures[str(structureName)].boundaries:\n left_bottom=boundary.coordinates[0]\n right_top=boundary.coordinates[2]\n thisBoundary=[left_bottom[0],left_bottom[1],right_top[0],right_top[1]]\n thisBoundary=self.transformRectangle(thisBoundary,structureuVector,structurevVector)\n 
thisBoundary=[thisBoundary[0]+structureOrigin[0],thisBoundary[1]+structureOrigin[1],\n thisBoundary[2]+structureOrigin[0],thisBoundary[3]+structureOrigin[1]]\n cellBoundary=self.updateBoundary(thisBoundary,cellBoundary)\n return cellBoundary\n \n def updateBoundary(self,thisBoundary,cellBoundary):\n [left_bott_X,left_bott_Y,right_top_X,right_top_Y]=thisBoundary\n # If any are None\n if not (cellBoundary[0] and cellBoundary[1] and cellBoundary[2] and cellBoundary[3]):\n cellBoundary=thisBoundary\n else:\n if cellBoundary[0]>left_bott_X:\n cellBoundary[0]=left_bott_X\n if cellBoundary[1]>left_bott_Y:\n cellBoundary[1]=left_bott_Y\n if cellBoundary[2]<right_top_X:\n cellBoundary[2]=right_top_X\n if cellBoundary[3]<right_top_Y:\n cellBoundary[3]=right_top_Y\n return cellBoundary\n\n\n def getTexts(self, layer):\n \"\"\"\n Get all of the labels on a given layer only at the root level.\n \"\"\"\n text_list = []\n for Text in self.structures[self.rootStructureName].texts:\n if Text.drawingLayer == layer:\n text_list.append(Text)\n return text_list\n \n def getPinShape(self, pin_name):\n \"\"\"\n Search for a pin label and return the largest enclosing rectangle\n on the same layer as the pin label.\n If there are multiple pin lists, return the max of each.\n \"\"\"\n pin_map = self.pins[pin_name]\n max_pins = []\n for pin_list in pin_map:\n max_pin = None\n max_area = 0\n for pin in pin_list:\n (layer,boundary) = pin\n new_area = boundaryArea(boundary)\n if max_pin == None or new_area>max_area:\n max_pin = pin\n max_area = new_area\n max_pins.append(max_pin)\n\n return max_pins\n \n\n def getAllPinShapes(self, pin_name):\n \"\"\"\n Search for a pin label and return ALL the enclosing rectangles on the same layer\n as the pin label.\n \"\"\"\n shape_list = []\n pin_map = self.pins[pin_name]\n for pin_list in pin_map:\n for pin in pin_list:\n (pin_layer, boundary) = pin \n shape_list.append(pin)\n\n return shape_list\n \n\n def processLabelPins(self, layer):\n \"\"\"\n Find all text labels and create a map to a list of shapes that\n they enclose on the given layer.\n \"\"\"\n # Get the labels on a layer in the root level\n labels = self.getTexts(layer)\n # Get all of the shapes on the layer at all levels\n # and transform them to the current level\n shapes = self.getAllShapes(layer)\n\n for label in labels:\n label_coordinate = label.coordinates[0]\n user_coordinate = [x*self.units[0] for x in label_coordinate]\n pin_shapes = []\n for boundary in shapes:\n if self.labelInRectangle(user_coordinate,boundary):\n pin_shapes.append((layer, boundary))\n\n label_text = label.textString\n # Remove the padding if it exists\n if label_text[-1] == \"\\x00\":\n label_text = label_text[0:-1]\n\n try:\n self.pins[label_text]\n except KeyError:\n self.pins[label_text] = []\n self.pins[label_text].append(pin_shapes)\n \n \n\n def getAllShapes(self,layer):\n \"\"\"\n Return all gshapes on a given layer in [llx, lly, urx, ury] format and \n user units.\n \"\"\"\n boundaries = set()\n for TreeUnit in self.xyTree:\n #print(TreeUnit[0])\n boundaries.update(self.getShapesInStructure(layer,TreeUnit))\n\n # Convert to user units\n user_boundaries = []\n for boundary in boundaries:\n user_boundaries.append([boundary[0]*self.units[0],boundary[1]*self.units[0],\n boundary[2]*self.units[0],boundary[3]*self.units[0]])\n \n return user_boundaries\n\n\n def getShapesInStructure(self,layer,structure):\n \"\"\" \n Go through all the shapes in a structure and return the list of shapes in\n the form [llx, lly, urx, ury]\n \"\"\"\n\n 
(structureName,structureOrigin,structureuVector,structurevVector)=structure\n #print(structureName,\"u\",structureuVector.transpose(),\"v\",structurevVector.transpose(),\"o\",structureOrigin.transpose())\n boundaries = []\n for boundary in self.structures[str(structureName)].boundaries:\n # FIXME: Right now, this only supports rectangular shapes!\n # We should trigger an error but some FreePDK45 library cells contain paths.\n # These get saved fine, but we cannot parse them as blockages... \n #debug.check(len(boundary.coordinates)==5,\"Non-rectangular shapes are not supported.\")\n if len(boundary.coordinates)!=5:\n continue\n if layer==boundary.drawingLayer:\n left_bottom=boundary.coordinates[0]\n right_top=boundary.coordinates[2]\n # Rectangle is [leftx, bottomy, rightx, topy].\n boundaryRect=[left_bottom[0],left_bottom[1],right_top[0],right_top[1]]\n # perform the rotation\n boundaryRect=self.transformRectangle(boundaryRect,structureuVector,structurevVector)\n # add the offset and make it a tuple\n boundaryRect=(boundaryRect[0]+structureOrigin[0].item(),boundaryRect[1]+structureOrigin[1].item(),\n boundaryRect[2]+structureOrigin[0].item(),boundaryRect[3]+structureOrigin[1].item())\n boundaries.append(boundaryRect)\n \n return boundaries\n \n def transformRectangle(self,originalRectangle,uVector,vVector):\n \"\"\"\n Transforms the four coordinates of a rectangle in space\n and recomputes the left, bottom, right, top values.\n \"\"\"\n leftBottom=[originalRectangle[0],originalRectangle[1]]\n leftBottom=self.transformCoordinate(leftBottom,uVector,vVector)\n\n rightTop=[originalRectangle[2],originalRectangle[3]]\n rightTop=self.transformCoordinate(rightTop,uVector,vVector)\n\n left=min(leftBottom[0],rightTop[0])\n bottom=min(leftBottom[1],rightTop[1])\n right=max(leftBottom[0],rightTop[0])\n top=max(leftBottom[1],rightTop[1])\n\n newRectangle = [left,bottom,right,top]\n return newRectangle\n\n def transformCoordinate(self,coordinate,uVector,vVector):\n \"\"\"\n Rotate a coordinate in space.\n \"\"\"\n # MRG: 9/3/18 Incorrect matrixi multiplication! \n # This is fixed to be:\n # |u[0] v[0]| |x| |x'|\n # |u[1] v[1]|x|y|=|y'|\n x=coordinate[0]*uVector[0][0]+coordinate[1]*vVector[0][0]\n y=coordinate[0]*uVector[1][0]+coordinate[1]*vVector[1][0]\n transformCoordinate=[x,y]\n\n return transformCoordinate\n\n\n def labelInRectangle(self,coordinate,rectangle):\n \"\"\"\n Checks if a coordinate is within a given rectangle. Rectangle is [leftx, bottomy, rightx, topy].\n \"\"\"\n coordinate_In_Rectangle_x_range=(coordinate[0]>=rectangle[0])&(coordinate[0]<=rectangle[2])\n coordinate_In_Rectangle_y_range=(coordinate[1]>=rectangle[1])&(coordinate[1]<=rectangle[3])\n if coordinate_In_Rectangle_x_range & coordinate_In_Rectangle_y_range:\n return True\n else:\n return False\n\n \ndef boundaryArea(A):\n \"\"\"\n Returns boundary area for sorting.\n \"\"\"\n area_A=(A[2]-A[0])*(A[3]-A[1])\n return area_A\n\n"
]
| [
[
"numpy.array",
"numpy.dot"
]
]
|
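Note on the record above: transformCoordinate (see the "MRG: 9/3/18" comment in it) treats uVector and vVector as the columns of a 2x2 rotation matrix applied to each point. A standalone numpy sketch of that convention, outside the GDSII hierarchy bookkeeping:

import math
import numpy as np

def basis_for_angle(deg):
    # Columns of the CCW rotation matrix, analogous to the record's
    # uVector/vVector after one pass through traverseTheHierarchy.
    a = math.radians(deg)
    u = np.array([math.cos(a), math.sin(a)])   # image of the x axis
    v = np.array([-math.sin(a), math.cos(a)])  # image of the y axis
    return u, v

def transform_point(p, u, v):
    # Same arithmetic as transformCoordinate: [u v] @ p, with u and v
    # taken as the columns of the rotation matrix.
    return (p[0] * u[0] + p[1] * v[0], p[0] * u[1] + p[1] * v[1])

u, v = basis_for_angle(90)
x, y = transform_point((1.0, 0.0), u, v)
assert abs(x) < 1e-9 and abs(y - 1.0) < 1e-9   # (1,0) rotates CCW to (0,1)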
shinsteve/udacity-carnd-vehicledetection-p5 | [
"fa76f7302dfc5a79082c3312eb228ea91762ce5e"
]
| [
"find_cars.py"
]
| [
"import sys\nimport glob\nimport os\nimport pprint\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport cv2\n\nfrom feature_tools import *\nimport bbox_filter\n\n\"\"\"\nUsage: python find_cars.py project_video.mp4\n\"\"\"\n\n\ndef main(video_clip_path):\n # load a pe-trained svc model from a serialized (pickle) file\n dist_pickle = pickle.load(open(\"svc_pickle.p\", \"rb\" ) )\n\n # get attributes of our svc object\n svc = dist_pickle[\"svc\"]\n X_scaler = dist_pickle[\"scaler\"]\n color_space = dist_pickle[\"color_space\"]\n orient = dist_pickle[\"orient\"]\n pix_per_cell = dist_pickle[\"pix_per_cell\"]\n cell_per_block = dist_pickle[\"cell_per_block\"]\n spatial_size = dist_pickle[\"spatial_size\"]\n hist_bins = dist_pickle[\"hist_bins\"]\n\n pprint.pprint(dist_pickle)\n\n # Parameters for sliding window\n ystart = 400\n scale_height_list = [\n # (scale_ratio, height), where yend = ystart + height\n (1.0, 96), (1.12, 96), (1.25, 120), (1.5, 160), (2.0, 256),\n # (1.0, 96), (1.12, 112), (1.25, 128), (1.5, 192), (2.0, 256), (2.5, 256), (3.0, 256), (4.0, 256)\n ]\n\n def find_car_multiscale(img, ystart, scale_height_list):\n all_bboxes = []\n for scale, height in scale_height_list:\n ystop = ystart + height\n bboxes = find_cars(img, ystart, ystop, scale,\n svc, X_scaler, color_space, orient,\n pix_per_cell, cell_per_block, spatial_size, hist_bins)\n all_bboxes.extend(bboxes)\n return all_bboxes\n\n def filter_and_draw_bboxes(img, bboxes, n_box_min_thr, enable_filter=True):\n if enable_filter: # Filter the boxes by using heatmap\n img, heatmap = bbox_filter.draw_filtered_bbox(img, bboxes, n_box_min_thr)\n else:\n for bbox in bboxes:\n cv2.rectangle(img, bbox[0], bbox[1], (255, 0, 0), 4) # Draw the box\n return\n\n def process_test_images(n_box_min_thr=0, enable_filter=True):\n for img_path in glob.glob('test_images/*.jpg'):\n print('processing: ', img_path)\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n bboxes = find_car_multiscale(img, ystart, scale_height_list)\n filter_and_draw_bboxes(img, bboxes, n_box_min_thr, enable_filter)\n\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.imwrite(os.path.join('output_images', os.path.basename(img_path)), img)\n # cv2.imwrite(os.path.join(os.path.basename(img_path)), heatmap * 2)\n return\n\n def process_test_video(n_frame_history=1, n_box_min_thr=0, enable_filter=True):\n import collections\n from itertools import chain\n bbox_history = collections.deque(maxlen=n_frame_history) # use like ring buffer\n\n def pipeline_func(img):\n \"\"\" Precondition: color_space of img is RGB \"\"\"\n bboxes = find_car_multiscale(img, ystart, scale_height_list)\n bbox_history.append(bboxes)\n all_boxes = list(chain.from_iterable(bbox_history)) # flatten\n filter_and_draw_bboxes(img, all_boxes, n_box_min_thr, enable_filter)\n return img\n\n from moviepy.editor import VideoFileClip\n clip1 = VideoFileClip(video_clip_path) # .subclip(24, 26)\n out_path = os.path.join('output_videos', os.path.basename(video_clip_path))\n out_clip = clip1.fl_image(pipeline_func)\n out_clip.write_videofile(out_path, audio=False)\n return\n\n \"\"\" Excecution of pipeline \"\"\"\n if video_clip_path is None:\n process_test_images(n_box_min_thr=0, enable_filter=False)\n # process_test_images(n_box_min_thr=4)\n else:\n process_test_video(10, n_box_min_thr=40)\n # process_test_video(1, n_box_min_thr=0, enable_filter=False)\n return\n\n\n# Define a single function that can extract features using hog sub-sampling and make predictions\ndef 
find_cars(img, ystart, ystop, scale, svc, X_scaler,\n color_space, orient, pix_per_cell, cell_per_block,\n spatial_size, hist_bins):\n\n # Uncomment the following line if you extracted training\n # data from .png images (scaled 0 to 1 by mpimg) and the\n # image you are searching is a .jpg (scaled 0 to 255)\n # img = img.astype(np.float32) / 255\n\n img_tosearch = img[ystart:ystop, :, :]\n ctrans_tosearch = convert_color(img_tosearch, color_space)\n if scale != 1:\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale),\n np.int(imshape[0] / scale)))\n\n ch1 = ctrans_tosearch[:, :, 0]\n ch2 = ctrans_tosearch[:, :, 1]\n ch3 = ctrans_tosearch[:, :, 2]\n\n # Define blocks and steps as above\n nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1\n nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1\n nfeat_per_block = orient * cell_per_block ** 2\n\n # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell\n window = 64\n nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1\n\n # Compute individual channel HOG features for the entire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n\n bboxes = []\n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb * cells_per_step\n xpos = xb * cells_per_step\n # Extract HOG for this patch\n hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos * pix_per_cell\n ytop = ypos * pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))\n\n # Get color features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n combined_feature = np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)\n test_features = X_scaler.transform(combined_feature)\n #test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1)) \n test_prediction = svc.predict(test_features)\n\n if test_prediction == 1:\n xbox_left = np.int(xleft * scale)\n ytop_draw = np.int(ytop * scale)\n win_draw = np.int(window * scale)\n box = ((xbox_left, ytop_draw + ystart),\n (xbox_left + win_draw, ytop_draw + win_draw + ystart))\n bboxes.append(box)\n\n return bboxes\n\n\nif __name__ == '__main__':\n video_clip_path = sys.argv[1] if len(sys.argv) == 2 else None\n if video_clip_path is None:\n print('Video clip is not specified. Process still images under test_images/')\n\n main(video_clip_path)\n"
]
| [
[
"numpy.hstack",
"numpy.int"
]
]
|
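Note on the record above: bbox_filter.draw_filtered_bbox is project-local and not included in the record. A common way to realize its n_box_min_thr parameter is to accumulate the detection boxes into a heatmap, zero out weak pixels, and keep one rectangle per connected hot region — a sketch under that assumption, using scipy's connected-component labeling:

import numpy as np
from scipy.ndimage import label

def heatmap_regions(shape, bboxes, min_overlap):
    # Add 1 inside every box ((x1, y1), (x2, y2)), threshold, then return one
    # bounding rectangle per connected region of surviving pixels.
    heat = np.zeros(shape, dtype=np.int32)
    for (x1, y1), (x2, y2) in bboxes:
        heat[y1:y2, x1:x2] += 1
    heat[heat < min_overlap] = 0
    labels, n = label(heat)
    regions = []
    for i in range(1, n + 1):
        ys, xs = np.nonzero(labels == i)
        regions.append(((int(xs.min()), int(ys.min())),
                        (int(xs.max()), int(ys.max()))))
    return regions

boxes = [((10, 10), (50, 50)), ((30, 30), (70, 70))]
print(heatmap_regions((100, 100), boxes, min_overlap=2))  # only the overlap survives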
RafaelAdao/cursomachinelearningalura | [
"b4c4ac3f675adb07df1b16dd9fd515001a16727a"
]
| [
"classifica_acesso.py"
]
| [
"from dados import carregar_acessos\r\n\r\nX, Y = carregar_acessos()\r\n\r\ntreino_dados = X[:90]\r\ntreino_marcacoes = Y[:90]\r\n\r\nteste_dados = X[-9:]\r\nteste_marcacoes = Y[-9:]\r\n\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nmodelo = MultinomialNB()\r\nmodelo.fit(treino_dados, treino_marcacoes)\r\nresultado = modelo.predict(teste_dados)\r\n\r\ndiferencas = resultado - teste_marcacoes\r\nacertos = [d for d in diferencas if d == 0]\r\n\r\ntotal_de_acertos = len(acertos)\r\ntotal_de_elementos = len(teste_marcacoes)\r\ntaxa_acerto = 100.0 * total_de_acertos / total_de_elementos\r\n\r\nprint(taxa_acerto)\r\nprint(total_de_elementos)"
]
| [
[
"sklearn.naive_bayes.MultinomialNB"
]
]
|
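Note on the record above: its accuracy arithmetic (count the zero entries of resultado - teste_marcacoes) computes the same number sklearn's accuracy_score reports. A self-contained sketch on toy data standing in for the record's carregar_acessos():

import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score

# Toy stand-in for carregar_acessos(): binary page-visit features per user.
X = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 1], [0, 0, 1], [1, 0, 0], [0, 1, 1]])
Y = np.array([1, 0, 1, 0, 1, 0])

modelo = MultinomialNB()
modelo.fit(X[:4], Y[:4])            # train on the first 4 rows
resultado = modelo.predict(X[4:])   # hold out the last 2 rows

# Same value as 100.0 * len([d for d in resultado - Y[4:] if d == 0]) / 2
print(100.0 * accuracy_score(Y[4:], resultado))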
dustymugs/PerceptualSimilarity | [
"f83089bd744ec132860b00d6163e36ab71da47f8"
]
| [
"PerceptualSimilarity/data/dataset/twoafc_dataset.py"
]
| [
"from __future__ import absolute_import\n\nimport os.path\nimport torchvision.transforms as transforms\nfrom data.dataset.base_dataset import BaseDataset\nfrom data.image_folder import make_dataset\nfrom PIL import Image\nimport numpy as np\nimport torch\n\nclass TwoAFCDataset(BaseDataset):\n def initialize(self, dataroots, load_size=64):\n if(not isinstance(dataroots,list)):\n dataroots = [dataroots,]\n self.roots = dataroots\n self.load_size = load_size\n\n # image directory\n self.dir_ref = [os.path.join(root, 'ref') for root in self.roots]\n self.ref_paths = make_dataset(self.dir_ref)\n self.ref_paths = sorted(self.ref_paths)\n\n self.dir_p0 = [os.path.join(root, 'p0') for root in self.roots]\n self.p0_paths = make_dataset(self.dir_p0)\n self.p0_paths = sorted(self.p0_paths)\n\n self.dir_p1 = [os.path.join(root, 'p1') for root in self.roots]\n self.p1_paths = make_dataset(self.dir_p1)\n self.p1_paths = sorted(self.p1_paths)\n\n transform_list = []\n transform_list.append(transforms.Scale(load_size))\n transform_list += [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),(0.5, 0.5, 0.5))]\n\n self.transform = transforms.Compose(transform_list)\n\n # judgement directory\n self.dir_J = [os.path.join(root, 'judge') for root in self.roots]\n self.judge_paths = make_dataset(self.dir_J,mode='np')\n self.judge_paths = sorted(self.judge_paths)\n\n def __getitem__(self, index):\n p0_path = self.p0_paths[index]\n p0_img_ = Image.open(p0_path).convert('RGB')\n p0_img = self.transform(p0_img_)\n\n p1_path = self.p1_paths[index]\n p1_img_ = Image.open(p1_path).convert('RGB')\n p1_img = self.transform(p1_img_)\n\n ref_path = self.ref_paths[index]\n ref_img_ = Image.open(ref_path).convert('RGB')\n ref_img = self.transform(ref_img_)\n\n judge_path = self.judge_paths[index]\n # judge_img = (np.load(judge_path)*2.-1.).reshape((1,1,1,)) # [-1,1]\n judge_img = np.load(judge_path).reshape((1,1,1,)) # [0,1]\n\n judge_img = torch.FloatTensor(judge_img)\n\n return {'p0': p0_img, 'p1': p1_img, 'ref': ref_img, 'judge': judge_img,\n 'p0_path': p0_path, 'p1_path': p1_path, 'ref_path': ref_path, 'judge_path': judge_path}\n\n def __len__(self):\n return len(self.p0_paths)\n"
]
| [
[
"torch.FloatTensor",
"numpy.load"
]
]
|
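Note on the record above: transforms.Scale was later renamed transforms.Resize in torchvision, so the same pipeline on a current install looks like this (a sketch; the square input is chosen only to make the output shape deterministic):

import torchvision.transforms as transforms
from PIL import Image

load_size = 64
transform = transforms.Compose([
    transforms.Resize(load_size),   # transforms.Scale in the record, renamed upstream
    transforms.ToTensor(),          # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # map to [-1, 1]
])

img = Image.new('RGB', (96, 96))    # stand-in for Image.open(path).convert('RGB')
t = transform(img)
print(t.shape)                      # torch.Size([3, 64, 64])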
ZNLP/ATSum | [
"02e92489ebfa4652a4f3354c578f3a64c34ff64b"
]
| [
"ATS-A/beaver/model/transformer.py"
]
| [
"# -*- coding: utf-8 -*-\r\nimport math\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass FeedForward(nn.Module):\r\n def __init__(self, hidden_size, inner_size, dropout):\r\n super(FeedForward, self).__init__()\r\n self.linear_in = nn.Linear(hidden_size, inner_size, bias=False)\r\n self.linear_out = nn.Linear(inner_size, hidden_size, bias=False)\r\n self.relu = nn.ReLU()\r\n self.dropout = nn.Dropout(dropout)\r\n\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n nn.init.xavier_uniform_(self.linear_in.weight)\r\n nn.init.xavier_uniform_(self.linear_out.weight)\r\n\r\n def forward(self, x):\r\n y = self.linear_in(x)\r\n y = self.relu(y)\r\n y = self.dropout(y)\r\n y = self.linear_out(y)\r\n return y\r\n\r\n\r\nclass EncoderLayer(nn.Module):\r\n\r\n def __init__(self, hidden_size, dropout, head_count, ff_size):\r\n super(EncoderLayer, self).__init__()\r\n\r\n self.self_attn = MultiHeadedAttention(head_count, hidden_size, dropout)\r\n self.feed_forward = FeedForward(hidden_size, ff_size, dropout)\r\n self.dropout = nn.ModuleList([nn.Dropout(dropout) for _ in range(2)])\r\n self.norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(2)])\r\n\r\n def forward(self, x, mask):\r\n # self attention\r\n y, _ = self.self_attn(self.norm[0](x), mask=mask)\r\n x = x + self.dropout[0](y)\r\n\r\n # feed forward\r\n y = self.feed_forward(self.norm[1](x))\r\n x = x + self.dropout[1](y)\r\n return x\r\n\r\n\r\nclass Encoder(nn.Module):\r\n def __init__(self, num_layers, num_heads, hidden_size, dropout, ff_size, embedding):\r\n self.num_layers = num_layers\r\n\r\n super(Encoder, self).__init__()\r\n self.embedding = embedding\r\n self.layers = nn.ModuleList([EncoderLayer(hidden_size, dropout, num_heads, ff_size) for _ in range(num_layers)])\r\n self.norm = nn.LayerNorm(hidden_size)\r\n\r\n def forward(self, src, src_pad):\r\n src_mask = src_pad.unsqueeze(1)\r\n output = self.embedding(src)\r\n for i in range(self.num_layers):\r\n output = self.layers[i](output, src_mask)\r\n return self.norm(output)\r\n\r\n\r\nclass DecoderLayer(nn.Module):\r\n\r\n def __init__(self, hidden_size, dropout, head_count, ff_size):\r\n super(DecoderLayer, self).__init__()\r\n self.self_attn = MultiHeadedAttention(head_count, hidden_size, dropout)\r\n self.src_attn = MultiHeadedAttention(head_count, hidden_size, dropout)\r\n self.feed_forward = FeedForward(hidden_size, ff_size, dropout)\r\n self.norm = nn.ModuleList([nn.LayerNorm(hidden_size, eps=1e-6) for _ in range(3)])\r\n self.dropout = nn.ModuleList([nn.Dropout(dropout) for _ in range(3)])\r\n\r\n def forward(self, x, enc_out, src_mask, tgt_mask, previous=None):\r\n all_input = x if previous is None else torch.cat((previous, x), dim=1)\r\n\r\n # self attention\r\n y, _ = self.self_attn(self.norm[0](x), self.norm[0](all_input), mask=tgt_mask)\r\n x = x + self.dropout[0](y)\r\n\r\n # encoder decoder attention\r\n y, weights = self.src_attn(self.norm[1](x), enc_out, mask=src_mask)\r\n x = x + self.dropout[1](y)\r\n\r\n # feed forward\r\n y = self.feed_forward(self.norm[2](x))\r\n x = x + self.dropout[2](y)\r\n return x, all_input, weights\r\n\r\n\r\nclass Decoder(nn.Module):\r\n\r\n def __init__(self, num_layers, num_heads, hidden_size, dropout, ff_size, embedding):\r\n self.num_layers = num_layers\r\n\r\n super(Decoder, self).__init__()\r\n self.embedding = embedding\r\n self.layers = nn.ModuleList([DecoderLayer(hidden_size, dropout, num_heads, ff_size) for _ in range(num_layers)])\r\n self.register_buffer(\"upper_triangle\", 
torch.triu(torch.ones(1000, 1000), diagonal=1).byte())\r\n self.register_buffer(\"zero_mask\", torch.zeros(1).byte())\r\n self.norm = nn.LayerNorm(hidden_size, eps=1e-6)\r\n\r\n self.trans_linear1 = nn.Linear(hidden_size, hidden_size, bias=False)\r\n self.trans_linear2 = nn.Linear(hidden_size, 1, bias=False)\r\n self.trans_gate = nn.Sigmoid()\r\n\r\n def forward(self, tgt, enc_out, src_pad, tgt_pad, previous=None, timestep=0):\r\n\r\n output = self.embedding(tgt, timestep)\r\n tgt_len = tgt.size(1)\r\n\r\n src_mask = src_pad.unsqueeze(1)\r\n tgt_mask = tgt_pad.unsqueeze(1)\r\n upper_triangle = self.upper_triangle[:tgt_len, :tgt_len]\r\n\r\n # tgt mask: 0 if not upper and not pad\r\n tgt_mask = torch.gt(tgt_mask + upper_triangle, 0)\r\n saved_inputs = []\r\n for i in range(self.num_layers):\r\n prev_layer = None if previous is None else previous[:, i]\r\n tgt_mask = tgt_mask if previous is None else self.zero_mask\r\n\r\n output, all_input, weights = self.layers[i](output, enc_out, src_mask, tgt_mask, prev_layer)\r\n saved_inputs.append(all_input)\r\n\r\n result1 = self.norm(output)\r\n result2 = torch.stack(saved_inputs, dim=1)\r\n p_trans = self.trans_linear2(self.trans_linear1(output))\r\n p_trans = self.trans_gate(p_trans)\r\n\r\n return result1, result2, p_trans, weights\r\n\r\n\r\nclass TransAttnLayer(nn.Module):\r\n\r\n def __init__(self, hidden_size, dropout, head_count):\r\n super(TransAttnLayer, self).__init__()\r\n self.trans_attn = MultiHeadedAttention(head_count, hidden_size, dropout)\r\n self.norm = nn.LayerNorm(hidden_size, eps=1e-6)\r\n\r\n def forward(self, src, src_trans, trans_mask):\r\n y, weights = self.trans_attn(self.norm(src), self.norm(src_trans), mask=trans_mask)\r\n return weights\r\n\r\nclass TransAttn(nn.Module):\r\n\r\n def __init__(self, num_heads, hidden_size, dropout, embedding):\r\n super(TransAttn, self).__init__()\r\n self.embedding = embedding\r\n self.layer = TransAttnLayer(hidden_size, dropout, num_heads)\r\n\r\n def forward(self, enc_out, src_trans_idx):\r\n # enc_out: batch x src_len x hidden_size\r\n # src_trans_idx: batch x src_len x N\r\n batch_size, src_len, N = src_trans_idx.size()\r\n src_trans_idx = src_trans_idx.view(-1, src_trans_idx.size(2))\r\n enc_out = enc_out.view(-1, 1, enc_out.size(2)) # (batch x src_len) x 1 x hid\r\n src_trans_emb = self.embedding(src_trans_idx) # (batch x src_len) x N x emb\r\n trans_mask = torch.zeros(batch_size*src_len, 1, N).byte().cuda()\r\n weights = self.layer(enc_out, src_trans_emb, trans_mask) # (batch x src_len) x 1 x N\r\n return weights.view(batch_size, src_len, -1)\r\n\r\n\r\nclass MultiHeadedAttention(nn.Module):\r\n\r\n def __init__(self, head_count, model_dim, dropout):\r\n self.dim_per_head = model_dim // head_count\r\n self.head_count = head_count\r\n\r\n super(MultiHeadedAttention, self).__init__()\r\n\r\n self.linear_q = nn.Linear(model_dim, model_dim, bias=False)\r\n self.linear_k = nn.Linear(model_dim, model_dim, bias=False)\r\n self.linear_v = nn.Linear(model_dim, model_dim, bias=False)\r\n\r\n self.softmax = nn.Softmax(dim=-1)\r\n self.dropout = nn.Dropout(dropout)\r\n self.final_linear = nn.Linear(model_dim, model_dim)\r\n\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n nn.init.xavier_uniform_(self.linear_q.weight)\r\n nn.init.xavier_uniform_(self.linear_k.weight)\r\n nn.init.xavier_uniform_(self.linear_v.weight)\r\n nn.init.xavier_uniform_(self.final_linear.weight)\r\n\r\n def forward(self, query, memory=None, mask=None):\r\n memory = query if memory is None else 
memory\r\n\r\n def split_head(x):\r\n # B x L x D => B x h x L x d\r\n return x.view(x.size(0), -1, self.head_count, self.dim_per_head).transpose(1, 2)\r\n\r\n def combine_head(x):\r\n # B x h x L x d => B x L x D\r\n return x.transpose(1, 2).contiguous().view(x.size(0), -1, self.head_count * self.dim_per_head)\r\n\r\n # 1) Project q, k, v.\r\n q = split_head(self.linear_q(query))\r\n k = split_head(self.linear_k(memory))\r\n v = split_head(self.linear_v(memory))\r\n\r\n # 2) Calculate and scale scores.\r\n q = q / math.sqrt(self.dim_per_head)\r\n scores = torch.matmul(q, k.transpose(2, 3))\r\n\r\n mask = mask.unsqueeze(1).expand_as(scores)\r\n scores.masked_fill_(mask, -1e18)\r\n\r\n # 3) Apply attention dropout and compute context vectors.\r\n weights = self.dropout(self.softmax(scores))\r\n context = combine_head(torch.matmul(weights, v))\r\n\r\n avg_weights = torch.sum(weights, dim=1) / self.head_count\r\n return self.final_linear(context), avg_weights\r\n"
]
| [
[
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.stack",
"torch.gt",
"torch.nn.Softmax",
"torch.nn.Sigmoid",
"torch.cat",
"torch.nn.init.xavier_uniform_",
"torch.ones",
"torch.nn.ReLU",
"torch.matmul",
"torch.sum"
]
]
|
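A hedged usage sketch for the Transformer encoder in the code cell above (an illustration, not part of the dataset row). The nn.Embedding stand-in and all sizes are assumptions: the file expects the caller to supply an embedding module (the Decoder's also takes a timestep argument), and its byte masks assume a PyTorch version that still accepts uint8 masks in masked_fill_.

import torch
import torch.nn as nn

vocab, hidden = 100, 64
embedding = nn.Embedding(vocab, hidden)  # stand-in; real callers pass their own
enc = Encoder(num_layers=2, num_heads=4, hidden_size=hidden,
              dropout=0.1, ff_size=128, embedding=embedding)
src = torch.randint(1, vocab, (2, 7))    # batch of 2 sequences of length 7
src_pad = (src == 0).byte()              # 1 marks padding positions
out = enc(src, src_pad)                  # -> (2, 7, 64)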
athmargaritis/PyDMD | [
"82a54a3b3619995682e0be518ce4d596412a42b9"
]
| [
"pydmd/mrdmd.py"
]
| [
"\"\"\"\nDerived module from dmdbase.py for multi-resolution dmd.\n\nReference:\n- Kutz, J. Nathan, Xing Fu, and Steven L. Brunton. Multiresolution Dynamic Mode\nDecomposition. SIAM Journal on Applied Dynamical Systems 15.2 (2016): 713-735.\n\"\"\"\nfrom __future__ import division\nfrom builtins import range\nfrom past.utils import old_div\nimport numpy as np\nimport scipy.linalg\nimport matplotlib.pyplot as plt\n\nfrom .dmdbase import DMDBase\nfrom .dmdoperator import DMDOperator\nfrom .utils import compute_tlsq\n\nclass SubMrDMDOperator(DMDOperator):\n \"\"\"\n Used by MrDMDOperator in order to compute several quantities of interest\n over different views of the snapshots matrix.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param float eigs_divider: The divider of eigs in the evaluation of slow\n modes.\n :param float rho: The maximum eigenvalue amplitude of slow modes\n \"\"\"\n\n def __init__(self, svd_rank, eigs_divider, rho):\n super(SubMrDMDOperator, self).__init__(svd_rank=svd_rank, exact=True,\n rescale_mode=None, forward_backward=False)\n\n self._eigs_divider = eigs_divider\n self._rho = rho\n\n self._slow_modes = None\n\n def compute_operator(self, Xc, Yc):\n \"\"\"\n Compute the low-rank operator, the eigenquantities and slow modes.\n\n :param numpy.ndarray Xc: matrix containing the snapshots x0,..x{n-1} by\n column.\n :param numpy.ndarray Yc: matrix containing the snapshots x1,..x{n} by\n column.\n \"\"\"\n\n U, s, V = self._compute_svd(Xc)\n\n self._Atilde = (np.linalg.multi_dot([U.T.conj(), (Yc), (V)])\n * np.reciprocal(s))\n\n self._compute_eigenquantities()\n self._compute_modes(Yc, U, s, V)\n\n self._slow_modes = (np.abs(old_div(np.log(self.eigenvalues),\n self._eigs_divider))) <= self._rho\n\n def compute_sub_amplitudes(self, Xc, opt):\n \"\"\"\n Compute the ampltitudes for slow modes of this sub DMD operator.\n\n :param numpy.ndarray Xc: matrix containing the snapshots x0,..x{n-1} by\n column.\n :param bool opt: flag to compute optimized DMD.\n :return: The amplitudes for each slow mode.\n :rtype: numpy.ndarray\n \"\"\"\n\n if opt:\n # compute the vandermonde matrix\n omega = old_div(np.log(self.eigs), self.original_time['dt'])\n vander = np.exp(\n np.multiply(*np.meshgrid(omega, self.dmd_timesteps))).T\n\n # perform svd on all the snapshots\n U, s, V = np.linalg.svd(Xc, full_matrices=False)\n\n P = np.multiply(np.dot(self.modes.conj().T, self.modes),\n np.conj(np.dot(vander,\n vander.conj().T)))\n\n tmp = np.linalg.multi_dot([U, np.diag(s), V]).conj().T\n q = np.conj(np.diag(np.linalg.multi_dot([vander, tmp, self.modes])))\n\n # b optimal\n a = np.linalg.solve(P, q)\n else:\n a = np.linalg.lstsq(self.modes, Xc.T[0], rcond=None)[0]\n\n return a\n\n @DMDOperator.modes.getter\n def modes(self):\n if self._slow_modes is None:\n return super(SubMrDMDOperator, self).modes\n else:\n return super(SubMrDMDOperator, self).modes[:, self._slow_modes]\n\n @DMDOperator.eigenvalues.getter\n def eigenvalues(self):\n # we want to access eigenvalues before setting slow_modes, since\n # setting slow_modes requires evaluating eigenvalues\n if self._slow_modes is None:\n return super(SubMrDMDOperator, 
self).eigenvalues\n else:\n return super(SubMrDMDOperator, self).eigenvalues[self._slow_modes]\n\nclass MrDMDOperator(DMDOperator):\n \"\"\"\n Dynamic Mode Decomposition operator for MrDMD.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive integer, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means TLSQ is not applied.\n :param int max_cycles: the maximum number of mode oscillations in any given\n time scale. Default is 1.\n :param int max_level: the maximum number of levels. Default is 6.\n :param bool opt: flag to compute optimal amplitudes. Default is False.\n Doesn't support changing the temporal index of the snapshot used for the\n computation of DMD modes amplitudes.\n \"\"\"\n\n def __init__(self, svd_rank, tlsq_rank, max_cycles, max_level, opt):\n super(MrDMDOperator, self).__init__(svd_rank=svd_rank, exact=True,\n rescale_mode=None, forward_backward=False)\n\n self._tlsq_rank = tlsq_rank\n self._max_cycles = max_cycles\n self._max_level = max_level\n self._opt = opt\n\n self._nyq = 8 * self._max_cycles\n\n # initialization\n self._modes = []\n self._b = []\n self._Atilde = []\n self._eigenvalues = []\n self._nsamples = []\n self._steps = []\n\n @DMDOperator.as_numpy_array.getter\n def as_numpy_array(self):\n raise RuntimeError(\"This property isn't defined\")\n\n def compute_operator(self, snapshots):\n \"\"\"\n Compute the MrDMD operator, modes, amplitudes and eigenvalues.\n\n :param numpy.ndarray snapshots: The snapshots (by column).\n \"\"\"\n\n # To avoid a recursive function, use a FIFO list to simulate the tree\n # structure\n data_queue = [snapshots.copy()]\n\n current_bin = 0\n while data_queue:\n Xraw = data_queue.pop(0)\n\n n_samples = Xraw.shape[1]\n\n step = max(1, int(np.floor(old_div(n_samples, self._nyq))))\n Xsub = Xraw[:, ::step]\n Xc = Xsub[:, :-1]\n Yc = Xsub[:, 1:]\n\n Xc, Yc = compute_tlsq(Xc, Yc, self._tlsq_rank)\n\n rho = old_div(float(self._max_cycles), n_samples)\n sub_operator = SubMrDMDOperator(svd_rank=self._svd_rank,\n eigs_divider=2. 
* np.pi * step, rho=rho)\n sub_operator.compute_operator(Xc, Yc)\n\n modes = sub_operator.modes\n eigs = sub_operator.eigenvalues\n Atilde = sub_operator.as_numpy_array\n b = sub_operator.compute_sub_amplitudes(Xc, self._opt)\n\n #---------------------------------------------------------------\n # DMD Amplitudes and Dynamics\n #---------------------------------------------------------------\n Vand = np.vander(np.power(eigs, old_div(1., step)), n_samples, True)\n\n Psi = (Vand.T * b).T\n\n self._modes.append(modes)\n self._b.append(b)\n self._Atilde.append(Atilde)\n self._eigenvalues.append(eigs)\n self._nsamples.append(n_samples)\n self._steps.append(step)\n\n if Xraw.dtype == 'float64':\n Xraw -= modes.dot(Psi).real\n else:\n Xraw -= modes.dot(Psi)\n\n if current_bin < 2**(self._max_level - 1) - 1:\n current_bin += 1\n half = int(np.ceil(old_div(Xraw.shape[1], 2)))\n data_queue.append(Xraw[:, :half])\n data_queue.append(Xraw[:, half:])\n else:\n current_bin += 1\n\nclass MrDMD(DMDBase):\n \"\"\"\n Multi-resolution Dynamic Mode Decomposition\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive integer, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means TLSQ is not applied.\n :param bool opt: flag to compute optimal amplitudes. Default is False.\n Doesn't support changing the temporal index of the snapshot used for the\n computation of DMD modes amplitudes.\n :param int max_cycles: the maximum number of mode oscillations in any given\n time scale. Default is 1.\n :param int max_level: the maximum number of levels. 
Default is 6.\n \"\"\"\n\n def __init__(self, svd_rank=0, tlsq_rank=0, opt=False,\n max_cycles=1, max_level=6):\n\n self._Atilde = MrDMDOperator(svd_rank=svd_rank, tlsq_rank=tlsq_rank,\n max_level=max_level, max_cycles=max_cycles, opt=opt)\n\n self._original_time = None\n self._dmd_time = None\n\n self._max_level = max_level\n\n def _index_list(self, level, node):\n \"\"\"\n Private method that returns the right index element from a given level\n and node.\n\n :param int level: the level in the binary tree.\n :param int node: the node id.\n :rtype: int\n :return: the index of the list that contains the binary tree.\n \"\"\"\n if level >= self._max_level:\n raise ValueError(\"Invalid level: greater than `max_level`\")\n\n if node >= 2**level:\n raise ValueError(\"Invalid node\")\n\n return 2**level + node - 1\n\n def _index_list_reversed(self, index):\n \"\"\"\n Method that returns the level and node given the index of the bin.\n\n :param int index: the index of the bin in the binary tree.\n :return: the level of the bin in the binary tree and the node id\n in that level.\n \"\"\"\n if index > 2**self._max_level - 2:\n raise ValueError(\"Invalid index: maximum index is ({})\".format(2**self._max_level - 2))\n for lvl in range(self._max_level + 1):\n if index < 2**lvl - 1:\n break\n level = lvl - 1\n node = index - 2**level + 1\n return level, node\n\n def partial_time_interval(self, level, node):\n \"\"\"\n Evaluate the start and end time and the period of a given bin.\n\n :param int level: the level in the binary tree.\n :param int node: the node id.\n :return: the start and end time and the period of the bin\n :rtype: dictionary\n \"\"\"\n if level >= self._max_level:\n raise ValueError(\n 'The level input parameter ({}) has to be less than the '\n 'max_level ({}). 
Remember that the starting index is 0'.format(\n level, self._max_level))\n\n if node >= 2**level:\n raise ValueError(\"Invalid node\")\n\n full_period = self.original_time['tend'] - self.original_time['t0']\n period = full_period / 2**level\n t0 = self.original_time['t0'] + period*node\n tend = t0 + period\n return {'t0': t0, 'tend':tend, 'dt':period}\n\n def time_window_bins(self, t0, tend):\n \"\"\"\n Find which bins are embedded (partially or totally) in a given\n time window.\n\n :param float t0: start time of the window.\n :param float tend: end time of the window.\n :return: indexes of the bins seen by the time window.\n :rtype: numpy.ndarray\n \"\"\"\n indexes = []\n for level in range(self._max_level):\n for i in range(2**level):\n local_times = self.partial_time_interval(level, i)\n if t0 >= local_times['t0'] and t0 < local_times['tend']:\n indexes.append(self._index_list(level, i))\n if tend > local_times['t0'] and tend <= local_times['tend']:\n indexes.append(self._index_list(level, i))\n if t0 <= local_times['t0'] and tend >= local_times['tend']:\n indexes.append(self._index_list(level, i))\n # Remove duplicates if they exist\n # indexes = list(dict.fromkeys(indexes)) # Python 3.7 or later (preserve order)\n indexes = list(set(indexes)) # Any Python version, but does not preserve order\n indexes = np.sort(indexes)\n return indexes\n\n def time_window_eigs(self, t0, tend):\n \"\"\"\n Get the eigenvalues relative to the modes of the bins embedded (partially\n or totally) in a given time window.\n\n :param float t0: start time of the window.\n :param float tend: end time of the window.\n :return: the eigenvalues for that time window.\n :rtype: numpy.ndarray\n \"\"\"\n indexes = self.time_window_bins(t0, tend)\n return np.concatenate([self.operator.eigenvalues[idx]\n for idx in indexes])\n\n def time_window_frequency(self, t0, tend):\n \"\"\"\n Get the frequencies relative to the modes of the bins embedded (partially\n or totally) in a given time window.\n\n :param float t0: start time of the window.\n :param float tend: end time of the window.\n :return: the frequencies for that time window.\n :rtype: numpy.ndarray\n \"\"\"\n eigs = self.time_window_eigs(t0, tend)\n return np.log(eigs).imag/(2*np.pi*self.original_time['dt'])\n\n def time_window_growth_rate(self, t0, tend):\n \"\"\"\n Get the growth rate values relative to the modes of the bins embedded (partially\n or totally) in a given time window.\n\n :param float t0: start time of the window.\n :param float tend: end time of the window.\n :return: the Floquet values for that time window.\n :rtype: numpy.ndarray\n \"\"\"\n return self.time_window_eigs(t0, tend).real/self.original_time['dt']\n\n def time_window_amplitudes(self, t0, tend):\n \"\"\"\n Get the amplitudes relative to the modes of the bins embedded (partially\n or totally) in a given time window.\n\n :param float t0: start time of the window.\n :param float tend: end time of the window.\n :return: the amplitude of the modes for that time window.\n :rtype: numpy.ndarray\n \"\"\"\n indexes = self.time_window_bins(t0, tend)\n return np.concatenate([self._b[idx] for idx in indexes])\n\n @property\n def reconstructed_data(self):\n \"\"\"\n Get the reconstructed data.\n\n :return: the matrix that contains the reconstructed snapshots.\n :rtype: numpy.ndarray\n \"\"\"\n try:\n data = np.sum(\n np.array([\n self.partial_reconstructed_data(i)\n for i in range(self._max_level)\n ]),\n axis=0)\n except MemoryError:\n data = np.array(self.partial_reconstructed_data(0))\n for i 
in range(1, self._max_level):\n data = np.sum([data,\n np.array(self.partial_reconstructed_data(i))], axis=0)\n return data\n\n @property\n def modes(self):\n \"\"\"\n Get the matrix containing the DMD modes, stored by column.\n\n :return: the matrix containing the DMD modes.\n :rtype: numpy.ndarray\n \"\"\"\n return np.hstack(tuple(self.operator.modes))\n\n @property\n def dynamics(self):\n \"\"\"\n Get the time evolution of each mode.\n\n :return: the matrix that contains all the time evolution, stored by\n row.\n :rtype: numpy.ndarray\n \"\"\"\n return np.vstack(\n tuple([self.partial_dynamics(i) for i in range(self._max_level)]))\n\n @property\n def eigs(self):\n \"\"\"\n Get the eigenvalues of A tilde.\n\n :return: the eigenvalues from the eigendecomposition of `atilde`.\n :rtype: numpy.ndarray\n \"\"\"\n return np.concatenate(self.operator.eigenvalues)\n\n @property\n def _steps(self):\n return self.operator._steps\n\n @property\n def _nsamples(self):\n return self.operator._nsamples\n\n @property\n def _b(self):\n return self.operator._b\n\n @property\n def max_level(self):\n return self._max_level\n\n @property\n def max_cycles(self):\n return self.operator._max_cycles\n\n def partial_modes(self, level, node=None):\n \"\"\"\n Return the modes at the specific `level` and at the specific `node`; if\n `node` is not specified, the method returns all the modes of the given\n `level` (all the nodes).\n\n :param int level: the index of the level from where the modes are\n extracted.\n :param int node: the index of the node from where the modes are\n extracted; if None, the modes are extracted from all the nodes of\n the given level. Default is None.\n \"\"\"\n if node:\n return self.operator.modes[self._index_list(level, node)]\n\n indeces = [self._index_list(level, i) for i in range(2**level)]\n return np.hstack(tuple([self.operator.modes[idx] for idx in indeces]))\n\n def partial_dynamics(self, level, node=None):\n \"\"\"\n Return the time evolution of the specific `level` and of the specific\n `node`; if `node` is not specified, the method returns the time\n evolution of the given `level` (all the nodes).\n\n :param int level: the index of the level from where the time evolution\n is extracted.\n :param int node: the index of the node from where the time evolution is\n extracted; if None, the time evolution is extracted from all the\n nodes of the given level. 
Default is None.\n \"\"\"\n\n def dynamic(eigs, amplitudes, step, nsamples):\n omega = old_div(\n np.log(np.power(eigs, old_div(1., step))),\n self.original_time['dt'])\n partial_timestep = np.arange(nsamples) * self.dmd_time['dt']\n vander = np.exp(np.multiply(*np.meshgrid(omega, partial_timestep)))\n return (vander * amplitudes).T\n\n if node:\n indeces = [self._index_list(level, node)]\n else:\n indeces = [self._index_list(level, i) for i in range(2**level)]\n\n level_dynamics = [\n dynamic(self.operator.eigenvalues[idx], self._b[idx], self._steps[idx],\n self._nsamples[idx]) for idx in indeces\n ]\n return scipy.linalg.block_diag(*level_dynamics)\n\n def partial_eigs(self, level, node=None):\n \"\"\"\n Return the eigenvalues of the specific `level` and of the specific\n `node`; if `node` is not specified, the method returns the eigenvalues\n of the given `level` (all the nodes).\n\n :param int level: the index of the level from where the eigenvalues are\n extracted.\n :param int node: the index of the node from where the eigenvalues are\n extracted; if None, the eigenvalues are extracted from all the\n nodes of the given level. Default is None.\n \"\"\"\n if level >= self._max_level:\n raise ValueError(\n 'The level input parameter ({}) has to be less than the '\n 'max_level ({}). Remember that the starting index is 0'.format(\n level, self._max_level))\n if node:\n return self.operator.eigenvalues[self._index_list(level, node)]\n\n indeces = [self._index_list(level, i) for i in range(2**level)]\n return np.concatenate([self.operator.eigenvalues[idx] for idx in indeces])\n\n def partial_reconstructed_data(self, level, node=None):\n \"\"\"\n Return the reconstructed data computed using the modes and the time\n evolution at the specific `level` and at the specific `node`; if `node`\n is not specified, the method returns the reconstructed data\n of the given `level` (all the nodes).\n\n :param int level: the index of the level.\n :param int node: the index of the node from where the time evolution is\n extracted; if None, the time evolution is extracted from all the\n nodes of the given level. Default is None.\n\n \"\"\"\n if level >= self._max_level:\n raise ValueError(\n 'The level input parameter ({}) has to be less than the '\n 'max_level ({}). Remember that the starting index is 0'.format(\n level, self._max_level))\n modes = self.partial_modes(level, node)\n dynamics = self.partial_dynamics(level, node)\n\n return modes.dot(dynamics)\n\n def fit(self, X):\n \"\"\"\n Compute the Dynamic Mode Decomposition of the input data.\n\n :param X: the input snapshots.\n :type X: numpy.ndarray or iterable\n \"\"\"\n self._snapshots, self._snapshots_shape = self._col_major_2darray(X)\n\n # Redefine max level if it is too big.\n lvl_threshold = int(np.log(self._snapshots.shape[1]/4.)/np.log(2.)) + 1\n if self._max_level > lvl_threshold:\n self._max_level = lvl_threshold\n print('Too many levels... '\n 'Redefining `max_level` to {}'.format(self._max_level))\n\n self.operator.compute_operator(self._snapshots)\n\n self.dmd_time = {'t0': 0, 'tend': self._snapshots.shape[1], 'dt': 1}\n self.original_time = self.dmd_time.copy()\n\n return self\n\n def plot_eigs(self,\n show_axes=True,\n show_unit_circle=True,\n figsize=(8, 8),\n title='',\n level=None,\n node=None):\n \"\"\"\n Plot the eigenvalues.\n\n :param bool show_axes: if True, the axes will be shown in the plot.\n Default is True.\n :param bool show_unit_circle: if True, the circle with unitary radius\n and center in the origin will be shown. 
Default is True.\n :param tuple(int,int) figsize: tuple in inches of the figure.\n :param str title: title of the plot.\n :param int level: plot only the eigenvalues of specific level.\n :param int node: plot only the eigenvalues of specific node.\n \"\"\"\n if self.operator.eigenvalues is None:\n raise ValueError('The eigenvalues have not been computed. '\n 'You have to perform the fit method.')\n\n if level:\n peigs = self.partial_eigs(level=level, node=node)\n else:\n peigs = self.eigs\n\n plt.figure(figsize=figsize)\n plt.title(title)\n plt.gcf()\n ax = plt.gca()\n\n if not level:\n cmap = plt.get_cmap('viridis')\n colors = [cmap(i) for i in np.linspace(0, 1, self._max_level)]\n\n points = []\n for lvl in range(self._max_level):\n indeces = [self._index_list(lvl, i) for i in range(2 ** lvl)]\n eigs = np.concatenate([self.operator.eigenvalues[idx] for idx in indeces])\n\n points.append(\n ax.plot(eigs.real, eigs.imag, '.', color=colors[lvl])[0])\n else:\n points = []\n points.append(\n ax.plot(peigs.real, peigs.imag, 'bo', label='Eigenvalues')[0])\n\n # set limits for axis\n limit = np.max(np.ceil(np.absolute(peigs)))\n ax.set_xlim((-limit, limit))\n ax.set_ylim((-limit, limit))\n\n plt.ylabel('Imaginary part')\n plt.xlabel('Real part')\n\n if show_unit_circle:\n unit_circle = plt.Circle(\n (0., 0.), 1., color='green', fill=False, linestyle='--')\n ax.add_artist(unit_circle)\n\n # Dashed grid\n gridlines = ax.get_xgridlines() + ax.get_ygridlines()\n for line in gridlines:\n line.set_linestyle('-.')\n ax.grid(True)\n\n ax.set_aspect('equal')\n\n # x and y axes\n if show_axes:\n ax.annotate(\n '',\n xy=(np.max([limit * 0.8, 1.]), 0.),\n xytext=(np.min([-limit * 0.8, -1.]), 0.),\n arrowprops=dict(arrowstyle=\"->\"))\n ax.annotate(\n '',\n xy=(0., np.max([limit * 0.8, 1.])),\n xytext=(0., np.min([-limit * 0.8, -1.])),\n arrowprops=dict(arrowstyle=\"->\"))\n\n # legend\n if level:\n labels = ['Eigenvalues - level {}'.format(level)]\n else:\n labels = [\n 'Eigenvalues - level {}'.format(i)\n for i in range(self._max_level)\n ]\n\n if show_unit_circle:\n points += [unit_circle]\n labels += ['Unit circle']\n\n ax.add_artist(plt.legend(points, labels, loc='best'))\n plt.show()\n"
]
| [
[
"numpy.min",
"numpy.linalg.lstsq",
"numpy.sort",
"matplotlib.pyplot.gcf",
"numpy.concatenate",
"numpy.max",
"numpy.log",
"matplotlib.pyplot.get_cmap",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.linalg.multi_dot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.diag",
"matplotlib.pyplot.Circle",
"numpy.linalg.svd",
"numpy.absolute",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.legend",
"numpy.reciprocal",
"matplotlib.pyplot.ylabel",
"numpy.linalg.solve",
"numpy.linspace",
"numpy.meshgrid"
]
]
|
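A hedged sketch of driving the MrDMD class in the cell above on a synthetic snapshot matrix; the signal, the rank and the level count are illustrative assumptions.

import numpy as np

x = np.linspace(0, 10, 64)
t = np.linspace(0, 4 * np.pi, 256)
X = np.outer(np.cos(x), np.sin(t)) + 0.05 * np.random.randn(64, 256)

dmd = MrDMD(svd_rank=-1, max_level=4, max_cycles=1)
dmd.fit(X)                             # builds the binary tree of bins
X_rec = dmd.reconstructed_data         # same shape as X
slow_eigs = dmd.partial_eigs(level=0)  # eigenvalues of the slowest level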
clintg6/imageio | [
"ef70fcfbc2ed160881188a029e2ac08174a35611"
]
| [
"imageio/plugins/pillow.py"
]
| [
"# -*- coding: utf-8 -*-\n# imageio is distributed under the terms of the (new) BSD License.\n\n\"\"\" Plugin that wraps the the Pillow library.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function, division\n\nimport logging\nimport threading\n\nimport numpy as np\n\nfrom .. import formats\nfrom ..core import Format, image_as_uint\n\n# Get info about pillow formats without having to import PIL\nfrom .pillow_info import pillow_formats, pillow_docs\n\n\nlogger = logging.getLogger(__name__)\n\n\n# todo: Pillow ImageGrab module supports grabbing the screen on Win and OSX.\n\n\nGENERIC_DOCS = \"\"\"\n Parameters for reading\n ----------------------\n \n pilmode : str\n From the Pillow documentation:\n \n * 'L' (8-bit pixels, grayscale)\n * 'P' (8-bit pixels, mapped to any other mode using a color palette)\n * 'RGB' (3x8-bit pixels, true color)\n * 'RGBA' (4x8-bit pixels, true color with transparency mask)\n * 'CMYK' (4x8-bit pixels, color separation)\n * 'YCbCr' (3x8-bit pixels, color video format)\n * 'I' (32-bit signed integer pixels)\n * 'F' (32-bit floating point pixels)\n \n PIL also provides limited support for a few special modes, including\n 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'\n (true color with premultiplied alpha).\n \n When translating a color image to grayscale (mode 'L', 'I' or 'F'),\n the library uses the ITU-R 601-2 luma transform::\n \n L = R * 299/1000 + G * 587/1000 + B * 114/1000\n as_gray : bool\n If True, the image is converted using mode 'F'. When `mode` is\n not None and `as_gray` is True, the image is first converted\n according to `mode`, and the result is then \"flattened\" using\n mode 'F'.\n\"\"\"\n\n\nclass PillowFormat(Format):\n \"\"\"\n Base format class for Pillow formats.\n \"\"\"\n\n _pillow_imported = False\n _Image = None\n _modes = \"i\"\n _description = \"\"\n\n def __init__(self, *args, **kwargs):\n super(PillowFormat, self).__init__(*args, **kwargs)\n # Used to synchronize _init_pillow(), see #244\n self._lock = threading.RLock()\n\n @property\n def plugin_id(self):\n \"\"\" The PIL plugin id.\n \"\"\"\n return self._plugin_id # Set when format is created\n\n def _init_pillow(self):\n with self._lock:\n if not self._pillow_imported:\n self._pillow_imported = True # more like tried to import\n import PIL\n\n if not hasattr(PIL, \"__version__\"): # pragma: no cover\n raise ImportError(\n \"Imageio Pillow plugin requires \" \"Pillow, not PIL!\"\n )\n from PIL import Image\n\n self._Image = Image\n elif self._Image is None: # pragma: no cover\n raise RuntimeError(\"Imageio Pillow plugin requires \" \"Pillow lib.\")\n Image = self._Image\n\n if self.plugin_id in (\"PNG\", \"JPEG\", \"BMP\", \"GIF\", \"PPM\"):\n Image.preinit()\n else:\n Image.init()\n return Image\n\n def _can_read(self, request):\n Image = self._init_pillow()\n if request.mode[1] in (self.modes + \"?\"):\n if self.plugin_id in Image.OPEN:\n factory, accept = Image.OPEN[self.plugin_id]\n if accept:\n if request.firstbytes and accept(request.firstbytes):\n return True\n\n def _can_write(self, request):\n Image = self._init_pillow()\n if request.mode[1] in (self.modes + \"?\"):\n if request.extension in self.extensions:\n if self.plugin_id in Image.SAVE:\n return True\n\n class Reader(Format.Reader):\n def _open(self, pilmode=None, as_gray=False):\n Image = self.format._init_pillow()\n try:\n factory, accept = Image.OPEN[self.format.plugin_id]\n except KeyError:\n raise RuntimeError(\"Format %s cannot read images.\" % self.format.name)\n self._fp = 
self._get_file()\n self._im = factory(self._fp, \"\")\n if hasattr(Image, \"_decompression_bomb_check\"):\n Image._decompression_bomb_check(self._im.size)\n # Save the raw mode used by the palette for a BMP because it may not be the number of channels\n # When the data is read, imageio hands the palette to PIL to handle and clears the rawmode argument\n # However, there is a bug in PIL with handling animated GIFs with a different color palette on each frame.\n # This issue is resolved by using the raw palette data but the rawmode information is now lost. So we\n # store the raw mode for later use\n if self._im.palette and self._im.palette.dirty:\n self._im.palette.rawmode_saved = self._im.palette.rawmode\n pil_try_read(self._im)\n # Store args\n self._kwargs = dict(\n as_gray=as_gray, is_gray=_palette_is_grayscale(self._im)\n )\n # setting mode=None is not the same as just not providing it\n if pilmode is not None:\n self._kwargs[\"mode\"] = pilmode\n # Set length\n self._length = 1\n if hasattr(self._im, \"n_frames\"):\n self._length = self._im.n_frames\n\n def _get_file(self):\n self._we_own_fp = False\n return self.request.get_file()\n\n def _close(self):\n save_pillow_close(self._im)\n if self._we_own_fp:\n self._fp.close()\n # else: request object handles closing the _fp\n\n def _get_length(self):\n return self._length\n\n def _seek(self, index):\n try:\n self._im.seek(index)\n except EOFError:\n raise IndexError(\"Could not seek to index %i\" % index)\n\n def _get_data(self, index):\n if index >= self._length:\n raise IndexError(\"Image index %i > %i\" % (index, self._length))\n i = self._im.tell()\n if i > index:\n self._seek(index) # just try\n else:\n while i < index: # some formats need to be read in sequence\n i += 1\n self._seek(i)\n if self._im.palette and self._im.palette.dirty:\n self._im.palette.rawmode_saved = self._im.palette.rawmode\n self._im.getdata()[0]\n im = pil_get_frame(self._im, **self._kwargs)\n return im, self._im.info\n\n def _get_meta_data(self, index):\n if not (index is None or index == 0):\n raise IndexError()\n return self._im.info\n\n class Writer(Format.Writer):\n def _open(self):\n Image = self.format._init_pillow()\n try:\n self._save_func = Image.SAVE[self.format.plugin_id]\n except KeyError:\n raise RuntimeError(\"Format %s cannot write images.\" % self.format.name)\n self._fp = self.request.get_file()\n self._meta = {}\n self._written = False\n\n def _close(self):\n pass # request object handled closing _fp\n\n def _append_data(self, im, meta):\n if self._written:\n raise RuntimeError(\n \"Format %s only supports single images.\" % self.format.name\n )\n # Pop unit dimension for grayscale images\n if im.ndim == 3 and im.shape[-1] == 1:\n im = im[:, :, 0]\n self._written = True\n self._meta.update(meta)\n img = ndarray_to_pil(\n im, self.format.plugin_id, self._meta.pop(\"prefer_uint8\", True)\n )\n if \"bits\" in self._meta:\n img = img.quantize() # Make it a P image, so bits arg is used\n img.save(self._fp, format=self.format.plugin_id, **self._meta)\n save_pillow_close(img)\n\n def set_meta_data(self, meta):\n self._meta.update(meta)\n\n\nclass PNGFormat(PillowFormat):\n \"\"\"A PNG format based on Pillow.\n \n This format supports grayscale, RGB and RGBA images.\n \n Parameters for reading\n ----------------------\n ignoregamma : bool\n Avoid gamma correction. 
Default True.\n pilmode : str\n From the Pillow documentation:\n \n * 'L' (8-bit pixels, grayscale)\n * 'P' (8-bit pixels, mapped to any other mode using a color palette)\n * 'RGB' (3x8-bit pixels, true color)\n * 'RGBA' (4x8-bit pixels, true color with transparency mask)\n * 'CMYK' (4x8-bit pixels, color separation)\n * 'YCbCr' (3x8-bit pixels, color video format)\n * 'I' (32-bit signed integer pixels)\n * 'F' (32-bit floating point pixels)\n \n PIL also provides limited support for a few special modes, including\n 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'\n (true color with premultiplied alpha).\n \n When translating a color image to grayscale (mode 'L', 'I' or 'F'),\n the library uses the ITU-R 601-2 luma transform::\n \n L = R * 299/1000 + G * 587/1000 + B * 114/1000\n as_gray : bool\n If True, the image is converted using mode 'F'. When `mode` is\n not None and `as_gray` is True, the image is first converted\n according to `mode`, and the result is then \"flattened\" using\n mode 'F'.\n \n Parameters for saving\n ---------------------\n optimize : bool\n If present and true, instructs the PNG writer to make the output file\n as small as possible. This includes extra processing in order to find\n optimal encoder settings.\n transparency: \n This option controls what color image to mark as transparent.\n dpi: tuple of two scalars\n The desired dpi in each direction.\n pnginfo: PIL.PngImagePlugin.PngInfo\n Object containing text tags.\n compress_level: int\n ZLIB compression level, a number between 0 and 9: 1 gives best speed,\n 9 gives best compression, 0 gives no compression at all. Default is 9.\n When ``optimize`` option is True ``compress_level`` has no effect\n (it is set to 9 regardless of a value passed).\n compression: int\n Compatibility with the freeimage PNG format. If given, it overrides\n compress_level.\n icc_profile:\n The ICC Profile to include in the saved file.\n bits (experimental): int\n This option controls how many bits to store. If omitted,\n the PNG writer uses 8 bits (256 colors).\n quantize: \n Compatibility with the freeimage PNG format. If given, it overrides\n bits. In this case, given as a number between 1-256.\n dictionary (experimental): dict\n Set the ZLIB encoder dictionary.\n prefer_uint8: bool\n Let the PNG writer truncate uint16 image arrays to uint8 if their values fall\n within the range [0, 255]. Defaults to true for legacy compatibility, however\n it is recommended to set this to false to avoid unexpected behavior when\n saving e.g. weakly saturated images.\n \"\"\"\n\n class Reader(PillowFormat.Reader):\n def _open(self, pilmode=None, as_gray=False, ignoregamma=True):\n return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)\n\n def _get_data(self, index):\n im, info = PillowFormat.Reader._get_data(self, index)\n if not self.request.kwargs.get(\"ignoregamma\", True):\n # The gamma value in the file represents the gamma factor for the\n # hardware on the system where the file was created, and is meant\n # to be able to match the colors with the system on which the\n # image is shown. 
See also issue #366\n try:\n gamma = float(info[\"gamma\"])\n except (KeyError, ValueError):\n pass\n else:\n scale = float(65536 if im.dtype == np.uint16 else 255)\n gain = 1.0\n im[:] = ((im / scale) ** gamma) * scale * gain + 0.4999\n return im, info\n\n # --\n\n class Writer(PillowFormat.Writer):\n def _open(self, compression=None, quantize=None, interlaced=False, **kwargs):\n\n # Better default for compression\n kwargs[\"compress_level\"] = kwargs.get(\"compress_level\", 9)\n\n if compression is not None:\n if compression < 0 or compression > 9:\n raise ValueError(\"Invalid PNG compression level: %r\" % compression)\n kwargs[\"compress_level\"] = compression\n if quantize is not None:\n for bits in range(1, 9):\n if 2 ** bits == quantize:\n break\n else:\n raise ValueError(\n \"PNG quantize must be power of two, \" \"not %r\" % quantize\n )\n kwargs[\"bits\"] = bits\n if interlaced:\n logger.warning(\"PIL PNG writer cannot produce interlaced images.\")\n\n ok_keys = (\n \"optimize\",\n \"transparency\",\n \"dpi\",\n \"pnginfo\",\n \"bits\",\n \"compress_level\",\n \"icc_profile\",\n \"dictionary\",\n \"prefer_uint8\",\n )\n for key in kwargs:\n if key not in ok_keys:\n raise TypeError(\"Invalid arg for PNG writer: %r\" % key)\n\n PillowFormat.Writer._open(self)\n self._meta.update(kwargs)\n\n def _append_data(self, im, meta):\n if str(im.dtype) == \"uint16\" and (im.ndim == 2 or im.shape[-1] == 1):\n im = image_as_uint(im, bitdepth=16)\n else:\n im = image_as_uint(im, bitdepth=8)\n PillowFormat.Writer._append_data(self, im, meta)\n\n\nclass JPEGFormat(PillowFormat):\n \"\"\"A JPEG format based on Pillow.\n \n This format supports grayscale, RGB and RGBA images.\n \n Parameters for reading\n ----------------------\n exifrotate : bool\n Automatically rotate the image according to exif flag. Default True.\n pilmode : str\n From the Pillow documentation:\n \n * 'L' (8-bit pixels, grayscale)\n * 'P' (8-bit pixels, mapped to any other mode using a color palette)\n * 'RGB' (3x8-bit pixels, true color)\n * 'RGBA' (4x8-bit pixels, true color with transparency mask)\n * 'CMYK' (4x8-bit pixels, color separation)\n * 'YCbCr' (3x8-bit pixels, color video format)\n * 'I' (32-bit signed integer pixels)\n * 'F' (32-bit floating point pixels)\n \n PIL also provides limited support for a few special modes, including\n 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'\n (true color with premultiplied alpha).\n \n When translating a color image to grayscale (mode 'L', 'I' or 'F'),\n the library uses the ITU-R 601-2 luma transform::\n \n L = R * 299/1000 + G * 587/1000 + B * 114/1000\n as_gray : bool\n If True, the image is converted using mode 'F'. When `mode` is\n not None and `as_gray` is True, the image is first converted\n according to `mode`, and the result is then \"flattened\" using\n mode 'F'.\n \n Parameters for saving\n ---------------------\n quality : scalar\n The compression factor of the saved image (1..100), higher\n numbers result in higher quality but larger file size. Default 75.\n progressive : bool\n Save as a progressive JPEG file (e.g. for images on the web).\n Default False.\n optimize : bool\n On saving, compute optimal Huffman coding tables (can reduce a few\n percent of file size). 
Default False.\n dpi : tuple of int\n The pixel density, ``(x,y)``.\n icc_profile : object\n If present and true, the image is stored with the provided ICC profile.\n If this parameter is not provided, the image will be saved with no\n profile attached.\n exif : dict\n If present, the image will be stored with the provided raw EXIF data.\n subsampling : str\n Sets the subsampling for the encoder. See Pillow docs for details.\n qtables : object\n Set the qtables for the encoder. See Pillow docs for details.\n \"\"\"\n\n class Reader(PillowFormat.Reader):\n def _open(self, pilmode=None, as_gray=False, exifrotate=True):\n return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)\n\n def _get_file(self):\n # Pillow uses seek for JPG, so we cannot directly stream from web\n if self.request.filename.startswith(\n (\"http://\", \"https://\")\n ) or \".zip/\" in self.request.filename.replace(\"\\\\\", \"/\"):\n self._we_own_fp = True\n return open(self.request.get_local_filename(), \"rb\")\n else:\n self._we_own_fp = False\n return self.request.get_file()\n\n def _get_data(self, index):\n im, info = PillowFormat.Reader._get_data(self, index)\n\n # Handle exif\n if \"exif\" in info:\n from PIL.ExifTags import TAGS\n\n info[\"EXIF_MAIN\"] = {}\n for tag, value in self._im._getexif().items():\n decoded = TAGS.get(tag, tag)\n info[\"EXIF_MAIN\"][decoded] = value\n\n im = self._rotate(im, info)\n return im, info\n\n def _rotate(self, im, meta):\n \"\"\" Use Orientation information from EXIF meta data to \n orient the image correctly. Similar code as in FreeImage plugin.\n \"\"\"\n if self.request.kwargs.get(\"exifrotate\", True):\n try:\n ori = meta[\"EXIF_MAIN\"][\"Orientation\"]\n except KeyError: # pragma: no cover\n pass # Orientation not available\n else: # pragma: no cover - we cannot touch all cases\n # www.impulseadventure.com/photo/exif-orientation.html\n if ori in [1, 2]:\n pass\n if ori in [3, 4]:\n im = np.rot90(im, 2)\n if ori in [5, 6]:\n im = np.rot90(im, 3)\n if ori in [7, 8]:\n im = np.rot90(im)\n if ori in [2, 4, 5, 7]: # Flipped cases (rare)\n im = np.fliplr(im)\n return im\n\n # --\n\n class Writer(PillowFormat.Writer):\n def _open(self, quality=75, progressive=False, optimize=False, **kwargs):\n\n # Check quality - in Pillow it should be no higher than 95\n quality = int(quality)\n if quality < 1 or quality > 100:\n raise ValueError(\"JPEG quality should be between 1 and 100.\")\n quality = min(95, max(1, quality))\n\n kwargs[\"quality\"] = quality\n kwargs[\"progressive\"] = bool(progressive)\n kwargs[\"optimize\"] = bool(progressive)\n\n PillowFormat.Writer._open(self)\n self._meta.update(kwargs)\n\n def _append_data(self, im, meta):\n if im.ndim == 3 and im.shape[-1] == 4:\n raise IOError(\"JPEG does not support alpha channel.\")\n im = image_as_uint(im, bitdepth=8)\n PillowFormat.Writer._append_data(self, im, meta)\n return\n\n\ndef save_pillow_close(im):\n # see issue #216 and #300\n if hasattr(im, \"close\"):\n if hasattr(getattr(im, \"fp\", None), \"close\"):\n im.close()\n\n\n## Func from skimage\n\n# This cells contains code from scikit-image, in particular from\n# http://github.com/scikit-image/scikit-image/blob/master/\n# skimage/io/_plugins/pil_plugin.py\n# The scikit-image license applies.\n\n\ndef pil_try_read(im):\n try:\n # this will raise an IOError if the file is not readable\n im.getdata()[0]\n except IOError as e:\n site = \"http://pillow.readthedocs.io/en/latest/installation.html\"\n site += \"#external-libraries\"\n pillow_error_message = 
str(e)\n error_message = (\n 'Could not load \"%s\" \\n'\n 'Reason: \"%s\"\\n'\n \"Please see documentation at: %s\"\n % (im.filename, pillow_error_message, site)\n )\n raise ValueError(error_message)\n\n\ndef _palette_is_grayscale(pil_image):\n if pil_image.mode != \"P\":\n return False\n # get palette as an array with R, G, B columns\n palette = np.asarray(pil_image.getpalette()).reshape((256, 3))\n # Not all palette colors are used; unused colors have junk values.\n start, stop = pil_image.getextrema()\n valid_palette = palette[start : stop + 1]\n # Image is grayscale if channel differences (R - G and G - B)\n # are all zero.\n return np.allclose(np.diff(valid_palette), 0)\n\n\ndef pil_get_frame(im, is_gray=None, as_gray=None, mode=None, dtype=None):\n \"\"\" \n is_gray: Whether the image *is* gray (by inspecting its palette).\n as_gray: Whether the resulting image must be converted to gray.\n mode: The mode to convert to.\n \"\"\"\n\n if is_gray is None:\n is_gray = _palette_is_grayscale(im)\n\n frame = im\n\n # Convert ...\n if mode is not None:\n # Mode is explicitly given ...\n if mode != im.mode:\n frame = im.convert(mode)\n elif as_gray:\n pass # don't do any auto-conversions (but do the explicit one above)\n elif im.mode == \"P\" and is_gray:\n # Paletted images that are already gray by their palette\n # are converted so that the resulting numpy array is 2D.\n frame = im.convert(\"L\")\n elif im.mode == \"P\":\n # Paletted images are converted to RGB/RGBA. We jump some loops to make\n # this work well.\n if im.info.get(\"transparency\", None) is not None:\n # Let Pillow apply the transparency, see issue #210 and #246\n frame = im.convert(\"RGBA\")\n elif im.palette.mode in (\"RGB\", \"RGBA\"):\n # We can do this ourselves. Pillow seems to sometimes screw\n # this up if a multi-gif has a palette for each frame ...\n # Create palette array\n p = np.frombuffer(im.palette.getdata()[1], np.uint8)\n # Restore the raw mode that was saved to be used to parse the palette\n if hasattr(im.palette, \"rawmode_saved\"):\n im.palette.rawmode = im.palette.rawmode_saved\n mode = im.palette.rawmode if im.palette.rawmode else im.palette.mode\n nchannels = len(mode)\n # Shape it.\n p.shape = -1, nchannels\n if p.shape[1] == 3 or (p.shape[1] == 4 and mode[-1] == \"X\"):\n p = np.column_stack((p[:, :3], 255 * np.ones(p.shape[0], p.dtype)))\n # Swap the axes if the mode is in BGR and not RGB\n if mode.startswith(\"BGR\"):\n p = p[:, [2, 1, 0]] if p.shape[1] == 3 else p[:, [2, 1, 0, 3]]\n # Apply palette\n frame_paletted = np.array(im, np.uint8)\n try:\n frame = p[frame_paletted]\n except Exception:\n # Ok, let PIL do it. The introduction of the branch that\n # tests `im.info['transparency']` should make this happen\n # much less often, but let's keep it, to be safe.\n frame = im.convert(\"RGBA\")\n else:\n # Let Pillow do it. Unlike skimage, we always convert\n # to RGBA; palettes can be RGBA.\n if True: # im.format == 'PNG' and 'transparency' in im.info:\n frame = im.convert(\"RGBA\")\n else:\n frame = im.convert(\"RGB\")\n elif \"A\" in im.mode:\n frame = im.convert(\"RGBA\")\n elif im.mode == \"CMYK\":\n frame = im.convert(\"RGB\")\n\n # Apply a post-convert if necessary\n if as_gray:\n frame = frame.convert(\"F\") # Scipy compat\n elif not isinstance(frame, np.ndarray) and frame.mode == \"1\":\n # Workaround for crash in PIL. When im is 1-bit, the call array(im)\n # can cause a segfault, or generate garbage. 
See\n # https://github.com/scipy/scipy/issues/2138 and\n # https://github.com/python-pillow/Pillow/issues/350.\n #\n # This converts im from a 1-bit image to an 8-bit image.\n frame = frame.convert(\"L\")\n\n # Convert to numpy array\n if im.mode.startswith(\"I;16\"):\n # e.g. in16 PNG's\n shape = im.size\n dtype = \">u2\" if im.mode.endswith(\"B\") else \"<u2\"\n if \"S\" in im.mode:\n dtype = dtype.replace(\"u\", \"i\")\n frame = np.frombuffer(frame.tobytes(), dtype).copy()\n frame.shape = shape[::-1]\n else:\n # Use uint16 for PNG's in mode I\n if im.format == \"PNG\" and im.mode == \"I\" and dtype is None:\n dtype = \"uint16\"\n frame = np.array(frame, dtype=dtype)\n\n return frame\n\n\ndef ndarray_to_pil(arr, format_str=None, prefer_uint8=True):\n\n from PIL import Image\n\n if arr.ndim == 3:\n arr = image_as_uint(arr, bitdepth=8)\n mode = {3: \"RGB\", 4: \"RGBA\"}[arr.shape[2]]\n\n elif format_str in [\"png\", \"PNG\"]:\n mode = \"I;16\"\n mode_base = \"I\"\n\n if arr.dtype.kind == \"f\":\n arr = image_as_uint(arr)\n\n elif prefer_uint8 and arr.max() < 256 and arr.min() >= 0:\n arr = arr.astype(np.uint8)\n mode = mode_base = \"L\"\n\n else:\n arr = image_as_uint(arr, bitdepth=16)\n\n else:\n arr = image_as_uint(arr, bitdepth=8)\n mode = \"L\"\n mode_base = \"L\"\n\n if mode == \"I;16\" and int(getattr(Image, \"__version__\", \"0\").split(\".\")[0]) < 6:\n # Pillow < v6.0.0 has limited support for the \"I;16\" mode,\n # requiring us to fall back to this expensive workaround.\n # tobytes actually creates a copy of the image, which is costly.\n array_buffer = arr.tobytes()\n if arr.ndim == 2:\n im = Image.new(mode_base, arr.T.shape)\n im.frombytes(array_buffer, \"raw\", mode)\n else:\n image_shape = (arr.shape[1], arr.shape[0])\n im = Image.frombytes(mode, image_shape, array_buffer)\n return im\n else:\n return Image.fromarray(arr, mode)\n\n\n## End of code from scikit-image\n\n\nfrom .pillowmulti import GIFFormat, TIFFFormat\n\nIGNORE_FORMATS = \"MPEG\"\n\nSPECIAL_FORMATS = dict(PNG=PNGFormat, JPEG=JPEGFormat, GIF=GIFFormat, TIFF=TIFFFormat)\n\n\ndef register_pillow_formats():\n\n for id, summary, ext in pillow_formats:\n if id in IGNORE_FORMATS:\n continue\n FormatCls = SPECIAL_FORMATS.get(id, PillowFormat)\n summary = FormatCls._description or summary\n format = FormatCls(id + \"-PIL\", summary, ext, FormatCls._modes)\n format._plugin_id = id\n if FormatCls is PillowFormat or not FormatCls.__doc__:\n format.__doc__ = pillow_docs[id] + GENERIC_DOCS\n formats.add_format(format)\n\n\nregister_pillow_formats()\n"
]
| [
[
"numpy.rot90",
"numpy.array",
"numpy.ones",
"numpy.diff",
"numpy.fliplr"
]
]
|
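A hedged sketch showing how the Pillow-backed formats registered in the cell above are normally reached through imageio's public API; the file name is illustrative.

import numpy as np
import imageio

im = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)
imageio.imwrite("example.png", im, format="PNG-PIL", compress_level=6)
back = imageio.imread("example.png", format="PNG-PIL", pilmode="RGB")
assert back.shape == (32, 32, 3)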
Appy1310/image_GAN_painting | [
"cbb7845add4a7501e61f64cab5bb07afe8314de0"
]
| [
"models/CycleGAN.py"
]
| [
"'''\nClass defining a CycleGAN model for image to image translation\n'''\n\n# import random\n# import os\n# #from os import listdir\n# from random import random\n# import numpy as np\n# from numpy import load, zeros, ones, asarray\n# from numpy.random import randint\nfrom tensorflow import keras\nimport tensorflow as tf\n\n#import matplotlib.pyplot as plt\n#from tensorflow.keras.callbacks import Callback\nfrom keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n#from matplotlib import pyplot\n\nfrom utils import generate_real_samples, generate_fake_samples\nfrom utils import save_models, summarize_performance, update_image_pool\n\n# Build the CycleGAN class\n''' It has the following methods:\ndiscriminator:\ngenerator:\n'''\n# # generator: A -> B\n# g_model_AtoB = model.define_generator(image_shape)\n# # generator: B -> A\n# g_model_BtoA = model.define_generator(image_shape)\n# # discriminator: A -> [real/fake]\n# d_model_A = model.define_discriminator(image_shape)\n# # discriminator: B -> [real/fake]\n# d_model_B = model.define_discriminator(image_shape)\n# # composite: A -> B -> [real/fake, A]\n# c_model_AtoB = model.define_composite_model(g_model_AtoB, d_model_B, g_model_BtoA, image_shape)\n# # composite: B -> A -> [real/fake, B]\n# c_model_BtoA = model.define_composite_model(g_model_BtoA, d_model_A, g_model_AtoB, image_shape)\n\n\nclass CycleGAN(tf.keras.Model):\n\n def __init__(self, image_shape):\n super(CycleGAN, self).__init__()\n self.image_shape = image_shape\n # generator: A -> B\n self.g_model_AtoB = self.define_generator()\n # generator: B -> A\n self.g_model_BtoA = self.define_generator()\n # discriminator: A -> [real/fake]\n self.d_model_A = self.define_discriminator()\n # discriminator: B -> [real/fake]\n self.d_model_B = self.define_discriminator()\n # composite: A -> B -> [real/fake, A]\n self.c_model_AtoB = self.define_composite_model(\n self.g_model_AtoB, self.d_model_B, self.g_model_BtoA, image_shape)\n # composite: B -> A -> [real/fake, B]\n self.c_model_BtoA = self.define_composite_model(\n self.g_model_BtoA, self.d_model_A, self.g_model_AtoB, image_shape)\n\n def define_discriminator(self):\n ''' defines the discriminator function'''\n # weight initialization\n init = keras.initializers.RandomNormal(stddev=0.02)\n # source image input\n input_image = keras.Input(shape=self.image_shape)\n # C64\n d_layer = keras.layers.Conv2D(64, (4, 4), strides=(\n 2, 2), padding='same', kernel_initializer=init)(input_image)\n d_layer = keras.layers.LeakyReLU(alpha=0.2)(d_layer)\n # C128\n d_layer = keras.layers.Conv2D(128, (4, 4), strides=(\n 2, 2), padding='same', kernel_initializer=init)(d_layer)\n d_layer = InstanceNormalization(axis=-1)(d_layer)\n d_layer = keras.layers.LeakyReLU(alpha=0.2)(d_layer)\n # C256\n d_layer = keras.layers.Conv2D(256, (4, 4), strides=(\n 2, 2), padding='same', kernel_initializer=init)(d_layer)\n d_layer = InstanceNormalization(axis=-1)(d_layer)\n d_layer = keras.layers.LeakyReLU(alpha=0.2)(d_layer)\n # C512\n d_layer = keras.layers.Conv2D(512, (4, 4), strides=(\n 2, 2), padding='same', kernel_initializer=init)(d_layer)\n d_layer = InstanceNormalization(axis=-1)(d_layer)\n d_layer = keras.layers.LeakyReLU(alpha=0.2)(d_layer)\n # second last output layer\n d_layer = keras.layers.Conv2D(\n 512, (4, 4), padding='same', kernel_initializer=init)(d_layer)\n d_layer = InstanceNormalization(axis=-1)(d_layer)\n d_layer = keras.layers.LeakyReLU(alpha=0.2)(d_layer)\n # patch output\n patch_out = keras.layers.Conv2D(\n 1, (4, 
4), padding='same', kernel_initializer=init)(d_layer)\n # define model\n model = keras.Model(input_image, patch_out)\n # compile model\n model.compile(\n loss='mse',\n optimizer=keras.optimizers.Adam(\n lr=0.0002,\n beta_1=0.5),\n loss_weights=[0.5])\n return model\n\n # generator of a resnet block\n def resnet_block(self, n_filters, input_layer):\n ''' generates a resnet_block'''\n # weight initialization\n init = keras.initializers.RandomNormal(stddev=0.02)\n # first layer convolutional layer\n g_layer = keras.layers.Conv2D(\n n_filters, (3, 3), padding='same', kernel_initializer=init)(input_layer)\n g_layer = InstanceNormalization(axis=-1)(g_layer)\n g_layer = keras.layers.Activation('relu')(g_layer)\n # second convolutional layer\n g_layer = keras.layers.Conv2D(\n n_filters, (3, 3), padding='same', kernel_initializer=init)(g_layer)\n g_layer = InstanceNormalization(axis=-1)(g_layer)\n # concatenate merge channel-wise with input layer\n g_layer = keras.layers.Concatenate()([g_layer, input_layer])\n return g_layer\n\n # define the standalone generator model\n def define_generator(self, n_resnet=9):\n ''' defines a generator function'''\n # weight initialization\n init = keras.initializers.RandomNormal(stddev=0.02)\n # image input\n input_image = keras.Input(shape=self.image_shape)\n # c7s1-64\n g_layer = keras.layers.Conv2D(64, (7, 7), padding='same',\n kernel_initializer=init)(input_image)\n g_layer = InstanceNormalization(axis=-1)(g_layer)\n g_layer = keras.layers.Activation('relu')(g_layer)\n # d128\n g_layer = keras.layers.Conv2D(128, (3, 3), strides=(\n 2, 2), padding='same', kernel_initializer=init)(g_layer)\n g_layer = InstanceNormalization(axis=-1)(g_layer)\n g_layer = keras.layers.Activation('relu')(g_layer)\n # d256\n g_layer = keras.layers.Conv2D(256, (3, 3), strides=(\n 2, 2), padding='same', kernel_initializer=init)(g_layer)\n g_layer = InstanceNormalization(axis=-1)(g_layer)\n g_layer = keras.layers.Activation('relu')(g_layer)\n # R256\n for _ in range(n_resnet):\n #print('running through loop!')\n g_layer = self.resnet_block(256, g_layer)\n # u128\n g_layer = keras.layers.Conv2DTranspose(128, (3, 3), strides=(\n 2, 2), padding='same', kernel_initializer=init)(g_layer)\n g_layer = InstanceNormalization(axis=-1)(g_layer)\n g_layer = keras.layers.Activation('relu')(g_layer)\n # u64\n g_layer = keras.layers.Conv2DTranspose(64, (3, 3), strides=(\n 2, 2), padding='same', kernel_initializer=init)(g_layer)\n g_layer = InstanceNormalization(axis=-1)(g_layer)\n g_layer = keras.layers.Activation('relu')(g_layer)\n # c7s1-3\n g_layer = keras.layers.Conv2D(3, (7, 7), padding='same',\n kernel_initializer=init)(g_layer)\n g_layer = InstanceNormalization(axis=-1)(g_layer)\n out_image = keras.layers.Activation('tanh')(g_layer)\n # define model\n model = keras.Model(input_image, out_image)\n return model\n\n # define a composite model for updating generators by adversarial and\n # cycle loss\n\n def define_composite_model(\n self,\n g_model_1,\n d_model,\n g_model_2,\n image_shape):\n '''define a composite model for updating generators\n by adversarial and cycle loss'''\n # ensure the model we're updating is trainable\n g_model_1.trainable = True\n # mark discriminator as not trainable\n d_model.trainable = False\n # mark other generator model as not trainable\n g_model_2.trainable = False\n # discriminator element\n input_gen = keras.Input(shape=image_shape)\n gen1_out = g_model_1(input_gen)\n output_d = d_model(gen1_out)\n # identity element\n input_id = keras.Input(shape=image_shape)\n 
output_id = g_model_1(input_id)\n # forward cycle\n output_f = g_model_2(gen1_out)\n # backward cycle\n gen2_out = g_model_2(input_id)\n output_b = g_model_1(gen2_out)\n # define model graph\n model = keras.Model([input_gen, input_id], [\n output_d, output_id, output_f, output_b])\n # define optimization algorithm configuration\n opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)\n # compile model with weighting of least squares loss and L1 loss\n model.compile(\n loss=[\n 'mse', 'mae', 'mae', 'mae'], loss_weights=[\n 1, 5, 10, 10], optimizer=opt)\n return model\n\n # train cyclegan models\n def train(self, d_model_A, d_model_B, g_model_AtoB, g_model_BtoA,\n c_model_AtoB, c_model_BtoA, dataset):\n ''' training step for cyclegan models'''\n # define properties of the training run\n n_epochs, n_batch, = 50, 1\n # determine the output square shape of the discriminator\n n_patch = d_model_A.output_shape[1]\n # unpack dataset\n trainA, trainB = dataset\n # prepare image pool for fakes\n poolA, poolB = list(), list()\n # calculate the number of batches per training epoch\n bat_per_epo = int(len(trainA) / n_batch)\n # calculate the number of training iterations\n n_steps = bat_per_epo * n_epochs\n # manually enumerate epochs\n for i in range(n_steps):\n # select a batch of real samples\n X_realA, y_realA = generate_real_samples(trainA, n_batch, n_patch)\n X_realB, y_realB = generate_real_samples(trainB, n_batch, n_patch)\n # generate a batch of fake samples\n X_fakeA, y_fakeA = generate_fake_samples(\n g_model_BtoA, X_realB, n_patch)\n X_fakeB, y_fakeB = generate_fake_samples(\n g_model_AtoB, X_realA, n_patch)\n # update fakes from pool\n X_fakeA = update_image_pool(poolA, X_fakeA)\n X_fakeB = update_image_pool(poolB, X_fakeB)\n # update generator B->A via adversarial and cycle loss\n g_loss2, _, _, _, _ = c_model_BtoA.train_on_batch(\n [X_realB, X_realA], [y_realA, X_realA, X_realB, X_realA])\n # update discriminator for A -> [real/fake]\n dA_loss1 = d_model_A.train_on_batch(X_realA, y_realA)\n dA_loss2 = d_model_A.train_on_batch(X_fakeA, y_fakeA)\n # update generator A->B via adversarial and cycle loss\n g_loss1, _, _, _, _ = c_model_AtoB.train_on_batch(\n [X_realA, X_realB], [y_realB, X_realB, X_realA, X_realB])\n # update discriminator for B -> [real/fake]\n dB_loss1 = d_model_B.train_on_batch(X_realB, y_realB)\n dB_loss2 = d_model_B.train_on_batch(X_fakeB, y_fakeB)\n # summarize performance\n print(\n '>%d, dA[%.3f,%.3f] dB[%.3f,%.3f] g[%.3f,%.3f]' %\n (i + 1, dA_loss1, dA_loss2, dB_loss1, dB_loss2, g_loss1, g_loss2))\n # evaluate the model performance every so often\n if (i + 1) % (bat_per_epo * 1) == 0:\n # plot A->B translation\n summarize_performance(i, g_model_AtoB, trainA, 'AtoB')\n # plot B->A translation\n summarize_performance(i, g_model_BtoA, trainB, 'BtoA')\n if (i + 1) % (bat_per_epo * 5) == 0:\n # save the models\n save_models(i, g_model_AtoB, g_model_BtoA)\n"
]
| [
[
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.Model",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.Input",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Concatenate"
]
]
|
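A hedged sketch of wiring up the CycleGAN class in the cell above. The image shape is illustrative, and `dataset` stands for a (trainA, trainB) pair of image arrays scaled to [-1, 1], which is what the utils helpers imported by the file expect.

gan = CycleGAN(image_shape=(256, 256, 3))
# gan.train(gan.d_model_A, gan.d_model_B,
#           gan.g_model_AtoB, gan.g_model_BtoA,
#           gan.c_model_AtoB, gan.c_model_BtoA, dataset)  # dataset is assumed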
jokieleung/Maria | [
"51ac07d1de564c26fbf038b07031a55660bbcb27"
]
| [
"retrieval_model/xmatching/metric.py"
]
| [
"import torch\n\n\ndef batchwise_accuracy(lang_output, visn_output, lang_mask):\n \"\"\"\n Calculate the accuracy of contextual word retrieval, average by batch.\n :param lang_output: [batch_size, max_len, hid_dim]\n :param visn_output: [batch_size, hid_dim]\n :param lang_mask: Int Tensor [batch_size, max_len], 1 for tokens, 0 for paddings.\n :return:\n \"\"\"\n batch_size, lang_len, dim = lang_output.shape\n assert batch_size % 2 == 0 and batch_size == visn_output.shape[0]\n\n # Expand the visn_output to match each word\n visn_output = visn_output.unsqueeze(1) # [b, 1, dim]\n\n # The score of negative pairs. Note that the diagonal is actually the positive score,\n # but it would be zero-graded in calculating the loss below.\n negative_scores = (lang_output.reshape(batch_size, 1, lang_len, dim) *\n visn_output.reshape(1, batch_size, 1, dim)).sum(-1) # [b(lang), b(visn), max_len]\n # negative_scores = torch.einsum('ikd,jd->ijk', lang_output, visn_output)\n\n max_neg_score, max_neg_idx = negative_scores.max(1) # [batch, max_len], the batch_idx of max-aligned img\n pos_idx = torch.arange(0, batch_size, dtype=torch.int64).to(lang_output.device)\n\n correct = (pos_idx.unsqueeze(1) == max_neg_idx)\n bool_lang_mask = lang_mask.type(correct.dtype)\n correct = correct * bool_lang_mask\n correct_num = correct.sum()\n\n accuracy = correct_num * 1. / bool_lang_mask.sum()\n\n return accuracy\n\n\ndef batchwise_recall(lang_output, visn_output, lang_mask, recalls=(1,)):\n \"\"\"\n Calculate the accuracy of contextual word retrieval, average by batch.\n :param lang_output: [batch_size, max_len, hid_dim]\n :param visn_output: [batch_size, hid_dim]\n :param lang_mask: Int Tensor [batch_size, max_len], 1 for tokens, 0 for paddings.\n :param recall: a list, which are the number of recalls to be evaluated.\n :return:\n \"\"\"\n batch_size, lang_len, dim = lang_output.shape\n assert batch_size % 2 == 0 and batch_size == visn_output.shape[0]\n\n # Expand the visn_output to match each word\n visn_output = visn_output.unsqueeze(1) # [b, 1, dim]\n\n # The score of positive pairs\n positive_score = (lang_output * visn_output).sum(-1) # [b, max_len]\n\n # The score of negative pairs. Note that the diagonal is actually the positive score,\n # but it would be zero-graded in calculating the loss below.\n negative_scores = (lang_output.reshape(batch_size, 1, lang_len, dim) *\n visn_output.reshape(1, batch_size, 1, dim)).sum(-1) # [b(lang), b(visn), max_len]\n # negative_scores = torch.einsum('ikd,jd->ijk', lang_output, visn_output)\n\n result = {}\n for recall in recalls:\n kthscore, kthidx = torch.kthvalue(negative_scores, batch_size - recall, dim=1) # [b, max_len]\n # print(kthscore.shape) print(positive_score.shape)\n correct = (positive_score >= kthscore) # [b, max_len]\n bool_lang_mask = lang_mask.type(correct.dtype)\n correct = correct * bool_lang_mask\n correct_num = correct.sum()\n # print(correct_num)\n # print(bool_lang_mask.sum())\n result[recall] = (correct_num * 1. 
/ bool_lang_mask.sum()).item()\n\n return result\n\ndef sent_batchwise_recall(lang_output, visn_output, lang_mask, recalls=(1,)):\n \"\"\"\n Calculate the recall of sentence-level retrieval, averaged over the batch.\n :param lang_output: [batch_size, hid_dim]\n :param visn_output: [batch_size, hid_dim]\n :param lang_mask: Int Tensor [batch_size, max_len], 1 for tokens, 0 for paddings.\n :param recalls: a list with the numbers of recalls to be evaluated.\n :return:\n \"\"\"\n # lang_output = lang_output[:,0,:].unsqueeze(1)\n lang_output = lang_output.unsqueeze(1)\n\n batch_size, lang_len, dim = lang_output.shape\n assert batch_size % 2 == 0 and batch_size == visn_output.shape[0]\n\n # Expand the visn_output to match each word\n visn_output = visn_output.unsqueeze(1) # [b, 1, dim]\n\n # The score of positive pairs\n positive_score = (lang_output * visn_output).sum(-1) # [b, max_len]\n\n # The score of negative pairs. Note that the diagonal is actually the positive score,\n # but it would be zero-graded in calculating the loss below.\n negative_scores = (lang_output.reshape(batch_size, 1, lang_len, dim) *\n visn_output.reshape(1, batch_size, 1, dim)).sum(-1) # [b(lang), b(visn), max_len]\n # negative_scores = torch.einsum('ikd,jd->ijk', lang_output, visn_output)\n\n result = {}\n for recall in recalls:\n kthscore, kthidx = torch.kthvalue(negative_scores, batch_size - recall, dim=1) # [b, max_len]\n # print(kthscore.shape) print(positive_score.shape)\n correct = (positive_score >= kthscore) # [b, max_len]\n correct_num = correct.sum()\n result[recall] = (correct_num * 1. / batch_size).item()\n\n return result"
]
| [
[
"torch.kthvalue",
"torch.arange"
]
]
|
Open-Source-Spatial-Clean-Cooking-Tool/OnSSTOVE | [
"0d723720a816dd528d24e813d392f1a566b402ed"
]
| [
"onsstove/raster.py"
]
| [
"import os\nimport glob\nimport numpy as np\nfrom math import sqrt\nfrom heapq import heapify, heappush, heappop\nimport rasterio\nimport rasterio.mask\nfrom rasterio.merge import merge\nfrom rasterio.warp import calculate_default_transform, reproject, Resampling\nfrom rasterio.fill import fillnodata\nfrom rasterio import features\nfrom rasterio.enums import Resampling as enumsResampling\nimport geopandas as gpd\nimport fiona\nimport shapely\nfrom osgeo import gdal, osr\nimport gzip\n\n\ndef align_raster(raster_1, raster_2, method='nearest', compression='DEFLATE'):\n with rasterio.open(raster_1) as src:\n raster_1_meta = src.meta\n with rasterio.open(raster_2) as src:\n raster_2 = src.read(1)\n raster_2_meta = src.meta\n\n out_meta = raster_1_meta.copy()\n out_meta.update({\n 'transform': raster_1_meta['transform'],\n 'crs': raster_1_meta['crs'],\n 'compress': compression,\n 'nodata': raster_2_meta['nodata']\n })\n destination = np.full((raster_1_meta['height'], raster_1_meta['width']), raster_2_meta['nodata'])\n reproject(\n source=raster_2,\n destination=destination,\n src_transform=raster_2_meta['transform'],\n src_crs=raster_2_meta['crs'],\n src_nodata=raster_2_meta['nodata'],\n dst_transform=raster_1_meta['transform'],\n dst_crs=raster_1_meta['crs'],\n resampling=Resampling[method])\n return destination, out_meta\n\n\ndef interpolate(raster, max_search_distance=10):\n with rasterio.open(raster) as src:\n profile = src.profile\n arr = src.read(1)\n arr_filled = fillnodata(arr, mask=src.read_masks(1), max_search_distance=max_search_distance)\n\n with rasterio.open(raster, 'w', **profile) as dest:\n dest.write_band(1, arr_filled)\n\n\ndef polygonize(raster, mask=None, transform=rasterio.transform.IDENTITY):\n # polygonize a raster array (or a raster file path) into a GeoDataFrame;\n # when a path is given, the file's own transform is used\n with rasterio.Env():\n if isinstance(raster, str):\n with rasterio.open(raster) as src:\n transform = src.transform\n raster = src.read(1)\n raster = raster.astype('float32')\n\n results = (\n {'properties': {'raster_val': v}, 'geometry': s}\n for i, (s, v)\n in enumerate(\n features.shapes(raster, mask=mask, transform=transform)))\n\n geoms = list(results)\n polygon = gpd.GeoDataFrame.from_features(geoms)\n return polygon\n\n\ndef proximity_raster(src_filename, dst_filename, values, compression):\n src_ds = gdal.Open(src_filename)\n srcband = src_ds.GetRasterBand(1)\n\n drv = gdal.GetDriverByName('GTiff')\n dst_ds = drv.Create(dst_filename,\n src_ds.RasterXSize, src_ds.RasterYSize, 1,\n gdal.GetDataTypeByName('Float32'),\n options=['COMPRESS={}'.format(compression)])\n\n dst_ds.SetGeoTransform(src_ds.GetGeoTransform())\n dst_ds.SetProjection(src_ds.GetProjectionRef())\n\n dstband = dst_ds.GetRasterBand(1)\n\n gdal.ComputeProximity(srcband, dstband,\n [\"VALUES={}\".format(','.join([str(i) for i in values])),\n \"DISTUNITS=GEO\"])\n srcband = None\n dstband = None\n src_ds = None\n dst_ds = None\n\n\ndef mask_raster(raster_path, mask_layer, output_file, nodata=0, compression='NONE',\n all_touched=False):\n if isinstance(mask_layer, str):\n with fiona.open(mask_layer, \"r\") as shapefile:\n shapes = [feature[\"geometry\"] for feature in shapefile]\n crs = 'EPSG:4326'\n else:\n shapes = [mask_layer.dissolve().geometry.loc[0]]\n crs = mask_layer.crs\n\n if '.gz' in raster_path:\n with gzip.open(raster_path) as gzip_infile:\n with rasterio.open(gzip_infile) as src:\n out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True, nodata=nodata,\n all_touched=all_touched)\n out_meta = src.meta\n else:\n with rasterio.open(raster_path) as src:\n out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True, 
nodata=nodata,\n all_touched=all_touched)\n out_meta = src.meta\n\n out_meta.update({\"driver\": \"GTiff\",\n \"height\": out_image.shape[1],\n \"width\": out_image.shape[2],\n \"transform\": out_transform,\n 'compress': compression,\n 'nodata': nodata,\n \"crs\": crs})\n\n with rasterio.open(output_file, \"w\", **out_meta) as dest:\n dest.write(out_image)\n\n\ndef reproject_raster(raster_path, dst_crs,\n cell_width=None, cell_height=None, method='nearest',\n compression='DEFLATE'):\n \"\"\"\n Resamples and/or reprojects a raster layer.\n \"\"\"\n with rasterio.open(raster_path) as src:\n # Calculates the new transform, width and height of \n # the reprojected layer\n transform, width, height = calculate_default_transform(\n src.crs, dst_crs,\n src.width,\n src.height,\n *src.bounds)\n # If a destination cell width and height were provided, then it \n # calculates the new boundaries, width, height and transform \n # depending on the new cell size.\n if cell_width and cell_height:\n bounds = rasterio.transform.array_bounds(height, width, transform)\n width = int(width * (transform[0] / cell_width))\n height = int(height * (abs(transform[4]) / cell_height))\n transform = rasterio.transform.from_origin(bounds[0], bounds[3],\n cell_width, cell_height)\n # Updates the metadata\n out_meta = src.meta.copy()\n out_meta.update({\n 'crs': dst_crs,\n 'transform': transform,\n 'width': width,\n 'height': height,\n 'compress': compression\n })\n # The layer is then reprojected/resampled\n # if output_file:\n # # if an output file path was provided, then the layer is saved\n # with rasterio.open(output_file, 'w', **out_meta) as dst:\n # for i in range(1, src.count + 1):\n # reproject(\n # source=rasterio.band(src, i),\n # destination=rasterio.band(dst, i),\n # src_transform=src.transform,\n # src_crs=src.crs,\n # dst_transform=transform,\n # dst_crs=dst_crs,\n # resampling=Resampling[method])\n # else:\n # If no output file is provided, then a numpy array and the \n # metadata are returned\n destination = np.full((height, width), src.nodata)\n reproject(\n source=rasterio.band(src, 1),\n destination=destination,\n src_transform=src.transform,\n src_crs=src.crs,\n dst_transform=transform,\n dst_crs=dst_crs,\n resampling=Resampling[method])\n return destination, out_meta\n\n\ndef sample_raster(path, gdf):\n with rasterio.open(path) as src:\n return [float(val) for val in src.sample([(x.coords.xy[0][0],\n x.coords.xy[1][0]) for x in\n gdf['geometry']])]\n\n\ndef friction_start_points(friction, in_points):\n if isinstance(in_points, str):\n start = gpd.read_file(in_points)\n else:\n start = in_points\n row_list = []\n col_list = []\n with rasterio.open(friction) as src:\n arr = src.read(1)\n for index, row in start.iterrows():\n rows, cols = rasterio.transform.rowcol(src.transform, row[\"geometry\"].x, row[\"geometry\"].y)\n arr[rows][cols] = 0\n\n out_meta = src.meta\n\n row_list.append(rows)\n col_list.append(cols)\n\n return row_list, col_list\n\n\ndef merge_rasters(files_path, dst_crs, output_file):\n files = glob.glob(files_path)\n src_files_to_mosaic = []\n\n for fp in files:\n src = rasterio.open(fp)\n src_files_to_mosaic.append(src)\n\n mosaic, out_trans = merge(src_files_to_mosaic)\n\n out_meta = src.meta.copy()\n out_meta.update({\"driver\": \"GTiff\",\n \"height\": mosaic[0].shape[0],\n \"width\": mosaic[0].shape[1],\n \"transform\": out_trans,\n \"crs\": dst_crs\n }\n )\n with rasterio.open(output_file, \"w\", **out_meta) as dest:\n dest.write(mosaic[0], indexes=1)\n\n\ndef rasterize(vector_layer, 
raster_base_layer, output_file=None, value=None,\n nodata=0, compression='NONE', dtype=rasterio.uint8,\n all_touched=True, save=False):\n vector_layer = vector_layer.rename(columns={'geometry': 'geom'})\n if value:\n dff = vector_layer[[value, 'geom']]\n shapes = ((g, v) for v, g in zip(dff[value].values, dff['geom'].values))\n else:\n shapes = ((g, 1) for g in vector_layer['geom'].values)\n\n with rasterio.open(raster_base_layer) as src:\n image = features.rasterize(\n shapes,\n out_shape=src.shape,\n transform=src.transform,\n all_touched=all_touched,\n fill=nodata)\n\n out_meta = src.meta\n\n out_meta.update({\"driver\": \"GTiff\",\n \"height\": src.height,\n \"width\": src.width,\n \"transform\": src.transform,\n 'compress': compression,\n 'dtype': dtype,\n \"crs\": src.crs,\n 'nodata': nodata})\n\n if save:\n with rasterio.open(output_file, 'w', **out_meta) as dst:\n dst.write(image, indexes=1)\n else:\n return image, out_meta\n\n\ndef normalize(raster=None, limit=None, output_file=None,\n inverse=False, meta=None, buffer=False):\n if isinstance(raster, str):\n with rasterio.open(raster) as src:\n raster = src.read(1)\n nodata = src.nodata\n meta = src.meta\n else:\n raster = raster.copy()\n nodata = meta['nodata']\n if callable(limit):\n raster[~limit(raster)] = np.nan\n\n raster[raster == nodata] = np.nan\n min_value = np.nanmin(raster)\n max_value = np.nanmax(raster)\n raster = (raster - min_value) / (max_value - min_value)\n if inverse:\n if not buffer:\n raster[np.isnan(raster)] = 1\n raster[raster < 0] = np.nan\n raster = 1 - raster\n else:\n if not buffer:\n raster[np.isnan(raster)] = 0\n raster[raster < 0] = np.nan\n\n meta.update(nodata=np.nan, dtype='float32')\n\n if output_file:\n with rasterio.open(output_file, \"w\", **meta) as dest:\n dest.write(raster.astype('float32'), indexes=1)\n else:\n return raster, meta\n\n\ndef resample(raster_path, height, width, method='bilinear'):\n with rasterio.open(raster_path) as src:\n # resample data to target shape\n data = src.read(\n out_shape=(\n src.count,\n int(src.height * (abs(src.transform[4]) / height)),\n int(src.width * (abs(src.transform[0]) / width))\n ),\n resampling=enumsResampling[method]\n )\n\n # scale image transform\n transform = src.transform * src.transform.scale(\n (src.width / data.shape[-1]),\n (src.height / data.shape[-2])\n )\n return data, transform\n\n\n# def travel_time(friction, starts):\n# friction *= 1000 / 60\n# friction[np.isnan(friction)] = float('inf')\n# mcp = MCP_Geometric(friction, fully_connected=True)\n# row, col = friction_start_points(friction, starts)\n# pointlist = np.column_stack((row, col))\n#\n# cumulative_costs, traceback = mcp.find_costs(starts=pointlist)\n# cumulative_costs[np.where(cumulative_costs == float('inf'))] = np.nan\n#\n# return cumulative_costs\n"
]
| [
[
"numpy.full",
"numpy.nanmax",
"numpy.nanmin",
"numpy.isnan"
]
]
|
montefiore-ai/alan-boilerplate | [
"abc1215551912a901d0793f9be76eb58d61ec98b"
]
| [
"experiments/experiment-example/train.py"
]
| [
"import argparse\nimport numpy as np\nimport torch\nimport torch.multiprocessing as mp\nimport time\n\nfrom torch.utils.data import TensorDataset\n\ncriterion = None\ndataset_training = None\ndataset_validation = None\ndevice = None\nmodel = None\nlearning_rate_scheduler = None\noptimizer = None\ntraining_losses = []\nvalidation_losses = []\n\n\n\ndef main(arguments):\n global criterion\n global device\n global model\n\n # Check if a GPU is available.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # Allocate the requested model.\n allocate_model(arguments)\n # Split the model over the GPU's when multiple machines are available.\n multi_gpu = torch.cuda.device_count() > 1 and arguments.gpus > 1\n if multi_gpu:\n model = torch.nn.DataParallel(model)\n model.to(device)\n # Allocate the criterion, optimizers and the datasets.\n criterion = torch.nn.BCELoss()\n allocate_optimizer(arguments, model)\n allocate_learning_rate_scheduler(arguments, optimizer)\n allocate_dataset_training(arguments)\n allocate_dataset_validation(arguments)\n # Start the training and validation procedure.\n for epoch in range(arguments.epochs):\n train(arguments)\n validate(arguments)\n learning_rate_scheduler.step()\n # Model saving for multi-GPU needs to be handled a bit differently.\n if multi_gpu:\n state_dict = model.module.state_dict()\n else:\n state_dict = model.state_dict()\n torch.save(state_dict, arguments.out + \"/model.th\")\n np.save(arguments.out + \"/loss_validation\", np.array(validation_losses))\n np.save(arguments.out + \"/loss_training\", np.array(training_losses))\n\n\ndef train(arguments):\n batch_size = arguments.batch_size\n workers = arguments.workers\n running_loss = 0.0\n loader = torch.utils.data.DataLoader(\n dataset_training, batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=arguments.pin_memory, drop_last=True)\n num_iterations = len(loader)\n loader = iter(loader)\n ones = torch.ones(batch_size, 1).to(device)\n zeros = torch.zeros(batch_size, 1).to(device)\n model.train()\n for batch_index in range(num_iterations):\n x, y_target = next(loader)\n x = x.to(device, non_blocking=arguments.pin_memory)\n y_target = y_target.to(device, non_blocking=arguments.pin_memory)\n y = model(x)\n loss = criterion(y, y_target)\n optimizer.zero_grad()\n loss.backward()\n running_loss += loss.item()\n optimizer.step()\n running_loss /= num_iterations\n add_training_loss(running_loss)\n del loader\n\n\ndef add_training_loss(loss):\n training_losses.append(loss)\n\n\ndef add_validation_loss(loss):\n validation_losses.append(loss)\n\n\ndef validate(arguments):\n batch_size = arguments.batch_size\n workers = arguments.workers\n running_loss = 0.0\n loader = torch.utils.data.DataLoader(\n dataset_validation, batch_size=batch_size, shuffle=True,\n num_workers=workers, pin_memory=arguments.pin_memory, drop_last=True)\n num_iterations = len(loader)\n loader = iter(loader)\n ones = torch.ones(batch_size, 1).to(device)\n zeros = torch.zeros(batch_size, 1).to(device)\n model.eval()\n for batch_index in range(num_iterations):\n x, y_target = next(loader)\n x = x.to(device, non_blocking=arguments.pin_memory)\n y_target = y_target.to(device, non_blocking=arguments.pin_memory)\n y = model(x)\n loss = criterion(y, y_target)\n running_loss += loss.item()\n running_loss /= num_iterations\n add_validation_loss(running_loss)\n del loader\n\n\ndef allocate_model(arguments):\n global model\n\n hidden = 128\n model = torch.nn.Sequential(\n torch.nn.Linear(5, hidden),\n 
torch.nn.SELU(),\n torch.nn.Linear(hidden, hidden),\n torch.nn.SELU(),\n torch.nn.Linear(hidden, hidden),\n torch.nn.SELU(),\n torch.nn.Linear(hidden, 1),\n torch.nn.Sigmoid())\n\n\ndef allocate_optimizer(arguments, model):\n global optimizer\n\n optimizer = torch.optim.Adam(model.parameters(), amsgrad=True, lr=arguments.lr)\n\n\ndef allocate_learning_rate_scheduler(arguments, optimizer):\n global learning_rate_scheduler\n\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=25, gamma=0.5)\n learning_rate_scheduler = scheduler\n\n\ndef allocate_dataset_training(arguments):\n global dataset_training\n\n x = torch.randn(10000, 5)\n y = torch.FloatTensor(10000, 1).random_(0, 2)\n dataset_training = TensorDataset(x, y)\n # In practice we would be loading data from $DATADIR.\n\n\ndef allocate_dataset_validation(arguments):\n global dataset_validation\n\n x = torch.randn(1000, 5)\n y = torch.FloatTensor(1000, 1).random_(0, 2)\n dataset_validation = TensorDataset(x, y)\n # In practice we would be loading data from $DATADIR.\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\"Training\")\n parser.add_argument(\"--batch-size\", type=int, default=256, help=\"Batch-size (default: 256).\")\n parser.add_argument(\"--epochs\", type=int, default=1, help=\"Number of data epochs (default: 1).\")\n parser.add_argument(\"--gpus\", type=int, default=1, help=\"Number of GPU's to use in the case multiple accelerators are available (default: 1).'\")\n parser.add_argument(\"--lr\", type=float, default=0.001, help=\"Initial learning rate of the optimizer (default: 0.001).\")\n parser.add_argument(\"--out\", type=str, default=None, help=\"Output directory of the trained model (default: None).\")\n parser.add_argument(\"--pin-memory\", type=int, default=1, help=\"Pin the memory of the data loaders (default: true).\")\n parser.add_argument(\"--workers\", type=int, default=4, help=\"Number of asynchronous workers for data loading (default: 4).\")\n arguments, _ = parser.parse_known_args()\n # Check if an output path has been specified.\n if arguments.out is None:\n raise ValueError(\"Please specify an output path.\")\n arguments.pin_memory = (arguments.pin_memory == 1)\n\n return arguments\n\nif __name__ == \"__main__\":\n arguments = parse_arguments()\n main(arguments)\n # Clean up multiprocessing (/tmp madness).\n manager = mp.Manager()\n manager.shutdown()\n del manager\n"
]
| [
[
"torch.nn.Linear",
"torch.zeros",
"numpy.array",
"torch.nn.SELU",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.Sigmoid",
"torch.save",
"torch.FloatTensor",
"torch.cuda.device_count",
"torch.ones",
"torch.multiprocessing.Manager",
"torch.cuda.is_available",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.randn",
"torch.nn.DataParallel"
]
]
|
sconlyshootery/DeepV2D | [
"a669dce1eef74648aec71f4282145b7cd0788d82"
]
| [
"deepv2d/geometry/projective_ops.py"
]
| [
"import numpy as np\nimport tensorflow as tf\nfrom deepv2d.utils.einsum import einsum\n\n\nMIN_DEPTH = 0.1\n\ndef coords_grid(shape, homogeneous=True):\n \"\"\" grid of pixel coordinates \"\"\"\n xx, yy = tf.meshgrid(tf.range(shape[-1]), tf.range(shape[-2]))\n\n xx = tf.cast(xx, tf.float32)\n yy = tf.cast(yy, tf.float32)\n\n if homogeneous:\n coords = tf.stack([xx, yy, tf.ones_like(xx)], axis=-1)\n else:\n coords = tf.stack([xx, yy], axis=-1)\n\n new_shape = (tf.ones_like(shape[:-2]), shape[-2:], [-1])\n new_shape = tf.concat(new_shape, axis=0)\n coords = tf.reshape(coords, new_shape)\n\n tile = tf.concat((shape[:-2], [1,1,1]), axis=0)\n coords = tf.tile(coords, tile)\n return coords\n\n\ndef extract_and_reshape_intrinsics(intrinsics, shape=None):\n \"\"\" Extracts (fx, fy, cx, cy) from intrinsics matrix \"\"\"\n\n fx = intrinsics[:, 0, 0]\n fy = intrinsics[:, 1, 1]\n cx = intrinsics[:, 0, 2]\n cy = intrinsics[:, 1, 2]\n\n if shape is not None:\n batch = tf.shape(fx)[:1]\n fillr = tf.ones_like(shape[1:])\n k_shape = tf.concat([batch, fillr], axis=0)\n\n fx = tf.reshape(fx, k_shape)\n fy = tf.reshape(fy, k_shape)\n cx = tf.reshape(cx, k_shape)\n cy = tf.reshape(cy, k_shape)\n\n return (fx, fy, cx, cy)\n\n\ndef backproject(depth, intrinsics, jacobian=False):\n \"\"\" backproject depth map to point cloud \"\"\"\n\n coords = coords_grid(tf.shape(depth), homogeneous=True)\n x, y, _ = tf.unstack(coords, num=3, axis=-1)\n\n x_shape = tf.shape(x)\n fx, fy, cx, cy = extract_and_reshape_intrinsics(intrinsics, x_shape)\n\n Z = tf.identity(depth)\n X = Z * (x - cx) / fx\n Y = Z * (y - cy) / fy\n points = tf.stack([X, Y, Z], axis=-1)\n\n if jacobian:\n o = tf.zeros_like(Z) # used to fill in zeros\n\n # jacobian w.r.t (fx, fy)\n jacobian_intrinsics = tf.stack([\n tf.stack([-X / fx], axis=-1),\n tf.stack([-Y / fy], axis=-1),\n tf.stack([o], axis=-1),\n tf.stack([o], axis=-1)], axis=-2)\n\n return points, jacobian_intrinsics\n \n return points\n\n\ndef project(points, intrinsics, jacobian=False):\n \n \"\"\" project point cloud onto image \"\"\"\n X, Y, Z = tf.unstack(points, num=3, axis=-1)\n Z = tf.maximum(Z, MIN_DEPTH)\n\n x_shape = tf.shape(X)\n fx, fy, cx, cy = extract_and_reshape_intrinsics(intrinsics, x_shape)\n\n x = fx * (X / Z) + cx\n y = fy * (Y / Z) + cy\n coords = tf.stack([x, y], axis=-1)\n\n if jacobian:\n o = tf.zeros_like(x) # used to fill in zeros\n zinv1 = tf.where(Z <= MIN_DEPTH+.01, tf.zeros_like(Z), 1.0 / Z)\n zinv2 = tf.where(Z <= MIN_DEPTH+.01, tf.zeros_like(Z), 1.0 / Z**2)\n\n # jacobian w.r.t (X, Y, Z)\n jacobian_points = tf.stack([\n tf.stack([fx * zinv1, o, -fx * X * zinv2], axis=-1),\n tf.stack([o, fy * zinv1, -fy * Y * zinv2], axis=-1)], axis=-2)\n\n # jacobian w.r.t (fx, fy)\n jacobian_intrinsics = tf.stack([\n tf.stack([X * zinv1], axis=-1),\n tf.stack([Y * zinv1], axis=-1),], axis=-2)\n\n return coords, (jacobian_points, jacobian_intrinsics)\n\n return coords\n"
]
| [
[
"tensorflow.shape",
"tensorflow.range",
"tensorflow.concat",
"tensorflow.ones_like",
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.identity",
"tensorflow.tile",
"tensorflow.stack",
"tensorflow.maximum",
"tensorflow.unstack",
"tensorflow.cast"
]
]
|
cnc-ood/cnc_ood | [
"a149fa22ea32e14e977c893f2ce524ad8e770cf4"
]
| [
"utils/test_utils.py"
]
| [
"import logging\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\n\nimport sklearn.metrics as sk\n\ndata_path_dict = {\n \"imagenet\" : \"/nvme/scratch/ankita/Imagenet/ILSVRC/Data/CLS-LOC/val\",\n \"inaturalist\" : \"/nvme/scratch/jatin/Data/iNaturalist\",\n \"isun\" : \"/nvme/scratch/jatin/Data/SUN\"\n}\n\ndef iterate_data_msp(data_loader, model):\n confs = []\n m = torch.nn.Softmax(dim=-1).cuda()\n bar = tqdm(enumerate(data_loader), total=len(data_loader))\n for b, (x, y) in bar:\n with torch.no_grad():\n x = x.cuda()\n # compute output, measure accuracy and record loss.\n logits = model(x)\n\n conf, _ = torch.max(m(logits), dim=-1)\n confs.extend(conf.data.cpu().numpy())\n return np.array(confs)\n\ndef iterate_data_cnc(data_loader, model):\n confs = []\n m = torch.nn.Softmax(dim=-1).cuda()\n bar = tqdm(enumerate(data_loader), total=len(data_loader))\n for b, (x, y) in bar:\n with torch.no_grad():\n x = x.cuda()\n # compute output, measure accuracy and record loss.\n logits = model(x)\n\n conf = torch.softmax(logits, dim=-1)[:,-1]\n confs.extend(conf.data.cpu().numpy())\n return np.array(confs)\n\ndef stable_cumsum(arr, rtol=1e-05, atol=1e-08):\n \"\"\"Use high precision for cumsum and check that final value matches sum\n Parameters\n ----------\n arr : array-like\n To be cumulatively summed as flat\n rtol : float\n Relative tolerance, see ``np.allclose``\n atol : float\n Absolute tolerance, see ``np.allclose``\n \"\"\"\n out = np.cumsum(arr, dtype=np.float64)\n expected = np.sum(arr, dtype=np.float64)\n if not np.allclose(out[-1], expected, rtol=rtol, atol=atol):\n raise RuntimeError('cumsum was found to be unstable: '\n 'its last element does not correspond to sum')\n return out\n\ndef fpr_and_fdr_at_recall(y_true, y_score, recall_level, pos_label=1.):\n # make y_true a boolean vector\n y_true = (y_true == pos_label)\n\n # sort scores and corresponding truth values\n desc_score_indices = np.argsort(y_score, kind=\"mergesort\")[::-1]\n y_score = y_score[desc_score_indices]\n y_true = y_true[desc_score_indices]\n\n # y_score typically has many tied values. Here we extract\n # the indices associated with the distinct values. 
We also\n # concatenate a value for the end of the curve.\n distinct_value_indices = np.where(np.diff(y_score))[0]\n threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]\n\n # accumulate the true positives with decreasing threshold\n tps = stable_cumsum(y_true)[threshold_idxs]\n fps = 1 + threshold_idxs - tps # add one because of zero-based indexing\n\n thresholds = y_score[threshold_idxs]\n\n recall = tps / tps[-1]\n\n last_ind = tps.searchsorted(tps[-1])\n sl = slice(last_ind, None, -1) # [last_ind::-1]\n recall, fps, tps, thresholds = np.r_[recall[sl], 1], np.r_[fps[sl], 0], np.r_[tps[sl], 0], thresholds[sl]\n\n cutoff = np.argmin(np.abs(recall - recall_level))\n\n return fps[cutoff] / (np.sum(np.logical_not(y_true))) # , fps[cutoff]/(fps[cutoff] + tps[cutoff])\n\ndef get_measures(in_examples, out_examples):\n num_in = in_examples.shape[0]\n num_out = out_examples.shape[0]\n\n logging.info(\"# in example is: {}\".format(num_in))\n logging.info(\"# out example is: {}\".format(num_out))\n\n labels = np.zeros(num_in + num_out, dtype=np.int32)\n labels[:num_in] += 1\n\n examples = np.squeeze(np.vstack((in_examples, out_examples)))\n aupr_in = sk.average_precision_score(labels, examples)\n auroc = sk.roc_auc_score(labels, examples)\n\n recall_level = 0.95\n fpr = fpr_and_fdr_at_recall(labels, examples, recall_level)\n\n labels_rev = np.zeros(num_in + num_out, dtype=np.int32)\n labels_rev[num_in:] += 1\n examples = np.squeeze(-np.vstack((in_examples, out_examples)))\n aupr_out = sk.average_precision_score(labels_rev, examples)\n return auroc, aupr_in, aupr_out, fpr\n"
]
| [
[
"numpy.logical_not",
"numpy.array",
"numpy.zeros",
"torch.nn.Softmax",
"numpy.sum",
"torch.no_grad",
"torch.softmax",
"numpy.diff",
"numpy.allclose",
"sklearn.metrics.average_precision_score",
"numpy.argsort",
"numpy.cumsum",
"numpy.abs",
"sklearn.metrics.roc_auc_score",
"numpy.vstack"
]
]
|
JoonHong-Kim/KoDALLE | [
"0011ac21bfe8aa840828feb2d30e9e8948344c9a"
]
| [
"dalle/models.py"
]
| [
"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom einops import repeat\nfrom axial_positional_embedding import AxialPositionalEmbedding\nfrom einops import rearrange\n\nfrom dalle_pytorch import DiscreteVAE\nfrom dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE\n\nfrom dalle_pytorch.transformer import Transformer, DivideMax\nfrom utils import *\n\n\nclass DALLE_Klue_Roberta(nn.Module):\n def __init__(\n self,\n *,\n # dim,\n vae,\n num_text_tokens=10000,\n text_seq_len=256,\n depth,\n heads=8,\n dim_head=64,\n reversible=False,\n attn_dropout=0.0,\n ff_dropout=0,\n sparse_attn=False,\n attn_types=None,\n loss_img_weight=7,\n stable=False,\n sandwich_norm=False,\n shift_tokens=True,\n rotary_emb=False,\n wte_dir=None,\n wpe_dir=None,\n ):\n super().__init__()\n assert isinstance(\n vae, (DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE)\n ), \"vae must be an instance of DiscreteVAE\"\n image_size = vae.image_size\n num_image_tokens = vae.num_tokens\n image_fmap_size = vae.image_size // (2 ** vae.num_layers)\n image_seq_len = image_fmap_size ** 2\n\n num_text_tokens = (\n num_text_tokens + text_seq_len\n ) # reserve unique padding tokens for each position (text seq len)\n\n self.text_emb = torch.load(wte_dir)\n dim = self.text_emb.weight.shape[1]\n self.image_emb = nn.Embedding(num_image_tokens, dim)\n print(dim, image_fmap_size, image_fmap_size)\n self.text_pos_emb = (\n torch.load(wpe_dir) if not rotary_emb else always(0)\n ) # +1 for <bos>\n self.image_pos_emb = (\n AxialPositionalEmbedding(\n dim, axial_shape=(image_fmap_size, image_fmap_size)\n )\n if not rotary_emb\n else always(0)\n )\n\n self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss\n self.num_image_tokens = num_image_tokens\n\n self.text_seq_len = text_seq_len\n self.image_seq_len = image_seq_len\n\n seq_len = text_seq_len + image_seq_len\n total_tokens = num_text_tokens + num_image_tokens\n self.total_tokens = total_tokens\n self.total_seq_len = seq_len\n\n self.vae = vae\n set_requires_grad(self.vae, False) # freeze VAE from being trained\n\n self.transformer = Transformer(\n dim=dim,\n causal=True,\n seq_len=seq_len,\n depth=depth,\n heads=heads,\n dim_head=dim_head,\n reversible=reversible,\n attn_dropout=attn_dropout,\n ff_dropout=ff_dropout,\n attn_types=attn_types,\n image_fmap_size=image_fmap_size,\n sparse_attn=sparse_attn,\n stable=stable,\n sandwich_norm=sandwich_norm,\n shift_tokens=shift_tokens,\n rotary_emb=rotary_emb,\n )\n\n self.stable = stable\n\n if stable:\n self.norm_by_max = DivideMax(dim=-1)\n\n self.to_logits = nn.Sequential(\n nn.LayerNorm(dim),\n nn.Linear(dim, self.total_tokens),\n )\n\n seq_range = torch.arange(seq_len)\n logits_range = torch.arange(total_tokens)\n\n seq_range = rearrange(seq_range, \"n -> () n ()\")\n logits_range = rearrange(logits_range, \"d -> () () d\")\n\n logits_mask = (\n (seq_range >= text_seq_len) & (logits_range < num_text_tokens)\n ) | ((seq_range < text_seq_len) & (logits_range >= num_text_tokens))\n\n self.register_buffer(\"logits_mask\", logits_mask, persistent=False)\n self.loss_img_weight = loss_img_weight\n\n @torch.no_grad()\n @eval_decorator\n def generate_images(\n self,\n encoded_text,\n *,\n clip=None,\n filter_thres=0.5,\n temperature=1.0,\n img=None,\n num_init_img_tokens=None,\n img_num=1,\n ):\n text = encoded_text['input_ids']\n text=repeat(text,'() n -> b n',b=img_num)\n mask=encoded_text['attention_mask']\n vae, text_seq_len, image_seq_len, num_text_tokens = (\n 
self.vae,\n self.text_seq_len,\n self.image_seq_len,\n self.num_text_tokens,\n )\n total_len = text_seq_len + image_seq_len\n\n text = text[:, :text_seq_len] # make sure text is within bounds\n out = text\n \n if exists(img):\n image_size = vae.image_size\n assert (\n img.shape[1] == 3\n and img.shape[2] == image_size\n and img.shape[3] == image_size\n ), f\"input image must have the correct image size {image_size}\"\n\n indices = vae.get_codebook_indices(img)\n num_img_tokens = default(\n num_init_img_tokens, int(0.4375 * image_seq_len)\n ) # OpenAI used 14 * 32 initial tokens to prime\n assert (\n num_img_tokens < image_seq_len\n ), \"number of initial image tokens for priming must be less than the total image token sequence length\"\n\n indices = indices[:, :num_img_tokens]\n out = torch.cat((out, indices), dim=-1)\n\n for cur_len in range(out.shape[1], total_len):\n is_image = cur_len >= text_seq_len\n\n text, image = out[:, :text_seq_len], out[:, text_seq_len:]\n\n logits = self(text, image, mask=mask)[:, -1, :]\n\n filtered_logits = top_k(logits, thres=filter_thres)\n probs = F.softmax(filtered_logits / temperature, dim=-1)\n sample = torch.multinomial(probs, 1)\n\n sample -= (\n num_text_tokens if is_image else 0\n ) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens\n out = torch.cat((out, sample), dim=-1)\n\n if out.shape[1] <= text_seq_len:\n mask = F.pad(mask, (0, 1), value=True)\n\n\n\n img_seq = out[:, -image_seq_len:]\n images = vae.decode(img_seq)\n\n if exists(clip):\n #encoded_text = encoded_text.to(\"cuda\")\n text_embeds, image_embeds = clip(encoded_text, images)\n logits = text_embeds @ image_embeds.T\n return images, logits\n\n return images\n\n def forward(self, text, image=None, mask=None, return_loss=False):\n assert (\n text.shape[-1] == self.text_seq_len\n ), f\"the length {text.shape[-1]} of the text tokens you passed in does not have the correct length ({self.text_seq_len})\"\n device, total_seq_len = text.device, self.total_seq_len\n\n # make sure padding in text tokens get unique padding token id\n text = F.pad(text, (1, 0), value=0)\n\n tokens = self.text_emb(text)\n tokens += self.text_pos_emb(torch.arange(text.shape[1], device=device))\n\n seq_len = tokens.shape[1]\n\n if exists(image) and not is_empty(image):\n is_raw_image = len(image.shape) == 4\n\n if is_raw_image:\n image_size = self.vae.image_size\n assert tuple(image.shape[1:]) == (\n 3,\n image_size,\n image_size,\n ), f\"invalid image of dimensions {image.shape} passed in during training\"\n\n image = self.vae.get_codebook_indices(image)\n image_len = image.shape[1]\n image_emb = self.image_emb(image)\n image_emb += self.image_pos_emb(image_emb)\n\n tokens = torch.cat((tokens, image_emb), dim=1)\n\n seq_len += image_len\n\n # when training, if the length exceeds the total text + image length\n # remove the last token, since it needs not to be trained\n\n if tokens.shape[1] > total_seq_len:\n seq_len -= 1\n tokens = tokens[:, :-1]\n\n if self.stable:\n alpha = 0.1\n tokens = tokens * alpha + tokens.detach() * (1 - alpha)\n\n out = self.transformer(tokens)\n\n if self.stable:\n out = self.norm_by_max(out)\n\n logits = self.to_logits(out)\n\n # mask logits to make sure text predicts text (except last token), and image predicts image\n\n logits_mask = self.logits_mask[:, :seq_len]\n max_neg_value = -torch.finfo(logits.dtype).max\n logits.masked_fill_(logits_mask, max_neg_value)\n\n if not return_loss:\n return logits\n\n assert exists(image), 
\"when training, image must be supplied\"\n\n offsetted_image = image + self.num_text_tokens\n labels = torch.cat((text[:, 1:], offsetted_image), dim=1)\n\n logits = rearrange(logits, \"b n c -> b c n\")\n\n loss_text = F.cross_entropy(\n logits[:, :, : self.text_seq_len], labels[:, : self.text_seq_len]\n )\n loss_img = F.cross_entropy(\n logits[:, :, self.text_seq_len :], labels[:, self.text_seq_len :]\n )\n\n loss = (loss_text + self.loss_img_weight * loss_img) / (\n self.loss_img_weight + 1\n )\n return loss\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.LayerNorm",
"torch.arange",
"torch.finfo",
"torch.no_grad",
"torch.multinomial",
"torch.nn.functional.cross_entropy",
"torch.load",
"torch.nn.functional.softmax",
"torch.nn.functional.pad",
"torch.nn.Embedding"
]
]
|
FACEGOOD/Audio2BlendshapeWeights | [
"9ec7df27bdc09f8b84151335af94d3666971ddda"
]
| [
"code/train/step1_LPC.py"
]
| [
"# Copyright 2021 The FACEGOOD Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# encoding=utf-8\nimport os\nimport numpy as np\nimport scipy.io.wavfile as wavfile\n# from audiolazy.lazy_lpc import lpc\nfrom ctypes import *\n# import time\n\nproject_dir = r'D:\\voice2face\\shirley_1119'\ndll = cdll.LoadLibrary(os.path.join(project_dir,'LPC.dll'))\n# wav_path = os.path.join(project_dir,'wav','1015_2_01.wav') # audio path\n# save_path = os.path.join(project_dir,'lpc','lpc_1015_2_01.npy') # save path for the LPC-processed array\nname_list = ['1_01','1_02','2_01','2_02','2_03','2_04','2_05','2_06','2_07','2_08','2_09','2_10',\n'3_01','3_02','3_03','3_04','3_05','3_06','3_07','3_08','3_09','3_10','3_11','3_12','3_13','3_14','3_15','3_16']\nwav_path = [os.path.join(project_dir,'wav','1114_'+ file + '.wav') for file in name_list]\nsave_path = [os.path.join(project_dir,'lpc','1114_'+ file + '.npy') for file in name_list]\n\ndef audioProcess(wav_path):\n\t# read the wav file into a list\n\trate,signal = wavfile.read(wav_path) # rate: sampling rate\n\t# print(len(signal))\n\t# print('signal:',signal[1000:2000])\n\tprint('rate: ',rate) # sampling rate\n\n\tframes_per_second = 30 # video fps\n\tchunks_length = 260 # audio chunking, 520 ms window in total\n\taudio_frameNum = int(len(signal)/rate*frames_per_second) # number of video frames corresponding to the audio\n\tprint('audio_frameNum: ',audio_frameNum)\n\t# pad 260 ms of audio at both the beginning and the end\n\ta = np.zeros(chunks_length*rate//1000, dtype=np.int16)\n\tsignal = np.hstack((a,signal,a))\n\n\t# signal = signal / (2.**15)\n\tframes_step = 1000.0 / frames_per_second # duration of each video frame, 33.3333 ms\n\trate_kHz = int(rate / 1000) # sampling rate: 48 kHz\n\n\t# split the audio\n\taudio_frames = [signal[int(i*frames_step*rate_kHz) : int((i*frames_step + chunks_length * 2)*rate_kHz)] for i in range(audio_frameNum)]\n\tinputData_array = np.zeros(shape=(1,32,64)) # create an empty 3D array; this (1*32*64) placeholder is removed at the end\n\n\tfor i in range(len(audio_frames)):\n\t\tprint(i)\n\t\taudio_frame = audio_frames[i] # one audio chunk, 8320 samples\n\n\t\toverlap_frames_apart = 0.008\n\t\toverlap = int(rate*overlap_frames_apart) #128 samples\n\t\tframeSize = int(rate*overlap_frames_apart*2) #256 samples\n\t\tnumberOfFrames = 64\n\n\t\tframes = np.ndarray((numberOfFrames,frameSize))# initiate a 2D array with numberOfFrames rows and frame size columns\n\t\tfor k in range(0,numberOfFrames):\n\t\t\tfor j in range(0,frameSize):\n\t\t\t\tif((k*overlap+j)<len(audio_frame)):\n\t\t\t\t\tframes[k][j]=audio_frame[k*overlap+j]\n\t\t\t\telse:\n\t\t\t\t\tframes[k][j]=0\n\n\t\tframes*=np.hanning(frameSize)\n\t\tframes_lpc_features = []\n\n\t\t# a = (c_double*frameSize)()\n\t\tb = (c_double*32)()\n\t\t\n\t\tfor k in range(0,numberOfFrames):\n\t\t\t# temp_list = frames[k]\n\t\t\ta = (c_double*frameSize)(*frames[k])\n\t\t\t# a = (c_double*len(frames[k]))()\n\t\t\t# b = (c_double*32)()\n\t\t\t# LPC(float *in, int size, int order, float *out)\n\t\t\tdll.LPC(pointer(a),frameSize,32,pointer(b));\n\t\t\tframes_lpc_features.append(list(b))\n\n\t\timage_temp1 = np.array(frames_lpc_features) # list to array\n\t\timage_temp2 = image_temp1.transpose() # transpose the array\n\t\timage_temp3 = np.expand_dims(image_temp2,axis=0) # add a leading dimension\n\t\tinputData_array = np.concatenate((inputData_array, image_temp3), axis = 0) # concatenate the arrays\n\n\t# remove the first (placeholder) row\n\tinputData_array = inputData_array[1:]\n\n\t# expand to 4 dims: (,32,64,1)\n\tinputData_array=np.expand_dims(inputData_array,axis=3)\n\tprint(inputData_array.shape)\n\n\treturn inputData_array\n\n\nif __name__=='__main__':\n\tfor i in range(len(name_list)):\n\t\tinputData_array = audioProcess(wav_path[i])\n\t\tnp.save(save_path[i],inputData_array)\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"scipy.io.wavfile.read",
"numpy.zeros",
"numpy.hanning",
"numpy.save",
"numpy.ndarray",
"numpy.hstack",
"numpy.expand_dims"
]
]
|
MarShaikh/ivy | [
"e6bf8c1ea2af409fe61d16bc5874b5e21dc5a333",
"e6bf8c1ea2af409fe61d16bc5874b5e21dc5a333"
]
| [
"ivy/functional/backends/tensorflow/manipulation.py",
"ivy_tests/test_ivy/test_functional/test_core/test_sorting.py"
]
| [
"# global\nimport math\nimport tensorflow as tf\nfrom numbers import Number\nfrom typing import Union, Tuple, Optional, List\nfrom tensorflow.python.types.core import Tensor\n\n# local\nimport ivy\n\n\ndef roll(\n x: Tensor,\n shift: Union[int, Tuple[int, ...]],\n axis: Optional[Union[int, Tuple[int, ...]]] = None,\n out: Optional[Tensor] = None,\n) -> Tensor:\n if axis is None:\n originalShape = x.shape\n axis = 0\n x = tf.reshape(x, [-1])\n roll = tf.roll(x, shift, axis)\n ret = tf.reshape(roll, originalShape)\n else:\n ret = tf.roll(x, shift, axis)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef squeeze(\n x: Tensor, axis: Union[int, Tuple[int], List[int]], out: Optional[Tensor] = None\n) -> Tensor:\n if isinstance(axis, int):\n if x.shape[axis] > 1:\n raise ValueError(\n \"Expected dimension of size 1, but found dimension size {}\".format(\n x.shape[axis]\n )\n )\n ret = tf.squeeze(x, axis)\n else:\n if isinstance(axis, tuple):\n axis = list(axis)\n normalise_axis = [\n (len(x.shape) - abs(element)) if element < 0 else element\n for element in axis\n ]\n normalise_axis.sort()\n axis_updated_after_squeeze = [\n dim - key for (key, dim) in enumerate(normalise_axis)\n ]\n for i in axis_updated_after_squeeze:\n if x.shape[i] > 1:\n raise ValueError(\n \"Expected dimension of size 1, but found dimension size {}\".format(\n x.shape[i]\n )\n )\n else:\n x = tf.squeeze(x, i)\n ret = x\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef flip(\n x: Tensor,\n axis: Optional[Union[int, Tuple[int], List[int]]] = None,\n out: Optional[Tensor] = None,\n) -> Tensor:\n num_dims = len(x.shape)\n if not num_dims:\n ret = x\n else:\n if axis is None:\n new_axis = list(range(num_dims))\n else:\n new_axis = axis\n if type(new_axis) is int:\n new_axis = [new_axis]\n else:\n new_axis = new_axis\n new_axis = [item + num_dims if item < 0 else item for item in new_axis]\n ret = tf.reverse(x, new_axis)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef expand_dims(x: Tensor, axis: int = 0, out: Optional[Tensor] = None) -> Tensor:\n try:\n ret = tf.expand_dims(x, axis)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n except tf.errors.InvalidArgumentError as error:\n raise IndexError(error)\n\n\ndef permute_dims(\n x: Tensor, axes: Tuple[int, ...], out: Optional[Tensor] = None\n) -> Tensor:\n ret = tf.transpose(x, perm=axes)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef stack(\n x: Union[Tuple[Tensor], List[Tensor]],\n axis: Optional[int] = 0,\n out: Optional[Tensor] = None,\n) -> Tensor:\n ret = tf.experimental.numpy.stack(x, axis)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef reshape(\n x: Tensor,\n shape: Tuple[int, ...],\n copy: Optional[bool] = None,\n out: Optional[Tensor] = None,\n) -> Tensor:\n ret = tf.reshape(x, shape)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef concat(xs: List[Tensor], axis: int = 0, out: Optional[Tensor] = None) -> Tensor:\n is_tuple = type(xs) is tuple\n is_axis_none = axis is None\n if is_tuple:\n xs = list(xs)\n highest_dtype = xs[0].dtype\n for i in xs:\n highest_dtype = tf.experimental.numpy.promote_types(highest_dtype, i.dtype)\n\n for i in range(len(xs)):\n if is_axis_none:\n xs[i] = tf.reshape(xs[i], -1)\n xs[i] = tf.cast(xs[i], highest_dtype)\n if is_axis_none:\n axis = 0\n if is_tuple:\n xs = tuple(xs)\n ret = tf.concat(xs, axis)\n if 
ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\n# Extra #\n# ------#\n\n\ndef split(x, num_or_size_splits=None, axis=0, with_remainder=False):\n if x.shape == ():\n if num_or_size_splits is not None and num_or_size_splits != 1:\n raise Exception(\n \"input array had no shape, but num_sections specified was {}\".format(\n num_or_size_splits\n )\n )\n return [x]\n if num_or_size_splits is None:\n dim_size = tf.shape(x)[axis]\n num_or_size_splits = dim_size\n elif isinstance(num_or_size_splits, int) and with_remainder:\n num_chunks = x.shape[axis] / num_or_size_splits\n num_chunks_int = math.floor(num_chunks)\n remainder = num_chunks - num_chunks_int\n if remainder != 0:\n num_or_size_splits = [num_or_size_splits] * num_chunks_int + [\n int(remainder * num_or_size_splits)\n ]\n return tf.split(x, num_or_size_splits, axis)\n\n\ndef repeat(\n x: Tensor,\n repeats: Union[int, List[int]],\n axis: int = None,\n out: Optional[Tensor] = None,\n) -> Tensor:\n ret = tf.repeat(x, repeats, axis)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef tile(x, reps, out: Optional[Tensor] = None):\n if x.shape == ():\n x = tf.reshape(x, (-1,))\n if isinstance(reps, Number):\n reps = [reps]\n if isinstance(reps, Tensor) and reps.shape == ():\n reps = tf.reshape(reps, (-1,))\n ret = tf.tile(x, reps)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef constant_pad(x, pad_width, value=0, out: Optional[Tensor] = None):\n if x.shape == ():\n x = tf.reshape(x, (-1,))\n ret = tf.pad(x, pad_width, constant_values=value)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef zero_pad(x, pad_width, out: Optional[Tensor] = None):\n if x.shape == ():\n x = tf.reshape(x, (-1,))\n ret = tf.pad(x, pad_width)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef swapaxes(x, axis0, axis1, out: Optional[Tensor] = None):\n x_shape = x.shape\n num_dims = len(x_shape)\n axis0 %= num_dims\n axis1 %= num_dims\n config = list(range(num_dims))\n config.pop(axis0)\n config.insert(axis0, axis1)\n config.pop(axis1)\n config.insert(axis1, axis0)\n ret = tf.transpose(x, config)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef clip(x, x_min, x_max, out: Optional[Tensor] = None):\n if hasattr(x_min, \"dtype\") and hasattr(x_max, \"dtype\"):\n promoted_type = tf.experimental.numpy.promote_types(x.dtype, x_min.dtype)\n promoted_type = tf.experimental.numpy.promote_types(promoted_type, x_max.dtype)\n x = tf.cast(x, promoted_type)\n x_min = tf.cast(x_min, promoted_type)\n x_max = tf.cast(x_max, promoted_type)\n if tf.size(x) == 0:\n ret = x\n elif x.dtype == tf.bool:\n ret = tf.clip_by_value(tf.cast(x, tf.float16), x_min, x_max)\n ret = tf.cast(ret, x.dtype)\n else:\n ret = tf.clip_by_value(x, x_min, x_max)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n",
"\"\"\"Collection of tests for sorting functions.\"\"\"\n\n# global\nimport numpy as np\nfrom hypothesis import given, strategies as st\n\n# local\nimport ivy_tests.test_ivy.helpers as helpers\nimport ivy.functional.backends.numpy as ivy_np\n\n\n# argsort\n@given(\n dtype_and_x=helpers.dtype_and_values(ivy_np.valid_numeric_dtype_strs),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n)\ndef test_argsort(\n dtype_and_x,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n):\n dtype, x = dtype_and_x\n # smoke this for torch\n if fw == \"torch\" and dtype in [\"uint16\", \"uint32\", \"uint64\"]:\n return\n helpers.test_array_function(\n dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"argsort\",\n x=np.asarray(x, dtype=dtype),\n )\n\n\n# sort\n@given(\n dtype_and_x=helpers.dtype_and_values(ivy_np.valid_numeric_dtype_strs),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n)\ndef test_sort(\n dtype_and_x,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n):\n dtype, x = dtype_and_x\n # smoke this for torch\n if fw == \"torch\" and dtype in [\"uint16\", \"uint32\", \"uint64\"]:\n return\n helpers.test_array_function(\n dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"sort\",\n x=np.asarray(x, dtype=dtype),\n )\n"
]
| [
[
"tensorflow.experimental.numpy.promote_types",
"tensorflow.size",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.roll",
"tensorflow.expand_dims",
"tensorflow.transpose",
"tensorflow.reshape",
"tensorflow.repeat",
"tensorflow.reverse",
"tensorflow.squeeze",
"tensorflow.clip_by_value",
"tensorflow.tile",
"tensorflow.split",
"tensorflow.experimental.numpy.stack",
"tensorflow.cast",
"tensorflow.pad"
],
[
"numpy.asarray"
]
]
|
lectorvin/slam_algorithm | [
"9dd5e97dcdb1903bfe5fde2fe2764f8beb292f4b"
]
| [
"smth/cov_matrix.py"
]
| [
"import numpy as np\n\n\ndef mean(arr):\n return sum(arr)/(len(arr))\n\n\ndef mul(arr1, arr2):\n a = [arr1[i]*arr2[i] for i in range(len(arr1))]\n return a\n\n\ndef cov(arr1, arr2):\n m = len(mul(arr1, arr2))\n return (sum(mul(arr1, arr2)) - sum(arr1)*sum(arr2)/m)/(m-1)\n\n\ndef covMatrix(x, y):\n # column - variable, row - observation\n try:\n temp = [[0 for j in range(len(y[0]))] for i in range(len(x[0]))]\n for i in range(len(x[0])):\n for j in range(len(y[0])):\n temp[i][j] = round(cov([k[i] for k in x], [t[j] for t in y]),\n 6)\n except TypeError:\n temp = [[0, 0], [0, 0]]\n temp[0][0] = cov(x, x)\n temp[0][1] = temp[1][0] = cov(x, y)\n temp[1][1] = cov(y, y)\n return temp\n\n\nif __name__ == \"__main__\":\n x = [[-2.1, -1, 4.3]]\n y = [[3, 1.1, 0.12]]\n X = [[-2.1, -1, 4.3],\n [3, 1.1, 0.12]]\n XT = [[-2.1, 3],\n [-1, 1.1],\n [4.3, 0.12]]\n M = [[0.4706, 0.0588, 0.0882, 0.3824],\n [0.1471, 0.3235, 0.2941, 0.2353],\n [0.2647, 0.2059, 0.1765, 0.3529],\n [0.1176, 0.4118, 0.4412, 0.0294]]\n B = [[10, 5, 15],\n [15, 10, 16],\n [14, 5, 5],\n [13, 3, 2],\n [5, 10, -5],\n [-10, -5, -15],\n [-5, -10, 15],\n [3, 5, 20],\n [10, 8, 18],\n [15, 10, 15]]\n\n P = covMatrix(M, M)\n for l in P:\n for x in l:\n print(x, end=\" \")\n print()\n print()\n\n M = np.matrix(M)\n P = np.cov(M, rowvar=0, ddof=1)\n for l in P:\n for x in l:\n print(round(x, 6), end=' ')\n print()\n print()\n\n U = covMatrix(B, B)\n for l in U:\n for x in l:\n print(x, end=' ')\n print()\n print()\n\n B = np.matrix(B)\n U = np.cov(B, rowvar=0, ddof=1)\n for l in U:\n for x in l:\n print(round(x, 6), end=' ')\n print()\n print()\n\n U = covMatrix(XT, XT)\n for l in U:\n for x in l:\n print(round(x, 6), end=' ')\n print()\n"
]
| [
[
"numpy.matrix",
"numpy.cov"
]
]
|
upamanyus/primerunning | [
"dbd0b588e5952b6cd917d41c877bc190183c86d7"
]
| [
"src/primegraph.py"
]
| [
"#!/bin/env python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass PrimeGraph:\n\n def __init__(self):\n plt.figure()\n\n def loadFromFile(self, infilename):\n data = np.genfromtxt(infilename, delimiter=',', names=True)\n self.xs = data[data.dtype.names[0]]\n self.ys = data[data.dtype.names[1]]\n self.xlabel = data.dtype.names[0]\n self.ylabel = data.dtype.names[1]\n # plt.rc('text', usetex=True)\n\n def loadAllFromFile(self, infilename):\n data = np.genfromtxt(infilename, delimiter=',', names=True)\n self.xlabel = data.dtype.names[0]\n self.xs = data[data.dtype.names[0]]\n self.yss = []\n self.ylabels = []\n for name in data.dtype.names[1:]:\n self.yss.append(data[name])\n self.ylabels.append(name)\n\n def graphPlotAll(self):\n plt.xlabel(self.xlabel)\n plt.ylim(0.8/len(self.yss), 1.5/len(self.yss))\n\n for i in range(len(self.yss)):\n plt.plot(self.xs, self.yss[i], label=self.ylabels[i])\n plt.legend()\n\n def graphPlot(self):\n plt.xlabel(self.xlabel)\n plt.ylabel(self.ylabel)\n\n plt.plot(self.xs, self.ys, label=self.ylabel)\n plt.legend()\n\n\n def display(self):\n plt.show()\n\n def calculateSlopes(self):\n self.ys = self.ys/self.xs\n\n def calculateSlopesAll(self):\n for i in range(len(self.yss)):\n self.yss[i] = self.yss[i]/self.xs\n\n def graphSave(self, fileName, xaxis, yaxis, title):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_title(title)\n ax.set_xlabel(xaxis)\n ax.set_ylabel(yaxis)\n\n ax.plot(self.xs, self.ys)\n plt.savefig(fileName)\n\nprimeGraph = PrimeGraph()\nprimeGraph.loadAllFromFile('out.txt')\nprimeGraph.calculateSlopesAll()\nprimeGraph.graphPlotAll()\nprimeGraph.display()\n"
]
| [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.genfromtxt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
]
|
xiaobao520123/EnterpriseNavigator | [
"eb380d3e7ae3074c82a2b4b46eb9ecf834a991d9"
]
| [
"srvbase_qcc/east/nms.py"
]
| [
"# coding=utf-8\nimport numpy as np\n\nimport cfg\n\ndef should_merge(region, i, j):\n neighbor = {(i, j - 1)}\n return not region.isdisjoint(neighbor)\n\ndef region_neighbor(region_set):\n j_min = 100000000\n j_max = -1\n i_m = 0\n for node in region_set:\n i_m = node[0] + 1\n if node[1] > j_max:\n j_max = node[1]\n if node[1] < j_min:\n j_min = node[1]\n j_min = j_min - 1\n j_max = j_max + 2\n neighbor = set()\n for j in range(j_min, j_max):\n neighbor.add((i_m, j))\n return neighbor\n\n\ndef region_group(region_list):\n S = [i for i in range(len(region_list))]\n D = []\n while len(S) > 0:\n m = S.pop(0)\n if len(S) == 0:\n # m was the last element of S, put it into D on its own\n D.append([m])\n else:\n D.append(rec_region_merge(region_list, m, S))\n return D\n\ndef rec_region_merge(region_list, m, S):\n rows = [m]\n tmp = []\n for n in S:\n if n > m and abs(n - m) > 20: # the n > m check prevents an early break when n catches up to m from behind, e.g. n=44, m=56\n break\n if not region_neighbor(region_list[m]).isdisjoint(region_list[n]) or \\\n not region_neighbor(region_list[n]).isdisjoint(region_list[m]):\n # region m and region n are adjacent, merge them\n tmp.append(n)\n for d in tmp:\n S.remove(d)\n for e in tmp:\n rows.extend(rec_region_merge(region_list, e, S))\n return rows\n\ndef nms(predict, activation_pixels, threshold=cfg.side_vertex_pixel_threshold):\n region_list = []\n region_list_idx = []\n last_i = -1\n current_i = 0\n zipv = zip(activation_pixels[0], activation_pixels[1])\n for i, j in zipv:\n if i != last_i:\n region_list.append({(i, j)})\n region_list_idx.append(i)\n last_i = i\n continue\n merge = False\n for k in range(len(region_list)):\n current_i = region_list_idx[k]\n if i != current_i:\n continue\n if should_merge(region_list[k], i, j):\n region_list[k].add((i, j))\n merge = True\n # Fixme: overlapping text regions; some pixels are adjacent to several regions, merge them into all of those regions for now\n # break\n if not merge:\n region_list.append({(i, j)})\n region_list_idx.append(i)\n D = region_group(region_list)\n quad_list = np.zeros((len(D), 4, 2))\n score_list = np.zeros((len(D), 4))\n for group, g_th in zip(D, range(len(D))):\n total_score = np.zeros((4, 2))\n for row in group:\n for ij in region_list[row]:\n score = predict[ij[0], ij[1], 1]\n if score >= threshold:\n ith_score = predict[ij[0], ij[1], 2:3]\n if not (cfg.trunc_threshold <= ith_score < 1 -\n cfg.trunc_threshold):\n ith = int(np.around(ith_score))\n total_score[ith * 2:(ith + 1) * 2] += score\n px = (ij[1] + 0.5) * cfg.pixel_size\n py = (ij[0] + 0.5) * cfg.pixel_size\n p_v = [px, py] + np.reshape(predict[ij[0], ij[1], 3:7],\n (2, 2))\n quad_list[g_th, ith * 2:(ith + 1) * 2] += score * p_v\n score_list[g_th] = total_score[:, 0]\n quad_list[g_th] /= (total_score + cfg.epsilon)\n return score_list, quad_list\n"
]
| [
[
"numpy.around",
"numpy.reshape",
"numpy.zeros"
]
]
|
singagan/nn-Meter | [
"38654df84064aaa0f56043a99a7317667aca53ca"
]
| [
"nn_meter/builder/backend_meta/fusion_rule_tester/generate_testcase.py"
]
| [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\nimport os\nimport sys\nimport yaml\nimport importlib\nfrom tensorflow import keras\nfrom .utils import get_operator_by_name, generate_model_for_testcase\nfrom .build_models import SingleOpModel\nfrom nn_meter.builder.backend_meta.utils import Latency\n\n__BUILTIN_TESTCASES__ = {'MON'}\n\n__user_config_folder__ = os.path.expanduser('~/.nn_meter/config')\n__registry_cfg_filename__ = 'registry.yaml'\n__REG_TESTCASES__ = {}\nif os.path.isfile(os.path.join(__user_config_folder__, __registry_cfg_filename__)):\n with open(os.path.join(__user_config_folder__, __registry_cfg_filename__), 'r') as fp:\n registry_modules = yaml.load(fp, yaml.FullLoader)\n if \"testcases\" in registry_modules:\n __REG_TESTCASES__ = registry_modules[\"testcases\"]\n\n\nclass BaseTestCase:\n name = ''\n cases = None\n true_case = ''\n deps = {}\n input_shape = None\n\n def __init__(self, config, **kwargs):\n self._kwargs = kwargs\n self.latency = {}\n self.config = config\n self.load_config()\n\n def generate_testcase(self):\n testcase = {}\n model, shapes = self._model_block()\n testcase['block'] = {\n 'model': model,\n 'shapes': shapes,\n }\n\n for _, ops in self.cases.items():\n for op in ops:\n try:\n model, shapes = getattr(self, '_model_' + op)()\n testcase[op] = {\n 'model': model,\n 'shapes': shapes\n }\n except:\n layer, _, op1_is_two_inputs = get_operator_by_name(op, self.input_shape, self.config)\n model = SingleOpModel(layer)\n shapes = [self.input_shape] * (1 + op1_is_two_inputs)\n testcase[op] = {\n 'model': model,\n 'shapes': shapes\n }\n return testcase\n\n def save_testcase(self):\n from nn_meter.builder.nn_generator.tf_networks.utils import get_tensor_by_shapes\n testcase = self.generate_testcase()\n\n for op, model in testcase.items():\n model_path = os.path.join(self.model_dir, self.name + '_' + op)\n model['model'](get_tensor_by_shapes(model['shapes']))\n keras.models.save_model(model['model'], model_path)\n testcase[op]['model'] = model_path\n\n return testcase\n\n def load_latency(self, testcase):\n self.latency['block'] = Latency(testcase['block']['latency'])\n\n for case, ops in self.cases.items():\n latency_sum = 0\n for op in ops:\n if op not in self.latency:\n self.latency[op] = Latency(testcase[op]['latency'])\n latency_sum += self.latency[op]\n self.latency[case] = latency_sum\n\n def test(self):\n true_case_lat_diff = abs(self.latency[self.true_case].avg - self.latency['block'].avg)\n\n for case, _ in self.cases.items():\n if case != self.true_case and true_case_lat_diff > abs(self.latency[case].avg - self.latency['block'].avg):\n return case\n\n return self.true_case\n\n def load_config(self):\n config = self.config\n if not self.input_shape:\n self.input_shape = [config['HW'], config['HW'], config['CIN']]\n self.kernel_size = config['KERNEL_SIZE']\n self.cout = config['COUT']\n self.padding = config['PADDING']\n self.model_dir = os.path.join(config['MODEL_DIR'], 'models')\n os.makedirs(self.model_dir, exist_ok=True)\n\n def _model_block(self):\n pass\n\n\nclass BasicFusion(BaseTestCase):\n name = ''\n cases = {\n 'ops': ['', ''],\n }\n false_case = 'ops'\n\n def load_config(self):\n super().load_config()\n self.eps = self.config['EMP_ALPHA']\n\n def test(self):\n secondary_op_lat = min(lat for op, lat in self.latency.items() if op != 'block' or op != self.false_case)\n return self.latency[self.false_case].avg - self.latency['block'].avg > self.eps * secondary_op_lat.avg\n\n def load_latency(self, testcase):\n 
self.latency['block'] = Latency(testcase['block']['latency'])\n\n op1, op2 = self.cases['ops']\n op1_alias, op2_alias = op1, op2\n\n if op1_alias == op2_alias:\n op1_alias += '_1'\n op2_alias += '_2'\n \n self.latency[op1_alias] = Latency(testcase[op1_alias]['latency'])\n self.latency[op2_alias] = Latency(testcase[op2_alias]['latency'])\n self.latency['ops'] = self.latency[op1_alias] + self.latency[op2_alias]\n\n def generate_testcase(self):\n testcase = {}\n\n op1, op2 = self.cases['ops']\n op1_alias, op2_alias = op1, op2\n\n if op1_alias == op2_alias:\n op1_alias += '_1'\n op2_alias += '_2'\n\n op1_model, op2_model, block_model, op1_shapes, op2_shapes, block_shapes = \\\n generate_model_for_testcase(op1, op2, self.input_shape, self.config)\n testcase[op1_alias] = {\n 'model': op1_model,\n 'shapes': op1_shapes,\n }\n testcase[op2_alias] = {\n 'model': op2_model,\n 'shapes': op2_shapes,\n }\n testcase['block'] = {\n 'model': block_model,\n 'shapes': block_shapes,\n }\n return testcase\n\n\nclass MultipleOutNodes(BaseTestCase):\n name = 'MON'\n cases = {\n 'case1': ['relu_relu', 'relu_dwconv', 'dwconv'],\n 'case2': ['dwconv_relu_relu', 'relu_dwconv'],\n 'case3': ['dwconv_relu', 'dwconv', 'relu_relu']\n }\n true_case = 'case1'\n deps = {\n 'BF_dwconv_relu': True,\n }\n\n def _model_block(self):\n input_layer = keras.Input(shape=self.input_shape)\n\n x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)\n branch_1 = keras.layers.ReLU(negative_slope=0)(x)\n branch_1 = keras.layers.ReLU(negative_slope=0)(branch_1)\n branch_2 = keras.layers.ReLU(negative_slope=2)(x)\n branch_2 = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(branch_2)\n\n return keras.models.Model(input_layer, [branch_1, branch_2]), [self.input_shape]\n\n def _model_relu_relu(self):\n input_layer = keras.Input(shape=self.input_shape)\n\n x = keras.layers.ReLU()(input_layer)\n x = keras.layers.ReLU()(x)\n\n return keras.models.Model(input_layer, x), [self.input_shape]\n\n def _model_dwconv_relu_relu(self):\n input_layer = keras.Input(shape=self.input_shape)\n\n x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)\n x = keras.layers.ReLU()(x)\n x = keras.layers.ReLU()(x)\n\n return keras.models.Model(input_layer, x), [self.input_shape]\n\n def _model_relu_dwconv(self):\n input_layer = keras.Input(shape=self.input_shape)\n\n x = keras.layers.ReLU()(input_layer)\n x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(x)\n\n return keras.models.Model(input_layer, x), [self.input_shape]\n\n def _model_dwconv_relu(self):\n input_layer = keras.Input(shape=self.input_shape)\n\n x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)\n x = keras.layers.ReLU()(x)\n\n return keras.models.Model(input_layer, x), [self.input_shape]\n\n def _model_dwconv(self):\n input_layer = keras.Input(shape=self.input_shape)\n\n x = keras.layers.DepthwiseConv2D(self.kernel_size, padding=self.padding)(input_layer)\n\n return keras.models.Model(input_layer, x), [self.input_shape]\n\n\ndef generate_testcases():\n testcases_list = {}\n from nn_meter.builder import builder_config\n config = builder_config.get_module('ruletest')\n\n if config['BASIC_TESTCASES'] != None:\n testcases = [case.split('_') for case in config['BASIC_TESTCASES']]\n d1_required_layers = config['LAYERS_1D']\n for op1, op2 in testcases:\n class_name = f'BasicFusion_{op1}_{op2}'\n name = f'BF_{op1}_{op2}'\n cases = {\n 'ops': [op1, op2],\n }\n if op1 in 
d1_required_layers or op2 in d1_required_layers:\n input_shape = [config['SHAPE_1D']]\n else:\n input_shape = [config['HW'], config['HW'], config['CIN']]\n bf_cls = type(class_name, (BasicFusion,), {\n 'name': name,\n 'cases': cases,\n 'input_shape': input_shape,\n })\n testcases_list[bf_cls.name] = bf_cls\n \n if config['OTHER_TESTCASES'] != None:\n for testcase in config['OTHER_TESTCASES']:\n if testcase in __BUILTIN_TESTCASES__:\n testcases_list[testcase] = MultipleOutNodes\n else:\n try:\n testcase_info = __REG_TESTCASES__[testcase]\n sys.path.append(testcase_info[\"package_location\"])\n testcase_module_name = testcase_info[\"class_name\"]\n testcase_module = importlib.import_module(testcase_info[\"class_module\"])\n testcase_cls = getattr(testcase_module, testcase_module_name)\n testcases_list[testcase] = testcase_cls\n except:\n raise KeyError(f'Unsupported test case: {testcase}.')\n\n return testcases_list\n\ndef list_testcases():\n return __BUILTIN_TESTCASES__ + [\"* \" + item for item in list(__REG_TESTCASES__.keys())]\n"
]
| [
[
"tensorflow.keras.layers.DepthwiseConv2D",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.models.Model",
"tensorflow.keras.Input",
"tensorflow.keras.models.save_model"
]
]
|
solmn/parallel_wavenet | [
"45e9eceb7a2d1982b3d45823332575eb26f333c0"
]
| [
"wavenet/ops.py"
]
| [
"from __future__ import division\n\nimport tensorflow as tf\n\n\ndef create_adam_optimizer(learning_rate, momentum):\n return tf.train.AdamOptimizer(learning_rate=learning_rate,\n epsilon=1e-8)\n\n\ndef create_sgd_optimizer(learning_rate, momentum):\n return tf.train.MomentumOptimizer(learning_rate=learning_rate,\n momentum=momentum)\n\n\ndef create_rmsprop_optimizer(learning_rate, momentum):\n return tf.train.RMSPropOptimizer(learning_rate=learning_rate,\n momentum=momentum,\n epsilon=1e-5)\n\n\noptimizer_factory = {'adam': create_adam_optimizer,\n 'sgd': create_sgd_optimizer,\n 'rmsprop': create_rmsprop_optimizer}\n\n\ndef time_to_batch(value, dilation, name=None):\n with tf.name_scope('time_to_batch'):\n shape = tf.shape(value)\n pad_elements = dilation - 1 - (shape[1] + dilation - 1) % dilation\n padded = tf.pad(value, [[0, 0], [0, pad_elements], [0, 0]])\n reshaped = tf.reshape(padded, [-1, dilation, shape[2]])\n transposed = tf.transpose(reshaped, perm=[1, 0, 2])\n return tf.reshape(transposed, [shape[0] * dilation, -1, shape[2]])\n\n\ndef batch_to_time(value, dilation, name=None):\n with tf.name_scope('batch_to_time'):\n shape = tf.shape(value)\n prepared = tf.reshape(value, [dilation, -1, shape[2]])\n transposed = tf.transpose(prepared, perm=[1, 0, 2])\n return tf.reshape(transposed,\n [tf.div(shape[0], dilation), -1, shape[2]])\n\n\ndef causal_conv(value, filter_, dilation, name='causal_conv'):\n with tf.name_scope(name):\n filter_width = tf.shape(filter_)[0]\n if dilation > 1:\n transformed = time_to_batch(value, dilation)\n conv = tf.nn.conv1d(transformed, filter_, stride=1,\n padding='VALID')\n restored = batch_to_time(conv, dilation)\n else:\n restored = tf.nn.conv1d(value, filter_, stride=1, padding='VALID')\n # Remove excess elements at the end.\n out_width = tf.shape(value)[1] - (filter_width - 1) * dilation\n result = tf.slice(restored,\n [0, 0, 0],\n [-1, out_width, -1])\n return result\n\n\ndef mu_law_encode(audio, quantization_channels):\n '''Quantizes waveform amplitudes.'''\n with tf.name_scope('encode'):\n mu = tf.to_float(quantization_channels - 1)\n # Perform mu-law companding transformation (ITU-T, 1988).\n # Minimum operation is here to deal with rare large amplitudes caused\n # by resampling.\n safe_audio_abs = tf.minimum(tf.abs(audio), 1.0)\n magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)\n signal = tf.sign(audio) * magnitude\n # Quantize signal to the specified number of levels.\n return tf.to_int32((signal + 1) / 2 * mu + 0.5)\n\n\ndef mu_law_decode(output, quantization_channels):\n '''Recovers waveform from quantized values.'''\n with tf.name_scope('decode'):\n mu = quantization_channels - 1\n # Map values back to [-1, 1].\n signal = 2 * (tf.to_float(output) / mu) - 1\n # Perform inverse of mu-law transformation.\n magnitude = (1 / mu) * ((1 + mu)**abs(signal) - 1)\n return tf.sign(signal) * magnitude\n\n\n"
]
| [
[
"tensorflow.abs",
"tensorflow.log1p",
"tensorflow.shape",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.train.MomentumOptimizer",
"tensorflow.reshape",
"tensorflow.transpose",
"tensorflow.nn.conv1d",
"tensorflow.sign",
"tensorflow.name_scope",
"tensorflow.slice",
"tensorflow.to_float",
"tensorflow.div",
"tensorflow.pad",
"tensorflow.to_int32"
]
]
|
RaimisJ/dc-app-performance-toolkit | [
"492a4dd163fddecf1a277ce3f6460060018eedc8"
]
| [
"app/util/jtl_convertor/jtls-to-csv.py"
]
| [
"import os\nimport sys\nimport tempfile\nimport time\nfrom pathlib import Path\nfrom typing import IO, List, Set\nimport csv\nimport pandas\nimport json\n\nfrom util.jtl_convertor import jtl_validator\nfrom util.project_paths import ENV_TAURUS_ARTIFACT_DIR, DEFAULT_TEST_ACTIONS\n\nLABEL = 'Label'\nSAMPLES = '# Samples'\nAVERAGE = 'Average'\nMEDIAN = 'Median'\nPERC_90 = '90% Line'\nPERC_95 = '95% Line'\nPERC_99 = '99% Line'\nMIN = 'Min'\nMAX = 'Max'\nERROR_RATE = 'Error %'\nLABEL_JTL = 'label'\nELAPSED_JTL_TMP = 'elapsed_tmp'\nELAPSED_JTL = 'elapsed'\nSUCCESS_JTL = 'success'\nSUCCESS_JTL_TMP = 'success_tmp'\nFALSE_JTL = 'false'\nAPP_SPECIFIC = 'App specific'\n\nCSV_HEADER = f'{LABEL},{SAMPLES},{AVERAGE},{MEDIAN},{PERC_90},{PERC_95},{PERC_99},{MIN},{MAX},{ERROR_RATE},' \\\n f'{APP_SPECIFIC}\\n'\nRESULTS_CSV_NAME = 'results.csv'\nAPPS = ['jira', 'confluence', 'bitbucket', 'jsm', 'crowd']\nTEST_TYPES = ['selenium', 'jmeter', 'locust']\n\n\ndef __get_all_default_actions():\n full_actions_list = []\n actions_data = read_json_file(DEFAULT_TEST_ACTIONS)\n for app in APPS:\n for test_type in TEST_TYPES:\n for action in actions_data[app][test_type]:\n full_actions_list.append(action)\n return full_actions_list\n\n\ndef read_json_file(file_path):\n with open(file_path) as json_file:\n data = json.load(json_file)\n return data\n\n\ndef __count_file_lines(stream: IO) -> int:\n return sum(1 for _ in stream)\n\n\ndef __reset_file_stream(stream: IO) -> None:\n stream.seek(0)\n\n\ndef __convert_jtl_to_csv(input_file_path: Path, output_file_path: Path, default_test_actions: list) -> None:\n if not input_file_path.exists():\n raise SystemExit(f'Input file {output_file_path} does not exist')\n start = time.time()\n convert_to_csv(output_csv=output_file_path, input_jtl=input_file_path, default_test_actions=default_test_actions)\n if not output_file_path.exists():\n raise SystemExit(f'Something went wrong. Output file {output_file_path} does not exist')\n\n print(f'Created file {output_file_path}. Converted from jtl to csv in {time.time() - start} ')\n\n\ndef __change_file_extension(file_name: str, new_extension) -> str:\n return __get_file_name_without_extension(file_name) + new_extension\n\n\ndef __get_file_name_without_extension(file_name):\n return os.path.splitext(file_name)[0]\n\n\ndef __read_csv_without_first_line(results_file_stream, input_csv):\n with input_csv.open(mode='r') as file_stream:\n __reset_file_stream(file_stream)\n\n for cnt, line in enumerate(file_stream, 1):\n if cnt != 1:\n results_file_stream.write(line)\n print(f'File {input_csv} successfully read')\n\n\ndef __create_results_csv(csv_list: List[Path], results_file_path: Path) -> None:\n with results_file_path.open(mode='w') as results_file_stream:\n results_file_stream.write(CSV_HEADER)\n\n for temp_csv_path in csv_list:\n __read_csv_without_first_line(results_file_stream, temp_csv_path)\n\n if not results_file_path.exists():\n raise SystemExit(f'Something went wrong. Output file {results_file_path} does not exist')\n print(f'Created file {results_file_path}')\n\n\ndef __validate_file_names(file_names: List[str]):\n file_names_set: Set[str] = set()\n\n for file_name in file_names:\n if '.' 
not in file_name:\n raise SystemExit(f'File name {file_name} does not have extension')\n\n file_name_without_extension = __get_file_name_without_extension(file_name)\n if file_name_without_extension in file_names_set:\n raise SystemExit(f'Duplicated file name {file_name_without_extension}')\n\n file_names_set.add(file_name_without_extension)\n\n\ndef convert_to_csv(input_jtl: Path, output_csv: Path, default_test_actions: list):\n reader = csv.DictReader(input_jtl.open(mode='r'))\n\n jtl_list = [row for row in reader]\n csv_list = []\n\n for jtl_sample in jtl_list:\n sample = {}\n if jtl_sample[LABEL_JTL] not in [processed_sample[LABEL] for processed_sample in csv_list]:\n sample[LABEL] = jtl_sample[LABEL_JTL]\n sample[SAMPLES] = 1\n sample[ELAPSED_JTL_TMP] = [int(jtl_sample[ELAPSED_JTL])] # Temp list with 'elapsed' value for current label\n # Temp list with 'success' value for current label\n sample[SUCCESS_JTL_TMP] = [jtl_sample[SUCCESS_JTL].lower()]\n csv_list.append(sample)\n\n else:\n # Get and update processed row with current label\n processed_sample = [row for row in csv_list if row[LABEL] == jtl_sample['label']][0]\n processed_sample[SAMPLES] = processed_sample[SAMPLES] + 1 # Count samples\n processed_sample[ELAPSED_JTL_TMP].append(int(jtl_sample[ELAPSED_JTL])) # list of elapsed values\n processed_sample[SUCCESS_JTL_TMP].append(jtl_sample[SUCCESS_JTL].lower()) # list of success values\n\n # Calculation after the last row in kpi.jtl is processed\n if jtl_sample == jtl_list[-1]:\n for processed_sample in csv_list:\n elapsed_df = pandas.Series(processed_sample[ELAPSED_JTL_TMP])\n processed_sample[AVERAGE] = int(round(elapsed_df.mean()))\n processed_sample[MEDIAN] = int(round(elapsed_df.quantile(0.5)))\n processed_sample[PERC_90] = int(round(elapsed_df.quantile(0.9)))\n processed_sample[PERC_95] = int(round(elapsed_df.quantile(0.95)))\n processed_sample[PERC_99] = int(round(elapsed_df.quantile(0.99)))\n processed_sample[MIN] = min(processed_sample[ELAPSED_JTL_TMP])\n processed_sample[MAX] = max(processed_sample[ELAPSED_JTL_TMP])\n\n success_list = processed_sample[SUCCESS_JTL_TMP]\n processed_sample[ERROR_RATE] = round(success_list.count(FALSE_JTL) / len(success_list), 2) * 100.00\n processed_sample[APP_SPECIFIC] = processed_sample['Label'] not in default_test_actions\n del processed_sample[SUCCESS_JTL_TMP]\n del processed_sample[ELAPSED_JTL_TMP]\n\n headers = csv_list[0].keys()\n with output_csv.open('w') as output_file:\n dict_writer = csv.DictWriter(output_file, headers)\n dict_writer.writeheader()\n for row in csv_list:\n dict_writer.writerow(row)\n\n\ndef main():\n file_names = sys.argv[1:]\n __validate_file_names(file_names)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n temp_csv_list: List[Path] = []\n for file_name in file_names:\n jtl_file_path = ENV_TAURUS_ARTIFACT_DIR / file_name\n jtl_validator.validate(jtl_file_path)\n csv_file_path = Path(tmp_dir) / __change_file_extension(file_name, '.csv')\n default_test_actions = __get_all_default_actions()\n __convert_jtl_to_csv(jtl_file_path, csv_file_path, default_test_actions)\n temp_csv_list.append(csv_file_path)\n\n results_file_path = ENV_TAURUS_ARTIFACT_DIR / RESULTS_CSV_NAME\n __create_results_csv(temp_csv_list, results_file_path)\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n main()\n print(f'Done in {time.time() - start_time} seconds')\n"
]
| [
[
"pandas.Series"
]
]
|
NCAR/lrose-projects-front | [
"333a1607550fe94ea9cf5689160187baa82fc6cf"
]
| [
"projDir/qc/scripts/PlotFieldDiffs.spol.qc.pecan.py"
]
| [
"#!/usr/bin/env python\n\n#===========================================================================\n#\n# Produce plots for field diffs from original to QC\n#\n#===========================================================================\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport subprocess\nfrom optparse import OptionParser\nimport numpy as np\nfrom numpy import convolve\nfrom numpy import linalg, array, ones\nimport matplotlib.pyplot as plt\nfrom matplotlib import dates\nimport math\nimport datetime\nimport contextlib\n\ndef main():\n\n# globals\n\n global options\n global debug\n global startTime\n global endTime\n\n# parse the command line\n\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option('--debug',\n dest='debug', default=False,\n action=\"store_true\",\n help='Set debugging on')\n parser.add_option('--verbose',\n dest='verbose', default=False,\n action=\"store_true\",\n help='Set verbose debugging on')\n parser.add_option('--diffs_file',\n dest='diffsFilePath',\n default='../data/pecan/field_diffs.spol.qc.txt',\n help='File path for bias results')\n parser.add_option('--cp_file',\n dest='cpFilePath',\n default='../data/pecan/cp_analysis.spol.txt',\n help='CP results file path')\n parser.add_option('--title',\n dest='title',\n default='FIELD DIFFS - ORIGINAL TO QC',\n help='Title for plot')\n parser.add_option('--width',\n dest='figWidthMm',\n default=400,\n help='Width of figure in mm')\n parser.add_option('--height',\n dest='figHeightMm',\n default=200,\n help='Height of figure in mm')\n parser.add_option('--lenMean',\n dest='lenMean',\n default=1,\n help='Len of moving mean filter')\n parser.add_option('--start',\n dest='startTime',\n default='1970 01 01 00 00 00',\n help='Start time for XY plot')\n parser.add_option('--end',\n dest='endTime',\n default='1970 01 01 01 00 00',\n help='End time for XY plot')\n \n (options, args) = parser.parse_args()\n \n if (options.verbose == True):\n options.debug = True\n\n year, month, day, hour, minute, sec = options.startTime.split()\n startTime = datetime.datetime(int(year), int(month), int(day),\n int(hour), int(minute), int(sec))\n\n year, month, day, hour, minute, sec = options.endTime.split()\n endTime = datetime.datetime(int(year), int(month), int(day),\n int(hour), int(minute), int(sec))\n\n if (options.debug == True):\n print(\"Running %prog\", file=sys.stderr)\n print(\" cpFilePath: \", options.cpFilePath, file=sys.stderr)\n print(\" diffsFilePath: \", options.diffsFilePath, file=sys.stderr)\n print(\" startTime: \", startTime, file=sys.stderr)\n print(\" endTime: \", endTime, file=sys.stderr)\n\n # read in column headers for diffs results\n\n iret, diffsHdrs, diffsData = readColumnHeaders(options.diffsFilePath)\n if (iret != 0):\n sys.exit(-1)\n\n # read in data for diffs results\n\n diffsData, diffsTimes = readInputData(options.diffsFilePath, diffsHdrs, diffsData)\n\n # read in column headers for CP results\n\n iret, cpHdrs, cpData = readColumnHeaders(options.cpFilePath)\n if (iret != 0):\n sys.exit(-1)\n\n # read in data for CP results\n\n cpData, cpTimes = readInputData(options.cpFilePath, cpHdrs, cpData)\n\n # render the plot\n \n doPlot(diffsData, diffsTimes, cpData, cpTimes)\n\n sys.exit(0)\n \n########################################################################\n# Read columm headers for the data\n# this is in the first line\n\ndef readColumnHeaders(filePath):\n\n colHeaders = []\n colData = {}\n\n fp = open(filePath, 'r')\n line = fp.readline()\n 
fp.close()\n \n commentIndex = line.find(\"#\")\n if (commentIndex == 0):\n # header\n colHeaders = line.lstrip(\"# \").rstrip(\"\\n\").split()\n if (options.debug == True):\n print(\"colHeaders: \", colHeaders, file=sys.stderr)\n else:\n print(\"ERROR - readColumnHeaders\", file=sys.stderr)\n print(\" First line does not start with #\", file=sys.stderr)\n return -1, colHeaders, colData\n \n for index, var in enumerate(colHeaders, start=0):\n colData[var] = []\n \n return 0, colHeaders, colData\n\n########################################################################\n# Read in the data\n\ndef readInputData(filePath, colHeaders, colData):\n\n # open file\n\n fp = open(filePath, 'r')\n lines = fp.readlines()\n\n # read in a line at a time, set colData\n for line in lines:\n \n commentIndex = line.find(\"#\")\n if (commentIndex >= 0):\n continue\n \n # data\n \n data = line.strip().split()\n\n for index, var in enumerate(colHeaders, start=0):\n if (var == 'count' or var == 'year' or var == 'month' or var == 'day' or \\\n var == 'hour' or var == 'min' or var == 'sec' or \\\n var == 'unix_time'):\n colData[var].append(int(data[index]))\n else:\n colData[var].append(float(data[index]))\n\n fp.close()\n\n # load observation times array\n\n year = colData['year']\n month = colData['month']\n day = colData['day']\n hour = colData['hour']\n minute = colData['min']\n sec = colData['sec']\n\n obsTimes = []\n for ii, var in enumerate(year, start=0):\n thisTime = datetime.datetime(year[ii], month[ii], day[ii],\n hour[ii], minute[ii], sec[ii])\n obsTimes.append(thisTime)\n\n return colData, obsTimes\n\n########################################################################\n# Moving average filter\n\ndef movingAverage(values, window):\n\n if (window < 2):\n return values\n\n weights = np.repeat(1.0, window)/window\n sma = np.convolve(values, weights, 'same')\n return sma\n\n########################################################################\n# Plot\n\ndef doPlot(diffsData, diffsTimes, cpData, cpTimes):\n\n fileName = options.diffsFilePath\n titleStr = \"File: \" + fileName\n hfmt = dates.DateFormatter('%y/%m/%d')\n\n lenMeanFilter = int(options.lenMean)\n\n # set up arrays for diffs\n\n dtimes = np.array(diffsTimes).astype(datetime.datetime)\n \n dbzDiffs = np.array(diffsData[\"DBZ_F_diffMean\"]).astype(np.double)\n dbzDiffs = movingAverage(dbzDiffs, lenMeanFilter)\n dbzValid = np.isfinite(dbzDiffs)\n \n zdrDiffs = np.array(diffsData[\"ZDR_F_diffMean\"]).astype(np.double)\n zdrDiffs = movingAverage(zdrDiffs, lenMeanFilter)\n zdrValid = np.isfinite(zdrDiffs)\n \n validDbzDtimes = dtimes[dbzValid]\n validDbzVals = dbzDiffs[dbzValid]\n \n validZdrDtimes = dtimes[zdrValid]\n validZdrVals = zdrDiffs[zdrValid]\n \n # load up receiver gain etc - axis 4\n \n (dailyTimeDbz, dailyValDbz) = computeDailyStats(validDbzDtimes, validDbzVals)\n (dailyTimeZdr, dailyValZdr) = computeDailyStats(validZdrDtimes, validZdrVals)\n\n # transmit power\n\n cptimes = np.array(cpTimes).astype(datetime.datetime)\n\n timeStart1us = datetime.datetime(2015, 6, 8, 0, 0, 0)\n pwrCorrFlag = cptimes < timeStart1us\n pwrCorr = np.zeros(len(cptimes))\n pwrCorr[cptimes < timeStart1us] = -1.76\n \n TxPwrH = np.array(cpData[\"TxPwrH\"]).astype(np.double)\n TxPwrH = movingAverage(TxPwrH, 11)\n TxPwrH = TxPwrH + pwrCorr\n validTxPwrH = np.isfinite(TxPwrH)\n\n TxPwrV = np.array(cpData[\"TxPwrV\"]).astype(np.double)\n TxPwrV = movingAverage(TxPwrV, 11)\n TxPwrV = TxPwrV + pwrCorr\n validTxPwrV = np.isfinite(TxPwrV)\n\n # set up plots\n\n 
widthIn = float(options.figWidthMm) / 25.4\n htIn = float(options.figHeightMm) / 25.4\n\n fig1 = plt.figure(1, (widthIn, htIn))\n\n ax1a = fig1.add_subplot(2,1,1,xmargin=0.0)\n ax1b = fig1.add_subplot(2,1,2,xmargin=0.0)\n #ax1c = fig1.add_subplot(3,1,3,xmargin=0.0)\n\n oneDay = datetime.timedelta(1.0)\n ax1a.set_xlim([dtimes[0] - oneDay, dtimes[-1] + oneDay])\n ax1a.set_title(\"DBZ and ZDR differences, QC minus original (dB)\")\n ax1b.set_xlim([dtimes[0] - oneDay, dtimes[-1] + oneDay])\n ax1b.set_title(\"Daily mean differences, QC minus original (dB)\")\n #ax1c.set_xlim([dtimes[0] - oneDay, dtimes[-1] + oneDay])\n #ax1c.set_title(\"Measured transmit power (dBm)\")\n\n ax1a.plot(validDbzDtimes, validDbzVals, \\\n \"o\", label = 'DBZ diffs', color='blue')\n \n ax1a.plot(validDbzDtimes, validDbzVals, \\\n label = 'DBZ diffs', linewidth=1, color='blue')\n \n ax1a.plot(validZdrDtimes, validZdrVals, \\\n \"o\", label = 'ZDR diffs', color='red')\n \n ax1a.plot(validZdrDtimes, validZdrVals, \\\n label = 'ZDR diffs', linewidth=1, color='red')\n \n ax1b.plot(dailyTimeDbz, dailyValDbz, \\\n label = 'Daily DBZ Diffs', linewidth=1, color='blue')\n ax1b.plot(dailyTimeDbz, dailyValDbz, \\\n \"^\", label = 'Daily DBZ Diffs', color='blue', markersize=10)\n\n ax1b.plot(dailyTimeZdr, dailyValZdr, \\\n label = 'Daily ZDR Diffs', linewidth=1, color='red')\n ax1b.plot(dailyTimeZdr, dailyValZdr, \\\n \"^\", label = 'Daily ZDR Diffs', color='red', markersize=10)\n\n #ax1c.plot(cptimes[validTxPwrH], TxPwrH[validTxPwrH], \\\n # label = 'TxPwrH', linewidth=2, color='blue')\n\n #ax1c.plot(cptimes[validTxPwrV], TxPwrV[validTxPwrV], \\\n # label = 'TxPwrV', linewidth=2, color='cyan')\n\n configDateAxis(ax1a, -4.0, 4.0, \"Vol-by-vol diffs (dB)\", 'upper right')\n configDateAxis(ax1b, -4.0, 4.0, \"Daily mean diffs (dB)\", 'upper right')\n #configDateAxis(ax1c, 85.0, 88.0, \"Power (dBm)\", 'upper right')\n\n fig1.autofmt_xdate()\n fig1.tight_layout()\n fig1.subplots_adjust(bottom=0.10, left=0.06, right=0.97, top=0.94)\n plt.show()\n\n########################################################################\n# initialize legends etc\n\ndef configDateAxis(ax, miny, maxy, ylabel, legendLoc):\n \n legend = ax.legend(loc=legendLoc, ncol=6)\n for label in legend.get_texts():\n label.set_fontsize('x-small')\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(ylabel)\n ax.grid(True)\n if (miny > -9990 and maxy > -9990):\n ax.set_ylim([miny, maxy])\n hfmt = dates.DateFormatter('%y/%m/%d')\n ax.xaxis.set_major_locator(dates.DayLocator())\n ax.xaxis.set_major_formatter(hfmt)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(8) \n\n########################################################################\n# compute daily stats for a variable\n\ndef computeDailyStats(times, vals):\n\n dailyTimes = []\n dailyMeans = []\n\n nptimes = np.array(times).astype(datetime.datetime)\n npvals = np.array(vals).astype(np.double)\n\n validFlag = np.isfinite(npvals)\n timesValid = nptimes[validFlag]\n valsValid = npvals[validFlag]\n \n startTime = nptimes[0]\n endTime = nptimes[-1]\n \n startDate = datetime.datetime(startTime.year, startTime.month, startTime.day, 0, 0, 0)\n endDate = datetime.datetime(endTime.year, endTime.month, endTime.day, 0, 0, 0)\n\n oneDay = datetime.timedelta(1)\n halfDay = datetime.timedelta(0.5)\n \n thisDate = startDate\n while (thisDate < endDate + oneDay):\n \n nextDate = thisDate + oneDay\n result = []\n \n sum = 0.0\n sumDeltaTime = datetime.timedelta(0)\n count = 0.0\n for ii, val in enumerate(valsValid, 
start=0):\n thisTime = timesValid[ii]\n if (thisTime >= thisDate and thisTime < nextDate):\n sum = sum + val\n deltaTime = thisTime - thisDate\n sumDeltaTime = sumDeltaTime + deltaTime\n count = count + 1\n result.append(val)\n if (count > 5):\n mean = sum / count\n meanDeltaTime = datetime.timedelta(0, sumDeltaTime.total_seconds() / count)\n dailyMeans.append(mean)\n dailyTimes.append(thisDate + meanDeltaTime)\n # print >>sys.stderr, \" daily time, meanStrong: \", dailyTimes[-1], meanStrong\n result.sort()\n \n thisDate = thisDate + oneDay\n\n return (dailyTimes, dailyMeans)\n\n\n########################################################################\n# Run a command in a shell, wait for it to complete\n\ndef runCommand(cmd):\n\n if (options.debug == True):\n print(\"running cmd:\",cmd, file=sys.stderr)\n \n try:\n retcode = subprocess.call(cmd, shell=True)\n if retcode < 0:\n print(\"Child was terminated by signal: \", -retcode, file=sys.stderr)\n else:\n if (options.debug == True):\n print(\"Child returned code: \", retcode, file=sys.stderr)\n except OSError as e:\n print(\"Execution failed:\", e, file=sys.stderr)\n\n########################################################################\n# Run - entry point\n\nif __name__ == \"__main__\":\n main()\n\n"
]
| [
[
"numpy.array",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.figure",
"numpy.isfinite",
"numpy.repeat",
"matplotlib.pyplot.show",
"matplotlib.dates.DayLocator",
"numpy.convolve"
]
]
|
DeFacto/WebCredibility | [
"dfbb990966fc6b33f60378acffa0f12e25183431"
]
| [
"trustworthiness/util.py"
]
| [
"import collections\nimport datetime\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\n\nimport numpy as np\nimport pdfkit as pdfkit\nfrom bs4 import BeautifulSoup\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, confusion_matrix, classification_report, \\\n accuracy_score\nfrom tldextract import tldextract\nfrom sklearn.externals import joblib\n\nfrom coffeeandnoodles.core.util import get_md5_from_string\nfrom trustworthiness.config import DeFactoConfig\nfrom trustworthiness.definitions import DATASET_3C_SITES_PATH, DATASET_MICROSOFT_PATH_PAGES_MISSING, \\\n DATASET_MICROSOFT_PATH_PAGES_CACHED, ENC_WEB_DOMAIN, ENC_WEB_DOMAIN_SUFFIX, DATASET_MICROSOFT_PATH, OUTPUT_FOLDER, \\\n ENC_TAGS\n\nimport re\n\nconfig = DeFactoConfig()\n\n\ndef filterTerm(word):\n if word is not None:\n temp = word.lower()\n return re.sub(r\"[^A-Za-z]+\", '', temp)\n else:\n return ''\n\ndef print_report_regression(clf_name, predictions, y_test, targets):\n print('MAE', mean_absolute_error(y_test, predictions))\n print('RMSE', np.math.sqrt(mean_squared_error(y_test, predictions)))\n print(\"-----------------------------------------------------------------------\")\n\n\ndef print_report(clf_name, predictions, y_test, targets):\n print(\"Classifier: \", clf_name)\n print(confusion_matrix(y_test, predictions))\n print(\"accuracy: \", accuracy_score(y_test, predictions))\n print(classification_report(y_test, predictions, target_names=targets))\n # print(\":: recall: \", recall_score(y_test, predictions, average='weighted'))\n # print(\":: precision: \", precision_score(y_test, predictions, average='weighted'))\n # print(\":: f1: \", f1_score(y_test, predictions, average='weighted'))\n print(\"-----------------------------------------------------------------------\")\n\ndef get_logger(name, dir, file_level=logging.DEBUG, console_level=logging.INFO):\n\n try:\n logger = logging.getLogger(name)\n if len(logger.handlers) == 0:\n now = datetime.datetime.now()\n filename = dir + name + '_' + now.strftime(\"%Y-%m-%d\") + '.log'\n\n formatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n\n fileHandler = logging.FileHandler(filename)\n fileHandler.setFormatter(formatter)\n fileHandler.setLevel(file_level)\n\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(formatter)\n consoleHandler.setLevel(console_level)\n\n logger.setLevel(logging.DEBUG)\n logger.addHandler(fileHandler)\n logger.addHandler(consoleHandler)\n logger.propagate = False\n\n return logger\n\n except:\n raise\n\n\ndef get_html_file_path(url):\n path = url.replace('http://', '')\n last = path.split('/')[-1]\n\n path_root = None\n if ('.html' not in last) and ('.htm' not in last) and ('.shtml' not in last):\n if path[-1] != '/':\n path = path + '/'\n path_root1 = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path + 'index.html')\n path_root2 = Path(DATASET_MICROSOFT_PATH_PAGES_MISSING + path + 'index.html')\n else:\n path_root1 = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path)\n path_root2 = Path(DATASET_MICROSOFT_PATH_PAGES_MISSING + path)\n\n if path_root1.exists():\n path_root = path_root1\n elif path_root2.exists():\n path_root = path_root2\n else:\n # sometimes the last part is not a folder, but the file itself without the \".html\" , try it as a last attempt\n path_root3a = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.html')\n path_root3b = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + 
'.htm')\n path_root3c = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.shtml')\n if path_root3a.exists():\n path_root = path_root3a\n elif path_root3b.exists():\n path_root = path_root3b\n elif path_root3c.exists():\n path_root = path_root3c\n else:\n # url_broken.append(url)\n raise Exception(\n ':: this should not happen, double check core/web/credibility/fix_dataset_microsoft.py | url = ' + url)\n\n return path_root\n\n\ndef save_encoder_html2seq(folder_html_data):\n\n from sklearn import preprocessing\n le = preprocessing.LabelEncoder()\n\n config.logger.info('get_encoder_html2seq()')\n\n try:\n tags_set = []\n #sentences = []\n tot_files = 0\n #my_file = Path(folder_html_data + 'features.html2seq.pkl')\n my_encoder = Path(ENC_TAGS)\n #path_html2seq = folder_html_data + 'html2seq/'\n #path_html = folder_html_data + 'html/'\n #path_text = folder_html_data + 'text/'\n\n for dirpath, dirs, files in os.walk(folder_html_data):\n for file_html in files:\n if file_html.endswith('.txt'):\n tot_files += 1\n config.logger.info('processing file ' + str(tot_files) + ' - ' + str(len(tags_set)))\n # get tags\n tags = []\n soup = BeautifulSoup(open(os.path.join(dirpath, file_html)), \"html.parser\")\n html = soup.prettify()\n for line in html.split('\\n'):\n if isinstance(line, str) and len(line.strip()) > 0:\n if (line.strip()[0] == '<') and (line.strip()[0:2] != '<!'):\n if len(line.split()) > 1:\n tags.append(line.split()[0] + '>')\n else:\n tags.append(line.split()[0])\n elif (line.strip()[0:2] == '</' and line.strip()[0:2] != '<!'):\n tags.append(line.split()[0])\n\n if len(tags) > 0:\n #sentences.append(tags)\n tags_set.extend(tags)\n tags_set = list(set(tags_set))\n else:\n config.logger.info('no tags for this file...')\n\n\n config.logger.info('saving dump')\n le.fit(tags_set)\n joblib.dump(le, str(my_encoder))\n\n config.logger.info('tot files: ' + str(tot_files))\n config.logger.info('dictionary size: ' + str(len(tags_set)))\n\n except Exception as e:\n config.logger.error(repr(e))\n raise\n\n\ndef save_encoder_domain_and_suffix():\n\n import pandas as pd\n from sklearn import preprocessing\n le1 = preprocessing.LabelEncoder()\n le2 = preprocessing.LabelEncoder()\n\n domain_s = ['com']\n domain_s = ['']\n domain = ['']\n\n df_sites = pd.read_csv(DATASET_3C_SITES_PATH, na_values=0, delimiter=',', usecols=['document_url'])\n for index, row in df_sites.iterrows():\n url = str(row[0])\n print(index, url)\n try:\n o = tldextract.extract(url)\n if o.suffix is not None:\n domain_s.append(str(o.suffix).lower())\n if o.domain is not None:\n domain.append(str(o.domain).lower())\n except:\n continue\n\n # appending upper level domains, from http://data.iana.org/TLD/tlds-alpha-by-domain.txt\n # Version 2018040300, Last Updated Tue Apr 3 07:07:01 2018 UTC\n df = pd.read_csv(config.datasets + 'data/iana/org/TLD/tlds-alpha-by-domain.txt', sep=\" \", header=None)\n for index, row in df.iterrows():\n print(index, row[0])\n domain.append(str(row[0]).lower())\n\n df = pd.read_csv(DATASET_MICROSOFT_PATH, delimiter='\\t', header=0)\n for index, row in df.iterrows():\n url = str(row[3])\n print(index, url)\n try:\n o = tldextract.extract(url)\n if o.suffix is not None:\n domain_s.append(str(o.suffix).lower())\n if o.domain is not None:\n domain.append(str(o.domain).lower())\n except:\n continue\n\n\n le1.fit(domain)\n joblib.dump(le1, ENC_WEB_DOMAIN)\n print(le1.classes_)\n\n le2.fit(domain_s)\n joblib.dump(le2, ENC_WEB_DOMAIN_SUFFIX)\n print(le2.classes_)\n\ndef diff_month(d1, 
d2):\n return (d1.year - d2.year) * 12 + d1.month - d2.month\n\ndef save_url_body(extractor):\n try:\n config.logger.info('extracting features for: ' + extractor.url)\n hash = get_md5_from_string(extractor.local_file_path)\n text=extractor.webscrap.get_body()\n with open(config.root_dir_data + 'marseille/input/' + hash + '.txt', \"w\") as file:\n file.write(text)\n\n except Exception as e:\n config.logger.error(repr(e))\n raise\n\n\n\nif __name__ == '__main__':\n save_encoder_domain_and_suffix()\n # save_encoder_html2seq('/Users/diegoesteves/DropDrive/CloudStation/experiments_cache/web_credibility/output/all_html/') # just copy and paste all html files into a single temp file to generate this."
]
| [
[
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.mean_squared_error",
"sklearn.externals.joblib.dump",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.classification_report",
"sklearn.metrics.mean_absolute_error",
"pandas.read_csv"
]
]
|
crpurcell/MQ_DPI_Release | [
"97444513e8b8d48ec91ff8a43b9dfaed0da029f9"
]
| [
"Shark_Tagging/Imports/boxtracker.py"
]
| [
"#!/usr/bin/env python\nfrom scipy.spatial import distance as dist\nfrom collections import OrderedDict\nimport numpy as np\nimport os\nimport json\nimport itertools\nimport operator\nclass BoxTracker():\n def __init__(self, maxGone=20, iouThreshSelf=0.3):\n self.nextObjectID = 0 \n self.boxDict = OrderedDict() \n self.scoreDict = OrderedDict() \n self.centDict = OrderedDict() \n self.labelDict = OrderedDict() \n self.goneCntDict = OrderedDict() \n self.maxGone = maxGone \n self.iouThreshSelf = iouThreshSelf \n self.tracks = OrderedDict()\n def _calc_IOUs(self, boxes1, boxes2):\n x11, y11, x12, y12 = np.split(boxes1, 4, axis=1)\n x21, y21, x22, y22 = np.split(boxes2, 4, axis=1)\n xA = np.maximum(x11, np.transpose(x21))\n yA = np.maximum(y11, np.transpose(y21))\n xB = np.minimum(x12, np.transpose(x22))\n yB = np.minimum(y12, np.transpose(y22))\n interArea = np.maximum((xB - xA + 1), 0) * np.maximum((yB - yA + 1), 0)\n boxAarea = (x12 - x11 + 1) * (y12 - y11 + 1)\n boxBarea = (x22 - x21 + 1) * (y22 - y21 + 1)\n iou = interArea / (boxAarea + np.transpose(boxBarea) - interArea)\n return iou\n def _register(self, box, score, label, centroid, frm):\n self.boxDict[self.nextObjectID] = box\n self.scoreDict[self.nextObjectID] = score\n self.labelDict[self.nextObjectID] = label\n self.centDict[self.nextObjectID] = centroid\n self.goneCntDict[self.nextObjectID] = 0\n track = TimeTrack()\n track.update(frm, centroid, box, score, label)\n self.tracks[self.nextObjectID] = track\n self.nextObjectID += 1\n def _deregister(self, objectID):\n del self.boxDict[objectID]\n del self.scoreDict[objectID]\n del self.labelDict[objectID]\n del self.centDict[objectID]\n del self.goneCntDict[objectID]\n def update(self, boxes, scores, labels, frm):\n boxes = boxes.copy()\n scores = scores.copy()\n labels = labels.copy()\n if len(scores) == 0:\n for objectID in list(self.goneCntDict.keys()):\n self.goneCntDict[objectID] += 1\n if self.goneCntDict[objectID] > self.maxGone:\n self._deregister(objectID)\n return boxes.copy(), scores.copy(), labels.copy()\n dropLst = [] \n iouArr = self._calc_IOUs(boxes, boxes)\n triBool = ~np.tril(np.ones_like(iouArr)).astype(np.bool)\n rows, cols = np.nonzero(triBool * iouArr > self.iouThreshSelf)\n for row, col in zip(rows, cols):\n if scores[row] >= scores[col]:\n dropLst.append(col)\n else:\n dropLst.append(row)\n boxes = np.delete(boxes, dropLst, axis=0)\n scores = np.delete(scores, dropLst)\n labels = np.delete(labels, dropLst)\n inputCentroids = np.zeros((len(scores), 2), dtype=\"int\")\n for (i, (startX, startY, endX, endY)) in enumerate(boxes):\n cX = int((startX + endX) / 2.0)\n cY = int((startY + endY) / 2.0)\n inputCentroids[i] = (cX, cY)\n xHalfSide = (endX - startX) / 2\n yHalfSide = (endY - startY) / 2\n if len(self.scoreDict) == 0:\n for i in range(0, len(scores)):\n self._register(boxes[i, :], scores[i], labels[i],\n inputCentroids[i], frm)\n else:\n objectIDs = list(self.centDict.keys())\n storedCentroids = np.array(list(self.centDict.values()))\n D = dist.cdist(np.array(storedCentroids), inputCentroids)\n rows = D.min(axis=1).argsort()\n cols = D.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for (row, col) in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.boxDict[objectID] = boxes[col, :]\n self.scoreDict[objectID] = scores[col]\n self.labelDict[objectID] = labels[col]\n self.centDict[objectID] = inputCentroids[col]\n self.goneCntDict[objectID] = 0\n self.tracks[objectID].update(frm,\n 
inputCentroids[col],\n boxes[col, :],\n scores[col],\n labels[col])\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, D.shape[0])).difference(usedRows)\n unusedCols = set(range(0, D.shape[1])).difference(usedCols)\n if D.shape[0] >= D.shape[1]:\n for row in unusedRows:\n objectID = objectIDs[row]\n self.goneCntDict[objectID] += 1\n if self.goneCntDict[objectID] > self.maxGone:\n self._deregister(objectID)\n else:\n for col in unusedCols:\n self._register(boxes[col, :], scores[col], labels[col],\n inputCentroids[col], frm)\n return boxes.copy(), scores.copy(), labels.copy()\n def save_json(self, outFile, labelDict=None, doAppend=True):\n if os.path.exists(outFile):\n if doAppend:\n with open(outFile, 'r') as FH:\n trackLst = json.load(FH)\n nTracks = len(trackLst)\n print(\"[INFO] appending to {:d} tracks in existing file\"\n .format(nTracks))\n else:\n print(\"[WARN] overwriting existing file\")\n trackLst = []\n else:\n trackLst = []\n for objectID, track in self.tracks.items():\n trackLst.append(track.get_trackdict(labelDict))\n nTracks = len(trackLst)\n with open(outFile, 'w') as FH:\n json.dump(trackLst, FH)\n print(\"[INFO] wrote {:d} tracks to {}.\".format(nTracks, outFile))\nclass TimeTrack():\n def __init__(self):\n self.frmLst = []\n self.xLst = []\n self.yLst = []\n self.xHalfSideLst = []\n self.yHalfSideLst = []\n self.label = \"\"\n self.labelLst = []\n self.qual = \"\"\n self.scoreLst = []\n self.comments = \"\"\n self.pickle = \"\"\n def get_trackdict(self, labelDict=None):\n labelNum = max(set(self.labelLst), key = self.labelLst.count)\n if labelDict is not None:\n if labelNum in labelDict:\n self.label = labelDict[labelNum]\n else:\n self.label = str(labelNum)\n self.qual = int(round(np.median(self.scoreLst)*10))\n trackDict = {\"frmLst\" : self.frmLst,\n \"xLst\" : self.xLst,\n \"yLst\" : self.yLst,\n \"xHalfSideLst\" : self.xHalfSideLst,\n \"yHalfSideLst\" : self.yHalfSideLst,\n \"label\" : self.label,\n \"labelLst\" : self.labelLst,\n \"qual\" : self.qual,\n \"scoreLst\" : self.scoreLst,\n \"comments\" : self.comments,\n \"pickle\" : self.pickle}\n return trackDict\n def update(self, frm, centroid, box, score, label):\n self.frmLst.append(int(frm))\n self.xLst.append(int(round(centroid[0])))\n self.yLst.append(int(round(centroid[1])))\n self.xHalfSideLst.append(float((box[2] - box[0]) / 2))\n self.yHalfSideLst.append(float((box[3] - box[1]) / 2))\n self.labelLst.append(int(label))\n self.scoreLst.append(float(score))\n"
]
| [
[
"numpy.delete",
"numpy.array",
"numpy.ones_like",
"numpy.median",
"numpy.nonzero",
"numpy.split",
"numpy.transpose",
"numpy.maximum"
]
]
|
vaibhav-s/self-driving-car | [
"eb5865d50499f90b3eeace869c1f8a65cf9e2c46"
]
| [
"steering-models/community-models/cg23/data_explore.py"
]
| [
"# -------------------------------------------------------------------\r\n# Challenge #2 - Data Exploration\r\n# -------------------------------------------------------------------\r\n\r\n# Creates plots of steering angles by consecutive timestamps\r\n# By: cgundling \r\n# Rev Date: 11/19/16\r\n\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nfrom pylab import *\r\n\r\ndef plotFeatures(data):\r\n # Plot all the time sections of steering data\r\n j = 0 # Rotating Color\r\n k = 0 # Rotating Marker\r\n jj = 0 # Subplot Number\r\n timebreak = [0] # Store indices of timebreaks\r\n start = 0 \r\n c = ['r','b','g','k','m','y','c']\r\n marker = ['.','o','x','+','*','s','d']\r\n\r\n for i in range(1,data.shape[0]):\r\n if data[i,0] != data[i-1,0] and data[i,0] != (data[i-1,0] + 1):\r\n timebreak.append(int(data[i-1,0]))\r\n if jj < 70:\r\n jj = jj + 1\r\n print(jj)\r\n plt.subplot(7,10,jj)\r\n plt.plot(data[start:i-1,0],data[start:i-1,1],c=c[j],marker=marker[k])\r\n start = i\r\n j = j + 1\r\n if jj == 69:\r\n plt.subplot(7,10,jj+1)\r\n plt.plot(data[start:-1,0],data[start:-1,1],c=c[j],marker=marker[k])\r\n if j == 6:\r\n j = 0\r\n k = 0 #k = k + 1\r\n if k == 7:\r\n k = 0\r\n\r\n for i in range (1,71):\r\n plt.subplot(7,10,i)\r\n plt.xlabel('TimeStamp')\r\n plt.ylabel('Steering Angle')\r\n plt.grid(True)\r\n\r\n plt.suptitle('Consecutive Timestamp Steering')\r\n plt.subplots_adjust(left=0.05,bottom=0.05,right=0.95,top=0.95,wspace=0.40,hspace=0.25)\r\n fig = plt.gcf()\r\n fig.set_size_inches(30, 15)\r\n fig.savefig('Steering.png', dpi=200)\r\n\r\n# Main Program\r\ndef main():\r\n # Stats on steering data\r\n df_steer = pd.read_csv('dataset/steering.csv',usecols=['timestamp','angle'],index_col = False)\r\n u_A = str(len(list(set(df_steer['angle'].values.tolist()))))\r\n counts_A = df_steer['angle'].value_counts()\r\n\r\n # Mod the timestamp data\r\n time_factor = 10\r\n time_scale = int(1e9) / time_factor\r\n df_steer['time_mod'] = df_steer['timestamp'].astype(int) / time_scale\r\n u_T = str(len(list(set(df_steer['time_mod'].astype(int).values.tolist()))))\r\n\r\n # Some stats on steering angles/timestamps\r\n print('Number of unique steering angles...')\r\n print (u_A,df_steer.shape)\r\n print('Number of unique timestamps...')\r\n print (u_T,df_steer.shape)\r\n\r\n np.set_printoptions(suppress=False)\r\n counts_A.to_csv('counts.csv')\r\n df_steer['time_mod'].astype(int).to_csv('timestamps.csv',index=False)\r\n\r\n # Plot the steering data\r\n angle = np.zeros((df_steer.shape[0],1))\r\n time = np.zeros((df_steer.shape[0],1))\r\n\r\n angle[:,0] = df_steer['angle'].values\r\n time[:,0] = df_steer['time_mod'].values.astype(int)\r\n data = np.append(time,angle,axis=1)\r\n plotFeatures(data)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
]
| [
[
"numpy.zeros",
"numpy.set_printoptions",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.ylabel",
"numpy.append",
"pandas.read_csv",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplot"
]
]
|
celinede/nilearn | [
"901a627c4c5ae491fef19d58307805b3657b3b7e"
]
| [
"nilearn/image/image.py"
]
| [
"\"\"\"\nPreprocessing functions for images.\n\nSee also nilearn.signal.\n\"\"\"\n# Authors: Philippe Gervais, Alexandre Abraham\n# License: simplified BSD\n\nimport collections\nimport warnings\n\nimport numpy as np\nfrom scipy import ndimage\nfrom scipy.stats import scoreatpercentile\nimport copy\nimport nibabel\nfrom sklearn.externals.joblib import Parallel, delayed\n\nfrom .. import signal\nfrom .._utils import (check_niimg_4d, check_niimg_3d, check_niimg, as_ndarray,\n _repr_niimgs)\nfrom .._utils.niimg_conversions import _index_img, _check_same_fov\nfrom .._utils.niimg import _safe_get_data\nfrom .._utils.compat import _basestring\nfrom .._utils.param_validation import check_threshold\n\n\ndef high_variance_confounds(imgs, n_confounds=5, percentile=2.,\n detrend=True, mask_img=None):\n \"\"\" Return confounds signals extracted from input signals with highest\n variance.\n\n Parameters\n ----------\n imgs: Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n 4D image.\n\n mask_img: Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n If provided, confounds are extracted from voxels inside the mask.\n If not provided, all voxels are used.\n\n n_confounds: int\n Number of confounds to return\n\n percentile: float\n Highest-variance signals percentile to keep before computing the\n singular value decomposition, 0. <= `percentile` <= 100.\n mask_img.sum() * percentile / 100. must be greater than n_confounds.\n\n detrend: bool\n If True, detrend signals before processing.\n\n Returns\n -------\n v: numpy.ndarray\n highest variance confounds. Shape: (number of scans, n_confounds)\n\n Notes\n ------\n This method is related to what has been published in the literature\n as 'CompCor' (Behzadi NeuroImage 2007).\n\n The implemented algorithm does the following:\n\n - compute sum of squares for each signals (no mean removal)\n - keep a given percentile of signals with highest variance (percentile)\n - compute an svd of the extracted signals\n - return a given number (n_confounds) of signals from the svd with\n highest singular values.\n\n See also\n --------\n nilearn.signal.high_variance_confounds\n \"\"\"\n from .. import masking\n\n if mask_img is not None:\n sigs = masking.apply_mask(imgs, mask_img)\n else:\n # Load the data only if it doesn't need to be masked\n imgs = check_niimg_4d(imgs)\n sigs = as_ndarray(imgs.get_data())\n # Not using apply_mask here saves memory in most cases.\n del imgs # help reduce memory consumption\n sigs = np.reshape(sigs, (-1, sigs.shape[-1])).T\n\n return signal.high_variance_confounds(sigs, n_confounds=n_confounds,\n percentile=percentile,\n detrend=detrend)\n\n\ndef _fast_smooth_array(arr):\n \"\"\"Simple smoothing which is less computationally expensive than\n applying a gaussian filter.\n\n Only the first three dimensions of the array will be smoothed. The\n filter uses [0.2, 1, 0.2] weights in each direction and use a\n normalisation to preserve the local average value.\n\n Parameters\n ----------\n arr: numpy.ndarray\n 4D array, with image number as last dimension. 
3D arrays are\n also accepted.\n\n Returns\n -------\n smoothed_arr: numpy.ndarray\n Smoothed array.\n\n Notes\n -----\n Rather than calling this function directly, users are encouraged\n to call the high-level function :func:`smooth_img` with\n fwhm='fast'.\n\n \"\"\"\n neighbor_weight = 0.2\n # 6 neighbors in 3D if not on an edge\n nb_neighbors = 6\n # This scale ensures that a uniform array stays uniform\n # except on the array edges\n scale = 1 + nb_neighbors * neighbor_weight\n\n # Need to copy because the smoothing is done in multiple statements\n # and there does not seem to be an easy way to do it in place\n smoothed_arr = arr.copy()\n weighted_arr = neighbor_weight * arr\n\n smoothed_arr[:-1] += weighted_arr[1:]\n smoothed_arr[1:] += weighted_arr[:-1]\n smoothed_arr[:, :-1] += weighted_arr[:, 1:]\n smoothed_arr[:, 1:] += weighted_arr[:, :-1]\n smoothed_arr[:, :, :-1] += weighted_arr[:, :, 1:]\n smoothed_arr[:, :, 1:] += weighted_arr[:, :, :-1]\n smoothed_arr /= scale\n\n return smoothed_arr\n\n\ndef _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True):\n \"\"\"Smooth images by applying a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of arr.\n\n Parameters\n ----------\n arr: numpy.ndarray\n 4D array, with image number as last dimension. 3D arrays are also\n accepted.\n\n affine: numpy.ndarray\n (4, 4) matrix, giving affine transformation for image. (3, 3) matrices\n are also accepted (only these coefficients are used).\n If fwhm='fast', the affine is not used and can be None\n\n fwhm: scalar, numpy.ndarray, 'fast' or None\n Smoothing strength, as a full-width at half maximum, in millimeters.\n If a scalar is given, width is identical on all three directions.\n A numpy.ndarray must have 3 elements, giving the FWHM along each axis.\n If fwhm == 'fast', a fast smoothing will be performed with\n a filter [0.2, 1, 0.2] in each direction and a normalisation\n to preserve the local average value.\n If fwhm is None, no filtering is performed (useful when just removal\n of non-finite values is needed).\n\n\n ensure_finite: bool\n if True, replace every non-finite values (like NaNs) by zero before\n filtering.\n\n copy: bool\n if True, input array is not modified. False by default: the filtering\n is performed in-place.\n\n Returns\n -------\n filtered_arr: numpy.ndarray\n arr, filtered.\n\n Notes\n -----\n This function is most efficient with arr in C order.\n \"\"\"\n # Here, we have to investigate use cases of fwhm. Particularly, if fwhm=0.\n # See issue #1537\n if fwhm == 0.:\n warnings.warn(\"The parameter 'fwhm' for smoothing is specified \"\n \"as {0}. 
Converting to None (no smoothing option)\"\n .format(fwhm))\n fwhm = None\n\n if arr.dtype.kind == 'i':\n if arr.dtype == np.int64:\n arr = arr.astype(np.float64)\n else:\n # We don't need crazy precision\n arr = arr.astype(np.float32)\n if copy:\n arr = arr.copy()\n\n if ensure_finite:\n # SPM tends to put NaNs in the data outside the brain\n arr[np.logical_not(np.isfinite(arr))] = 0\n\n if fwhm == 'fast':\n arr = _fast_smooth_array(arr)\n elif fwhm is not None:\n # Keep only the scale part.\n affine = affine[:3, :3]\n\n # Convert from a FWHM to a sigma:\n fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2))\n vox_size = np.sqrt(np.sum(affine ** 2, axis=0))\n sigma = fwhm / (fwhm_over_sigma_ratio * vox_size)\n for n, s in enumerate(sigma):\n ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)\n\n return arr\n\n\ndef smooth_img(imgs, fwhm):\n \"\"\"Smooth images by applying a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of arr.\n In all cases, non-finite values in input image are replaced by zeros.\n\n Parameters\n ----------\n imgs: Niimg-like object or iterable of Niimg-like objects\n See http://nilearn.github.io/manipulating_images/input_output.html\n Image(s) to smooth.\n\n fwhm: scalar, numpy.ndarray, 'fast' or None\n Smoothing strength, as a Full-Width at Half Maximum, in millimeters.\n If a scalar is given, width is identical on all three directions.\n A numpy.ndarray must have 3 elements, giving the FWHM along each axis.\n If fwhm == 'fast', a fast smoothing will be performed with\n a filter [0.2, 1, 0.2] in each direction and a normalisation\n to preserve the scale.\n If fwhm is None, no filtering is performed (useful when just removal\n of non-finite values is needed).\n\n In corner case situations, fwhm is simply kept to None when fwhm is\n specified as fwhm=0.\n\n Returns\n -------\n filtered_img: nibabel.Nifti1Image or list of.\n Input image, filtered. If imgs is an iterable, then filtered_img is a\n list.\n \"\"\"\n\n # Use hasattr() instead of isinstance to workaround a Python 2.6/2.7 bug\n # See http://bugs.python.org/issue7624\n if hasattr(imgs, \"__iter__\") \\\n and not isinstance(imgs, _basestring):\n single_img = False\n else:\n single_img = True\n imgs = [imgs]\n\n ret = []\n for img in imgs:\n img = check_niimg(img)\n affine = img.affine\n filtered = _smooth_array(img.get_data(), affine, fwhm=fwhm,\n ensure_finite=True, copy=True)\n ret.append(new_img_like(img, filtered, affine, copy_header=True))\n\n if single_img:\n return ret[0]\n else:\n return ret\n\n\ndef _crop_img_to(img, slices, copy=True):\n \"\"\"Crops image to a smaller size\n\n Crop img to size indicated by slices and adjust affine\n accordingly\n\n Parameters\n ----------\n img: Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n Img to be cropped. If slices has less entries than img\n has dimensions, the slices will be applied to the first len(slices)\n dimensions\n\n slices: list of slices\n Defines the range of the crop.\n E.g. 
[slice(20, 200), slice(40, 150), slice(0, 100)]\n defines a 3D cube\n\n copy: boolean\n Specifies whether cropped data is to be copied or not.\n Default: True\n\n Returns\n -------\n cropped_img: Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n Cropped version of the input image\n \"\"\"\n\n img = check_niimg(img)\n\n data = img.get_data()\n affine = img.affine\n\n cropped_data = data[tuple(slices)]\n if copy:\n cropped_data = cropped_data.copy()\n\n linear_part = affine[:3, :3]\n old_origin = affine[:3, 3]\n new_origin_voxel = np.array([s.start for s in slices])\n new_origin = old_origin + linear_part.dot(new_origin_voxel)\n\n new_affine = np.eye(4)\n new_affine[:3, :3] = linear_part\n new_affine[:3, 3] = new_origin\n\n return new_img_like(img, cropped_data, new_affine)\n\n\ndef crop_img(img, rtol=1e-8, copy=True, pad=True, return_offset=False):\n \"\"\"Crops img as much as possible\n\n Will crop img, removing as many zero entries as possible\n without touching non-zero entries. Will leave one voxel of\n zero padding around the obtained non-zero area in order to\n avoid sampling issues later on.\n\n Parameters\n ----------\n img: Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n img to be cropped.\n\n rtol: float\n relative tolerance (with respect to maximal absolute\n value of the image), under which values are considered\n negligeable and thus croppable.\n\n copy: boolean\n Specifies whether cropped data is copied or not.\n\n pad: boolean\n Toggles adding 1-voxel of 0s around the border. Recommended.\n\n return_offset: boolean\n Specifies whether to return a tuple of the removed padding.\n\n Returns\n -------\n cropped_img: image\n Cropped version of the input image\n\n offset: list (optional)\n List of tuples representing the number of voxels removed (before, after)\n the cropped volumes, i.e.:\n [(x1_pre, x1_post), (x2_pre, x2_post), ..., (xN_pre, xN_post)]\n\n \"\"\"\n\n img = check_niimg(img)\n data = img.get_data()\n infinity_norm = max(-data.min(), data.max())\n passes_threshold = np.logical_or(data < -rtol * infinity_norm,\n data > rtol * infinity_norm)\n\n if data.ndim == 4:\n passes_threshold = np.any(passes_threshold, axis=-1)\n coords = np.array(np.where(passes_threshold))\n\n # Sets full range if no data are found along the axis\n if coords.shape[1] == 0:\n start, end = [0, 0, 0], list(data.shape)\n else:\n start = coords.min(axis=1)\n end = coords.max(axis=1) + 1\n\n # pad with one voxel to avoid resampling problems\n if pad:\n start = np.maximum(start - 1, 0)\n end = np.minimum(end + 1, data.shape[:3])\n\n slices = [slice(s, e) for s, e in zip(start, end)][:3]\n cropped_im = _crop_img_to(img, slices, copy=copy)\n return cropped_im if not return_offset else (cropped_im, tuple(slices))\n\n\ndef _pad_array(array, pad_sizes):\n \"\"\"Pad an ndarray with zeros of quantity specified\n as follows pad_sizes = [x1minpad, x1maxpad, x2minpad,\n x2maxpad, x3minpad, ...]\n \"\"\"\n\n if len(pad_sizes) % 2 != 0:\n raise ValueError(\"Please specify as many max paddings as min\"\n \" paddings. 
You have specified %d arguments\" %\n len(pad_sizes))\n\n all_paddings = np.zeros([array.ndim, 2], dtype=np.int64)\n all_paddings[:len(pad_sizes) // 2] = np.array(pad_sizes).reshape(-1, 2)\n\n lower_paddings, upper_paddings = all_paddings.T\n new_shape = np.array(array.shape) + upper_paddings + lower_paddings\n\n padded = np.zeros(new_shape, dtype=array.dtype)\n source_slices = [slice(max(-lp, 0), min(s + up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n array.shape)]\n target_slices = [slice(max(lp, 0), min(s - up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n new_shape)]\n\n padded[tuple(target_slices)] = array[source_slices].copy()\n return padded\n\n\ndef _compute_mean(imgs, target_affine=None,\n target_shape=None, smooth=False):\n from . import resampling\n input_repr = _repr_niimgs(imgs)\n\n imgs = check_niimg(imgs)\n mean_data = _safe_get_data(imgs)\n affine = imgs.affine\n # Free memory ASAP\n imgs = None\n if not mean_data.ndim in (3, 4):\n raise ValueError('Computation expects 3D or 4D '\n 'images, but %i dimensions were given (%s)'\n % (mean_data.ndim, input_repr))\n if mean_data.ndim == 4:\n mean_data = mean_data.mean(axis=-1)\n else:\n mean_data = mean_data.copy()\n mean_data = resampling.resample_img(\n nibabel.Nifti1Image(mean_data, affine),\n target_affine=target_affine, target_shape=target_shape,\n copy=False)\n affine = mean_data.affine\n mean_data = mean_data.get_data()\n\n if smooth:\n nan_mask = np.isnan(mean_data)\n mean_data = _smooth_array(mean_data, affine=np.eye(4), fwhm=smooth,\n ensure_finite=True, copy=False)\n mean_data[nan_mask] = np.nan\n\n return mean_data, affine\n\n\ndef mean_img(imgs, target_affine=None, target_shape=None,\n verbose=0, n_jobs=1):\n \"\"\" Compute the mean of the images (in the time dimension of 4th dimension)\n\n Note that if list of 4D images are given, the mean of each 4D image is\n computed separately, and the resulting mean is computed after.\n\n Parameters\n ----------\n\n imgs: Niimg-like object or iterable of Niimg-like objects\n See http://nilearn.github.io/manipulating_images/input_output.html\n Images to mean.\n\n target_affine: numpy.ndarray, optional\n If specified, the image is resampled corresponding to this new affine.\n target_affine can be a 3x3 or a 4x4 matrix\n\n target_shape: tuple or list, optional\n If specified, the image will be resized to match this new shape.\n len(target_shape) must be equal to 3.\n A target_affine has to be specified jointly with target_shape.\n\n verbose: int, optional\n Controls the amount of verbosity: higher numbers give\n more messages (0 means no messages).\n\n n_jobs: integer, optional\n The number of CPUs to use to do the computation. 
-1 means\n 'all CPUs'.\n\n Returns\n -------\n mean: nibabel.Nifti1Image\n mean image\n\n See Also\n --------\n nilearn.image.math_img : For more general operations on images\n\n \"\"\"\n if (isinstance(imgs, _basestring) or\n not isinstance(imgs, collections.Iterable)):\n imgs = [imgs, ]\n\n imgs_iter = iter(imgs)\n first_img = check_niimg(next(imgs_iter))\n\n # Compute the first mean to retrieve the reference\n # target_affine and target_shape if_needed\n n_imgs = 1\n running_mean, first_affine = _compute_mean(first_img,\n target_affine=target_affine,\n target_shape=target_shape)\n\n if target_affine is None or target_shape is None:\n target_affine = first_affine\n target_shape = running_mean.shape[:3]\n\n for this_mean in Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(_compute_mean)(n, target_affine=target_affine,\n target_shape=target_shape)\n for n in imgs_iter):\n n_imgs += 1\n # _compute_mean returns (mean_img, affine)\n this_mean = this_mean[0]\n running_mean += this_mean\n\n running_mean = running_mean / float(n_imgs)\n return new_img_like(first_img, running_mean, target_affine)\n\n\ndef swap_img_hemispheres(img):\n \"\"\"Performs swapping of hemispheres in the indicated nifti.\n\n Use case: synchronizing ROIs across hemispheres\n\n Parameters\n ----------\n img: Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n Images to swap.\n\n Returns\n -------\n output: nibabel.Nifti1Image\n hemispherically swapped image\n\n Notes\n -----\n Supposes a nifti of a brain that is sagitally aligned\n\n Should be used with caution (confusion might be caused with\n radio/neuro conventions)\n\n Note that this does not require a change of the affine matrix.\n \"\"\"\n from .resampling import reorder_img\n\n # Check input is really a path to a nifti file or a nifti object\n img = check_niimg_3d(img)\n\n # get nifti in x-y-z order\n img = reorder_img(img)\n\n # create swapped nifti object\n out_img = new_img_like(img, img.get_data()[::-1], img.affine,\n copy_header=True)\n\n return out_img\n\n\ndef index_img(imgs, index):\n \"\"\"Indexes into a 4D Niimg-like object in the fourth dimension.\n\n Common use cases include extracting a 3D image out of `img` or\n creating a 4D image whose data is a subset of `img` data.\n\n Parameters\n ----------\n imgs: 4D Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n\n index: Any type compatible with numpy array indexing\n Used for indexing the 4D data array in the fourth dimension.\n\n Returns\n -------\n output: nibabel.Nifti1Image\n\n See Also\n --------\n nilearn.image.concat_imgs\n nilearn.image.iter_img\n\n Examples\n --------\n First we concatenate two mni152 images to create a 4D-image::\n\n >>> from nilearn import datasets\n >>> from nilearn.image import concat_imgs, index_img\n >>> joint_mni_image = concat_imgs([datasets.load_mni152_template(),\n ... 
datasets.load_mni152_template()])\n >>> print(joint_mni_image.shape)\n (91, 109, 91, 2)\n\n We can now select one slice from the last dimension of this 4D-image::\n\n >>> single_mni_image = index_img(joint_mni_image, 1)\n >>> print(single_mni_image.shape)\n (91, 109, 91)\n \"\"\"\n imgs = check_niimg_4d(imgs)\n # duck-type for pandas arrays, and select the 'values' attr\n if hasattr(index, 'values') and hasattr(index, 'iloc'):\n index = index.values.flatten()\n return _index_img(imgs, index)\n\n\ndef iter_img(imgs):\n \"\"\"Iterates over a 4D Niimg-like object in the fourth dimension.\n\n Parameters\n ----------\n imgs: 4D Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n\n Returns\n -------\n output: iterator of 3D nibabel.Nifti1Image\n\n See Also\n --------\n nilearn.image.index_img\n\n \"\"\"\n return check_niimg_4d(imgs, return_iterator=True)\n\n\ndef new_img_like(ref_niimg, data, affine=None, copy_header=False):\n \"\"\"Create a new image of the same class as the reference image\n\n Parameters\n ----------\n ref_niimg: image\n Reference image. The new image will be of the same type.\n\n data: numpy array\n Data to be stored in the image\n\n affine: 4x4 numpy array, optional\n Transformation matrix\n\n copy_header: boolean, optional\n Indicated if the header of the reference image should be used to\n create the new image\n\n Returns\n -------\n new_img: image\n A loaded image with the same type (and header) as the reference image.\n \"\"\"\n # Hand-written loading code to avoid too much memory consumption\n orig_ref_niimg = ref_niimg\n if (not isinstance(ref_niimg, _basestring)\n and not hasattr(ref_niimg, 'get_data')\n and hasattr(ref_niimg, '__iter__')):\n ref_niimg = ref_niimg[0]\n if not (hasattr(ref_niimg, 'get_data')\n and hasattr(ref_niimg, 'affine')):\n if isinstance(ref_niimg, _basestring):\n ref_niimg = nibabel.load(ref_niimg)\n else:\n raise TypeError(('The reference image should be a niimg, %r '\n 'was passed') % orig_ref_niimg)\n\n if affine is None:\n affine = ref_niimg.affine\n if data.dtype == bool:\n default_dtype = np.int8\n if isinstance(ref_niimg, nibabel.freesurfer.mghformat.MGHImage):\n default_dtype = np.uint8\n data = as_ndarray(data, dtype=default_dtype)\n header = None\n if copy_header:\n header = copy.deepcopy(ref_niimg.header)\n if 'scl_slope' in header:\n header['scl_slope'] = 0.\n if 'scl_inter' in header:\n header['scl_inter'] = 0.\n # 'glmax' is removed for Nifti2Image. Modify only if 'glmax' is\n # available in header. See issue #1611\n if 'glmax' in header:\n header['glmax'] = 0.\n if 'cal_max' in header:\n header['cal_max'] = np.max(data) if data.size > 0 else 0.\n if 'cal_min' in header:\n header['cal_min'] = np.min(data) if data.size > 0 else 0.\n klass = ref_niimg.__class__\n if klass is nibabel.Nifti1Pair:\n # Nifti1Pair is an internal class, without a to_filename,\n # we shouldn't return it\n klass = nibabel.Nifti1Image\n return klass(data, affine, header=header)\n\n\ndef threshold_img(img, threshold, mask_img=None):\n \"\"\" Threshold the given input image, mostly statistical or atlas images.\n\n Thresholding can be done based on direct image intensities or selection\n threshold with given percentile.\n\n .. 
versionadded:: 0.2\n\n Parameters\n ----------\n img: a 3D/4D Niimg-like object\n Image contains of statistical or atlas maps which should be thresholded.\n\n threshold: float or str\n If float, we threshold the image based on image intensities meaning\n voxels which have intensities greater than this value will be kept.\n The given value should be within the range of minimum and\n maximum intensity of the input image.\n If string, it should finish with percent sign e.g. \"80%\" and we threshold\n based on the score obtained using this percentile on the image data. The\n voxels which have intensities greater than this score will be kept.\n The given string should be within the range of \"0%\" to \"100%\".\n\n mask_img: Niimg-like object, default None, optional\n Mask image applied to mask the input data.\n If None, no masking will be applied.\n\n Returns\n -------\n threshold_img: Nifti1Image\n thresholded image of the given input image.\n \"\"\"\n from . import resampling\n from .. import masking\n\n img = check_niimg(img)\n img_data = _safe_get_data(img, ensure_finite=True)\n affine = img.affine\n\n if mask_img is not None:\n mask_img = check_niimg_3d(mask_img)\n if not _check_same_fov(img, mask_img):\n mask_img = resampling.resample_img(mask_img, target_affine=affine,\n target_shape=img.shape[:3],\n interpolation=\"nearest\")\n\n mask_data, _ = masking._load_mask_img(mask_img)\n # Set as 0 for the values which are outside of the mask\n img_data[mask_data == 0.] = 0.\n\n if threshold is None:\n raise ValueError(\"The input parameter 'threshold' is empty. \"\n \"Please give either a float value or a string as e.g. '90%'.\")\n else:\n cutoff_threshold = check_threshold(threshold, img_data,\n percentile_func=scoreatpercentile,\n name='threshold')\n\n img_data[np.abs(img_data) < cutoff_threshold] = 0.\n threshold_img = new_img_like(img, img_data, affine)\n\n return threshold_img\n\n\ndef math_img(formula, **imgs):\n \"\"\"Interpret a numpy based string formula using niimg in named parameters.\n\n .. versionadded:: 0.2.3\n\n Parameters\n ----------\n formula: str\n The mathematical formula to apply to image internal data. It can use\n numpy imported as 'np'.\n imgs: images (Nifti1Image or file names)\n Keyword arguments corresponding to the variables in the formula as\n Nifti images. All input images should have the same geometry (shape,\n affine).\n\n Returns\n -------\n return_img: Nifti1Image\n Result of the formula as a Nifti image. Note that the dimension of the\n result image can be smaller than the input image. The affine is the\n same as the input image.\n\n See Also\n --------\n nilearn.image.mean_img : To simply compute the mean of multiple images\n\n Examples\n --------\n Let's load an image using nilearn datasets module::\n\n >>> from nilearn import datasets\n >>> anatomical_image = datasets.load_mni152_template()\n\n Now we can use any numpy function on this image::\n\n >>> from nilearn.image import math_img\n >>> log_img = math_img(\"np.log(img)\", img=anatomical_image)\n\n We can also apply mathematical operations on several images::\n\n >>> result_img = math_img(\"img1 + img2\",\n ... 
img1=anatomical_image, img2=log_img)\n\n Notes\n -----\n\n This function is the Python equivalent of ImCal in SPM or fslmaths\n in FSL.\n\n \"\"\"\n try:\n # Check that input images are valid niimg and have a compatible shape\n # and affine.\n niimgs = []\n for image in imgs.values():\n niimgs.append(check_niimg(image))\n _check_same_fov(*niimgs, raise_error=True)\n except Exception as exc:\n exc.args = ((\"Input images cannot be compared, you provided '{0}',\"\n .format(imgs.values()),) + exc.args)\n raise\n\n # Computing input data as a dictionary of numpy arrays. Keep a reference\n # niimg for building the result as a new niimg.\n niimg = None\n data_dict = {}\n for key, img in imgs.items():\n niimg = check_niimg(img)\n data_dict[key] = _safe_get_data(niimg)\n\n # Add a reference to numpy in the kwargs of eval so that numpy functions\n # can be called from there.\n data_dict['np'] = np\n try:\n result = eval(formula, data_dict)\n except Exception as exc:\n exc.args = ((\"Input formula couldn't be processed, you provided '{0}',\"\n .format(formula),) + exc.args)\n raise\n\n return new_img_like(niimg, result, niimg.affine)\n\n\ndef clean_img(imgs, sessions=None, detrend=True, standardize=True,\n confounds=None, low_pass=None, high_pass=None, t_r=None,\n ensure_finite=False, mask_img=None):\n \"\"\"Improve SNR on masked fMRI signals.\n\n This function can do several things on the input signals, in\n the following order:\n\n - detrend\n - low- and high-pass filter\n - remove confounds\n - standardize\n\n Low-pass filtering improves specificity.\n\n High-pass filtering should be kept small, to keep some\n sensitivity.\n\n Filtering is only meaningful on evenly-sampled signals.\n\n According to Lindquist et al. (2018), removal of confounds will be done\n orthogonally to temporal filters (low- and/or high-pass filters), if both\n are specified.\n\n .. versionadded:: 0.2.5\n\n Parameters\n ----------\n imgs: Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n 4D image. The signals in the last dimension are filtered.\n\n sessions : numpy array, optional\n Add a session level to the cleaning process. Each session will be\n cleaned independently. Must be a 1D array of n_samples elements.\n\n detrend: bool\n If detrending should be applied on timeseries (before\n confound removal)\n\n standardize: bool\n If True, returned signals are set to unit variance.\n\n confounds: numpy.ndarray, str or list of\n Confounds timeseries. Shape must be\n (instant number, confound number), or just (instant number,)\n The number of time instants in signals and confounds must be\n identical (i.e. signals.shape[0] == confounds.shape[0]).\n If a string is provided, it is assumed to be the name of a csv file\n containing signals as columns, with an optional one-line header.\n If a list is provided, all confounds are removed from the input\n signal, as if all were in the same array.\n\n low_pass, high_pass: float\n Respectively low and high cutoff frequencies, in Hertz.\n\n t_r: float, optional\n Repetition time, in second (sampling period). Set to None if not\n specified. Mandatory if used together with low_pass or high_pass.\n\n ensure_finite: bool, optional\n If True, the non-finite values (NaNs and infs) found in the images\n will be replaced by zeros.\n\n mask_img: Niimg-like object, optional\n See http://nilearn.github.io/manipulating_images/input_output.html\n If provided, signal is only cleaned from voxels inside the mask. 
If\n mask is provided, it should have same shape and affine as imgs.\n If not provided, all voxels are used.\n\n Returns\n -------\n cleaned_img: Niimg-like object\n Input images, cleaned. Same shape as `imgs`.\n\n Notes\n -----\n Confounds removal is based on a projection on the orthogonal\n of the signal space. See `Friston, K. J., A. P. Holmes,\n K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak.\n \"Statistical Parametric Maps in Functional Imaging: A General\n Linear Approach\". Human Brain Mapping 2, no 4 (1994): 189-210.\n <http://dx.doi.org/10.1002/hbm.460020402>`_\n\n Orthogonalization between temporal filters and confound removal is based on\n suggestions in `Lindquist, M., Geuter, S., Wager, T., & Caffo, B. (2018).\n Modular preprocessing pipelines can reintroduce artifacts into fMRI data.\n bioRxiv, 407676. <http://dx.doi.org/10.1101/407676>`_\n\n See Also\n --------\n nilearn.signal.clean\n \"\"\"\n # Avoid circular import\n from .image import new_img_like\n from .. import masking\n\n imgs_ = check_niimg_4d(imgs)\n\n # Check if t_r is set, otherwise propose t_r from imgs header\n if low_pass is not None or high_pass is not None:\n if t_r is None:\n\n # We raise an error, instead of using the header's t_r as this\n # value is considered to be non-reliable\n raise ValueError(\n \"Repetition time (t_r) must be specified for filtering. You \"\n \"specified None. imgs header suggest it to be {0}\".format(\n imgs.header.get_zooms()[3]))\n\n # Prepare signal for cleaning\n if mask_img is not None:\n signals = masking.apply_mask(imgs_, mask_img)\n else:\n signals = imgs_.get_data().reshape(-1, imgs_.shape[-1]).T\n\n # Clean signal\n data = signal.clean(\n signals, sessions=sessions, detrend=detrend, standardize=standardize,\n confounds=confounds, low_pass=low_pass, high_pass=high_pass, t_r=t_r,\n ensure_finite=ensure_finite)\n\n # Put results back into Niimg-like object\n if mask_img is not None:\n imgs_ = masking.unmask(data, mask_img)\n else:\n imgs_ = new_img_like(\n imgs_, data.T.reshape(imgs_.shape), copy_header=True)\n\n return imgs_\n\n\ndef load_img(img, wildcards=True, dtype=None):\n \"\"\"Load a Niimg-like object from filenames or list of filenames.\n\n .. versionadded:: 0.2.5\n\n Parameters\n ----------\n img: Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. The '~' symbol is expanded to the user home\n folder.\n If it is an object, check if get_data()\n and affine attributes are present, raise TypeError otherwise.\n\n wildcards: bool, optional\n Use niimg as a regular expression to get a list of matching input\n filenames.\n If multiple files match, the returned list is sorted using an ascending\n order.\n If no file matches the regular expression, a ValueError exception is\n raised.\n\n dtype: {dtype, \"auto\"}\n Data type toward which the data should be converted. If \"auto\", the\n data will be converted to int32 if dtype is discrete and float32 if it\n is continuous.\n\n Returns\n -------\n result: 3D/4D Niimg-like object\n Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed\n that the returned object has get_data() and affine attributes.\n \"\"\"\n return check_niimg(img, wildcards=wildcards, dtype=dtype)\n\n\ndef largest_connected_component_img(imgs):\n \"\"\" Return the largest connected component of an image or list of images.\n\n .. 
versionadded:: 0.3.1\n\n Parameters\n ----------\n imgs: Niimg-like object or iterable of Niimg-like objects (3D)\n See http://nilearn.github.io/manipulating_images/input_output.html\n Image(s) to extract the largest connected component from.\n\n Returns\n -------\n img or list of img containing the largest connected component\n\n Notes\n -----\n\n **Handling big-endian in given Nifti image**\n This function changes the existing byte-ordering information to new byte\n order, if the dtype in given Nifti image has non-native data type.\n This operation is done internally to avoid big-endian issues with\n scipy ndimage module.\n \"\"\"\n from .._utils.ndimage import largest_connected_component\n\n if hasattr(imgs, \"__iter__\") and not isinstance(imgs, _basestring):\n single_img = False\n else:\n single_img = True\n imgs = [imgs]\n\n ret = []\n for img in imgs:\n img = check_niimg_3d(img)\n affine = img.affine\n largest_component = largest_connected_component(_safe_get_data(img))\n ret.append(new_img_like(img, largest_component, affine,\n copy_header=True))\n\n if single_img:\n return ret[0]\n else:\n return ret\n"
]
| [
[
"numpy.minimum",
"numpy.min",
"numpy.where",
"sklearn.externals.joblib.Parallel",
"numpy.max",
"numpy.log",
"numpy.eye",
"numpy.isfinite",
"numpy.logical_or",
"numpy.array",
"numpy.reshape",
"numpy.zeros",
"numpy.isnan",
"numpy.sum",
"numpy.any",
"scipy.ndimage.gaussian_filter1d",
"numpy.abs",
"sklearn.externals.joblib.delayed",
"numpy.maximum"
]
]
|
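Note: the image-utility code in the row above implements "fast" smoothing as a fixed blend of each voxel with its 6 face neighbours (weight 0.2), rescaled so a uniform volume stays uniform; the Gaussian path instead converts FWHM to sigma via sigma = FWHM / (sqrt(8 ln 2) * voxel size). A minimal sketch of the fast-smooth operation, assuming plain NumPy arrays rather than the Niimg objects the file works with:

```python
import numpy as np

def fast_smooth(arr, neighbor_weight=0.2):
    # Blend each voxel with its 6 face neighbours along the first three
    # axes, then rescale so a uniform volume stays uniform off the edges.
    smoothed = arr.astype(np.float64)
    weighted = neighbor_weight * arr
    for axis in range(3):
        lo = [slice(None)] * arr.ndim
        hi = [slice(None)] * arr.ndim
        lo[axis], hi[axis] = slice(None, -1), slice(1, None)
        smoothed[tuple(lo)] += weighted[tuple(hi)]
        smoothed[tuple(hi)] += weighted[tuple(lo)]
    return smoothed / (1 + 6 * neighbor_weight)

vol = np.ones((4, 4, 4))
print(np.allclose(fast_smooth(vol)[1:-1, 1:-1, 1:-1], 1.0))  # True: interior stays uniform
```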
Cgadal/PyDune | [
"9f4ca2b154734d88d4eaeb2b41d86dd37b43cabf"
]
| [
"PyDune/data_processing/meteorological/downloadCDS.py"
]
| [
"\"\"\"\nThis module allows to dowload data from the ERA5 and ERA5Land datasets hosted at\nthe `Climate Data Store <https://cds.climate.copernicus.eu/#!/home>`_ . Before\nusing this module, please read the corresponding documentation\n`here <https://confluence.ecmwf.int/display/CKB/How+to+download+ERA5`_,\nand especially the `part 4 <https://confluence.ecmwf.int/display/CKB/How+to+download+ERA5#HowtodownloadERA5-4-DownloadERA5familydatathroughtheCDSAPI>`_.\n\nRoughly, the steps are:\n\n - create a CDS account `here <https://cds.climate.copernicus.eu/user/register>`_\n - install the CDS API, typically using `pip3 install cdsapi`\n - install the CDS API key corresponding to your account on your computer,\n following the steps described `here <https://confluence.ecmwf.int/display/CKB/How+to+install+and+use+CDS+API+on+macOS>`_\n\n..warning:\n Still experimental, and scarce documentation.\n\"\"\"\n\nimport cdsapi\nimport os\nimport numpy as np\nfrom decimal import Decimal\nfrom scipy.io import netcdf\nfrom datetime import datetime, timezone, timedelta\n\n\ndef getting_wind_data(dataset, variable_dic, name, Nsplit=1, file='info.txt', on_grid=True):\n \"\"\" This fuction helps to download data from datasets stored in the Climate Data Store.\n\n Parameters\n ----------\n dataset : int\n dataset in which downloading the data.\n It can be 'reanalysis-era5-single-levels' or 'reanalysis-era5-land' for now.\n variable_dic : dic\n variable dictionnary to provide as a request.\n name : str\n name used to label the downloaded files.\n Nsplit : int\n number of requests in which the main request is split (the default is 1).\n If too small, will be corrected automatically.\n file : str\n filename under which some information about the request will be saved (the default is 'info.txt').\n on_grid : bool\n if True, the required coordinates will be matched with the native grid\n of the requested dataset. Otherwise, the dataset will be downloaded at\n the requested coordinates, using the interpolation of the CDS server (the default is True).\n\n Returns\n -------\n file_names : list\n the list of downloaded file names\n\n Examples\n --------\n >>> month = [i for i in range(1, 13)]\n >>> day = [i for i in range(1, 32)]\n >>> time = [i for i in range(0, 24)]\n >>> year = [i for i in range(1950, 2023)]\n >>> area = [-16.65, 11.9, -16.66, 11.91]\n >>> variable_dic = {'format': 'netcdf',\n 'variable': ['v10'],\n 'month': month,\n 'day': day,\n 'time': time,\n 'year': year,\n 'area': area,\n 'grid': [1.0, 1.0]}\n >>> a = CDS.Getting_wind_data('reanalysis-era5-land',\n variable_dic, 'Angola_coast_v10',\n Nsplit=6,\n file='info.txt',\n on_grid=False)\n\n \"\"\"\n Names = {'reanalysis-era5-single-levels': 'ERA5', 'reanalysis-era5-land': 'ERA5Land'}\n Nitems_max = {'reanalysis-era5-single-levels': 120000, 'reanalysis-era5-land': 100000}\n area_ref = [0, 0]\n #\n if Nsplit < 1:\n Nsplit = 1\n Nitems = len(variable_dic['variable']) * (365.25 * len(variable_dic['month'])/12 * len(variable_dic['day'])/31) \\\n * len(variable_dic['time']) * len(variable_dic['year'])\n if Nitems/Nsplit > Nitems_max[dataset]:\n Nsplit = round(Nitems/Nitems_max[dataset]) + 1\n print('Request too large. 
Setting Nsplit =', Nsplit)\n\n # Defining years for data, either from dic variable\n dates = np.array([int(i) for i in variable_dic['year']])\n\n # Puting the required area on the ERA5 grid\n area_wanted = variable_dic['area']\n if on_grid:\n area_wanted[0] = area_wanted[0] - float(Decimal(str(area_wanted[0] - area_ref[0])) % Decimal(str(variable_dic['grid'])))\n area_wanted[1] = area_wanted[1] - float(Decimal(str(area_wanted[1] - area_ref[1])) % Decimal(str(variable_dic['grid'])))\n area_wanted[2] = area_wanted[2] - float(Decimal(str(area_wanted[2] - area_ref[0])) % Decimal(str(variable_dic['grid'])))\n area_wanted[3] = area_wanted[3] - float(Decimal(str(area_wanted[3] - area_ref[1])) % Decimal(str(variable_dic['grid'])))\n #\n variable_dic['area'] = area_wanted\n\n print('Area is :', area_wanted)\n #\n # Spliting request\n dates = np.array([int(i) for i in variable_dic['year']])\n year_list = [list(map(str, j)) for j in np.array_split(dates, Nsplit)]\n #\n # checking the Nitems for every Nsplit\n Nitems_list = np.array([len(variable_dic['variable']) * (365.25 * len(variable_dic['month'])/12 * len(variable_dic['day'])/31)*len(variable_dic['time']) * len(i)\n for i in year_list])\n if (Nitems_list > Nitems_max[dataset]).any():\n Nsplit = Nsplit + 1\n year_list = [list(map(str, j)) for j in np.array_split(dates, Nsplit)]\n #\n # Launching requests by year bins\n file_names = []\n for years in year_list:\n string = years[0] + 'to' + years[-1]\n print(string)\n file_names.append(Names[dataset] + string + '_' + name + '.' + variable_dic['format'])\n c = cdsapi.Client()\n variable_dic['year'] = years\n c.retrieve(dataset, variable_dic, file_names[-1])\n # Writing informations to spec file\n _save_spec_to_txt(dataset, variable_dic, file)\n return file_names\n\n\ndef load_netcdf(files_list):\n \"\"\" This function loads and concatenate (along the time axis) several NETCDF\n files from a list of filenames.\n\n Parameters\n ----------\n files_list : list\n the list of downloaded file names.\n\n Returns\n -------\n data : dict\n a dictionnary containing all data, concatenated along the time axis.\n\n \"\"\"\n Data = {}\n for j, file in enumerate(files_list):\n file_temp = netcdf.NetCDFFile(file, 'r', maskandscale=True)\n for key in file_temp.variables.keys():\n if key not in Data.keys():\n Data[key] = file_temp.variables[key][:]\n elif key not in ['latitude', 'longitude']:\n Data[key] = np.concatenate((Data[key], file_temp.variables[key][:]), axis=0)\n #\n Data['time'] = _convert_time(Data['time'].astype(np.float64))\n return Data\n\n\ndef _save_spec_to_txt(dataset, variable_dic, file):\n if os.path.isfile(file):\n print(file + ' already exists')\n else:\n with open(file, \"w\") as f:\n f.write('dataset: ' + dataset + '\\n')\n f.write('\\n')\n for key in sorted(variable_dic.keys()):\n f.write(str(key) + ': ' + str(variable_dic[key]) + '\\n')\n f.write('\\n')\n\n# def Extract_points(points, file_format = 'npy', system_coordinates = 'cartesian'):\n# ######## function to extract specific points and write (u, v) velocity to <format> files\n# # points can either be a list of integers (1 is top left of the grid), or a list of coordinates (lat, lon)\n# # file_format is 'npy' or 'txt'\n# # system_coordinates is cartesian or polar\n# points = np.array(points)\n# if system_coordinates == 'polar':\n# self.Cartesian_to_polar()\n# for i, coords in points:\n# ## if for referencing point system\n# if ((len(points.shape) == 2) & (points.shape[-1] == 2)):\n# lat_ind = np.argwhere(coords[0] == self.latitude)[0][0]\n# 
lon_ind = np.argwhere(coords[1] == self.longitude)[0][0]\n# indexes = _sub2ind(self.Uwind[:-1], lat_ind, lon_ind)\n# else:\n# lat_ind, lon_ind = _ind2sub(self.Uwind[:-1], coords)\n# indexes = coords\n# #\n# # if for data coordinate system\n# if system_coordinates == 'cartesian':\n# data_to_write = [self.Uwind[lat_ind, lon_ind, :], self.Vwind[lat_ind, lon_ind, :]]\n# elif system_coordinates == 'polar':\n# data_to_write = [self.Ustrength[lat_ind, lon_ind, :], self.Uorientation[lat_ind, lon_ind, :]]\n# #\n# # if for saved file format\n# if file_format == 'npy':\n# np.save('Point_' + str(indexes) + '.npy', )\n# else:\n# np.savetxt('Point_' + str(indexes) + '.txt', [self.Uwind[lat_ind, lon_ind, :], self.Vwind[lat_ind, lon_ind, :]])\n\n#############################################################################################\n\n\ndef _format_time(date):\n return '{:04d}'.format(date[0]) + '-' + '{:02d}'.format(date[1]) + '-' + '{:02d}'.format(date[2])\n\n\ndef _file_lenght(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n\n\ndef _convert_time(Times):\n atmos_epoch = datetime(1900, 1, 1, 0, 0, tzinfo=timezone.utc)\n # convert array of times in hours from epoch to dates\n return np.array([atmos_epoch + timedelta(hours=i) for i in Times])\n\n\ndef _sub2ind(array_shape, rows, cols):\n return rows*array_shape[1] + cols\n\n\ndef _ind2sub(array_shape, ind):\n rows = (ind.astype('int') / array_shape[1])\n cols = (ind.astype('int') % array_shape[1]) # or numpy.mod(ind.astype('int'), array_shape[1])\n return (rows, cols)\n"
]
| [
[
"numpy.concatenate",
"scipy.io.netcdf.NetCDFFile",
"numpy.array_split"
]
]
|
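Note: `getting_wind_data` in the row above snaps the requested area onto the dataset's native grid by subtracting the remainder of each coordinate modulo the grid step, using `Decimal` on string forms to dodge binary-float modulo artefacts. A hedged sketch of that step in isolation (`snap_to_grid` is a hypothetical helper name):

```python
from decimal import Decimal

def snap_to_grid(coord, grid_step, ref=0.0):
    # Subtract the remainder of (coord - ref) modulo the grid step, as in
    # the area_wanted adjustment above; Decimal(str(...)) keeps it exact.
    return coord - float(Decimal(str(coord - ref)) % Decimal(str(grid_step)))

print(snap_to_grid(11.93, 0.25))  # 11.75, the grid node the CDS request will use
```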
stalei/TagMix | [
"36b7b6619c835b981c6f769cffbc34677d561346"
]
| [
"TagMix.py"
]
| [
"# © Shahram Talei @ 2021 The University of Alabama - All rights reserved.\n#you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 3 of the License, or\n#(at your option) any later version.\n#You should have received a copy of the GNU General Public License\n#along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\nfrom __future__ import division\nimport yt\nimport numpy as np\nfrom yt.analysis_modules.halo_finding.api import *\nfrom yt.analysis_modules.halo_analysis.api import *\nfrom os import environ\nenviron['CFLAGS'] = \"-I\"+np.get_include()\n\nimport pyximport; pyximport.install()\n#import particle_ops\nimport argparse\n\n\nimport tempfile\nimport shutil\nimport os\nimport sys\n\nfrom scipy.spatial.transform import Rotation as R\nfrom numpy import linalg as LA\nfrom operator import mul\nfrom functools import reduce\nimport matplotlib.pyplot as plt\n\nimport csv\nimport h5py as h5\n\nplt.rcParams[\"font.size\"] =12\n\ndef partition(array, begin, end):\n pivot = begin\n for i in range(begin+1, end+1):\n if array[i] <= array[begin]:\n pivot += 1\n array[i], array[pivot] = array[pivot], array[i]\n array[pivot], array[begin] = array[begin], array[pivot]\n return pivot\n\n\n\ndef quicksort(array, begin=0, end=None):\n if end is None:\n end = len(array) - 1\n def _quicksort(array, begin, end):\n if begin >= end:\n return\n pivot = partition(array, begin, end)\n _quicksort(array, begin, pivot-1)\n _quicksort(array, pivot+1, end)\n return _quicksort(array, begin, end)\n\n #how to run: python ShapeAnalysis.py snapshot_file halo_catalog particles_list check_contamination extract_shape bin_number iteration\n #example: $python ShapeAnalysisV2.py snap_264 halos_0.0.ascii halos_0.0.particles 1.4e12 1.1e12 1 1 5 3\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"snap\", type=str)\n parser.add_argument(\"halo\",type=str)\n parser.add_argument(\"GalFile\",type=str)\n parser.add_argument(\"fraction\",type=float)\n parser.add_argument(\"Age\",type=float)\n args = parser.parse_args()\n Gals=np.genfromtxt(args.GalFile, delimiter = ',')\n Gx0=np.array(Gals[:,0])\n Gy0=np.array(Gals[:,1])\n Gz0=np.array(Gals[:,2])\n GMv0=np.array(Gals[:,3])\n GRv0=np.array(Gals[:,4])\n GS0=np.array(Gals[:,5])\n GSM0=np.array(Gals[:,6])\n GIndex0=np.array(Gals[:,7])\n HIndex0=np.array(Gals[:,8])\n CentralG0=np.array(Gals[:,9])\n MetalStellarG0=np.array(Gals[:,10])\n #\n snap = yt.load(args.snap)\n ad = snap.all_data()\n coordinatesDM = ad[(\"Halo\",\"Coordinates\")]\n velocitiesDM = ad[(\"Halo\",\"Velocities\")]\n IDDM = ad[(\"Halo\",\"ParticleIDs\")]\n #BEDM = ad[(\"Halo\",\"BindingEnergy\")]\n #print(sorted(snap.field_list))\n p=np.array(coordinatesDM)\n v=np.array(velocitiesDM)\n Id=np.array(IDDM)\n #print(p[:,1].shape)\n #print(p[1:])\n px=p[:,0]\n py=p[:,1]\n pz=p[:,2]\n pVx=v[:,0]\n pVy=v[:,1]\n pVz=v[:,2]\n print(\"Total number of galaxies:%d\"%len(Gx0))\n #hf = h5.File('s1.h5', 'w')\n for i in range(0,len(Gx0)):\n if True:\n print(i)\n #if GSM0[i] !=0:\n dx=px-Gx0[i]\n dy=py-Gy0[i]\n dz=pz-Gz0[i]\n r2=dx*dx+dy*dy+dz*dz\n r=np.sqrt(r2)\n Pcount=len(r[r<=GRv0[i]])\n tagLimit=int((args.fraction/100.)*Pcount)\n if tagLimit>Pcount:\n tagLimit=Pcount\n if Pcount==0:\n continue\n # tagLimit=1\n rLim=GRv0[i]\n pxh=px[r<=rLim]\n pyh=py[r<=rLim]\n pzh=pz[r<=rLim]\n pVxh=pVx[r<=rLim]\n pVyh=pVy[r<=rLim]\n pVzh=pVz[r<=rLim]\n Idh=Id[r<=rLim]\n size=len(pxh)\n print(\"Ps 
in Rv:%d\"%size)\n #PotE=[0.0]*size\n #KinE=[0.0]*size\n BE=[0.0]*size\n print(np.array(BE).shape)\n rh=r[r<=rLim]\n c=0\n for j in Idh:\n dxp=pxh[Idh==j]-pxh[Idh !=j]\n dyp=pyh[Idh==j]-pyh[Idh !=j]\n dzp=pzh[Idh==j]-pzh[Idh !=j]\n vx=float(pVxh[Idh==j])\n vy=float(pVyh[Idh==j])\n vz=float(pVzh[Idh==j])\n #print(\"vx:%g\"%vx)\n #rp2=dxp*dxp+dyp*dyp+dzp*dzp\n rp=np.sqrt(dxp*dxp+dyp*dyp+dzp*dzp)\n #PotE[c]=np.sum(1./rp)\n #KinE[c]=0.5*(pVxh*pVxh+pVyh*pVyh+pVzh*pVzh)\n BE[c]=float(np.sum(1./rp)+0.5*(vx*vx+vy*vy+vz*vz))#PotE[c]+KinE[c]\n c+=1\n print(\"counted:%d\"%c)\n BE2=BE#np.array(np.sort(BE))\n #BE.sort(key=lambda x: x[0],reverse=True)\n BE2.sort(reverse=True)\n #print(BE.shape)\n #print(\"before sort:\")\n print(np.array(BE))\n print(\"after sort:\")\n print(np.array(BE2))\n #quicksort(BE2)\n #BErev=BE2[::-1] #reverse it\n #print(BE)\n #BELimit=BE[0][tagLimit] #what is there are amny particles at the same BE?\n BELimit=BE2[tagLimit]\n print(\"BELimit:%g\"%BELimit)\n #print(BE[0][:])\n BE=np.array(BE)\n pxtag=pxh[BE>=BELimit]\n pytag=pyh[BE>=BELimit]\n pztag=pzh[BE>=BELimit]\n pIDtag=Idh[BE>=BELimit]\n #\n pVxTag=pVxh[BE>=BELimit]\n pVyTag=pVyh[BE>=BELimit]\n pVzTag=pVzh[BE>=BELimit]\n pGID=[GIndex0[i]]*len(Idh)\n #\n print(\" # of most bound Ps:%d\"%len(pxtag))\n pSM=[0.0]*len(pxtag)\n pZZ=[0.0]*len(pxtag)\n pAge=[0.0]*len(pxtag)\n for k in range(0,len(pxtag)):\n pSM[k]=GSM0[i]/tagLimit\n pZZ[k]= MetalStellarG0[i]/GSM0[i]\n pAge[k]=args.Age\n #AllStars[id].ZZ=SageOutput[galaxy].MetalsStellarMass/SageOutput[galaxy].StellarMass\n hf = h5.File('%s.h5' %str(Id[i]), 'w')\n hf.create_dataset('ID', data=pIDtag)\n hf.create_dataset('X', data=pxtag)\n hf.create_dataset('Y', data=pytag)\n hf.create_dataset('Z', data=pztag)\n #\n hf.create_dataset('Vx', data=pVxTag)\n hf.create_dataset('Vy', data=pVyTag)\n hf.create_dataset('Vz', data=pVzTag)\n hf.create_dataset('GID', data=pGID)\n #\n hf.create_dataset('StellarMass', data=pSM)\n hf.create_dataset('Metallicity', data=pZZ)\n hf.create_dataset('Age', data=pAge)\n hf.close()\n"
]
| [
[
"numpy.array",
"numpy.sum",
"numpy.genfromtxt",
"numpy.sqrt",
"numpy.get_include"
]
]
|
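Note: the tagging loop in the row above scores every particle inside the virial radius with a binding-energy proxy (potential term taken positive, `sum(1/r_ij) + 0.5 v^2`), sorts descending, and tags the top `fraction` percent as stellar-mass carriers. A toy sketch of that rule, assuming random arrays in place of the yt-loaded snapshot:

```python
import numpy as np

rng = np.random.default_rng(0)
pos = rng.normal(size=(50, 3))   # toy particle positions inside Rvir
vel = rng.normal(size=(50, 3))   # toy particle velocities
fraction = 10.0                  # percent of in-radius particles to tag

be = np.empty(len(pos))
for j in range(len(pos)):
    # Pairwise distances to all other particles, as in the dxp/dyp/dzp loop.
    d = np.linalg.norm(pos[j] - np.delete(pos, j, axis=0), axis=1)
    be[j] = np.sum(1.0 / d) + 0.5 * np.dot(vel[j], vel[j])

tag_limit = max(1, int(fraction / 100.0 * len(pos)))
tagged = np.argsort(be)[::-1][:tag_limit]  # indices of the most-bound particles
print(tagged.size)  # 5
```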
bridget-haus/Energy_Disaggregation | [
"5cf5a9277bda471c3c4e3b897a7d2fc32a782da3"
]
| [
"Dashboard/data-manipulation/clean-save-result.py"
]
| [
"import os\nimport json\nimport pandas as pd\nimport pickle5 as pkl\nfrom pathlib import Path\nfrom collections import defaultdict\n\ncurr_dir = Path(os.getcwd())\nparent_dir = curr_dir.parent\nsource_dir = parent_dir.parent\nresult_dir = os.path.join(source_dir, 'pkl_results')\noutput_dir = os.path.join(parent_dir, 'dashboard', 'src', 'charts', 'data')\noutput_file_nn_houses = os.path.join(output_dir, 'nn_houses.json')\noutput_file_nn_appliances = os.path.join(output_dir, 'nn_appliances.json')\noutput_file_svm = os.path.join(output_dir, 'svm_results.json')\n\nif not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\nhouses = defaultdict(list)\nappliances = []\n\nfor file in os.listdir(result_dir):\n\n file_dir = os.path.join(result_dir, file)\n model = file.split('_')[0]\n\n if model == 'nn':\n\n house = file[11:18]\n appliance = file[19:-4]\n\n inner_dict = {'appliance': appliance}\n appliance_dict = {'appliance': appliance, 'house': house}\n\n with open(file_dir, 'rb') as f:\n\n df = pkl.load(f)\n df['timestamp'] = df['timestamp'].astype('datetime64[ns]')\n total_usage = df['prediction'].sum()\n appliance_dict['usage'] = total_usage\n\n df_30 = df.groupby([pd.Grouper(key='timestamp', freq='30min')]).sum()\n df_30 = df_30.reset_index()\n df_30['timestamp'] = df_30['timestamp'].astype(str)\n inner_dict['values'] = df_30.to_dict(orient='records')\n\n appliances.append(appliance_dict)\n houses[house].append(inner_dict)\n\n # if model == 'svm':\n #\n # appliance = file[19:-4]\n #\n # with open(file_dir, 'rb') as f:\n #\n # appliances[appliance] = pkl.load(f)\n\n\nif not os.path.isfile(output_file_nn_houses):\n with open(output_file_nn_houses, 'w') as f:\n json.dump(houses, f)\n\nif not os.path.isfile(output_file_nn_appliances):\n with open(output_file_nn_appliances, 'w') as f:\n json.dump(appliances, f)\n\nif not os.path.isfile(output_file_svm):\n with open(output_file_svm, 'w') as f:\n json.dump(houses, f)\n\n\n\n"
]
| [
[
"pandas.Grouper"
]
]
|
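Note: the dashboard script in the row above aggregates per-timestamp predictions into half-hour bins with `pandas.Grouper` before serialising to JSON. A minimal sketch of that resampling step on a toy frame with hypothetical values:

```python
import pandas as pd

df = pd.DataFrame({
    'timestamp': pd.date_range('2020-01-01', periods=6, freq='10min'),
    'prediction': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
})
# Sum into 30-minute bins, then stringify timestamps for JSON, as above.
df_30 = df.groupby(pd.Grouper(key='timestamp', freq='30min')).sum().reset_index()
df_30['timestamp'] = df_30['timestamp'].astype(str)
print(df_30.to_dict(orient='records'))
# [{'timestamp': '2020-01-01 00:00:00', 'prediction': 6.0},
#  {'timestamp': '2020-01-01 00:30:00', 'prediction': 15.0}]
```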
yang182/MolGAN | [
"31a4aeceed1fd413877dd446a7daddae58f44e48"
]
| [
"utils/sparse_molecular_dataset.py"
]
| [
"import pickle\nimport numpy as np\n\nfrom rdkit import Chem\n\nif __name__ == '__main__':\n from progress_bar import ProgressBar\nelse:\n from utils.progress_bar import ProgressBar\n\nfrom datetime import datetime\n\n\nclass SparseMolecularDataset():\n\n def load(self, filename, subset=1):\n\n with open(filename, 'rb') as f:\n self.__dict__.update(pickle.load(f))\n\n self.train_idx = np.random.choice(self.train_idx, int(len(self.train_idx) * subset), replace=False)\n self.validation_idx = np.random.choice(self.validation_idx, int(len(self.validation_idx) * subset),\n replace=False)\n self.test_idx = np.random.choice(self.test_idx, int(len(self.test_idx) * subset), replace=False)\n\n self.train_count = len(self.train_idx)\n self.validation_count = len(self.validation_idx)\n self.test_count = len(self.test_idx)\n\n self.__len = self.train_count + self.validation_count + self.test_count\n\n def save(self, filename):\n with open(filename, 'wb') as f:\n print(\"self idct: \", self.__dict__)\n pickle.dump(self.__dict__, f)\n\n def generate(self, filename, add_h=False, filters=lambda x: True, size=None, validation=0.1, test=0.1):\n self.log('Extracting {}..'.format(filename))\n\n if filename.endswith('.sdf'):\n print(\"begin data\")\n self.data = list(filter(lambda x: x is not None, Chem.SDMolSupplier(filename)))\n elif filename.endswith('.smi'):\n self.data = [Chem.MolFromSmiles(line) for line in open(filename, 'r').readlines()]\n\n self.data = list(map(Chem.AddHs, self.data)) if add_h else self.data\n self.data = list(filter(filters, self.data))\n self.data = self.data[:size]\n\n self.log('Extracted {} out of {} molecules {}adding Hydrogen!'.format(len(self.data),\n len(Chem.SDMolSupplier(filename)),\n '' if add_h else 'not '))\n\n self._generate_encoders_decoders()\n self._generate_AX()\n\n # it contains the all the molecules stored as rdkit.Chem objects\n self.data = np.array(self.data)\n\n # it contains the all the molecules stored as SMILES strings\n self.smiles = np.array(self.smiles)\n\n # a (N, L) matrix where N is the length of the dataset and each L-dim vector contains the \n # indices corresponding to a SMILE sequences with padding wrt the max length of the longest \n # SMILES sequence in the dataset (see self._genS)\n self.data_S = np.stack(self.data_S)\n\n # a (N, 9, 9) tensor where N is the length of the dataset and each 9x9 matrix contains the \n # indices of the positions of the ones in the one-hot representation of the adjacency tensor\n # (see self._genA)\n self.data_A = np.stack(self.data_A)\n\n # a (N, 9) matrix where N is the length of the dataset and each 9-dim vector contains the \n # indices of the positions of the ones in the one-hot representation of the annotation matrix\n # (see self._genX)\n self.data_X = np.stack(self.data_X)\n\n # a (N, 9) matrix where N is the length of the dataset and each 9-dim vector contains the \n # diagonal of the correspondent adjacency matrix\n self.data_D = np.stack(self.data_D)\n\n # a (N, F) matrix where N is the length of the dataset and each F vector contains features \n # of the correspondent molecule (see self._genF)\n self.data_F = np.stack(self.data_F)\n\n # a (N, 9) matrix where N is the length of the dataset and each 9-dim vector contains the\n # eigenvalues of the correspondent Laplacian matrix\n self.data_Le = np.stack(self.data_Le)\n\n # a (N, 9, 9) matrix where N is the length of the dataset and each 9x9 matrix contains the \n # eigenvectors of the correspondent Laplacian matrix\n self.data_Lv = np.stack(self.data_Lv)\n\n 
self.vertexes = self.data_F.shape[-2]\n self.features = self.data_F.shape[-1]\n\n self._generate_train_validation_test(validation, test)\n\n def _generate_encoders_decoders(self):\n self.log('Creating atoms encoder and decoder..')\n atom_labels = sorted(set([atom.GetAtomicNum() for mol in self.data for atom in mol.GetAtoms()] + [0]))\n self.atom_encoder_m = {l: i for i, l in enumerate(atom_labels)}\n self.atom_decoder_m = {i: l for i, l in enumerate(atom_labels)}\n self.atom_num_types = len(atom_labels)\n self.log('Created atoms encoder and decoder with {} atom types and 1 PAD symbol!'.format(\n self.atom_num_types - 1))\n\n self.log('Creating bonds encoder and decoder..')\n bond_labels = [Chem.rdchem.BondType.ZERO] + list(sorted(set(bond.GetBondType()\n for mol in self.data\n for bond in mol.GetBonds())))\n\n self.bond_encoder_m = {l: i for i, l in enumerate(bond_labels)}\n self.bond_decoder_m = {i: l for i, l in enumerate(bond_labels)}\n self.bond_num_types = len(bond_labels)\n self.log('Created bonds encoder and decoder with {} bond types and 1 PAD symbol!'.format(\n self.bond_num_types - 1))\n\n self.log('Creating SMILES encoder and decoder..')\n smiles_labels = ['E'] + list(set(c for mol in self.data for c in Chem.MolToSmiles(mol)))\n self.smiles_encoder_m = {l: i for i, l in enumerate(smiles_labels)}\n self.smiles_decoder_m = {i: l for i, l in enumerate(smiles_labels)}\n self.smiles_num_types = len(smiles_labels)\n self.log('Created SMILES encoder and decoder with {} types and 1 PAD symbol!'.format(\n self.smiles_num_types - 1))\n\n def _generate_AX(self):\n self.log('Creating features and adjacency matrices..')\n pr = ProgressBar(60, len(self.data))\n\n data = []\n smiles = []\n data_S = []\n data_A = []\n data_X = []\n data_D = []\n data_F = []\n data_Le = []\n data_Lv = []\n\n max_length = max(mol.GetNumAtoms() for mol in self.data)\n max_length_s = max(len(Chem.MolToSmiles(mol)) for mol in self.data)\n\n for i, mol in enumerate(self.data):\n A = self._genA(mol, connected=True, max_length=max_length)\n D = np.count_nonzero(A, -1)\n if A is not None:\n data.append(mol)\n smiles.append(Chem.MolToSmiles(mol))\n data_S.append(self._genS(mol, max_length=max_length_s)) # smile character encoder\n data_A.append(A)\n data_X.append(self._genX(mol, max_length=max_length)) # atom number encoder\n data_D.append(D)\n data_F.append(self._genF(mol, max_length=max_length)) # atom feature\n\n L = np.diag(D) - A # laplacian matrix\n Le, Lv = np.linalg.eigh(L) # eigenvalues and eigenvectors\n\n data_Le.append(Le)\n data_Lv.append(Lv)\n\n pr.update(i + 1)\n\n self.log(date=False)\n self.log('Created {} features and adjacency matrices out of {} molecules!'.format(len(data),\n len(self.data)))\n\n self.data = data\n self.smiles = smiles\n self.data_S = data_S\n self.data_A = data_A\n self.data_X = data_X\n self.data_D = data_D\n self.data_F = data_F\n self.data_Le = data_Le\n self.data_Lv = data_Lv\n self.__len = len(self.data)\n\n def _genA(self, mol, connected=True, max_length=None):\n\n max_length = max_length if max_length is not None else mol.GetNumAtoms()\n\n A = np.zeros(shape=(max_length, max_length), dtype=np.int32)\n\n begin, end = [b.GetBeginAtomIdx() for b in mol.GetBonds()], [b.GetEndAtomIdx() for b in mol.GetBonds()]\n bond_type = [self.bond_encoder_m[b.GetBondType()] for b in mol.GetBonds()]\n\n A[begin, end] = bond_type\n A[end, begin] = bond_type\n\n degree = np.sum(A[:mol.GetNumAtoms(), :mol.GetNumAtoms()], axis=-1)\n\n return A if connected and (degree > 0).all() else None\n\n def 
_genX(self, mol, max_length=None):\n\n max_length = max_length if max_length is not None else mol.GetNumAtoms()\n\n return np.array([self.atom_encoder_m[atom.GetAtomicNum()] for atom in mol.GetAtoms()] + [0] * (\n max_length - mol.GetNumAtoms()), dtype=np.int32)\n\n def _genS(self, mol, max_length=None):\n\n max_length = max_length if max_length is not None else len(Chem.MolToSmiles(mol))\n\n return np.array([self.smiles_encoder_m[c] for c in Chem.MolToSmiles(mol)] + [self.smiles_encoder_m['E']] * (\n max_length - len(Chem.MolToSmiles(mol))), dtype=np.int32)\n\n def _genF(self, mol, max_length=None):\n\n max_length = max_length if max_length is not None else mol.GetNumAtoms()\n\n features = np.array([[*[a.GetDegree() == i for i in range(5)],\n *[a.GetExplicitValence() == i for i in range(9)], # 原子显示价\n *[int(a.GetHybridization()) == i for i in range(1, 7)], # H的数目\n *[a.GetImplicitValence() == i for i in range(9)], # 原子上隐式H的数目\n a.GetIsAromatic(), # 芳香性\n a.GetNoImplicit(), # 是否允许有隐式H\n *[a.GetNumExplicitHs() == i for i in range(5)],\n *[a.GetNumImplicitHs() == i for i in range(5)],\n *[a.GetNumRadicalElectrons() == i for i in range(5)],\n a.IsInRing(),\n *[a.IsInRingSize(i) for i in range(2, 9)]] for a in mol.GetAtoms()], dtype=np.int32)\n\n return np.vstack((features, np.zeros((max_length - features.shape[0], features.shape[1]))))\n\n def matrices2mol(self, node_labels, edge_labels, strict=False):\n mol = Chem.RWMol()\n\n for node_label in node_labels:\n mol.AddAtom(Chem.Atom(self.atom_decoder_m[node_label]))\n\n for start, end in zip(*np.nonzero(edge_labels)):\n if start > end:\n mol.AddBond(int(start), int(end), self.bond_decoder_m[edge_labels[start, end]])\n\n if strict:\n try:\n Chem.SanitizeMol(mol)\n except:\n mol = None\n\n return mol\n\n def seq2mol(self, seq, strict=False):\n mol = Chem.MolFromSmiles(''.join([self.smiles_decoder_m[e] for e in seq if e != 0]))\n\n if strict:\n try:\n Chem.SanitizeMol(mol)\n except:\n mol = None\n\n return mol\n\n def _generate_train_validation_test(self, validation, test):\n\n self.log('Creating train, validation and test sets..')\n\n validation = int(validation * len(self))\n test = int(test * len(self))\n train = len(self) - validation - test\n\n self.all_idx = np.random.permutation(len(self))\n self.train_idx = self.all_idx[0:train]\n self.validation_idx = self.all_idx[train:train + validation]\n self.test_idx = self.all_idx[train + validation:]\n\n self.train_counter = 0\n self.validation_counter = 0\n self.test_counter = 0\n\n self.train_count = train\n self.validation_count = validation\n self.test_count = test\n\n self.log('Created train ({} items), validation ({} items) and test ({} items) sets!'.format(\n train, validation, test))\n\n def _next_batch(self, counter, count, idx, batch_size):\n if batch_size is not None:\n if counter + batch_size >= count:\n counter = 0\n np.random.shuffle(idx)\n\n output = [obj[idx[counter:counter + batch_size]]\n for obj in (self.data, self.smiles, self.data_S, self.data_A, self.data_X,\n self.data_D, self.data_F, self.data_Le, self.data_Lv)]\n\n counter += batch_size\n else:\n output = [obj[idx] for obj in (self.data, self.smiles, self.data_S, self.data_A, self.data_X,\n self.data_D, self.data_F, self.data_Le, self.data_Lv)]\n\n return [counter] + output\n\n def next_train_batch(self, batch_size=None):\n out = self._next_batch(counter=self.train_counter, count=self.train_count,\n idx=self.train_idx, batch_size=batch_size)\n self.train_counter = out[0]\n\n return out[1:]\n\n def 
next_validation_batch(self, batch_size=None):\n out = self._next_batch(counter=self.validation_counter, count=self.validation_count,\n idx=self.validation_idx, batch_size=batch_size)\n self.validation_counter = out[0]\n\n return out[1:]\n\n def next_test_batch(self, batch_size=None):\n out = self._next_batch(counter=self.test_counter, count=self.test_count,\n idx=self.test_idx, batch_size=batch_size)\n self.test_counter = out[0]\n\n return out[1:]\n\n @staticmethod\n def log(msg='', date=True):\n print(str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + ' ' + str(msg) if date else str(msg))\n\n def __len__(self):\n return self.__len\n\n\nif __name__ == '__main__':\n data = SparseMolecularDataset()\n data.generate('../data/gdb9.sdf', filters=lambda x: x.GetNumAtoms() <= 9)\n data.save('../data/gdb9_9nodes.sparsedataset')\n\n # data = SparseMolecularDataset()\n # data.generate('data/qm9_5k.smi', validation=0.00021, test=0.00021) # , filters=lambda x: x.GetNumAtoms() <= 9)\n # data.save('data/qm9_5k.sparsedataset')\n"
]
| [
[
"numpy.array",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.linalg.eigh",
"numpy.nonzero",
"numpy.random.shuffle",
"numpy.stack",
"numpy.diag"
]
]
|
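Note: `_generate_AX` in the row above encodes each molecule as a padded bond-adjacency matrix, takes per-atom degrees, and eigendecomposes the graph Laplacian `L = diag(D) - A`. A minimal sketch with a toy 3-atom molecule padded to 5 (bond types as integer codes):

```python
import numpy as np

max_length = 5                                   # pad all molecules to 5 atoms
begin, end, bond_type = [0, 1], [1, 2], [1, 2]   # two bonds with encoded types

A = np.zeros((max_length, max_length), dtype=np.int32)
A[begin, end] = bond_type
A[end, begin] = bond_type        # adjacency is symmetric, as in _genA

D = np.count_nonzero(A, -1)      # per-atom degree (number of bonds)
L = np.diag(D) - A               # graph Laplacian, as in _generate_AX
Le, Lv = np.linalg.eigh(L)       # eigenvalues and eigenvectors
print(Le.shape, Lv.shape)        # (5,) (5, 5)
```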
WittmannF/fastai_docs | [
"03ecae01557a5e4a196dd858b10a57b224df52cd"
]
| [
"dev_course/dl2/exp/nb_03.py"
]
| [
"\n#################################################\n### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###\n#################################################\n# file to edit: dev_nb/03_minibatch_training.ipynb\n\nfrom exp.nb_02 import *\nimport torch.nn.functional as F\n\ndef accuracy(out, yb):\n preds = torch.argmax(out, dim=1)\n return (preds==yb).float().mean()\n\nfrom torch import optim\n\nclass Dataset():\n def __init__(self, x, y): self.x,self.y = x,y\n def __len__(self): return len(self.x)\n def __getitem__(self, i): return self.x[i],self.y[i]\n\nfrom torch.utils.data import DataLoader, SequentialSampler, RandomSampler\n\ndef get_dls(train_ds, valid_ds, bs):\n return (DataLoader(train_ds, batch_size=bs, shuffle=True),\n DataLoader(valid_ds, batch_size=bs*2))"
]
| [
[
"torch.utils.data.DataLoader"
]
]
|
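Note: the autogenerated notebook module in the row above defines a tiny index-based `Dataset` and a `get_dls` helper that shuffles the training loader and doubles the validation batch size. A hedged usage sketch with toy tensors (the class is repeated here so the snippet is self-contained):

```python
import torch
from torch.utils.data import DataLoader

class Dataset():
    def __init__(self, x, y): self.x, self.y = x, y
    def __len__(self): return len(self.x)
    def __getitem__(self, i): return self.x[i], self.y[i]

x_train, y_train = torch.randn(100, 10), torch.randint(0, 2, (100,))
x_valid, y_valid = torch.randn(20, 10), torch.randint(0, 2, (20,))

train_dl = DataLoader(Dataset(x_train, y_train), batch_size=16, shuffle=True)
valid_dl = DataLoader(Dataset(x_valid, y_valid), batch_size=32)  # the bs*2 rule
xb, yb = next(iter(train_dl))
print(xb.shape, yb.shape)  # torch.Size([16, 10]) torch.Size([16])
```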
Zephyr-29/RecBole | [
"e8300611765c947ce904f29c610b188033ec8da8"
]
| [
"tests/metrics/test_topk_metrics.py"
]
| [
"# -*- encoding: utf-8 -*-\n# @Time : 2020/11/1\n# @Author : Kaiyuan Li\n# @email : [email protected]\n\n\nimport os\nimport sys\nimport unittest\n\nsys.path.append(os.getcwd())\nimport numpy as np\nfrom recbole.evaluator.metrics import metrics_dict\n\npos_idx = np.array([\n [0, 0, 0],\n [1, 1, 1],\n [1, 0, 1],\n [0, 0, 1],\n])\npos_len = np.array([1, 3, 4, 2])\n\n\ndef get_result(name):\n func = metrics_dict[name]\n return func(pos_idx, pos_len)\n\n\nclass TestTopKMetrics(unittest.TestCase):\n def test_hit(self):\n name = 'hit'\n self.assertEqual(\n get_result(name).tolist(),\n np.array([[0, 0, 0], [1, 1, 1], [1, 1, 1], [0, 0, 1]]).tolist())\n\n def test_ndcg(self):\n name = 'ndcg'\n self.assertEqual(\n get_result(name).tolist(),\n np.array([[0, 0, 0], [1, 1, 1],\n [\n 1,\n (1 / np.log2(2) / (1 / np.log2(2) + 1 / np.log2(3))),\n ((1 / np.log2(2) + 1 / np.log2(4)) / (1 / np.log2(2) + 1 / np.log2(3) + 1 / np.log2(4)))\n ],\n [\n 0,\n 0,\n (1 / np.log2(4) / (1 / np.log2(2) + 1 / np.log2(3)))\n ]]).tolist())\n\n def test_mrr(self):\n name = 'mrr'\n self.assertEqual(\n get_result(name).tolist(),\n np.array([[0, 0, 0], [1, 1, 1], [1, 1, 1], [0, 0,\n 1 / 3]]).tolist())\n\n def test_map(self):\n name = 'map'\n self.assertEqual(\n get_result(name).tolist(),\n np.array([[0, 0, 0], [1, 1, 1],\n [1, (1 / 2), (1 / 3) * ((1 / 1) + (2 / 3))],\n [0, 0, (1 / 3) * (1 / 2)]]).tolist())\n\n def test_recall(self):\n name = 'recall'\n self.assertEqual(\n get_result(name).tolist(),\n np.array([[0, 0, 0], [1 / 3, 2 / 3, 3 / 3], [1 / 4, 1 / 4, 2 / 4],\n [0, 0, 1 / 2]]).tolist())\n\n def test_precision(self):\n name = 'precision'\n self.assertEqual(\n get_result(name).tolist(),\n np.array([[0, 0, 0], [1 / 1, 2 / 2, 3 / 3], [1 / 1, 1 / 2, 2 / 3],\n [0, 0, 1 / 3]]).tolist())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
]
| [
[
"numpy.array",
"numpy.log2"
]
]
|
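Note: the test in the row above hard-codes NDCG expectations such as `1/log2(2) / (1/log2(2) + 1/log2(3))`. A worked check for the third row (`pos_idx = [1, 0, 1]`, `pos_len = 4`), assuming the usual DCG-over-ideal-DCG definition the expectations imply:

```python
import numpy as np

pos_row, pos_len = np.array([1, 0, 1]), 4
discounts = np.log2(np.arange(2, len(pos_row) + 2))      # log2(rank + 1)
dcg = np.cumsum(pos_row / discounts)                     # DCG at each cutoff k
ideal_len = np.minimum(np.arange(1, len(pos_row) + 1), pos_len)
idcg = np.array([(1.0 / discounts[:n]).sum() for n in ideal_len])
print(dcg / idcg)  # approx [1.0, 0.6131, 0.7039], matching the test's row
```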
xsthunder/TUH_EEG_Seizure_Detection | [
"e19b9b788eda26db83269e5a076afa115b2d1db4"
]
| [
"src/tools/tuh_sz_extract_metadata.py"
]
| [
"\"\"\"Extract the metadata of the dataset.\n\nAuthors: Vincent Stragier\n\nDescription:\n - List all the files of the dataset to find the EEG recording.\n - Extract all the metadata of the dataset and store them\n in a pickle and a JSON file.\n - Provides functions to load the extracted metadata\n in a dictionary\n\nLogs:\n 27/10/2020 (Vincent Stragier)\n - Comply to PEP8 and makes this script only\n excecutable as a module\n (py -m src.tools.tuh_sz_extract_metadata)\n 01/10/2020 (Vincent Stragier)\n - create this script\n\"\"\"\nimport datetime\nimport lzma\nimport multiprocessing as mp\nimport os\nimport pickle\nfrom functools import partial\n\nimport numpy as np\nimport tqdm\n\nimport feature_extraction as fe\n\n# Maximal number of thread to use\nMAX_THREAD = int(1.5 * os.cpu_count())\n\n\ndef save_pickle(filename: str, variable):\n \"\"\"Save a variable in a binary file.\n\n Args:\n filename: file name (with or without path or extension).\n variable: the variable to save as a binary.\n\n Returns:\n Returns nothing.\n \"\"\"\n with open(filename, 'wb') as f:\n pickle.dump(variable, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef save_pickle_lzma(filename: str, variable):\n \"\"\"Save a variable in a lzma compressed binary file.\n\n Args:\n filename: file name (with or without path or extension).\n variable: the variable to save as a binary.\n\n Returns:\n Returns nothing.\n \"\"\"\n with lzma.open(filename, 'wb') as f:\n pickle.dump(variable, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef load_pickle_lzma(filename: str):\n \"\"\"Load a variable from a lzma compressed binary file.\n\n Args:\n filename: file name.\n\n Returns:\n Returns the variable.\n \"\"\"\n with lzma.open(filename, 'rb') as f:\n return pickle.load(f)\n\n\ndef load_pickle(filename: str):\n \"\"\"Load a variable from a binary file.\n\n Args:\n filename: file name.\n\n Returns:\n Returns the variable.\n \"\"\"\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\n\ndef extract_metadata_all_worker(filepath,\n calibration_dict: dict,\n calibrated_files: list):\n \"\"\"Extract all the metadata of the recording linked to filepath.\n\n Args:\n filepath: the path to the recording related files without extension.\n calibration_dict: a dict which contains the calibration information\n (start and stop time) of the calibrated files.\n calibrated_files: the list of the calibrated files.\n\n Returns:\n A dictonnary with all the metadata.\n \"\"\"\n if os.path.basename(filepath) in calibrated_files:\n metadata = fe.extract_metadata_all(\n filepath, calibration_dict[os.path.basename(filepath)])\n\n else:\n metadata = fe.extract_metadata_all(filepath)\n return metadata\n\n\ndef seconds_to_human_readble_time(seconds):\n \"\"\"Converts a number of seconds to a readable string\n (days, hours, minutes, seconds, etc.)\n\n Args:\n seconds: the number of seconds to convert.\n\n Returns:\n A string with the human readable among of seconds.\n \"\"\"\n return str(datetime.timedelta(seconds=seconds))\n\n\ndef labels_to_events(recording_meta):\n \"\"\"From the recording converts the labels to a more explicite form.\n\n Args:\n recording_meta: a dictionary which contains the metadata\n of the recording.\n\n Returns:\n A list of dictionaries, which is structured as followed.\n {'start': l['start'],\n 'stop': l['stop'],\n 'montage': montages[l['montage']],\n 'event':symbols[np.argmax(l['probabilities'])]}\n\n where:\n - ``l['start']`` is the start time of the event\n - ``l['stop']`` is the stop time of the event\n - ``montages[l['montage']]`` is 
the montage\n on which the event is occurring\n - ``symbols[np.argmax(l['probabilities'])]`` is\n the label of the most probable event\n \"\"\"\n symbols = recording_meta['annotations_lbl']['symbols'][0]\n montages = recording_meta['annotations_lbl']['montages']\n # For each label extract start, stop, montage, symbol\n labels = list() # List of dictionaries\n for label in recording_meta['annotations_lbl']['labels']:\n labels.append(\n {\n 'start': label['start'],\n 'stop': label['stop'],\n 'montage': montages[label['montage']],\n 'event': symbols[np.argmax(label['probabilities'])],\n },\n )\n\n return labels\n\n\ndef focal_starting_points(\n recording_meta: dict,\n seizure_types: list = ['fnsz', 'cpsz', 'spsz']):\n \"\"\"Return a list of dictionaries which contain the focal events\n with their starting points.\n\n Args:\n recording_meta: a dictionary which contains the metadata\n of the recording.\n\n Returns:\n A list of dictionaries which contain the information\n about the starting point of a focal seizure of any kind.\n The dictionary looks like:\n {'start': start_time,\n 'event': event_tse['event'],\n 'montages': montages}\n\n where:\n - ``start_time`` is the starting time of the event\n - ``event_tse['event']`` is the kind\n of event according to the tse file\n - ``montages`` is a list of montages\n on which the event started\n \"\"\"\n # Convert the labels to more workable events\n events_list_lbl = labels_to_events(recording_meta)\n # Only keep focal seizure related events\n focal_events_list_lbl = [\n e for e in events_list_lbl\n if e['event'] in seizure_types\n ]\n\n events_list_tse = recording_meta['annotations_tse']\n # Only keep focal seizure related events\n focal_events_list_tse = [\n e for e in events_list_tse\n if e['event'] in seizure_types\n ]\n\n events = []\n for event_tse in focal_events_list_tse:\n start_time = event_tse['start']\n montages = [\n event_lbl['montage'] for event_lbl\n in focal_events_list_lbl\n if event_lbl['start'] == start_time\n ]\n\n events.append(\n {\n 'start': start_time,\n 'event': event_tse['event'],\n 'montages': montages,\n },\n )\n\n return events\n\n\ndef main(path: str, path_calibration: str, metadata_save_path: str):\n \"\"\"\n -List all the files of the dataset to find the EEG recording.\n -Extract all the metadata of the dataset\n and store them in a pickle and a JSON file.\n \"\"\"\n # List the recording paths\n files_list = fe.extract_files_list(\n path=path,\n extension_filter='tse',\n ) + fe.extract_files_list(\n path=path,\n extension_filter='lbl',\n ) + fe.extract_files_list(\n path=path,\n extension_filter='edf',\n )\n\n print('Number of used files (found):', len(files_list))\n print('Number of recordings (found):', len(list(set(files_list))))\n\n # Remove incomplete recordings from the dataset\n print('Filter the filelist to remove incomplete recordings.')\n filtered_paths = sorted(\n list(\n filter(\n fe.filters_dataset_files,\n list(set(files_list)),\n ),\n ),\n )\n\n print('Number of complete recordings:', len(filtered_paths))\n\n # Extract calibration periods\n calibration = fe.extract_calibration_periods(path_calibration)\n calibrated_files = list(calibration.keys())\n\n # Create a partial version of the 'extract_metadata_all_worker()'\n # to fix constant parameters\n partial_extract_metadata_all_worker = partial(\n extract_metadata_all_worker,\n calibration_dict=calibration,\n calibrated_files=calibrated_files,\n )\n\n # Start pool, inspired from\n # https://leimao.github.io/blog/Python-tqdm-Multiprocessing/\n 
print('Start pool using {0} workers.'.format(MAX_THREAD))\n result_list_tqdm = []\n\n with mp.Pool(processes=MAX_THREAD) as pool:\n for result in tqdm.tqdm(\n pool.imap(\n func=partial_extract_metadata_all_worker,\n iterable=filtered_paths,\n ),\n total=len(filtered_paths),\n ):\n\n result_list_tqdm.append(result)\n\n print('The metadata collection is finished.')\n print('Save variable in a file.')\n save_pickle_lzma(metadata_save_path, result_list_tqdm)\n print('Variable saved.')\n\n\nif __name__ == '__main__':\n # Create the script arguments parser\n import argparse\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n 'dataset_path',\n type=str,\n help='path to the dataset',\n )\n\n parser.add_argument(\n 'calibration',\n type=str,\n help='path to the Excel calibration file',\n )\n\n parser.add_argument(\n 'metadata_file',\n type=str,\n help='path to the metadata file to create',\n )\n\n args = parser.parse_args()\n main(\n path=args.dataset_path,\n path_calibration=args.calibration,\n metadata_save_path=args.metadata_file,\n )\n"
]
| [
[
"numpy.argmax"
]
]
|
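
Note on the row above: the script drives its workers through a `pool.imap` iterator wrapped in `tqdm`, so the progress bar advances as each result arrives. A minimal, self-contained sketch of that pattern, with `square` as a hypothetical stand-in for the real metadata worker:

```python
import multiprocessing as mp

import tqdm


def square(x):
    # Stand-in for partial_extract_metadata_all_worker: any picklable,
    # module-level function works with pool.imap.
    return x * x


if __name__ == '__main__':
    items = list(range(100))
    results = []
    # tqdm wraps the lazy pool.imap iterator, so the bar advances as
    # each worker finishes an item.
    with mp.Pool(processes=4) as pool:
        for result in tqdm.tqdm(
            pool.imap(func=square, iterable=items),
            total=len(items),
        ):
            results.append(result)
    print(results[:5])  # [0, 1, 4, 9, 16]
```

Unlike `imap_unordered`, `imap` keeps results in input order, which is why the script can zip them back to `filtered_paths` later.
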
404akhan/pytorch-a3c | [
"a223b1b4bb9589a35cf14aa9aff94525eec873d4"
]
| [
"test.py"
]
| [
"import math\nimport os\nimport sys\nimport itertools\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom envs import create_atari_env\nfrom model import ActorCritic\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nimport time\nfrom collections import deque\n\nimport numpy as np \n\ndef is_dead(info):\n dead = False\n if is_dead.current_life > info['ale.lives']:\n dead = True\n is_dead.current_life = info['ale.lives']\n return dead\n\nis_dead.current_life = 0\n\ndef test(rank, args, shared_model):\n torch.manual_seed(args.seed + rank)\n\n env = create_atari_env(args.env_name)\n env.seed(args.seed + rank)\n\n model = ActorCritic(env.observation_space.shape[0], env.action_space, args.num_skips)\n \n model.eval()\n\n state = env.reset()\n state = np.concatenate([state] * 4, axis=0)\n state = torch.from_numpy(state)\n reward_sum = 0\n done = True\n action_stat = [0] * (model.n_real_acts + model.n_aux_acts)\n\n start_time = time.time()\n episode_length = 0\n\n for ep_counter in itertools.count(1):\n # Sync with the shared model\n if done:\n model.load_state_dict(shared_model.state_dict())\n \n if not os.path.exists('model-a3c-aux'):\n os.makedirs('model-a3c-aux')\n torch.save(shared_model.state_dict(), 'model-a3c-aux/model-{}.pth'.format(args.model_name))\n print('saved model')\n\n value, logit = model(Variable(state.unsqueeze(0), volatile=True))\n prob = F.softmax(logit)\n action = prob.max(1)[1].data.numpy()\n\n action_np = action[0, 0]\n action_stat[action_np] += 1\n\n if action_np < model.n_real_acts:\n state_new, reward, done, info = env.step(action_np)\n dead = is_dead(info)\n \n if args.testing: \n print('episode', episode_length, 'normal action', action_np, 'lives', info['ale.lives'])\n env.render()\n state = np.append(state.numpy()[1:,:,:], state_new, axis=0)\n done = done or episode_length >= args.max_episode_length\n\n reward_sum += reward\n episode_length += 1\n else:\n state = state.numpy()\n\n for _ in range(model.get_skip(action_np)):\n state_new, rew, done, info = env.step(0) # instead of random perform NOOP=0\n dead = is_dead(info)\n\n if args.testing: \n print('episode', episode_length, 'random action', action_np, 'lives', info['ale.lives'])\n env.render()\n state = np.append(state[1:,:,:], state_new, axis=0) \n done = done or episode_length >= args.max_episode_length\n\n reward_sum += rew\n episode_length += 1\n if done or dead:\n break\n\n if done:\n print(\"Time {}, episode reward {}, episode length {}\".format(\n time.strftime(\"%Hh %Mm %Ss\",\n time.gmtime(time.time() - start_time)),\n reward_sum, episode_length))\n print(\"actions stats real {}, aux {}\".format(action_stat[:model.n_real_acts], action_stat[model.n_real_acts:]))\n\n reward_sum = 0\n episode_length = 0\n state = env.reset()\n env.seed(args.seed + rank + (args.num_processes+1)*ep_counter)\n state = np.concatenate([state] * 4, axis=0)\n action_stat = [0] * (model.n_real_acts + model.n_aux_acts)\n if not args.testing: time.sleep(60)\n\n state = torch.from_numpy(state)\n"
]
| [
[
"numpy.concatenate",
"torch.nn.functional.softmax",
"torch.from_numpy",
"torch.manual_seed",
"numpy.append"
]
]
|
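
The `test.py` above keeps a rolling stack of the last four observations as the network input. A sketch of that sliding-window trick using the same `numpy.concatenate` and `numpy.append` calls; the `(1, 84, 84)` frame shape is an assumption matching the usual Atari preprocessing:

```python
import numpy as np

# Initial state: the first observation repeated four times along axis 0.
frame = np.zeros((1, 84, 84), dtype=np.float32)
state = np.concatenate([frame] * 4, axis=0)            # shape (4, 84, 84)

# Each step: drop the oldest frame, append the newest one.
new_frame = np.ones((1, 84, 84), dtype=np.float32)
state = np.append(state[1:, :, :], new_frame, axis=0)  # still (4, 84, 84)
print(state.shape, state[-1].mean())                   # (4, 84, 84) 1.0
```
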
chemmatcars/XModFit | [
"7d1298448d1908d78797fd67ce0a00ecfaf17629"
]
| [
"Functions/ASAXS/Biphasic_Sphere_Uniform.py"
]
| [
"####Please do not remove lines below####\nfrom lmfit import Parameters\nimport numpy as np\nimport sys\nimport os\nsys.path.append(os.path.abspath('.'))\nsys.path.append(os.path.abspath('./Functions'))\nsys.path.append(os.path.abspath('./Fortran_routines'))\n####Please do not remove lines above####\n\n####Import your modules below if needed####\nfrom FormFactors.Sphere import Sphere\n# from ff_sphere import ff_sphere_ml\nfrom Chemical_Formula import Chemical_Formula\nfrom PeakFunctions import LogNormal, Gaussian\nfrom Structure_Factors import hard_sphere_sf, sticky_sphere_sf\nfrom utils import find_minmax, calc_rho, create_steps\nfrom functools import lru_cache\nimport time\n\nfrom numba import njit, prange\n\n@njit(parallel=True, cache=True)\ndef ff_sphere_ml(q,R,rho):\n Nlayers=len(R)\n aff=np.ones_like(q)*complex(0,0)\n ff=np.zeros_like(q)\n for i in prange(len(q)):\n fact = 0.0\n rt = 0.0\n for j in prange(1,Nlayers):\n rt += R[j - 1]\n fact += (rho[j - 1] - rho[j]) * (np.sin(q[i] * rt) - q[i] * rt * np.cos(q[i] * rt)) / q[i] ** 3\n aff[i] = fact\n ff[i] = abs(fact) ** 2\n return ff,aff\n\nclass Biphasic_Sphere_Uniform: #Please put the class name same as the function name\n def __init__(self, x=0, Np=20, error_factor=1.0, term='Total',dist='Gaussian', Energy=None, relement='Au', NrDep='False',\n norm=1.0e-9, sbkg=0.0, cbkg=0.0, abkg=0.0, D=1.0, phi=0.1, U=-1.0, SF='None',Rsig=0.0,\n mpar={'Phase_1':{'Material':['Au','H2O'],\n 'Density':[19.32,1.0],\n 'VolFrac':[1.0,1.0],\n 'Rmoles':[1.0,0.0],\n 'R':[1.0,0.0]},\n 'Phase_2':{'Material':['Au','H2O'],\n 'Density':[19.32,1.0],\n 'VolFrac':[1.0,1.0],\n 'Rmoles':[1.0,0.0],\n 'R':[1.0,0.0]},\n 'Solvent':{'Material':['H2O','H2O'],\n 'Density':[1.0,1.0],\n 'VolFrac':[1.0,1.0],\n 'Rmoles':[1.0,0.0],\n 'R':[1.0,0.0]}}):\n \"\"\"\n Documentation\n Calculates the Energy dependent form factor of multilayered spherical nanoparticles with two different set of materials\n\n x : Reciprocal wave-vector 'Q' inv-Angs in the form of a scalar or an array\n relement : Resonant element of the nanoparticle. Default: 'Au'\n Energy : Energy of X-rays in keV at which the form-factor is calculated. Default: None\n Np : No. of points with which the size distribution will be computed. Default: 10\n NrDep : Energy dependence of the non-resonant element. Default= 'False' (Energy independent), 'True' (Energy dependent)\n dist : The probablity distribution fucntion for the radii of different interfaces in the nanoparticles. Default: Gaussian\n norm : The density of the nanoparticles in Molar (Moles/Liter)\n sbkg : Constant incoherent background for SAXS-term\n cbkg : Constant incoherent background for cross-term\n abkg : Constant incoherent background for Resonant-term\n error_factor: Error-factor to simulate the error-bars\n term : 'SAXS-term' or 'Cross-term' or 'Resonant-term' or 'Total'\n D : Hard Sphere Diameter\n phi : Volume fraction of particles\n U : The sticky-sphere interaction energy\n SF : Type of structure factor. Default: 'None'\n Rsig : Widths of the total radius of the nanoparticles. Default: 0.0\n mpar : Multi-parameter which defines the following including the solvent/bulk medium which is the last one. 
Default: 'H2O'\n Material ('Materials' using chemical formula),\n Density ('Density' in gm/cubic-cms),\n Density of solvent ('SolDensity' in gm/cubic-cms) of the particular layer\n Mole-fraction ('Rmoles') of resonant element in the material)\n Radii ('R' in Angs), and\n\n \"\"\"\n if type(x)==list:\n self.x=np.array(x)\n else:\n self.x=x\n self.norm=norm\n self.sbkg=sbkg\n self.cbkg=cbkg\n self.abkg=abkg\n self.dist=dist\n self.Np=Np\n self.Energy=Energy\n self.relement=relement\n self.NrDep=NrDep\n #self.rhosol=rhosol\n self.error_factor=error_factor\n self.D=D\n self.phi=phi\n self.U=U\n self.__mpar__=mpar #If there is any multivalued parameter\n self.SF=SF\n self.term=term\n self.Rsig=Rsig\n self.__Density__={}\n self.__VolFrac__={}\n self.__R__={}\n self.__Rmoles__={}\n self.__material__={}\n self.choices={'dist':['Gaussian','LogNormal'],'NrDep':['True','False'],'SF':['None','Hard-Sphere', 'Sticky-Sphere'],\n 'term':['SAXS-term','Cross-term','Resonant-term','Total']} #If there are choices available for any fixed parameters\n self.__fit__=False\n self.__mkeys__=list(self.__mpar__.keys())\n self.init_params()\n\n\n def init_params(self):\n \"\"\"\n Define all the fitting parameters like\n self.params.add('sig',value = 0, vary = 0, min = -np.inf, max = np.inf, expr = None, brute_step = None)\n \"\"\"\n self.params=Parameters()\n self.params.add('norm',value=self.norm,vary=0, min = -np.inf, max = np.inf, expr = None, brute_step = 0.1)\n self.params.add('D', value=self.D, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('phi', value=self.phi, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sbkg',value=self.sbkg,vary=0, min = -np.inf, max = np.inf, expr = None, brute_step = 0.1)\n self.params.add('cbkg', value=self.cbkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('abkg', value=self.abkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('U', value=self.U, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rsig',value=self.Rsig,vary=0,min=0,max=np.inf,expr=None,brute_step=0.1)\n mkey1=self.__mkeys__[0]\n for key in self.__mpar__[mkey1].keys():\n if key != 'Material':\n for i in range(len(self.__mpar__[mkey1][key])):\n self.params.add('__%s_%s_%03d' % (mkey1, key, i), value=self.__mpar__[mkey1][key][i], vary=0,\n min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n for mkey in self.__mkeys__[1:]:\n for key in self.__mpar__[mkey].keys():\n if key!='Material' and key!='R':\n for i in range(len(self.__mpar__[mkey][key])):\n self.params.add('__%s_%s_%03d'%(mkey, key,i),value=self.__mpar__[mkey][key][i],vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)\n elif key=='R':\n for i in range(len(self.__mpar__[mkey][key])):\n self.params.add('__%s_%s_%03d'%(mkey, key,i),value=self.__mpar__[mkey][key][i],vary=0,min=-np.inf,max=np.inf\n ,expr='__%s_%s_%03d'%(mkey1, key,i),brute_step=0.1)\n\n\n\n @lru_cache(maxsize=10)\n def calc_Rdist(self, R, Rsig, dist, N):\n R = np.array(R)\n totalR = np.sum(R[:-1])\n if Rsig > 0.001:\n fdist = eval(dist + '.' 
+ dist + '(x=0.001, pos=totalR, wid=Rsig)')\n if dist == 'Gaussian':\n rmin, rmax = max(0.001, totalR - 5 * Rsig), totalR + 5 * Rsig\n dr = np.linspace(rmin, rmax, N)\n else:\n rmin, rmax = max(-3, np.log(totalR) - 5 * Rsig), np.log(totalR) + 5 * Rsig\n dr = np.logspace(rmin, rmax, N, base=np.exp(1.0))\n fdist.x = dr\n rdist = fdist.y()\n sumdist = np.sum(rdist)\n rdist = rdist / sumdist\n return dr, rdist, totalR\n else:\n return [totalR], [1.0], totalR\n\n @lru_cache(maxsize=10)\n def new_sphere(self, q, R, Rsig, rho, eirho, adensity, dist='Gaussian',Np=10):\n q = np.array(q)\n dr, rdist, totalR = self.calc_Rdist(R, Rsig, dist, Np)\n form = np.zeros_like(q)\n eiform = np.zeros_like(q)\n aform = np.zeros_like(q)\n cform = np.zeros_like(q)\n pfac = (4 * np.pi * 2.818e-5 * 1.0e-8) ** 2\n for i in range(len(dr)):\n r = np.array(R) * (1 + (dr[i] - totalR) / totalR)\n ff, mff = ff_sphere_ml(q, r, rho)\n form = form + rdist[i] * ff\n eiff, meiff = ff_sphere_ml(q, r, eirho)\n eiform = eiform + rdist[i] * eiff\n aff, maff = ff_sphere_ml(q, r, adensity)\n aform = aform + rdist[i] * aff\n cform = cform + rdist[i] * (meiff * maff.conjugate()+meiff.conjugate()*maff)\n return pfac * form, pfac * eiform, pfac * aform, np.abs(pfac * cform)/2 # in cm^2\n\n @lru_cache(maxsize=2)\n def new_sphere_dict(self, q, R, Rsig, rho, eirho, adensity, dist='Gaussian',Np=10,key='SAXS-term'):\n form, eiform, aform, cform = self.new_sphere(q, R, Rsig, rho, eirho, adensity,dist=dist,Np=Np)\n if key == 'SAXS-term':\n return eiform\n elif key == 'Resonant-term':\n return aform\n elif key == 'Cross-term':\n return cform\n elif key == 'Total':\n return form\n\n def update_params(self):\n for mkey in self.__mkeys__:\n key = 'Density'\n Nmpar=len(self.__mpar__[mkey][key])\n self.__Density__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]\n key = 'VolFrac'\n self.__VolFrac__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]\n key = 'Rmoles'\n self.__Rmoles__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]\n key = 'R'\n self.__R__[mkey] = [self.params['__%s_%s_%03d' % (mkey, key, i)].value for i in range(Nmpar)]\n key = 'Material'\n self.__material__[mkey] = [self.__mpar__[mkey][key][i] for i in range(Nmpar)]\n for mkey in self.__mkeys__[1:]:\n key='R'\n for i in range(Nmpar):\n self.params['__%s_%s_%03d'%(mkey,key,i)].set(expr='__%s_%s_%03d'%(self.__mkeys__[0],key,i))\n mkey = 'Solvent'\n key = 'VolFrac'\n for i in range(Nmpar):\n self.params['__%s_%s_%03d' % (mkey, key, i)].set(\n expr='1.0-__Phase_1_VolFrac_%03d-__Phase_2_VolFrac_%03d' % (i, i))\n\n\n def y(self):\n \"\"\"\n Define the function in terms of x to return some value\n \"\"\"\n svol = 1.5 * 0.0172 ** 2 / 370 ** 2 # scattering volume in cm^3\n self.output_params = {'scaler_parameters': {}}\n self.update_params()\n mkey = 'Solvent'\n sol_density = tuple(np.ones_like(self.__Density__[mkey]))\n R = self.__R__[mkey]\n rho, eirho, adensity, rhor, eirhor, adensityr = calc_rho(R=tuple(R),\n material=tuple(self.__material__[mkey]),\n relement=self.relement,\n density=tuple(self.__Density__[mkey]),\n sol_density=sol_density,\n Energy=self.Energy,\n Rmoles=tuple(self.__Rmoles__[mkey]),\n NrDep=self.NrDep)\n for mkey in self.__mkeys__:\n if mkey != 'Solvent':\n trho, teirho, tadensity, trhor, teirhor, tadensityr = calc_rho(R=tuple(self.__R__[mkey]),\n material=tuple(self.__material__[mkey]),\n relement=self.relement,\n density=tuple(self.__Density__[mkey]),\n 
sol_density=sol_density,\n Energy=self.Energy,\n Rmoles=tuple(self.__Rmoles__[mkey]),\n NrDep=self.NrDep)\n vf = np.array(self.__VolFrac__[mkey])\n rho = rho + vf * trho\n eirho = eirho + vf * teirho\n adensity = adensity + vf * tadensity\n\n\n if type(self.x) == dict:\n sqf = {}\n for key in self.x.keys():\n sqf[key] = self.norm * 6.022e20 * self.new_sphere_dict(tuple(self.x[key]), tuple(self.__R__[self.__mkeys__[0]]),\n self.Rsig, tuple(rho), tuple(eirho),\n tuple(adensity), key=key, dist=self.dist,Np=self.Np) # in cm^-1\n if self.SF is None:\n struct = np.ones_like(self.x[key]) # hard_sphere_sf(self.x[key], D = self.D, phi = 0.0)\n elif self.SF == 'Hard-Sphere':\n struct = hard_sphere_sf(self.x[key], D=self.D, phi=self.phi)\n else:\n struct = sticky_sphere_sf(self.x[key], D=self.D, phi=self.phi, U=self.U, delta=0.01)\n if key == 'SAXS-term':\n sqf[key] = sqf[key] * struct + self.sbkg\n if key == 'Cross-term':\n sqf[key] = sqf[key] * struct + self.cbkg\n if key == 'Resonant-term':\n sqf[key] = sqf[key] * struct + self.abkg\n key1 = 'Total'\n total = self.norm * 6.022e20 * struct * self.new_sphere_dict(tuple(self.x[key]), tuple(self.__R__[self.__mkeys__[0]]),\n self.Rsig, tuple(rho), tuple(eirho),\n tuple(adensity),\n key=key1,dist=self.dist,Np=self.Np) + self.sbkg # in cm^-1\n if not self.__fit__:\n dr, rdist, totalR = self.calc_Rdist(tuple(self.__R__[self.__mkeys__[0]]), self.Rsig, self.dist, self.Np)\n self.output_params['Distribution'] = {'x': dr, 'y': rdist}\n signal = total\n minsignal = np.min(signal)\n normsignal = signal / minsignal\n sqerr = np.random.normal(normsignal, scale=self.error_factor)\n meta = {'Energy': self.Energy}\n if self.Energy is not None:\n self.output_params['simulated_w_err_%.4fkeV' % self.Energy] = {'x': self.x[key],\n 'y': sqerr * minsignal,\n 'yerr': np.sqrt(\n normsignal) * minsignal * self.error_factor,\n 'meta': meta}\n self.output_params['Simulated_total_wo_err'] = {'x': self.x[key], 'y': total}\n self.output_params['Total'] = {'x': self.x[key], 'y': total}\n for key in self.x.keys():\n self.output_params[key] = {'x': self.x[key], 'y': sqf[key]}\n self.output_params['rho_r'] = {'x': rhor[:, 0], 'y': rhor[:, 1]}\n self.output_params['eirho_r'] = {'x': eirhor[:, 0], 'y': eirhor[:, 1]}\n self.output_params['adensity_r'] = {'x': adensityr[:, 0], 'y': adensityr[:, 1]}\n self.output_params['Structure_Factor'] = {'x': self.x[key], 'y': struct}\n\n else:\n if self.SF is None:\n struct = np.ones_like(self.x)\n elif self.SF == 'Hard-Sphere':\n struct = hard_sphere_sf(self.x, D=self.D, phi=self.phi)\n else:\n struct = sticky_sphere_sf(self.x, D=self.D, phi=self.phi, U=self.U, delta=0.01)\n\n tsqf, eisqf, asqf, csqf = self.new_sphere(tuple(self.x), tuple(self.__R__[self.__mkeys__[0]]), self.Rsig, tuple(rho),\n tuple(eirho), tuple(adensity),dist=self.dist,Np=self.Np)\n sqf = self.norm * np.array(tsqf) * 6.022e20 * struct + self.sbkg # in cm^-1\n if not self.__fit__: #Generate all the quantities below while not fitting\n asqf = self.norm * np.array(asqf) * 6.022e20 * struct + self.abkg # in cm^-1\n eisqf = self.norm * np.array(eisqf) * 6.022e20 * struct + self.sbkg # in cm^-1\n csqf = self.norm * np.array(csqf) * 6.022e20 * struct + self.cbkg # in cm^-1\n # sqerr = np.sqrt(6.020e20*self.flux *self.norm*tsqf*struct*svol+self.sbkg)\n # sqwerr = (6.022e20*tsqf * svol * self.flux*self.norm*struct + self.sbkg + 2 * (0.5 - np.random.rand(len(tsqf))) * sqerr)\n signal = 6.022e20 * self.norm * np.array(tsqf) * struct + self.sbkg\n minsignal = np.min(signal)\n normsignal = 
signal / minsignal\n sqerr = np.random.normal(normsignal, scale=self.error_factor)\n meta = {'Energy': self.Energy}\n if self.Energy is not None:\n self.output_params['simulated_w_err_%.4fkeV' % self.Energy] = {'x': self.x, 'y': sqerr * minsignal,\n 'yerr': np.sqrt(\n normsignal) * minsignal * self.error_factor,\n 'meta': meta}\n else:\n self.output_params['simulated_w_err'] = {'x': self.x, 'y': sqerr * minsignal,\n 'yerr': np.sqrt(normsignal) * minsignal * self.error_factor,\n 'meta': meta}\n dr, rdist, totalR = self.calc_Rdist(tuple(self.__R__[self.__mkeys__[0]]), self.Rsig, self.dist, self.Np)\n self.output_params['Distribution'] = {'x': dr, 'y': rdist}\n self.output_params['Total'] = {'x': self.x, 'y': sqf}\n self.output_params['Resonant-term'] = {'x': self.x, 'y': asqf}\n self.output_params['SAXS-term'] = {'x': self.x, 'y': eisqf}\n self.output_params['Cross-term'] = {'x': self.x, 'y': csqf}\n self.output_params['rho_r'] = {'x': rhor[:, 0], 'y': rhor[:, 1]}\n self.output_params['eirho_r'] = {'x': eirhor[:, 0], 'y': eirhor[:, 1]}\n self.output_params['adensity_r'] = {'x': adensityr[:, 0], 'y': adensityr[:, 1]}\n self.output_params['Structure_Factor'] = {'x': self.x, 'y': struct}\n sqf = self.output_params[self.term]['y']\n return sqf\n\n\nif __name__=='__main__':\n x=np.logspace(-3,0,200)\n fun=Biphasic_Sphere_Uniform(x=x)\n print(fun.y())\n"
]
| [
[
"numpy.random.normal",
"numpy.array",
"numpy.zeros_like",
"numpy.ones_like",
"numpy.sin",
"numpy.log",
"numpy.sum",
"numpy.min",
"numpy.exp",
"numpy.abs",
"numpy.cos",
"numpy.sqrt",
"numpy.linspace",
"numpy.logspace"
]
]
|
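
`ff_sphere_ml` in the row above is a numba-parallelised multilayer-sphere form factor. The same sum written in plain NumPy for readability; the two-shell `R`/`rho` values below are illustrative, not the model defaults:

```python
import numpy as np


def ff_sphere(q, R, rho):
    # Scattering amplitude of a sphere with len(R) concentric shells;
    # R holds shell thicknesses, rho the (possibly complex) densities.
    amp = np.zeros_like(q, dtype=complex)
    rt = 0.0
    for j in range(1, len(R)):
        rt += R[j - 1]  # cumulative radius up to shell j
        amp += (rho[j - 1] - rho[j]) * (
            np.sin(q * rt) - q * rt * np.cos(q * rt)) / q**3
    return np.abs(amp)**2, amp


q = np.logspace(-3, 0, 5)
ff, aff = ff_sphere(q, R=[10.0, 0.0], rho=[1.0, 0.0])
print(ff)
```

The numba version only adds `prange` over the `q` loop; the per-shell arithmetic is identical.
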
yih301/pdenv | [
"ee51a3e39e7e353b30bc3d10e37d72877cfe5921"
]
| [
"train_stable.py"
]
| [
"import os\n\nimport gym\nimport gym\nfrom gym import wrappers, logger\nimport gym_panda\nfrom gym_panda.wrapper_env.wrapper import *\n# import gym_circle_move\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nfrom stable_baselines import DDPG,PPO2,TRPO\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines import results_plotter\nfrom stable_baselines.bench import Monitor\nfrom stable_baselines.results_plotter import load_results, ts2xy\n#from stable_baselines.common.noise import AdaptiveParamNoiseSpec\nfrom stable_baselines.common.callbacks import BaseCallback\nfrom stable_baselines.ddpg.policies import MlpPolicy as ddpg_MlpPolicy\n\nfrom stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec\nfrom datetime import datetime\n\nclass SaveOnBestTrainingRewardCallback(BaseCallback):\n \"\"\"\n Callback for saving a model (the check is done every ``check_freq`` steps)\n based on the training reward (in practice, we recommend using ``EvalCallback``).\n\n :param check_freq: (int)\n :param log_dir: (str) Path to the folder where the model will be saved.\n It must contains the file created by the ``Monitor`` wrapper.\n :param verbose: (int)\n \"\"\"\n def __init__(self, check_freq: int, log_dir: str, verbose=1):\n super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)\n self.check_freq = check_freq\n self.log_dir = log_dir\n self.save_path = os.path.join(log_dir, 'best_model')\n self.latest_path = os.path.join(log_dir, 'latest_model')\n self.best_mean_reward = -np.inf\n self.reward = []\n\n def _init_callback(self) -> None:\n # Create folder if needed\n if self.save_path is not None:\n os.makedirs(self.save_path, exist_ok=True)\n if self.latest_path is not None:\n os.makedirs(self.latest_path, exist_ok=True)\n\n def _on_step(self) -> bool:\n # print(\"h------------------------------------------------------g\")\n if self.n_calls % self.check_freq == 0:\n\n # Retrieve training reward\n x, y = ts2xy(load_results(self.log_dir), 'timesteps')\n if len(x) > 0:\n # Mean training reward over the last 100 episodes\n mean_reward = np.mean(y[-100:])\n if self.verbose > 0:\n print(\"Num timesteps: {}\".format(self.num_timesteps))\n print(\"Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}\".format(self.best_mean_reward, mean_reward))\n\n # New best model, you could save the agent here\n if mean_reward > self.best_mean_reward:\n self.best_mean_reward = mean_reward\n # Example for saving best model\n if self.verbose > 0:\n print(\"Saving new best model to {}\".format(self.save_path))\n self.model.save(self.save_path)\n \n if self.n_calls % 1e4 == 0:\n self.model.save(self.latest_path)\n\n return True\n\n\n\n\nif __name__ == \"__main__\":\n # make env\n env_name = \"feasibilitypanda-v0\"\n #env_name = \"disabledpanda-v0\"\n env = gym.make(env_name)\n #pdb.set_trace()\n #env = SkipStepsWrapperVAE(env)\n #env = infeasibleWrapper(env)\n\n # Create log dir\n #log_dir = \"/iliad/u/yilunhao/logs/models/sb-trpo-joint-target-diffdynamics-{}/\".format(datetime.now().strftime(\"%Y-%m-%d\"))\n log_dir = \"../logs/models/sb-trpo-joint-target-diffdynamics-{}/\".format(datetime.now().strftime(\"%Y-%m-%d\"))\n tensorboard_dir = \"../logs/logs\"\n os.makedirs(tensorboard_dir, exist_ok=True)\n os.makedirs(log_dir, exist_ok=True)\n\n env = Monitor(env, log_dir)\n env.reset()\n # print(env.n)\n n_actions = env.action_space.shape[-1]\n param_noise = AdaptiveParamNoiseSpec(initial_stddev=0.01, desired_action_stddev=0.01)\n 
action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5) * np.ones(n_actions))\n model = TRPO(MlpPolicy, env, verbose=1, tensorboard_log=tensorboard_dir)\n # model = DDPG(ddpg_MlpPolicy, env, param_noise=param_noise, action_noise=action_noise, verbose=1,tensorboard_log=tensorboard_dir)#, param_noise=param_noise, action_noise=action_noise) \n callback = SaveOnBestTrainingRewardCallback(check_freq=100, log_dir=log_dir)\n \n # Train the model\n time_steps = 1e8\n model.learn(total_timesteps=int(time_steps), callback=callback)\n model.save(os.path.join(log_dir, \"final_model.pt\"))\n\n # model = PPO2.load(\"/home/jingjia16/marl/models/best_model.zip\")\n # obs = env.reset()\n # while True:\n # action, _states = model.predict(obs)\n # obs, rewards, dones, info = env.step(action)\n # env.render()\n\n\n # results_plotter.plot_results([log_dir], time_steps, results_plotter.X_TIMESTEPS, \"DDPG\")\n # plt.savefig(os.path.join(log_dir, 'plot.png'))\n"
]
| [
[
"numpy.mean",
"numpy.zeros",
"numpy.ones"
]
]
|
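
The callback in `train_stable.py` saves the model whenever the mean reward over the last 100 episodes improves. A library-free sketch of just that bookkeeping, with synthetic rewards standing in for the `ts2xy(load_results(...))` output:

```python
import numpy as np

# Synthetic per-episode rewards in place of the Monitor log.
episode_rewards = np.random.normal(0.0, 1.0, 500).cumsum()

best_mean_reward = -np.inf
for step in range(100, len(episode_rewards) + 1, 100):  # check_freq = 100
    # Mean training reward over the last 100 episodes, as in _on_step().
    mean_reward = np.mean(episode_rewards[max(0, step - 100):step])
    if mean_reward > best_mean_reward:
        best_mean_reward = mean_reward
        print(f'step {step}: new best mean reward {mean_reward:.2f} -> save')
```
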
mou3adb/RodiCS | [
"caafe8f6427943cb6d82cf3245a3d774ba7664f1"
]
| [
"scripts/axes_world.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as pp\n\nfrom matplotlib import rc\n#==============================================================================\nrc('text', usetex=True)\n\nmarkersize = 8\nfontsize = 12\n\ndef one_by_one(inch_x=3.5, inch_y=3.5):\n fig = pp.figure(figsize=(inch_x,inch_y))\n ax = fig.add_axes([0.15,0.13,0.8,0.81])\n \n pp.setp(ax.spines.values(), linewidth=0.7)\n\n ax.tick_params(axis='both', which='major', width=0.7)\n\n return ax\n\ndef one_by_two(inch_x=6.5,inch_y=3.5):\n fig = pp.figure(figsize=(inch_x,inch_y))\n \n ax_a = fig.add_axes([0.11,0.14,0.37,0.8])\n \n ax_b = fig.add_axes([0.61,0.14,0.37,0.8])\n \n ax_a.text(-0.26, 0.95, '$(a)$', fontsize=fontsize, transform=ax_a.transAxes)\n ax_b.text(-0.26, 0.95, '$(b)$', fontsize=fontsize, transform=ax_b.transAxes)\n \n pp.setp(ax_a.spines.values(), linewidth=0.7)\n pp.setp(ax_b.spines.values(), linewidth=0.7)\n \n ax_a.tick_params(axis='both', which='major', width=0.7)\n ax_b.tick_params(axis='both', which='major', width=0.7)\n \n return ax_a, ax_b\n\ndef two_by_one(inch_x=5, inch_y=3.5*2):\n fig = pp.figure(figsize=(inch_x,inch_y))\n \n ax_b = fig.add_axes([0.14,0.07,0.83,0.41])\n \n ax_a = fig.add_axes([0.14,0.56,0.83,0.41])\n \n ax_b.text(-0.13, 0.95, '$(b)$', fontsize=12, transform=ax_b.transAxes)\n ax_a.text(-0.13, 0.95, '$(a)$', fontsize=12, transform=ax_a.transAxes)\n \n pp.setp(ax_a.spines.values(), linewidth=0.7)\n pp.setp(ax_b.spines.values(), linewidth=0.7)\n \n ax_a.tick_params(axis='both', which='major', width=0.7)\n ax_b.tick_params(axis='both', which='major', width=0.7)\n \n return ax_a, ax_b\n\ndef slope_triangle(x0, y0, dx, dy, label_x, label_y, up, ax):\n x1 = x0*np.exp(dx)\n y1 = y0*np.exp(dy)\n\n if up == 'Up':\n triangle_x = [x0, x1, x0]\n triangle_y = [y0, y1, y1]\n\n text_x = x0*0.70, np.sqrt(y0*y1)\n text_y = np.sqrt(x0*x1), y1*1.05\n\n va_x = 'center'\n ha_x = 'left'\n\n va_y = 'bottom'\n ha_y = 'center'\n\n else:\n triangle_x = [x0, x1, x1]\n triangle_y = [y0, y0, y1]\n\n text_x = np.sqrt(x0*x1), y0*0.9\n text_y = x1*1.2, np.sqrt(y0*y1)\n\n va_x = 'top'\n ha_x = 'center'\n\n va_y = 'center'\n ha_y = 'left'\n\n ax.fill(triangle_x, triangle_y,\n edgecolor='dimgrey',\n facecolor='lightgrey',\n alpha=0.25)\n\n ax.text(text_x[0], text_x[1], r'$%s$' % label_x,\n verticalalignment=va_x,\n horizontalalignment=ha_x,\n fontsize=12)\n\n ax.text(text_y[0], text_y[1], r'$%s$' % label_y,\n verticalalignment=va_y,\n horizontalalignment=ha_y,\n fontsize=12)\n\n"
]
| [
[
"numpy.sqrt",
"numpy.exp",
"matplotlib.pyplot.figure",
"matplotlib.rc"
]
]
|
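
`slope_triangle()` above places its annotations using log-axis geometry. A few lines isolating that reasoning:

```python
import numpy as np

x0, y0 = 1.0, 1.0
dx, dy = 2.0, 1.0                  # side lengths in natural-log units

# Moving a fixed distance along a log axis is a multiplication by exp(d).
x1, y1 = x0 * np.exp(dx), y0 * np.exp(dy)

# The geometric mean is the midpoint on a log axis, hence its use for
# centring the side labels in slope_triangle().
mid_x, mid_y = np.sqrt(x0 * x1), np.sqrt(y0 * y1)
print(x1, y1, mid_x, mid_y)        # 7.389... 2.718... 2.718... 1.648...
```
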
gpengzhi/fairseq | [
"4775610f48b770f271e05245991dc7b44d45667b",
"4775610f48b770f271e05245991dc7b44d45667b"
]
| [
"fairseq/options.py",
"fairseq/models/speech_to_text/xm_transformer.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nfrom pathlib import Path\nfrom typing import Callable, List, Optional, Union\n\nimport torch\nfrom fairseq import utils\nfrom fairseq.data.indexed_dataset import get_available_dataset_impl\nfrom fairseq.dataclass.configs import (\n CheckpointConfig,\n CommonConfig,\n CommonEvalConfig,\n DatasetConfig,\n DistributedTrainingConfig,\n EvalLMConfig,\n GenerationConfig,\n InteractiveConfig,\n OptimizationConfig,\n EMAConfig,\n)\nfrom fairseq.dataclass.utils import gen_parser_from_dataclass\n\n# this import is for backward compatibility\nfrom fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa\n\n\ndef get_preprocessing_parser(default_task=\"translation\"):\n parser = get_parser(\"Preprocessing\", default_task)\n add_preprocess_args(parser)\n return parser\n\n\ndef get_training_parser(default_task=\"translation\"):\n parser = get_parser(\"Trainer\", default_task)\n add_dataset_args(parser, train=True)\n add_distributed_training_args(parser)\n add_model_args(parser)\n add_optimization_args(parser)\n add_checkpoint_args(parser)\n add_ema_args(parser)\n return parser\n\n\ndef get_generation_parser(interactive=False, default_task=\"translation\"):\n parser = get_parser(\"Generation\", default_task)\n add_dataset_args(parser, gen=True)\n add_distributed_training_args(parser, default_world_size=1)\n add_generation_args(parser)\n add_checkpoint_args(parser)\n if interactive:\n add_interactive_args(parser)\n return parser\n\n\ndef get_interactive_generation_parser(default_task=\"translation\"):\n return get_generation_parser(interactive=True, default_task=default_task)\n\n\ndef get_eval_lm_parser(default_task=\"language_modeling\"):\n parser = get_parser(\"Evaluate Language Model\", default_task)\n add_dataset_args(parser, gen=True)\n add_distributed_training_args(parser, default_world_size=1)\n add_eval_lm_args(parser)\n return parser\n\n\ndef get_validation_parser(default_task=None):\n parser = get_parser(\"Validation\", default_task)\n add_dataset_args(parser, train=True)\n add_distributed_training_args(parser, default_world_size=1)\n group = parser.add_argument_group(\"Evaluation\")\n gen_parser_from_dataclass(group, CommonEvalConfig())\n return parser\n\n\ndef parse_args_and_arch(\n parser: argparse.ArgumentParser,\n input_args: List[str] = None,\n parse_known: bool = False,\n suppress_defaults: bool = False,\n modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,\n):\n \"\"\"\n Args:\n parser (ArgumentParser): the parser\n input_args (List[str]): strings to parse, defaults to sys.argv\n parse_known (bool): only parse known arguments, similar to\n `ArgumentParser.parse_known_args`\n suppress_defaults (bool): parse while ignoring all default values\n modify_parser (Optional[Callable[[ArgumentParser], None]]):\n function to modify the parser, e.g., to set default values\n \"\"\"\n if suppress_defaults:\n # Parse args without any default values. 
This requires us to parse\n # twice, once to identify all the necessary task/model args, and a second\n # time with all defaults set to None.\n args = parse_args_and_arch(\n parser,\n input_args=input_args,\n parse_known=parse_known,\n suppress_defaults=False,\n )\n suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])\n suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})\n args = suppressed_parser.parse_args(input_args)\n return argparse.Namespace(\n **{k: v for k, v in vars(args).items() if v is not None}\n )\n\n from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY\n\n # Before creating the true parser, we need to import optional user module\n # in order to eagerly import custom tasks, optimizers, architectures, etc.\n usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)\n usr_parser.add_argument(\"--user-dir\", default=None)\n usr_args, _ = usr_parser.parse_known_args(input_args)\n utils.import_user_module(usr_args)\n\n if modify_parser is not None:\n modify_parser(parser)\n\n # The parser doesn't know about model/criterion/optimizer-specific args, so\n # we parse twice. First we parse the model/criterion/optimizer, then we\n # parse a second time after adding the *-specific arguments.\n # If input_args is given, we will parse those args instead of sys.argv.\n args, _ = parser.parse_known_args(input_args)\n\n # Add model-specific args to parser.\n if hasattr(args, \"arch\"):\n model_specific_group = parser.add_argument_group(\n \"Model-specific configuration\",\n # Only include attributes which are explicitly given as command-line\n # arguments or which have default values.\n argument_default=argparse.SUPPRESS,\n )\n if args.arch in ARCH_MODEL_REGISTRY:\n ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)\n elif args.arch in MODEL_REGISTRY:\n MODEL_REGISTRY[args.arch].add_args(model_specific_group)\n else:\n raise RuntimeError()\n\n if hasattr(args, \"task\"):\n from fairseq.tasks import TASK_REGISTRY\n\n TASK_REGISTRY[args.task].add_args(parser)\n if getattr(args, \"use_bmuf\", False):\n # hack to support extra args for block distributed data parallelism\n from fairseq.optim.bmuf import FairseqBMUF\n\n FairseqBMUF.add_args(parser)\n\n # Add *-specific args to parser.\n from fairseq.registry import REGISTRIES\n\n for registry_name, REGISTRY in REGISTRIES.items():\n choice = getattr(args, registry_name, None)\n if choice is not None:\n cls = REGISTRY[\"registry\"][choice]\n if hasattr(cls, \"add_args\"):\n cls.add_args(parser)\n elif hasattr(cls, \"__dataclass\"):\n gen_parser_from_dataclass(parser, cls.__dataclass())\n\n # Modify the parser a second time, since defaults may have been reset\n if modify_parser is not None:\n modify_parser(parser)\n\n # Parse a second time.\n if parse_known:\n args, extra = parser.parse_known_args(input_args)\n else:\n args = parser.parse_args(input_args)\n extra = None\n # Post-process args.\n if (\n hasattr(args, \"batch_size_valid\") and args.batch_size_valid is None\n ) or not hasattr(args, \"batch_size_valid\"):\n args.batch_size_valid = args.batch_size\n if hasattr(args, \"max_tokens_valid\") and args.max_tokens_valid is None:\n args.max_tokens_valid = args.max_tokens\n if getattr(args, \"memory_efficient_fp16\", False):\n args.fp16 = True\n if getattr(args, \"memory_efficient_bf16\", False):\n args.bf16 = True\n args.tpu = getattr(args, \"tpu\", False)\n args.bf16 = getattr(args, \"bf16\", False)\n if args.bf16:\n args.tpu = True\n if 
args.tpu and args.fp16:\n raise ValueError(\"Cannot combine --fp16 and --tpu, use --bf16 on TPUs\")\n\n if getattr(args, \"seed\", None) is None:\n args.seed = 1 # default seed for training\n args.no_seed_provided = True\n else:\n args.no_seed_provided = False\n\n # Apply architecture configuration.\n if hasattr(args, \"arch\") and args.arch in ARCH_CONFIG_REGISTRY:\n ARCH_CONFIG_REGISTRY[args.arch](args)\n\n if parse_known:\n return args, extra\n else:\n return args\n\n\ndef get_parser(desc, default_task=\"translation\"):\n # Before creating the true parser, we need to import optional user module\n # in order to eagerly import custom tasks, optimizers, architectures, etc.\n usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)\n usr_parser.add_argument(\"--user-dir\", default=None)\n usr_args, _ = usr_parser.parse_known_args()\n utils.import_user_module(usr_args)\n\n parser = argparse.ArgumentParser(allow_abbrev=False)\n gen_parser_from_dataclass(parser, CommonConfig())\n\n from fairseq.registry import REGISTRIES\n\n for registry_name, REGISTRY in REGISTRIES.items():\n parser.add_argument(\n \"--\" + registry_name.replace(\"_\", \"-\"),\n default=REGISTRY[\"default\"],\n choices=REGISTRY[\"registry\"].keys(),\n )\n\n # Task definitions can be found under fairseq/tasks/\n from fairseq.tasks import TASK_REGISTRY\n\n parser.add_argument(\n \"--task\",\n metavar=\"TASK\",\n default=default_task,\n choices=TASK_REGISTRY.keys(),\n help=\"task\",\n )\n # fmt: on\n return parser\n\n\ndef add_preprocess_args(parser):\n group = parser.add_argument_group(\"Preprocessing\")\n # fmt: off\n group.add_argument(\"-s\", \"--source-lang\", default=None, metavar=\"SRC\",\n help=\"source language\")\n group.add_argument(\"-t\", \"--target-lang\", default=None, metavar=\"TARGET\",\n help=\"target language\")\n group.add_argument(\"--trainpref\", metavar=\"FP\", default=None,\n help=\"train file prefix (also used to build dictionaries)\")\n group.add_argument(\"--validpref\", metavar=\"FP\", default=None,\n help=\"comma separated, valid file prefixes \"\n \"(words missing from train set are replaced with <unk>)\")\n group.add_argument(\"--testpref\", metavar=\"FP\", default=None,\n help=\"comma separated, test file prefixes \"\n \"(words missing from train set are replaced with <unk>)\")\n group.add_argument(\"--align-suffix\", metavar=\"FP\", default=None,\n help=\"alignment file suffix\")\n group.add_argument(\"--destdir\", metavar=\"DIR\", default=\"data-bin\",\n help=\"destination dir\")\n group.add_argument(\"--thresholdtgt\", metavar=\"N\", default=0, type=int,\n help=\"map words appearing less than threshold times to unknown\")\n group.add_argument(\"--thresholdsrc\", metavar=\"N\", default=0, type=int,\n help=\"map words appearing less than threshold times to unknown\")\n group.add_argument(\"--tgtdict\", metavar=\"FP\",\n help=\"reuse given target dictionary\")\n group.add_argument(\"--srcdict\", metavar=\"FP\",\n help=\"reuse given source dictionary\")\n group.add_argument(\"--nwordstgt\", metavar=\"N\", default=-1, type=int,\n help=\"number of target words to retain\")\n group.add_argument(\"--nwordssrc\", metavar=\"N\", default=-1, type=int,\n help=\"number of source words to retain\")\n group.add_argument(\"--alignfile\", metavar=\"ALIGN\", default=None,\n help=\"an alignment file (optional)\")\n parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',\n choices=get_available_dataset_impl(),\n help='output dataset implementation')\n 
group.add_argument(\"--joined-dictionary\", action=\"store_true\",\n help=\"Generate joined dictionary\")\n group.add_argument(\"--only-source\", action=\"store_true\",\n help=\"Only process the source language\")\n group.add_argument(\"--padding-factor\", metavar=\"N\", default=8, type=int,\n help=\"Pad dictionary size to be multiple of N\")\n group.add_argument(\"--workers\", metavar=\"N\", default=1, type=int,\n help=\"number of parallel workers\")\n group.add_argument(\"--dict-only\", action='store_true',\n help=\"if true, only builds a dictionary and then exits\")\n # fmt: on\n return parser\n\n\ndef add_dataset_args(parser, train=False, gen=False):\n group = parser.add_argument_group(\"dataset_data_loading\")\n gen_parser_from_dataclass(group, DatasetConfig())\n # fmt: on\n return group\n\n\ndef add_distributed_training_args(parser, default_world_size=None):\n group = parser.add_argument_group(\"distributed_training\")\n if default_world_size is None:\n default_world_size = max(1, torch.cuda.device_count())\n gen_parser_from_dataclass(\n group, DistributedTrainingConfig(distributed_world_size=default_world_size)\n )\n return group\n\n\ndef add_optimization_args(parser):\n group = parser.add_argument_group(\"optimization\")\n # fmt: off\n gen_parser_from_dataclass(group, OptimizationConfig())\n # fmt: on\n return group\n\n\ndef add_checkpoint_args(parser):\n group = parser.add_argument_group(\"checkpoint\")\n # fmt: off\n gen_parser_from_dataclass(group, CheckpointConfig())\n # fmt: on\n return group\n\n\ndef add_common_eval_args(group):\n gen_parser_from_dataclass(group, CommonEvalConfig())\n\n\ndef add_eval_lm_args(parser):\n group = parser.add_argument_group(\"LM Evaluation\")\n add_common_eval_args(group)\n gen_parser_from_dataclass(group, EvalLMConfig())\n\n\ndef add_generation_args(parser):\n group = parser.add_argument_group(\"Generation\")\n add_common_eval_args(group)\n gen_parser_from_dataclass(group, GenerationConfig())\n return group\n\n\ndef add_interactive_args(parser):\n group = parser.add_argument_group(\"Interactive\")\n gen_parser_from_dataclass(group, InteractiveConfig())\n\n\ndef add_model_args(parser):\n group = parser.add_argument_group(\"Model configuration\")\n # fmt: off\n\n # Model definitions can be found under fairseq/models/\n #\n # The model architecture can be specified in several ways.\n # In increasing order of priority:\n # 1) model defaults (lowest priority)\n # 2) --arch argument\n # 3) --encoder/decoder-* arguments (highest priority)\n from fairseq.models import ARCH_MODEL_REGISTRY\n group.add_argument('--arch', '-a', metavar='ARCH',\n choices=ARCH_MODEL_REGISTRY.keys(),\n help='model architecture')\n # fmt: on\n return group\n\n\ndef get_args(\n data: Union[str, Path],\n task: str = \"translation\",\n arch: str = \"transformer\",\n **overrides\n):\n parser = get_training_parser(task)\n args = parse_args_and_arch(parser, [str(data), \"--task\", task, \"--arch\", arch])\n\n for k, v in overrides.items():\n setattr(args, k, v)\n\n return args\n\n\ndef add_ema_args(parser):\n group = parser.add_argument_group(\"EMA configuration\")\n gen_parser_from_dataclass(group, EMAConfig())\n",
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport copy\nfrom typing import Dict, List, Optional, Tuple\n\nfrom fairseq import utils, checkpoint_utils\nfrom fairseq.models import (FairseqEncoderDecoderModel, FairseqEncoder,\n register_model, register_model_architecture)\nfrom fairseq.models.transformer import Embedding, TransformerDecoder\nfrom fairseq.models.wav2vec import Wav2VecEncoder\nfrom fairseq.modules.layer_norm import LayerNorm\nfrom fairseq.data.data_utils import lengths_to_padding_mask\nfrom torch import Tensor\nimport torch.nn as nn\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Conv1dAdaptor(nn.Module):\n def __init__(self, in_dim, out_dim, n_layers=3, kernel_size=3, stride=2,\n add_layernorm=False):\n super().__init__()\n self.layers = nn.ModuleList(\n nn.Conv1d(in_dim if i == 0 else out_dim, out_dim * 2, kernel_size,\n stride=stride, padding=kernel_size // 2)\n for i in range(n_layers)\n )\n self.layernorms = None\n if add_layernorm:\n self.layernorms = nn.ModuleList(LayerNorm(out_dim)\n for _ in range(n_layers))\n self.stride = stride\n\n @classmethod\n def add_args(cls, parser):\n parser.add_argument(\"--adaptor-n-layers\", type=int)\n parser.add_argument(\"--adaptor-kernel-size\", type=int)\n parser.add_argument(\"--adaptor-stride\", type=int)\n parser.add_argument(\"--adaptor-layernorm\", action='store_true')\n\n def get_out_seq_lens_tensor(self, in_seq_lens_tensor):\n out = in_seq_lens_tensor.clone()\n for _ in self.layers:\n out = ((out.float() - 1) / self.stride + 1).floor().long()\n return out\n\n def forward(self, x, padding_mask):\n # T x B x C -> B x C x T\n x = x.transpose(0, 1).transpose(1, 2)\n for i, layer in enumerate(self.layers):\n x = nn.functional.glu(layer(x), dim=1)\n if self.layernorms is not None:\n x = self.layernorms[i](x.transpose(1, 2)).transpose(1, 2)\n # B x C x T -> T x B x C\n x = x.transpose(1, 2).transpose(0, 1)\n\n if padding_mask is None:\n out_padding_mask = None\n else:\n out_lengths = self.get_out_seq_lens_tensor((~padding_mask).sum(1))\n out_padding_mask = lengths_to_padding_mask(out_lengths)\n return x, out_padding_mask\n\n\ndef add_wav2vec_asr_args(parser):\n parser.add_argument(\"--w2v-path\", help=\"path to wav2vec 2.0 model\")\n parser.add_argument(\n \"--no-pretrained-weights\",\n action=\"store_true\",\n help=\"if true, does not load pretrained weights\",\n )\n parser.add_argument(\n \"--dropout-input\",\n type=float,\n metavar=\"D\",\n help=\"dropout to apply to the input (after feat extr)\",\n )\n parser.add_argument(\n \"--final-dropout\",\n type=float,\n metavar=\"D\",\n help=\"dropout after transformer and before final projection\",\n )\n parser.add_argument(\n \"--apply-mask\", action=\"store_true\", help=\"apply masking during fine-tuning\"\n )\n parser.add_argument(\n \"--dropout\",\n type=float,\n metavar=\"D\",\n help=\"dropout probability inside wav2vec 2.0 model\",\n )\n parser.add_argument(\n \"--attention-dropout\",\n type=float,\n metavar=\"D\",\n help=\"dropout probability for attention weights inside wav2vec 2.0 model\",\n )\n parser.add_argument(\n \"--activation-dropout\",\n \"--relu-dropout\",\n type=float,\n metavar=\"D\",\n help=\"dropout probability after activation in FFN inside wav2vec 2.0 model\",\n )\n\n parser.add_argument(\n \"--mask-length\", type=int, help=\"repeat the mask indices multiple times\"\n )\n\n 
parser.add_argument(\n \"--mask-prob\", type=float, help=\"probability of replacing a token with mask\"\n )\n\n parser.add_argument(\n \"--mask-selection\",\n type=str,\n choices=[\"static\", \"uniform\", \"normal\", \"poisson\"],\n help=\"how to choose masks\",\n )\n\n parser.add_argument(\n \"--mask-other\",\n type=float,\n help=\"stdev of the mask length in case of 'normal' selection strategy\",\n )\n\n parser.add_argument(\n \"--no-mask-overlap\",\n action=\"store_true\",\n help=\"whether to allow masks to overlap\",\n )\n\n parser.add_argument(\n \"--mask-channel-length\", type=int, help=\"repeat the mask indices multiple times\"\n )\n\n parser.add_argument(\n \"--mask-channel-prob\",\n type=float,\n help=\"probability of replacing a token with mask\",\n )\n\n parser.add_argument(\n \"--mask-channel-selection\",\n type=str,\n choices=[\"static\", \"uniform\", \"normal\", \"poisson\"],\n help=\"how to choose masks\",\n )\n\n parser.add_argument(\n \"--mask-channel-other\",\n type=float,\n help=\"stdev of the mask length in case of 'normal' selection strategy\",\n )\n\n parser.add_argument(\n \"--no-mask-channel-overlap\",\n action=\"store_true\",\n help=\"whether to allow masks to overlap\",\n )\n\n parser.add_argument(\n \"--freeze-finetune-updates\",\n default=0,\n type=int,\n help=\"dont finetune wav2vec for this many updates\",\n )\n\n parser.add_argument(\n \"--feature-grad-mult\",\n default=None,\n type=float,\n help=\"reset feature grad mult in wav2vec 2.0 to this\",\n )\n\n parser.add_argument(\n \"--layerdrop\",\n default=0.0,\n type=float,\n help=\"probability of dropping a layer in wav2vec 2.0\",\n )\n parser.add_argument(\"--w2v-args\", default=None)\n\n\nclass Wav2VecEncoderWithAdaptor(FairseqEncoder):\n def __init__(self, args):\n super().__init__(None)\n self.w2v_encoder = Wav2VecEncoder(args)\n encoder_out_dim = self.w2v_encoder.w2v_model.encoder.embedding_dim\n # Projection + 8x shrinking\n self.adaptor = Conv1dAdaptor(\n encoder_out_dim, args.decoder_embed_dim,\n n_layers=args.adaptor_n_layers,\n kernel_size=args.adaptor_kernel_size, stride=args.adaptor_stride,\n add_layernorm=args.adaptor_layernorm\n )\n for k, p in self.w2v_encoder.w2v_model.named_parameters():\n # Freeze pretrained models by default\n if hasattr(args, 'finetune_w2v_params') and XMTransformerModel.finetune_params(\n args.finetune_w2v_params, k):\n p.requires_grad = True\n else:\n p.requires_grad = False\n\n @classmethod\n def add_args(cls, parser):\n add_wav2vec_asr_args(parser)\n parser.add_argument(\n \"--normalize\", action=\"store_true\",\n help=\"if set, normalizes input to have 0 mean and unit variance\",\n )\n parser.add_argument(\"--finetune-w2v-params\", type=str, metavar=\"STR\",\n help=\"comma-separated param strings to finetune.\")\n Conv1dAdaptor.add_args(parser)\n\n def forward(self, src_tokens, src_lengths=None, **kwargs):\n padding_mask = lengths_to_padding_mask(src_lengths)\n out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True)\n x = out[\"encoder_out\"]\n enc_padding_mask = None\n if out[\"encoder_padding_mask\"] is not None:\n enc_padding_mask = out[\"encoder_padding_mask\"].transpose(0, 1) # T X B --> B X T\n\n x, enc_padding_mask = self.adaptor(x, enc_padding_mask)\n\n return {\n \"encoder_out\": [x], # T x B x C\n \"encoder_padding_mask\": [enc_padding_mask] if enc_padding_mask.any() else [], # B x T\n \"encoder_embedding\": [], # B x T x C\n \"encoder_states\": [], # List[T x B x C]\n \"src_tokens\": [],\n \"src_lengths\": [],\n }\n\n def 
reorder_encoder_out(self, encoder_out, new_order):\n new_encoder_out = (\n [] if len(encoder_out[\"encoder_out\"]) == 0\n else [x.index_select(1, new_order) for x in encoder_out[\"encoder_out\"]]\n )\n\n new_encoder_padding_mask = (\n [] if len(encoder_out[\"encoder_padding_mask\"]) == 0\n else [x.index_select(0, new_order) for x in\n encoder_out[\"encoder_padding_mask\"]]\n )\n\n new_encoder_embedding = (\n [] if len(encoder_out[\"encoder_embedding\"]) == 0\n else [x.index_select(0, new_order) for x in\n encoder_out[\"encoder_embedding\"]]\n )\n\n encoder_states = encoder_out[\"encoder_states\"]\n if len(encoder_states) > 0:\n for idx, state in enumerate(encoder_states):\n encoder_states[idx] = state.index_select(1, new_order)\n\n return {\n \"encoder_out\": new_encoder_out, # T x B x C\n \"encoder_padding_mask\": new_encoder_padding_mask, # B x T\n \"encoder_embedding\": new_encoder_embedding, # B x T x C\n \"encoder_states\": encoder_states, # List[T x B x C]\n \"src_tokens\": [], # B x T\n \"src_lengths\": [], # B x 1\n }\n\n\ndef add_decoder_args(parser):\n parser.add_argument(\"--activation-fn\", type=str, default='relu',\n choices=utils.get_available_activation_fns(),\n help=\"activation function to use\")\n parser.add_argument(\"--decoder-dropout\", type=float, metavar=\"D\",\n help=\"dropout probability\")\n parser.add_argument(\"--decoder-attention-dropout\", type=float,\n metavar=\"D\",\n help=\"dropout probability for attention weights\")\n parser.add_argument(\"--decoder-activation-dropout\", type=float,\n metavar=\"D\",\n help=\"dropout probability after activation in FFN.\")\n parser.add_argument(\"--decoder-embed-dim\", type=int, metavar=\"N\",\n help=\"decoder embedding dimension\")\n parser.add_argument(\"--decoder-ffn-embed-dim\", type=int, metavar=\"N\",\n help=\"decoder embedding dimension for FFN\")\n parser.add_argument(\"--decoder-layers\", type=int, metavar=\"N\",\n help=\"num decoder layers\")\n parser.add_argument(\"--decoder-attention-heads\", type=int, metavar=\"N\",\n help=\"num decoder attention heads\")\n parser.add_argument(\"--decoder-normalize-before\", action=\"store_true\",\n help=\"apply layernorm before each decoder block\")\n parser.add_argument(\"--layernorm-embedding\", action=\"store_true\",\n help=\"add layernorm to embedding\")\n parser.add_argument(\"--no-scale-embedding\", action=\"store_true\",\n help=\"if True, dont scale embeddings\")\n parser.add_argument(\n \"--load-pretrained-decoder-from\", type=str, metavar=\"STR\",\n help=\"model to take decoder weights from (for initialization)\"\n )\n parser.add_argument(\"--finetune-decoder-params\", type=str,\n metavar=\"STR\",\n help=\"comma-separated param strings to finetune.\")\n parser.add_argument(\"--checkpoint-activations\", action=\"store_true\")\n\n\n@register_model(\"xm_transformer\")\nclass XMTransformerModel(FairseqEncoderDecoderModel):\n def __init__(self, encoder, decoder):\n super().__init__(encoder, decoder)\n\n @classmethod\n def add_args(cls, parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n Wav2VecEncoderWithAdaptor.add_args(parser)\n add_decoder_args(parser)\n\n @classmethod\n def build_encoder(cls, args):\n _args = copy.deepcopy(args)\n state = checkpoint_utils.load_checkpoint_to_cpu(args.w2v_path)\n if state.get(\"cfg\") is not None:\n encoder_embed_dim = state[\"cfg\"]._content[\"model\"][\"encoder_embed_dim\"]\n elif state.get(\"args\") is not None:\n encoder_embed_dim = state[\"args\"].encoder_embed_dim\n else:\n raise ValueError(f\"Invalid config in 
{args.w2v_path}\")\n _args.decoder_embed_dim = encoder_embed_dim\n encoder = Wav2VecEncoderWithAdaptor(_args)\n return encoder\n\n @classmethod\n def build_decoder(cls, args, task, embed_tokens):\n _args = copy.deepcopy(args)\n _args.dropout = args.decoder_dropout\n _args.attention_dropout = args.decoder_attention_dropout\n _args.activation_dropout = args.decoder_activation_dropout\n _args.max_target_positions = 1024\n\n decoder = TransformerDecoder(_args, task.target_dictionary,\n embed_tokens)\n if getattr(args, \"load_pretrained_decoder_from\", None):\n decoder = checkpoint_utils.load_pretrained_component_from_model(\n component=decoder, checkpoint=args.load_pretrained_decoder_from\n )\n for k, p in decoder.named_parameters():\n # Freeze pretrained models by default\n if hasattr(args, 'finetune_decoder_params') and XMTransformerModel.finetune_params(\n args.finetune_decoder_params, k):\n p.requires_grad = True\n else:\n p.requires_grad = False\n return decoder\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n def build_embedding(dictionary, embed_dim):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n return Embedding(num_embeddings, embed_dim, padding_idx)\n\n decoder_embed_tokens = build_embedding(task.target_dictionary,\n args.decoder_embed_dim)\n encoder = cls.build_encoder(args)\n decoder = cls.build_decoder(args, task, decoder_embed_tokens)\n return cls(encoder, decoder)\n\n def get_normalized_probs(\n self,\n net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],\n log_probs: bool,\n sample: Optional[Dict[str, Tensor]] = None,\n ):\n # net_output['encoder_out'] is a (B, T, D) tensor\n lprobs = self.get_normalized_probs_scriptable(net_output, log_probs,\n sample)\n lprobs.batch_first = True\n return lprobs\n\n def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):\n \"\"\"\n The forward method inherited from the base class has a **kwargs\n argument in its input, which is not supported in torchscript. This\n method overwrites the forward method definition without **kwargs.\n \"\"\"\n encoder_out = self.encoder(src_tokens=src_tokens,\n src_lengths=src_lengths, **kwargs)\n decoder_out = self.decoder(prev_output_tokens=prev_output_tokens,\n encoder_out=encoder_out)\n return decoder_out\n\n def upgrade_state_dict(self, state_dict):\n for k in list(state_dict.keys()):\n if 'adaptor.layers' in k:\n print(k)\n new = k.replace('adaptor.layers', 'adaptor_layers')\n state_dict[new] = state_dict[k]\n del state_dict[k]\n\n @staticmethod\n def finetune_params(finetune_params, param_name):\n if finetune_params == \"all\":\n return True\n finetune_params_list = finetune_params.split(\",\")\n for finetune_param in finetune_params_list:\n if finetune_param in param_name:\n return True\n return False\n\n\ndef set_default_w2v_encoder_args(args):\n args.no_pretrained_weights = getattr(args, \"no_pretrained_weights\", False)\n args.dropout_input = getattr(args, \"dropout_input\", 0)\n args.final_dropout = getattr(args, \"final_dropout\", 0)\n args.apply_mask = getattr(args, \"apply_mask\", False)\n args.dropout = getattr(args, \"dropout\", 0)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0)\n\n args.mask_length = getattr(args, \"mask_length\", 10)\n args.mask_prob = getattr(args, \"mask_prob\", 0.5)\n args.mask_selection = getattr(args, \"mask_selection\", \"static\")\n args.mask_other = getattr(args, \"mask_other\", 0)\n args.no_mask_overlap = getattr(args, \"no_mask_overlap\", False)\n args.mask_channel_length = getattr(args, \"mask_channel_length\", 10)\n args.mask_channel_prob = getattr(args, \"mask_channel_prob\", 0.5)\n args.mask_channel_before = getattr(args, \"mask_channel_before\", False)\n args.mask_channel_selection = getattr(args, \"mask_channel_selection\",\n \"static\")\n args.mask_channel_other = getattr(args, \"mask_channel_other\", 0)\n args.no_mask_channel_overlap = getattr(args, \"no_mask_channel_overlap\",\n False)\n\n args.freeze_finetune_updates = getattr(args, \"freeze_finetune_updates\", 0)\n args.feature_grad_mult = 0.1\n args.layerdrop = getattr(args, \"layerdrop\", 0.0)\n\n args.normalize = getattr(args, \"normalize\", False)\n\n\ndef set_default_adaptor_args(args):\n args.adaptor_n_layers = getattr(args, \"adaptor_n_layers\", 3)\n args.adaptor_kernel_size = getattr(args, \"adaptor_kernel_size\", 3)\n args.adaptor_stride = getattr(args, \"adaptor_stride\", 2)\n args.adaptor_layernorm = getattr(args, \"adaptor_layernorm\", False)\n\n\ndef set_default_mbart_decoder_args(args):\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim',\n 4 * 1024)\n args.decoder_layers = getattr(args, 'decoder_layers', 12)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)\n args.decoder_normalize_before = getattr(args, 'decoder_normalize_before',\n True)\n args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True)\n args.decoder_layerdrop = getattr(args, \"decoder_layerdrop\", 0.0)\n args.adaptive_input = getattr(args, \"adaptive_input\", False)\n args.decoder_attention_dropout = getattr(args, 'decoder_attention_dropout',\n 0.)\n args.decoder_activation_dropout = getattr(args,\n 'decoder_activation_dropout', 0.)\n args.decoder_dropout = getattr(args, 'decoder_dropout', 0.1)\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff',\n None)\n args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)\n args.share_decoder_input_output_embed = getattr(\n args, 'share_decoder_input_output_embed', True\n )\n args.no_token_positional_embeddings = getattr(\n args, \"no_token_positional_embeddings\", False\n )\n\n args.decoder_output_dim = getattr(args, 'decoder_output_dim',\n args.decoder_embed_dim)\n args.decoder_input_dim = getattr(args, 'decoder_input_dim',\n args.decoder_embed_dim)\n\n args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)\n args.quant_noise_pq = getattr(args, \"quant_noise_pq\", 0)\n args.layernorm_embedding = getattr(args, 'layernorm_embedding', True)\n\n args.activation_fn = getattr(args, 'activation_fn', 'gelu')\n args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')\n args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)\n args.checkpoint_activations = getattr(args, \"checkpoint_activations\", False)\n\n\n@register_model_architecture(model_name=\"xm_transformer\",\n arch_name=\"xm_transformer\")\ndef base_architecture(args):\n set_default_w2v_encoder_args(args)\n set_default_adaptor_args(args)\n set_default_mbart_decoder_args(args)\n"
]
| [
[
"torch.cuda.device_count"
],
[
"torch.nn.Conv1d"
]
]
|
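The `xm_transformer` sample above freezes every pretrained decoder parameter by default and re-enables gradients only for names matched by the comma-separated `finetune_decoder_params` spec (see the loop in `build_decoder` and the `finetune_params` helper). A minimal standalone sketch of that selection rule; `should_finetune` and `TinyModel` are illustrative stand-ins, not fairseq APIs:

```python
import torch.nn as nn

def should_finetune(spec: str, param_name: str) -> bool:
    # "all" unfreezes everything; otherwise any comma-separated token
    # that occurs as a substring of the parameter name unfreezes it.
    if spec == "all":
        return True
    return any(tok in param_name for tok in spec.split(","))

class TinyModel(nn.Module):
    # Hypothetical stand-in for the wav2vec encoder / mBART decoder.
    def __init__(self):
        super().__init__()
        self.attn_proj = nn.Linear(8, 8)
        self.layer_norm = nn.LayerNorm(8)

model = TinyModel()
for name, p in model.named_parameters():
    # Freeze by default, unfreeze matching names, as in build_decoder.
    p.requires_grad = should_finetune("layer_norm", name)
    print(name, p.requires_grad)
```

Running this prints `True` only for `layer_norm.weight` and `layer_norm.bias`, mirroring how a spec such as `"layer_norm,self_attn"` would partially unfreeze a pretrained decoder.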
JELAshford/OxHack-2020 | [
"ab981c145bad7554b451dac227f40e80c82f2a21"
]
| [
"James_IdeaTesting/CollatzVisualiser/collatz_process.py"
]
| [
"import numpy as np\nimport time\n\n# Code for the basic finding of collatz paths to 1\nmem = {}\n\ndef collatz(val, max_step=10000):\n \n collatz_list = [val]\n step = 0\n current_val, new_val = val, 0\n\n while current_val > 1 and step < max_step:\n\n # Pull from memory if possible \n if current_val in mem.keys():\n new_val = mem[current_val]\n \n # Update val by conjecture rules\n else:\n if current_val % 2 == 0:\n new_val = current_val / 2\n else:\n new_val = (3 * current_val) + 1\n \n # Store this to memory for speed!\n mem[current_val] = new_val\n\n # Add to list and update step\n current_val = new_val\n collatz_list.append(int(current_val))\n step += 1\n\n return(collatz_list)\n\ndef repeated_random_collatz(n, min_val=1, max_val=100000):\n multi_collatz = []\n for start_val in np.random.randint(min_val, max_val, size=(1, n))[0]:\n collatz_out = collatz(start_val)\n multi_collatz.append(collatz_out)\n\n return(multi_collatz)\n\nstart = time.time()\nsingle_out = collatz(20)\nprint(single_out)\nprint(time.time()-start)\n\nout = repeated_random_collatz(10)\n"
]
| [
[
"numpy.random.randint"
]
]
|
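The Collatz sample above caches every computed step in a module-level `mem` dict so that repeated random walks reuse earlier work. A compact restatement of that memoization idea; the names `collatz_next` and `collatz_path` are illustrative, not taken from the row:

```python
import time
import numpy as np

mem = {}  # shared cache: n -> next Collatz value

def collatz_next(n):
    # Memoized single step: n // 2 if even, 3n + 1 if odd.
    if n not in mem:
        mem[n] = n // 2 if n % 2 == 0 else 3 * n + 1
    return mem[n]

def collatz_path(start, max_step=10000):
    path = [start]
    while path[-1] > 1 and len(path) <= max_step:
        path.append(collatz_next(path[-1]))
    return path

# Same sampling call the row's `apis` column records: numpy.random.randint.
for s in np.random.randint(1, 100000, size=5):
    t0 = time.time()
    steps = len(collatz_path(int(s))) - 1
    print(f"start={int(s):>6} steps={steps:>4} time={time.time() - t0:.6f}s")
```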
a3sha2/3DUnetCNN | [
"71c440a98b306355b1c3fddaf8b852cd51b50440"
]
| [
"unet3d/model/unet.py"
]
| [
"import numpy as np\nfrom tensorflow.keras import backend as K\nfrom keras.engine import Input, Model\nfrom keras.layers import Conv3D, MaxPooling3D, UpSampling3D, Activation, BatchNormalization, PReLU, Deconvolution3D\nfrom keras.optimizers import Adam\nfrom keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\nfrom unet3d.metrics import dice_coefficient_loss, get_label_dice_coefficient_function, dice_coefficient\n\nK.set_image_data_format(\"channels_first\")\n\ntry:\n from keras.engine import merge\nexcept ImportError:\n from keras.layers.merge import concatenate\n\n\ndef unet_model_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001, deconvolution=False,\n depth=4, n_base_filters=32, include_label_wise_dice_coefficients=False, metrics=dice_coefficient,\n batch_normalization=False, activation_name=\"sigmoid\"):\n \"\"\"\n Builds the 3D UNet Keras model.f\n :param metrics: List metrics to be calculated during model training (default is dice coefficient).\n :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice\n coefficient for each label as metric.\n :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following\n layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required\n to train the model.\n :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling\n layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.\n :param input_shape: Shape of the input data (n_chanels, x_size, y_size, z_size). The x, y, and z sizes must be\n divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.\n :param pool_size: Pool size for the max pooling operations.\n :param n_labels: Number of binary labels that the model is learning.\n :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.\n :param deconvolution: If set to True, will use transpose convolution(deconvolution) instead of up-sampling. 
This\n increases the amount memory required during training.\n :return: Untrained 3D UNet Model\n \"\"\"\n inputs = Input(input_shape)\n current_layer = inputs\n levels = list()\n\n # add levels with max pooling\n for layer_depth in range(depth):\n layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters*(2**layer_depth),\n batch_normalization=batch_normalization)\n layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters*(2**layer_depth)*2,\n batch_normalization=batch_normalization)\n if layer_depth < depth - 1:\n current_layer = MaxPooling3D(pool_size=pool_size)(layer2)\n levels.append([layer1, layer2, current_layer])\n else:\n current_layer = layer2\n levels.append([layer1, layer2])\n\n # add levels with up-convolution or up-sampling\n for layer_depth in range(depth-2, -1, -1):\n up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,\n n_filters=current_layer._keras_shape[1])(current_layer)\n concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)\n current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],\n input_layer=concat, batch_normalization=batch_normalization)\n current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],\n input_layer=current_layer,\n batch_normalization=batch_normalization)\n\n final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)\n act = Activation(activation_name)(final_convolution)\n model = Model(inputs=inputs, outputs=act)\n\n if not isinstance(metrics, list):\n metrics = [metrics]\n\n if include_label_wise_dice_coefficients and n_labels > 1:\n label_wise_dice_metrics = [get_label_dice_coefficient_function(index) for index in range(n_labels)]\n if metrics:\n metrics = metrics + label_wise_dice_metrics\n else:\n metrics = label_wise_dice_metrics\n\n model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)\n return model\n\n\ndef create_convolution_block(input_layer, n_filters, batch_normalization=False, kernel=(3, 3, 3), activation=None,\n padding='same', strides=(1, 1, 1), instance_normalization=False):\n \"\"\"\n\n :param strides:\n :param input_layer:\n :param n_filters:\n :param batch_normalization:\n :param kernel:\n :param activation: Keras activation layer to use. 
(default is 'relu')\n :param padding:\n :return:\n \"\"\"\n layer = Conv3D(n_filters, kernel, padding=padding, strides=strides)(input_layer)\n if batch_normalization:\n layer = BatchNormalization(axis=1)(layer)\n elif instance_normalization:\n layer =InstanceNormalization(axis=1)(layer)\n #try:\n #from keras_contrib.layers.normalization import instancenormalization\n #except ImportError:\n #raise ImportError(\"Install keras_contrib in order to use instance normalization.\"\n #\"\\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git\")\n #layer = instancenormalization(axis=1)(layer)\n if activation is None:\n return Activation('relu')(layer)\n else:\n return activation()(layer)\n\n\ndef compute_level_output_shape(n_filters, depth, pool_size, image_shape):\n \"\"\"\n Each level has a particular output shape based on the number of filters used in that level and the depth or number \n of max pooling operations that have been done on the data at that point.\n :param image_shape: shape of the 3d image.\n :param pool_size: the pool_size parameter used in the max pooling operation.\n :param n_filters: Number of filters used by the last node in a given level.\n :param depth: The number of levels down in the U-shaped model a given node is.\n :return: 5D vector of the shape of the output node \n \"\"\"\n output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()\n return tuple([None, n_filters] + output_image_shape)\n\n\ndef get_up_convolution(n_filters, pool_size, kernel_size=(2, 2, 2), strides=(2, 2, 2),\n deconvolution=False):\n if deconvolution:\n return Deconvolution3D(filters=n_filters, kernel_size=kernel_size,\n strides=strides)\n else:\n return UpSampling3D(size=pool_size)\n"
]
| [
[
"numpy.power",
"tensorflow.keras.backend.set_image_data_format"
]
]
|
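The U-Net docstring above requires each spatial dimension to be divisible by `pool_size` raised to the model depth, and `compute_level_output_shape` applies exactly that per-level division (the source of the listed `numpy.power` call). A quick check of the arithmetic, assuming a hypothetical 144x144x144 volume with pool size 2 and depth 4, where 144 / 2^4 = 9 leaves no remainder:

```python
import numpy as np

def level_output_shape(n_filters, depth, pool_size, image_shape):
    # Mirrors compute_level_output_shape: `depth` max-pools of size
    # `pool_size` shrink each spatial dimension by pool_size**depth.
    spatial = np.asarray(
        np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32
    ).tolist()
    return tuple([None, n_filters] + spatial)

# Channels-first (None, n_filters, x, y, z) shapes, one per level.
for depth in range(4):
    print(depth, level_output_shape(32 * 2 ** depth, depth,
                                    (2, 2, 2), (144, 144, 144)))
```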
Quanscendence/braynai | [
"ab828ca95571c6dffef2b2392522e6a4160a2304"
]
| [
"dataintegration/views.py"
]
| [
"from __future__ import print_function\nfrom django.shortcuts import render,redirect\nfrom django.views.generic import TemplateView,CreateView,View,UpdateView,ListView\nfrom dataintegration.forms import DriveDetailsForm,SheetDetailsForm,DropboxDetailsForm,OneDriveDetailsForm,ApiDataForm,SheetUrlForm\nfrom dataintegration.models import CustomerAPIDetails\nfrom coreapp.models import Project,ProjectType,ProjectJsonStorage,ApiDataGet,ProjectEndPoint,ProjectDashboard,ProjectMetaData,ProjectJsonStorageMetadata,ProjectUser\nfrom login.models import Profile,Customer\nimport pickle\nimport os.path\nimport numpy as np\nfrom django.db.models import Q\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import JsonResponse\nfrom django.core.files.storage import default_storage\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom coreapp.choices import INTEGRATION_CHOICES\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.models import Group\nimport dropbox\nimport onedrivesdk\nfrom onedrivesdk.helpers import GetAuthCodeServer\nimport pandas as pd\nfrom functools import reduce\nimport analytics\nfrom io import StringIO\nfrom cryptography.fernet import Fernet\nimport base64\nfrom datetime import datetime, timedelta\nfrom braces.views import GroupRequiredMixin\nfrom .keys import key\nimport io\nfrom googleapiclient.http import MediaIoBaseDownload\nfrom google_drive_downloader import GoogleDriveDownloader as gdd\nfrom seo_app.models import SiteSeo\nimport csv\nimport json\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom django.core.files import File\nfrom engine.cleaner import FillNan, FileReader, FillNan,ColumnCombine,DeleteColumn\nimport requests\n\n# Create your views here.\n\n\n'''Start of class to list out the data integration types'''\nclass IntegrationListView(ListView):\n def get(self,request,pk):\n template_name='integration_list.html'\n project=Project.objects.get(pk=pk)\n return render(request,template_name,context={'project':project})\n'''End of class to list out the data integration types'''\n\nclass DriveDetailsView(CreateView):\n def get(self,request,pk):\n template_name='file_details.html'\n seo=SiteSeo.objects.get(choices='Google Drive Integration')\n drive_details_form=DriveDetailsForm()\n context={\n 'drive_details_form':drive_details_form,\n 'seo':seo\n }\n return render(request,template_name,context)\n def post(self,request,pk):\n template_name='file_details.html'\n seo=SiteSeo.objects.get(choices='Google Drive Integration')\n project = Project.objects.get(pk=pk)\n drive_details_form=DriveDetailsForm(request.POST,request.FILES)\n if drive_details_form.is_valid():\n #print(\"valid\")\n credential=request.FILES[\"credential\"]\n #print(\"the file entered is \",credential)\n file_id=drive_details_form.cleaned_data[\"file_id\"]\n file_id_encrypted = encrypt(file_id)\n drive_details,created = CustomerAPIDetails.objects.get_or_create(project=project,integration_choice='Google Drive',credentials=credential,file_id=file_id_encrypted)\n #print(\"created\")\n pk=drive_details.pk\n # credential_read = CustomerAPIDetails.objects.get(credentials=drive_details.credentials)\n drive_details.credentials.open('r')\n lines = drive_details.credentials.read()\n drive_details.credentials.close()\n file_lines_encrypted=encrypt(lines)\n #print('file_lines_encrypted',file_lines_encrypted)\n 
drive_details.credentials.open('w')\n drive_details.credentials.write(file_lines_encrypted)\n drive_details_update = CustomerAPIDetails.objects.filter(pk=pk).update(credentials = drive_details.credentials)\n drive_details.credentials.close()\n updated_credential_data = CustomerAPIDetails.objects.get(pk=pk)\n #print('updated_credential_data.token_file',updated_credential_data.token_file)\n updated_credential_data.credentials.open('r')\n lines = updated_credential_data.credentials.read()\n updated_credential_data.credentials.close()\n file_lines_decrypted=decrypt(lines)\n encrypted_file = open('decrypted_credentials.json','w')\n encrypted_file.write(file_lines_decrypted)\n encrypted_file.close()\n credentials = os.path.abspath(\"decrypted_credentials.json\")\n # If modifying these scopes, delete the file token.pickle.\n SCOPES = ['https://www.googleapis.com/auth/drive.file']\n\n \"\"\"Shows basic usage of the Drive v3 API.\n Prints the names and ids of the first 10 files the user has access to.\n \"\"\"\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n # if os.path.exists('token.pickle_drive'+request.user.username):\n # with open('token.pickle_drive'+request.user.username, 'rb') as token:\n # creds = pickle.load(token)\n # #print(creds)\n # if updated_credential_data.token_file:\n # #print(\"inside token file if condition\")\n # with open('token.pickle_drive'+request.user.username, 'wb') as token:\n # creds = token.write(updated_credential_data.token_file)\n # #print(creds)\n # else:\n # #print(\"token file not available\")\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n #print(\"inside first if\")\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n #print(\"place to enter the credentials file\")\n #print('credentials',credentials)\n flow = InstalledAppFlow.from_client_secrets_file(\n credentials, SCOPES)\n #print(flow)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle_drive'+request.user.username, 'wb') as token:\n pickle.dump(creds, token)\n with open('token.pickle_drive'+request.user.username, 'rb') as f:\n file_content = f.read()\n #print('file_content',file_content)\n drive_details_update = CustomerAPIDetails.objects.filter(pk=pk).update(token_file = file_content)\n\n\n os.remove('decrypted_credentials.json')\n os.remove('token.pickle_drive'+request.user.username)\n service = build('drive', 'v3', credentials=creds)\n file_id_decrypted = decrypt(file_id_encrypted)\n '''downloading the file using user's credential.json file and file id'''\n gdd.download_file_from_google_drive(file_id=file_id_decrypted,\n dest_path='./GoogleDrive_files.csv',\n unzip=True)\n #print(\"downloaded\")\n file= pd.read_csv('GoogleDrive_files.csv', encoding = \"ISO-8859-1\")\n\n res_df =reduce(lambda left,right: pd.merge(left,right,on=file, how='outer'), [file])\n #print(res_df.columns.to_list())\n res_json = file.to_json(orient='index')\n # #print(\"project\",res_json)\n\n projectjson, created = ProjectJsonStorage.objects.get_or_create(project=project, js = res_json)\n #print(projectjson)\n\n os.remove('GoogleDrive_files.csv')\n\n return redirect('/dashboard/')\n\nclass SheetDetailsView(CreateView):\n def get(self,request,pk):\n template_name='sheet_details.html'\n project = Project.objects.get(pk=pk)\n permission = 
identify_user_permission(project,request.user)\n user = User.objects.get(pk=request.user.pk)\n pk=str(project.pk)\n current_site = get_current_site(request)\n site_name = current_site.name\n domain = current_site.domain\n if domain.startswith('127.0.'):\n domain = 'https://'+domain\n else:\n domain = 'https://'+domain\n if ProjectEndPoint.objects.filter(project=project).exists():\n project_endpoints = ProjectEndPoint.objects.filter(project=project).order_by('name')\n else:\n project_endpoints=None\n\n customer=Customer.objects.get(user=request.user)\n if ProjectDashboard.objects.filter(Q(project=project) ).exists():\n # #print(\"project admin\")\n dashboard=ProjectDashboard.objects.filter(Q(project=project) ).order_by('-id')\n dashboard_count=ProjectDashboard.objects.filter(Q(project=project) ).count()\n else:\n dashboard = None\n dashboard_count = None\n sheet_details_form=SheetDetailsForm()\n url_sheet_form = SheetUrlForm()\n context={\n 'sheet_details_form':sheet_details_form,\n 'project':project,\n 'dashboard':dashboard,\n 'permission':permission,\n 'customer':customer,\n 'project_endpoints':project_endpoints,\n 'url_sheet_form':url_sheet_form\n }\n return render(request,template_name,context)\n def post(self,request,pk):\n template_name='sheet_details.html'\n project = Project.objects.get(pk=pk)\n sheet_details_form=SheetDetailsForm(request.POST,request.FILES)\n url_sheet_form = SheetUrlForm(request.POST,request.FILES)\n if sheet_details_form.is_valid():\n # #print(\"valid\")\n credential=request.FILES[\"credential\"]\n name = request.POST['sheet_name']\n\n spread_sheet_id=sheet_details_form.cleaned_data[\"spreadsheet_id\"]\n data_range = sheet_details_form.cleaned_data[\"data_range\"]\n sheet_details,created = CustomerAPIDetails.objects.get_or_create(name=name,project=project,integration_choice='Google Sheets',credentials=credential,file_id=spread_sheet_id,range=data_range)\n try:\n\n scope=['htps://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\n file = sheet_details.credentials.open()\n file_content = file.read()\n js_str = json.loads(file_content.decode('utf-8'))\n creadentials = ServiceAccountCredentials.from_json_keyfile_dict(js_str)\n gc = gspread.authorize(creadentials)\n wks = gc.open(spread_sheet_id).sheet1\n\n data = wks.get_all_records()\n except:\n context={\n 'project':project,\n 'sheet_details_form':sheet_details_form,\n 'msg':'Unable to Read The Sheet Please Check The Credentials/ File name'\n }\n return render(request,template_name,context)\n df = pd.DataFrame(data)\n df = df.replace('',np.nan)\n data = self.create_df(df,project,sheet_details,request)\n return JsonResponse(data, safe=False)\n elif url_sheet_form.is_valid():\n url = url_sheet_form.cleaned_data['url']\n name = request.POST['sheet_url_name']\n cron_frequency = request.POST['cron_frequency']\n header =request.POST.get('header',None)\n if header:\n sheet_details,created = CustomerAPIDetails.objects.get_or_create(name= name,project=project,integration_choice='Google Sheets',sheet_url=url ,range=cron_frequency,sheet_header=int(header))\n else:\n sheet_details,created = CustomerAPIDetails.objects.get_or_create(name= name,project=project,integration_choice='Google Sheets',sheet_url=url ,range=cron_frequency)\n try:\n if header:\n df = pd.read_html(url,encoding='utf8',index_col=0,header=int(header))\n else:\n df = pd.read_html(url,encoding='utf8',index_col=0,header=1)\n\n except:\n data={'error':\"There is a Error in Reading Sheet\"}\n return JsonResponse(data, safe=False)\n df = 
df[0]\n data = self.create_df(df,project,sheet_details,request)\n return JsonResponse(data, safe=False)\n\n # #print(\"the entered file is \",credential)\n # spread_sheet_id=sheet_details_form.cleaned_data[\"spreadsheet_id\"]\n # spread_sheet_id_encrypted = encrypt(spread_sheet_id)\n # sheet_details,created = CustomerAPIDetails.objects.get_or_create(project=project,integration_choice='Google Sheets',credentials=credential,file_id=spread_sheet_id_encrypted)\n # #print(\"created\")\n # sheet_details.credentials.open('r')\n # lines = sheet_details.credentials.read()\n # sheet_details.credentials.close()\n # file_lines_encrypted=encrypt(lines)\n # #print('file_lines_encrypted',file_lines_encrypted)\n # sheet_details.credentials.open('w')\n # sheet_details.credentials.write(file_lines_encrypted)\n # pk= sheet_details.pk\n # sheet_details_update = CustomerAPIDetails.objects.filter(pk=pk).update(credentials = sheet_details.credentials)\n # sheet_details.credentials.close()\n # updated_credential_data = CustomerAPIDetails.objects.get(pk=pk)\n # #print('updated_credential_data.token_file',updated_credential_data.token_file)\n # updated_credential_data.credentials.open('r')\n # lines = updated_credential_data.credentials.read()\n # updated_credential_data.credentials.close()\n # file_lines_decrypted=decrypt(lines)\n # encrypted_file = open('decrypted_credentials.json','w')\n # encrypted_file.write(file_lines_decrypted)\n # encrypted_file.close()\n # credentials = os.path.abspath(\"decrypted_credentials.json\")\n\n\n SCOPES = ['https://www.googleapis.com/auth/spreadsheets']\n\n # The ID and range of a sample spreadsheet.\n\n SAMPLE_RANGE_NAME = 'Sheet1'\n\n\n \"\"\"Shows basic usage of the Sheets API.\n Prints values from a sample spreadsheet.\n \"\"\"\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle_sheet'+request.user.username):\n with open('token.pickle_sheet'+request.user.username, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n credentials, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle_sheet'+request.user.username, 'wb') as token:\n pickle.dump(creds, token)\n os.remove('decrypted_credentials.json')\n service = build('sheets', 'v4', credentials=creds)\n spread_sheet_id_decrypted = decrypt(spread_sheet_id_encrypted)\n # Call the Sheets API\n result = service.spreadsheets().values().get(\n spreadsheetId=spread_sheet_id_decrypted, range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values')\n if not values:\n pass\n else:\n for row in values:\n str_value=''\n rows = str_value.join(row)\n #print(rows)\n file = open('googlesheets.csv','a')\n file.write(rows)\n file.close()\n df_file = pd.read_csv('googlesheets.csv', encoding = \"ISO-8859-1\")\n #print(df_file)\n res_df =reduce(lambda left,right: pd.merge(left,right,on=df_file, how='outer'), [df_file])\n #print(res_df.columns.to_list())\n res_json = df_file.to_json(orient='index')\n projectjson, created = ProjectJsonStorage.objects.get_or_create(project=project, js = res_json)\n #print(projectjson)\n os.remove('token.pickle')\n os.remove('googlesheets.csv')\n return 
redirect('/dashboard/')\n\n def create_df(self,df,project,sheet_details,request):\n # #print(\"the original df \",df)\n df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '').str.replace('.', '_').str.replace(',','_').str.replace('/','_').str.replace(':','').str.replace(':','')\n new_df_columns = df.columns.tolist()\n #print(\"new df columns\",new_df_columns)\n project_json = ProjectJsonStorage.objects.filter(project=project).order_by('id')[0]\n json_string = json.loads(project_json.js)\n json_df = pd.DataFrame(json_string)\n # #print(type(json_df.head()))\n\n transposed_df = json_df.transpose()\n rows = transposed_df.shape[0]\n\n metadata = ProjectMetaData. objects.get(project=project)\n columns = metadata.columns['columns']\n\n\n transposed_df_columns = transposed_df.columns.tolist()\n\n column_list = [x for x in transposed_df_columns if x in new_df_columns]\n\n\n\n new_columns = [x for x in new_df_columns if x not in columns ]\n\n for key, value in metadata.meta_data.items():\n if key in transposed_df_columns:\n if value['dtype'] == 'int':\n transposed_df[key] =pd.to_numeric(transposed_df[key])\n if key in new_df_columns:\n df[key] = pd.to_numeric(df[key])\n elif value['dtype'] == 'float':\n #print(\"final key\",key,transposed_df[key],transposed_df[key].dtypes)\n transposed_df[key] = pd.to_numeric(transposed_df[key])\n if key in new_df_columns:\n df[key] = pd.to_numeric(df[key])\n elif value['dtype'] == 'object':\n transposed_df[key] = transposed_df[key].astype(str)\n if key in new_df_columns:\n df[key] = df[key].astype(str)\n elif value['dtype'] == 'bool':\n transposed_df[key] = transposed_df[key].astype(bool)\n if key in new_df_columns:\n df[key] = df[key].astype(bool)\n elif value['dtype'] == 'DateTime':\n transposed_df[key] =pd.to_datetime(transposed_df[key])\n #print(\"the data key\",key)\n if key in new_df_columns:\n df[key] = pd.to_datetime(df[key])\n\n\n\n\n # truncate the y_df based on the mother df rows\n # #print(\"the dtypes \",transposed_df.dtypes,df.dtypes)\n if len(column_list)>0:\n result_df = reduce(lambda left,right: pd.merge(left,right,on=column_list, how='outer'), [transposed_df,df])\n else:\n try:\n result_df = reduce(lambda left,right: pd.merge(left,right,left_on= transposed_df_columns[0],right_on=new_df_columns[0],how='outer'), [transposed_df,df])\n except:\n result_df = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='outer'), [transposed_df,df])\n # #print(\"the df is \",result_df) erge the mother df with new rows ofss data\n result_columns = result_df.columns.tolist()\n # result_df = result_df.fillna(0)\n for colum in result_columns:\n try:\n result_df[column] = result_df[column].astype(float)\n except:\n pass\n #print(\"the result df \",result_df.head(70))\n result_df_rows = result_df.shape[0]\n df_rows = df.shape[0]\n #if there are new columns\n # if len(new_columns)>0:\n # result_df_rows = result_df.shape[0]\n # df_rows = df.shape[0]\n # #print(\"the aded df, and result_df rows\",df_rows,result_df_rows)\n\n # if result_df_rows == df_rows:\n # for column in new_columns:\n # #print(\"column is\",column)\n # result_df[column] = df[column]\n # #print(\"the result df is \",result_df)\n # else:\n # pass\n\n result_df.columns = result_df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '').str.replace('.', '_').str.replace(',','_').str.replace('/','_').str.replace(':','').str.replace(':','')\n #print(\"the final data 
frame\",result_df)\n result_df_columns = result_df.columns.tolist()\n delete_column_list = []\n custom_column_list = {}\n today = datetime.now()\n metadata = ProjectMetaData. objects.get(project=project)\n\n #print(\"the shappe before duplicate\",result_df.shape[0],transposed_df.shape[0],df.shape[0])\n\n result_df.drop_duplicates(subset = transposed_df_columns, inplace = True)\n\n # result_df.fillna(0,inplace=True)\n #print(\"the shappe aftrer duplicate\",result_df.tail(50),result_df.shape[0],transposed_df.shape[0],df.shape[0])\n\n if len(new_columns)>0:\n column_conbine_obj = ColumnCombine()\n dfs=[result_df]\n data = column_conbine_obj.list_combine_with_datatype_integration(dfs,new_columns)\n try:\n error = data['Error']\n data = {'error_msg':all_columns['error']}\n # #print(\"error\",data)\n return JsonResponse(data,safe=False)\n except:\n data = column_conbine_obj.list_combine_with_datatype_integration(dfs,new_columns)\n data['sheet_pk']=sheet_details.pk\n #print(\"the data for metadata collection is\",data)\n return data\n\n\n result_df = result_df.replace('',np.nan)\n fillna_obj = FillNan(result_df)\n for key, value in metadata.meta_data.items():\n\n if key in result_df_columns:\n #print(\"key present in metadata \",key)\n if value['handle_missing_data'] == 0:\n fillna_obj.fillnan_with_0(key)\n elif value['handle_missing_data'] == 'None':\n fillna_obj.fillnan_with_None_value(key)\n elif value['handle_missing_data'] == 'previous':\n fillna_obj.fillnan_with_previous_value(key)\n elif value['handle_missing_data'] == 'drop':\n l = [key]\n result_df = fillna_obj.drop_row(l)\n elif value['handle_missing_data'] == 'delete_column':\n co = key.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n delete_column_list.append(co)\n else:\n try:\n\n custom_column_list[key]=int(value['handle_missing_data'])\n except:\n custom_column_list[key]=value['handle_missing_data']\n\n #print(\"the original df after nan filling is\",result_df)\n if len(custom_column_list)>0:\n #print(\"the before nan fill \",len(result_df) - result_df.count())\n\n result_df.fillna(value=custom_column_list,inplace=True)\n #print(\"the result after nan fill\",result_df,custom_column_list)\n #print(\"after nan fill \",len(result_df) - result_df.count())\n\n if len(delete_column_list)>0:\n\n #print(\"the value \",delete_column_list)\n column_delete_obj = DeleteColumn(result_df)\n result_df = column_delete_obj.delete_column(delete_column_list)\n #print(\"columns \",result_df.columns.tolist())\n\n meta_data_obj = metadata.meta_data\n today = str(datetime.now())\n delete_column_list = []\n custom_column_list = {}\n\n\n for c in new_columns:\n column = {}\n ##print(\"the dataframe\",c)\n if df.dtypes[c] == np.int64:\n column['dtype'] = 'int'\n elif df.dtypes[c] == np.float64:\n column['dtype'] = 'float'\n elif df.dtypes[c] == np.object:\n column['dtype'] = 'object'\n elif df.dtypes[c] == np.bool:\n column['dtype']= 'bool'\n elif np.issubdtype(df[c].dtype, np.datetime64):\n column['dtype'] = \"DateTime\"\n df[c] = df[c].astype('str')\n\n missing_data = None\n missing_data_input=None\n try:\n\n cu = c+'_select'\n missing_data = request.POST[cu]\n except:\n pass\n try:\n cu = c+'_input'\n missing_data_input = request.POST[cu]\n except:\n pass\n if missing_data and missing_data == 'zero':\n column['handle_missing_data']= 0\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n 
fillna_obj.fillnan_with_0(c)\n ##print(\"nan filled df 0 \",c)\n elif missing_data and missing_data == 'None':\n column['handle_missing_data']= 'None'\n fillna_obj.fillnan_with_None_value(c)\n ##print(\"nan filled df None \",c)\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n elif missing_data and missing_data == 'previous':\n column['handle_missing_data']= 'previous'\n result_df = fillna_obj.fillnan_with_previous_value(c)\n ##print(\"nan filled df \",result_df)\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n elif missing_data and missing_data == 'drop':\n column['handle_missing_data']= 'drop'\n l = [c]\n fillna_obj.drop_row(l)\n ##print(\"nan filled df \",result_df)\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n elif missing_data and missing_data == 'delete_column':\n column['handle_missing_data']= missing_data\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= today\n column['column_deleted'] = True\n\n co = c.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n delete_column_list.append(co)\n #print(\"the columns are \",delete_column_list)\n elif missing_data_input:\n column['handle_missing_data']= missing_data_input\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n custom_column_list[c]=missing_data_input\n\n c_name = c.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n meta_data_obj[c_name]=column\n #print(\"the custom value is \",custom_column_list)\n if len(custom_column_list)>0:\n result_df = result_df.fillna(value=custom_column_list)\n #print(\"the result after nan fill\",result_df,delete_column_list)\n\n if len(delete_column_list)>0:\n\n #print(\"the value \",delete_column_list)\n column_delete_obj = DeleteColumn(result_df)\n result_df = column_delete_obj.delete_column(delete_column_list)\n #print(\"columns \",result_df.columns.tolist())\n\n \n result_columns = result_df.columns.tolist()\n c = {'columns':result_columns}\n if metadata.date_column_name:\n result_df[metadata.date_column_name] = result_df[metadata.date_column_name].astype(str)\n res_json=result_df.to_json(orient='index')\n rows = result_df.shape[0]\n columns = result_df.shape[1]\n df_head=result_df.head(5)\n df_tail = result_df.tail(5)\n df_head_json = df_head.to_json(orient='index')\n df_tail_json = df_tail.to_json(orient='index')\n project_json = ProjectJsonStorage.objects.filter(project=project).update(js=res_json,columns=c)\n project_json = ProjectJsonStorage.objects.filter(project=project).order_by('id')[0]\n c_list = {'columns':result_df_columns}\n #print(\"the \")\n update = ProjectMetaData.objects.filter(project=project).update(meta_data=meta_data_obj,columns=c_list)\n project_json_metadata = ProjectJsonStorageMetadata.objects.filter(project_json=project_json).update(rows=rows,columns=columns,head_json=df_head_json,tail_json=df_tail_json)\n pk = str(project.pk)\n data = {'pk':pk}\n return data\n\n\n''' 
sheetsNan filling '''\nclass SheetNnaFillView(CreateView):\n def get(self,request,pk):\n template_name='sheet_details.html'\n sheet_details_form=SheetDetailsForm()\n context={\n 'sheet_details_form':sheet_details_form\n }\n return render(request,template_name,context)\n def post(self,request,pk):\n template_name='sheet_details.html'\n project = Project.objects.get(pk=pk)\n sheet_details_form=SheetDetailsForm(request.POST,request.FILES)\n url_sheet_form = SheetUrlForm(request.POST,request.FILES)\n if sheet_details_form.is_valid():\n # #print(\"valid\")\n credential=request.FILES[\"credential\"]\n\n spread_sheet_id=sheet_details_form.cleaned_data[\"spreadsheet_id\"]\n data_range = sheet_details_form.cleaned_data[\"data_range\"]\n sheet_pk = request.POST.get('sheet_pk',None)\n sheet_details= CustomerAPIDetails.objects.get(pk=int(sheet_pk))\n try:\n\n scope=['htps://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\n file = sheet_details.credentials.open()\n file_content = file.read()\n js_str = json.loads(file_content.decode('utf-8'))\n creadentials = ServiceAccountCredentials.from_json_keyfile_dict(js_str)\n gc = gspread.authorize(creadentials)\n wks = gc.open(spread_sheet_id).sheet1\n\n data = wks.get_all_records()\n except:\n context={\n 'sheet_details_form':sheet_details_form,\n 'msg':'Unable to Read The Sheet Please Check The Credentials/ File name'\n }\n return render(request,template_name,context)\n\n df = pd.DataFrame(data)\n df = df.replace('',np.nan)\n data = self.update_df(df,project,sheet_details,request)\n return JsonResponse(data, safe=False)\n elif url_sheet_form.is_valid():\n url = url_sheet_form.cleaned_data['url']\n name = request.POST['sheet_url_name']\n cron_frequency = request.POST['cron_frequency']\n header =request.POST.get('header',None)\n\n sheet_pk = request.POST.get('sheet_pk',None)\n sheet_details= CustomerAPIDetails.objects.get(pk=int(sheet_pk))\n if header:\n df = pd.read_html(url,encoding='utf8',index_col=0,header=int(header))\n else:\n df = pd.read_html(url,encoding='utf8',index_col=0,header=1)\n df=df[0]\n data = self.update_df(df,project,sheet_details,request)\n return JsonResponse(data, safe=False)\n # #print(\"the original df \",df)\n \n\n # #print(\"the entered file is \",credential)\n # spread_sheet_id=sheet_details_form.cleaned_data[\"spreadsheet_id\"]\n # spread_sheet_id_encrypted = encrypt(spread_sheet_id)\n # sheet_details,created = CustomerAPIDetails.objects.get_or_create(project=project,integration_choice='Google Sheets',credentials=credential,file_id=spread_sheet_id_encrypted)\n # #print(\"created\")\n # sheet_details.credentials.open('r')\n # lines = sheet_details.credentials.read()\n # sheet_details.credentials.close()\n # file_lines_encrypted=encrypt(lines)\n # #print('file_lines_encrypted',file_lines_encrypted)\n # sheet_details.credentials.open('w')\n # sheet_details.credentials.write(file_lines_encrypted)\n # pk= sheet_details.pk\n # sheet_details_update = CustomerAPIDetails.objects.filter(pk=pk).update(credentials = sheet_details.credentials)\n # sheet_details.credentials.close()\n # updated_credential_data = CustomerAPIDetails.objects.get(pk=pk)\n # #print('updated_credential_data.token_file',updated_credential_data.token_file)\n # updated_credential_data.credentials.open('r')\n # lines = updated_credential_data.credentials.read()\n # updated_credential_data.credentials.close()\n # file_lines_decrypted=decrypt(lines)\n # encrypted_file = open('decrypted_credentials.json','w')\n # 
encrypted_file.write(file_lines_decrypted)\n # encrypted_file.close()\n # credentials = os.path.abspath(\"decrypted_credentials.json\")\n\n\n SCOPES = ['https://www.googleapis.com/auth/spreadsheets']\n\n # The ID and range of a sample spreadsheet.\n\n SAMPLE_RANGE_NAME = 'Sheet1'\n\n\n \"\"\"Shows basic usage of the Sheets API.\n Prints values from a sample spreadsheet.\n \"\"\"\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle_sheet'+request.user.username):\n with open('token.pickle_sheet'+request.user.username, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n credentials, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle_sheet'+request.user.username, 'wb') as token:\n pickle.dump(creds, token)\n os.remove('decrypted_credentials.json')\n service = build('sheets', 'v4', credentials=creds)\n spread_sheet_id_decrypted = decrypt(spread_sheet_id_encrypted)\n # Call the Sheets API\n result = service.spreadsheets().values().get(\n spreadsheetId=spread_sheet_id_decrypted, range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values')\n if not values:\n pass\n else:\n for row in values:\n str_value=''\n rows = str_value.join(row)\n #print(rows)\n file = open('googlesheets.csv','a')\n file.write(rows)\n file.close()\n df_file = pd.read_csv('googlesheets.csv', encoding = \"ISO-8859-1\")\n #print(df_file)\n res_df =reduce(lambda left,right: pd.merge(left,right,on=df_file, how='outer'), [df_file])\n #print(res_df.columns.to_list())\n res_json = df_file.to_json(orient='index')\n projectjson, created = ProjectJsonStorage.objects.get_or_create(project=project, js = res_json)\n #print(projectjson)\n os.remove('token.pickle')\n os.remove('googlesheets.csv')\n return redirect('/dashboard/')\n def update_df(self,df,project,sheet_details,request):\n df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '').str.replace('.', '_').str.replace(',','_').str.replace('/','_').str.replace(':','').str.replace(':','')\n new_df_columns = df.columns.tolist()\n #print(\"new df columns\",new_df_columns)\n project_json = ProjectJsonStorage.objects.filter(project=project).order_by('id')[0]\n json_string = json.loads(project_json.js)\n json_df = pd.DataFrame(json_string)\n # #print(type(json_df.head()))\n\n transposed_df = json_df.transpose()\n rows = transposed_df.shape[0]\n\n metadata = ProjectMetaData. 
objects.get(project=project)\n columns = metadata.columns['columns']\n\n\n transposed_df_columns = transposed_df.columns.tolist()\n\n column_list = [x for x in transposed_df_columns if x in new_df_columns]\n\n new_columns = [x for x in new_df_columns if x not in columns ]\n #print(\"new columns are\",new_columns)\n\n for key, value in metadata.meta_data.items():\n if key in transposed_df_columns:\n if value['dtype'] == 'int':\n transposed_df[key] =pd.to_numeric(transposed_df[key])\n if key in new_df_columns:\n df[key] = pd.to_numeric(df[key])\n elif value['dtype'] == 'float':\n #print(\"final key\",key,transposed_df[key],transposed_df[key].dtypes)\n transposed_df[key] = pd.to_numeric(transposed_df[key])\n if key in new_df_columns:\n df[key] = pd.to_numeric(df[key])\n elif value['dtype'] == 'object':\n transposed_df[key] = transposed_df[key].astype(str)\n if key in new_df_columns:\n df[key] = df[key].astype(str)\n elif value['dtype'] == 'bool':\n transposed_df[key] = transposed_df[key].astype(bool)\n if key in new_df_columns:\n df[key] = df[key].astype(bool)\n elif value['dtype'] == 'DateTime':\n transposed_df[key] =pd.to_datetime(transposed_df[key])\n #print(\"the data key\",key)\n if key in new_df_columns:\n df[key] = pd.to_datetime(df[key])\n\n if len(column_list)>0:\n result_df = reduce(lambda left,right: pd.merge(left,right,on=column_list, how='outer'), [transposed_df,df])\n else:\n try:\n result_df = reduce(lambda left,right: pd.merge(left,right,left_on= transposed_df_columns[0],right_on=new_df_columns[0],how='outer'), [transposed_df,df])\n except:\n result_df = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='outer'), [transposed_df,df])\n \n #print(\"the result df \",result_df.head(70))\n result_df_rows = result_df.shape[0]\n df_rows = df.shape[0]\n\n\n result_df.columns = result_df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '').str.replace('.', '_').str.replace(',','_').str.replace('/','_').str.replace(':','').str.replace(':','')\n # #print(\"the final data frame\",result_df)\n result_df_columns = result_df.columns.tolist()\n delete_column_list = []\n custom_column_list = {}\n today = datetime.now()\n metadata = ProjectMetaData. 
objects.get(project=project)\n\n # #print(\"the shappe before duplicate\",result_df.shape[0],transposed_df.shape[0],df.shape[0])\n\n result_df.drop_duplicates(subset = transposed_df_columns, inplace = True)\n\n\n\n fillna_obj = FillNan(result_df)\n for key, value in metadata.meta_data.items():\n\n if key in result_df_columns:\n #print(\"key present in metadata \",key)\n if value['handle_missing_data'] == 0:\n fillna_obj.fillnan_with_0(key)\n elif value['handle_missing_data'] == 'None':\n fillna_obj.fillnan_with_None_value(key)\n elif value['handle_missing_data'] == 'previous':\n fillna_obj.fillnan_with_previous_value(key)\n elif value['handle_missing_data'] == 'drop':\n l = [key]\n result_df = fillna_obj.drop_row(l)\n elif value['handle_missing_data'] == 'delete_column':\n co = key.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n delete_column_list.append(co)\n else:\n try:\n\n custom_column_list[key]=int(value['handle_missing_data'])\n except:\n custom_column_list[key]=value['handle_missing_data']\n \n\n if len(custom_column_list)>0:\n result_df.fillna(custom_column_list)\n\n\n if len(delete_column_list)>0:\n\n #print(\"the value \",delete_column_list)\n column_delete_obj = DeleteColumn(result_df)\n result_df = column_delete_obj.delete_column(delete_column_list)\n #print(\"columns \",result_df.columns.tolist())\n\n meta_data_obj = metadata.meta_data\n today = str(datetime.now())\n delete_column_list = []\n\n\n\n for c in new_columns:\n column = {}\n custom_column_list = {}\n ##print(\"the dataframe\",c)\n if df.dtypes[c] == np.int64:\n column['dtype'] = 'int'\n elif df.dtypes[c] == np.float64:\n column['dtype'] = 'float'\n elif df.dtypes[c] == np.object:\n column['dtype'] = 'object'\n elif df.dtypes[c] == np.bool:\n column['dtype']= 'bool'\n elif np.issubdtype(df[c].dtype, np.datetime64):\n column['dtype'] = \"DateTime\"\n df[c] = df[c].astype('str')\n\n missing_data = None\n missing_data_input=None\n try:\n\n cu = c+'_select'\n missing_data = request.POST[cu]\n except:\n pass\n try:\n cu = c+'_input'\n missing_data_input = request.POST[cu]\n except:\n pass\n if missing_data and missing_data == 'zero':\n column['handle_missing_data']= 0\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n fillna_obj.fillnan_with_0(c)\n ##print(\"nan filled df 0 \",c)\n elif missing_data and missing_data == 'None':\n column['handle_missing_data']= 'None'\n fillna_obj.fillnan_with_None_value(c)\n ##print(\"nan filled df None \",c)\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n elif missing_data and missing_data == 'previous':\n column['handle_missing_data']= 'previous'\n result_df = fillna_obj.fillnan_with_previous_value(c)\n ##print(\"nan filled df \",result_df)\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n elif missing_data and missing_data == 'drop':\n column['handle_missing_data']= 'drop'\n l = [c]\n fillna_obj.drop_row(l)\n ##print(\"nan filled df \",result_df)\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n elif missing_data and missing_data == 'delete_column':\n 
column['handle_missing_data']= missing_data\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= today\n column['column_deleted'] = True\n\n co = c.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n delete_column_list.append(co)\n #print(\"the columns are \",delete_column_list)\n elif missing_data_input:\n column['handle_missing_data']= missing_data_input\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n co = c.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n try:\n custom_column_list[co]=int(missing_data_input)\n except:\n custom_column_list[co]=missing_data_input\n result_df = result_df.fillna(custom_column_list)\n #print(\"in for \",result_df[co])\n\n\n\n c_name = c.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n meta_data_obj[c_name]=column\n #print(\"meta_data_obj[c_name]\",meta_data_obj[c_name])\n #print(\"custom columns list\",custom_column_list)\n if len(custom_column_list)>0:\n result_df = result_df.fillna(custom_column_list)\n # #print(\"the result after nan fill\",result_df,delete_column_list)\n\n if len(delete_column_list)>0:\n\n ###print(\"the value \",delete_column_list)\n column_delete_obj = DeleteColumn(result_df)\n result_df = column_delete_obj.delete_column(delete_column_list)\n\n\n\n result_columns = result_df.columns.tolist()\n c = {'columns':result_columns}\n if metadata.date_column_name:\n result_df[metadata.date_column_name] = result_df[metadata.date_column_name].astype(str)\n res_json=result_df.to_json(orient='index')\n rows = result_df.shape[0]\n columns = result_df.shape[1]\n df_head=result_df.head(5)\n df_tail = result_df.tail(5)\n df_head_json = df_head.to_json(orient='index')\n df_tail_json = df_tail.to_json(orient='index')\n project_json = ProjectJsonStorage.objects.filter(project=project).update(js=res_json,columns=c)\n project_json = ProjectJsonStorage.objects.filter(project=project).order_by('id')[0]\n c_list = {'columns':result_df_columns}\n #print(\"the \")\n update = ProjectMetaData.objects.filter(project=project).update(meta_data=meta_data_obj,columns=c_list)\n project_json_metadata = ProjectJsonStorageMetadata.objects.filter(project_json=project_json).update(rows=rows,columns=columns,head_json=df_head_json,tail_json=df_tail_json)\n pk = str(project.pk)\n data = {'pk':pk}\n return data\n\n'''Start of class to receive dropbox details'''\nclass DropBoxDetailsView(CreateView):\n def get(self,request,pk):\n template_name='dropbox_details.html'\n #print('pk',pk)\n seo=SiteSeo.objects.get(choices='DropBox Integration')\n project = Project.objects.get(pk=pk)\n projects_delete = Project.objects.filter(Q(admin_user=user) and Q(delete_datetime__isnull=False) ).count()\n #print(\"project count \",projects_delete)\n if projects_delete > 0:\n projects_delete = True\n else:\n projects_delete = False\n permission = identify_user_permission(project,request.user)\n user = User.objects.get(pk=request.user.pk)\n pk=str(project.pk)\n current_site = get_current_site(request)\n site_name = current_site.name\n domain = current_site.domain\n if domain.startswith('127.0.'):\n domain = 'https://'+domain\n else:\n domain = 'https://'+domain\n if 
ProjectEndPoint.objects.filter(project=project).exists():\n project_endpoints = ProjectEndPoint.objects.filter(project=project).order_by('name')\n else:\n project_endpoints=None\n\n customer=Customer.objects.get(user=request.user)\n if ProjectDashboard.objects.filter(Q(project=project) ).exists():\n # #print(\"project admin\")\n dashboard=ProjectDashboard.objects.filter(Q(project=project) ).order_by('-id')\n dashboard_count=ProjectDashboard.objects.filter(Q(project=project) ).count()\n else:\n dashboard = None\n dashboard_count = None\n dropbox_details_form=DropboxDetailsForm()\n context={\n 'dropbox_details_form':dropbox_details_form,\n 'seo':seo,\n 'project':project,\n 'dashboard':dashboard,\n 'permission':permission,\n 'customer':customer,\n 'project_endpoints':project_endpoints,\n 'projects_delete':projects_delete,\n }\n return render(request,template_name,context)\n def post(self,request,pk):\n template_name='dropbox_details.html'\n project=Project.objects.get(pk=pk)\n #print(project)\n seo=SiteSeo.objects.get(choices='DropBox Integration')\n dropbox_details_form=DropboxDetailsForm(request.POST,request.FILES)\n if dropbox_details_form.is_valid():\n #print(\"valid\")\n access_token = dropbox_details_form.cleaned_data[\"access_token\"]\n access_token_encrypted=encrypt(access_token)\n #print('access_token_encrypted',access_token_encrypted)\n path_name = dropbox_details_form.cleaned_data[\"path_name\"]\n data_range=dropbox_details_form.cleaned_data[\"data_range\"]\n #print(\"path_name\",path_name)\n dropbox_details, created = CustomerAPIDetails.objects.get_or_create(project=project,access_token=access_token_encrypted,integration_choice='DropBox',file_id=path_name,range=data_range)\n #print(dropbox_details)\n access_token=dropbox_details.access_token\n access_token_decrypted=decrypt(access_token)\n #print('access_token_decrypted',access_token_decrypted)\n path_name=dropbox_details.file_id\n splitted_name=path_name.split('.')\n dbx = dropbox.Dropbox(access_token_decrypted)\n if splitted_name[1] == 'csv':\n file_name = 'drop_box'+str(project.pk)+str(request.user.pk)+'.csv'\n file_exists = default_storage.exists(file_name)\n if file_exists:\n default_storage.delete(file_name)\n file_id = default_storage.open(file_name,'wb')\n metadata, res = dbx.files_download(path_name)\n file_id.write(res.content)\n file_id.close()\n path = default_storage.path(file_name)\n\n file= pd.read_csv(path , encoding = \"ISO-8859-1\")\n elif splitted_name[1] == 'xls':\n file_name = 'drop_box'+str(project.pk)+str(request.user.pk)+'.xls'\n file_exists = default_storage.exists(file_name)\n if file_exists:\n default_storage.delete(file_name)\n file_id = default_storage.open(file_name,'wb')\n metadata, res = dbx.files_download(path_name)\n file_id.write(res.content)\n file_id.close()\n path = default_storage.path(file_name)\n file = pd.read_excel(path, encoding='utf-8')\n elif splitted_name[1] == 'xlsx':\n file_name = 'drop_box'+str(project.pk)+str(request.user.pk)+'.xls'\n file_exists = default_storage.exists(file_name)\n if file_exists:\n default_storage.delete(file_name)\n file_id = default_storage.open(file_name,'wb')\n metadata, res = dbx.files_download(path_name)\n file_id.write(res.content)\n #print(\"the contetnt of the file\",res.content)\n file_id.close()\n file_id = default_storage.open(file_name,'rb')\n file_content = file_id.read()\n s=str(file_content)\n\n data = StringIO(s)\n file = pd.read_csv(data, encoding='utf-8')\n # res_df =reduce(lambda left,right: pd.merge(left,right,on=file, how='outer'), 
[file])\n # # #print(\"the result\",res_df)\n # #print(res_df.columns.to_list())\n # res_json = file.to_json(orient='index')\n # #print(\"project\",res_json)\n\n # projectjson, created = ProjectJsonStorage.objects.get_or_create(project=project, js = res_json)\n # #print(projectjson)\n # if splitted_name[1] == 'csv':\n # os.remove('googlesheets.csv')\n # elif splitted_name[1] == 'xls':\n # os.remove('googlesheets.xls')\n # elif splitted_name[1] == 'xlsx':\n # os.remove('googlesheets.xlsx')\n # return redirect('/dashboard/')\n else:\n context={\n 'dropbox_details_form':dropbox_details_form,\n 'seo':seo\n }\n return render(request,template_name,context)\n\n'''Start of class to receive dropbox details'''\nclass OneDriveDetailsView(CreateView):\n def get(self,request,pk):\n template_name='onedrive_details.html'\n onedrive_details_form=OneDriveDetailsForm()\n context={\n 'onedrive_details_form':onedrive_details_form\n }\n return render(request,template_name,context)\n def post(self,request,pk):\n template_name='onedrive_details.html'\n project=Project.objects.get(pk=pk)\n #print(project)\n onedrive_details_form=OneDriveDetailsForm(request.POST,request.FILES)\n if onedrive_details_form.is_valid():\n #print(\"valid\")\n client_id = onedrive_details_form.cleaned_data[\"client_id\"]\n client_secret_key = onedrive_details_form.cleaned_data[\"client_secret_key\"]\n onedrive_details, created = CustomerAPIDetails.objects.get_or_create(project=project,client_id=client_id,integration_choice='OneDrive',client_secret_key=client_secret_key)\n #print(onedrive_details)\n secret_key=onedrive_details.client_secret_key\n client_id=onedrive_details.client_id\n #print(client_id)\n redirect_uri = 'http://localhost:8080/'\n client_secret = secret_key\n\n api_base_url='https://api.onedrive.com/v1.0/'\n scopes=['onedrive.readwrite']\n\n http_provider = onedrivesdk.HttpProvider()\n auth_provider = onedrivesdk.AuthProvider(\n http_provider=http_provider,\n client_id=client_id,\n scopes=scopes)\n\n client = onedrivesdk.OneDriveClient(api_base_url, auth_provider, http_provider)\n auth_url = client.auth_provider.get_auth_url(redirect_uri)\n # Ask for the code\n #print('Paste this URL into your browser, approve the app\\'s access.')\n #print('Copy everything in the address bar after \"code=\", and paste it below.')\n #print(auth_url)\n code = raw_input('Paste code here: ')\n #print(client)\n return redirect('/dashboard/')\n\nclass SegmentDetailsView(CreateView):\n def get(self,request):\n template_name='segment_details.html'\n user=request.user\n analytics.identify(user.username, {\n 'email': user.email,\n 'name': user.first_name,\n\n })\n analytics.track(user.username, 'Signed Up', {\n\n })\n context={\n 'analytics':analytics\n }\n\n return render(request,template_name,context)\n# class SegmentReceive(View):\n# def post(self,request):\n# template_name='segment_details.html'\n# #print('segment_receive')\n# if request.method == 'POST':\n# received_json_data=json.loads(request.POST['data'])\n# #print(received_json_data)\n# context={\n# 'received_json_data':received_json_data\n# }\n# return render(request,template_name,context)\n\n\ndef encrypt(txt):\n try:# convert integer etc to string first\n txt = str(txt)\n # get the key from settings\n cipher_suite = Fernet(key) # key should be byte\n # #input should be byte, so convert the text to byte\n encrypted_text = cipher_suite.encrypt(txt.encode('ascii'))\n # encode to urlsafe base64 format\n encrypted_text = base64.urlsafe_b64encode(encrypted_text).decode(\"ascii\")\n return 
encrypted_text\n except:\n pass\n\ndef decrypt(txt):\n #print('key',key)\n # base64 decode\n txt = base64.urlsafe_b64decode(txt)\n cipher_suite = Fernet(key)\n decoded_text = cipher_suite.decrypt(txt).decode(\"ascii\")\n return decoded_text\n\n\ndef update_filename(instance, filename):\n format = str(instance.project.admin_user ) + \"_\" + instance.project.name\n return format\n\n\n\nclass ApiDataRead(GroupRequiredMixin,View):\n login_url = '/customer/login/'\n redirect_field_name = 'redirect_to'\n template_name='dashboard/index.html'\n\n def dispatch(self, request, *args, **kwargs):\n pk = kwargs['pk']\n project = Project.objects.get(pk=pk)\n pk= str(project.pk)\n self.login_url = '/customer/login/'\n self.redirect_field_name = 'redirect_to'\n self.template_name='dashboard/index.html'\n admin_name = pk+\"_Admin\"\n #print(type(admin_name), admin_name)\n admin_encode_name = admin_name.encode()\n admin_unicode_name = admin_encode_name.decode('utf-8')\n write_name = pk+\"_Write\"\n #print(type(write_name), write_name)\n write_encode_name = write_name.encode()\n write_unicode_name = write_encode_name.decode('utf-8')\n read_name = pk+\"_Read\"\n #print(type(write_name), write_name)\n read_encode_name = read_name.encode()\n read_unicode_name = read_encode_name.decode('utf-8')\n delete_name = pk+\"_Delete\"\n delete_encode_name = delete_name.encode()\n delete_unicode_name = delete_encode_name.decode('utf-8')\n l= []\n self.group_required= admin_unicode_name\n #print(\"the self of dispatcher\",self.group_required)\n\n return super(ApiDataRead, self).dispatch(request, *args, **kwargs)\n\n def get(self,request,pk):\n #print(\"get of api data\")\n template_name = 'api_data.html'\n\n project = Project.objects.get(pk=pk)\n permission = identify_user_permission(project,request.user)\n if ApiDataGet.objects.filter(project=project):\n api_data = ApiDataGet.objects.get(project=project)\n form = ApiDataForm(initial={'api':api_data.api,'basic_token':api_data.basic_key,'frequency':api_data.frequency})\n else:\n form = ApiDataForm()\n customer = Customer.objects.get(user=request.user)\n\n if ProjectDashboard.objects.filter(project=project ).exists():\n # #print(\"project admin\")\n dashboard=ProjectDashboard.objects.filter(project=project ).order_by('-id')\n dashboard_count=ProjectDashboard.objects.filter(project=project) .count()\n else:\n dashboard= None\n dashboard_count= None\n if ProjectEndPoint.objects.filter(project=project).exists():\n project_endpoints = ProjectEndPoint.objects.filter(project=project).order_by('name')\n else:\n project_endpoints=None\n #print(\"get of api-data\")\n return render(request,template_name,{'permission':permission,'customer':customer,'form':form,'dashboard':dashboard,'dashboard_count':dashboard_count,'project_endpoints':project_endpoints,'project':project})\n\n def post(self,request,pk):\n project= Project.objects.get(pk=pk)\n form = ApiDataForm(request.POST)\n if form.is_valid():\n\n api = form.cleaned_data['api']\n basic_token = form.cleaned_data['basic_token']\n frequency = form.cleaned_data['frequency']\n name = request.POST['api_name']\n api_data, created = CustomerAPIDetails.objects.get_or_create(name=name,project=project,api=api,token=basic_token,range=frequency,integration_choice='API')\n if api_data.token:\n\n\n headers = {'content-type' : 'application/json',\n 'Authorization':api_data.token}\n\n JSONContent = requests.get(api_data.api,\n\n headers=headers, verify=True)\n else:\n #print(\"the api\",api)\n headers = {'content-type' : 'application/json',\n }\n\n 
JSONContent = requests.get(api_data.api,\n headers=headers, verify=True)\n if 'error' not in JSONContent:\n data_str = JSONContent.text\n data_json =json.loads(data_str)\n\n try:\n df = pd.json_normalize(data_json['results'])\n except:\n try:\n\n df = pd.json_normalize(data_json['data'])\n except:\n df = pd.json_normalize(data_json)\n\n df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '').str.replace('.', '_').str.replace(',','_').str.replace('/','_')\n new_df_columns = df.columns.tolist()\n\n project_json = ProjectJsonStorage.objects.filter(project=project).order_by('id')[0]\n json_string = json.loads(project_json.js)\n json_df = pd.DataFrame(json_string)\n # #print(type(json_df.head()))\n\n transposed_df = json_df.transpose()\n rows = transposed_df.shape[0]\n\n metadata = ProjectMetaData. objects.get(project=project)\n columns = metadata.columns['columns']\n\n\n transposed_df_columns = transposed_df.columns.tolist()\n y_df = pd.DataFrame()\n # separate the mother df columns df into y_df\n\n try:\n for column in columns:\n y_df[column] = df[column]\n # #print(\"the columns are\",columns,new_df_columns)\n \n \n \n except:\n pass\n new_columns = [x for x in new_df_columns if x not in columns ]\n column_list = [x for x in transposed_df_columns if x in new_df_columns]\n if len(column_list)>0:\n result_df = reduce(lambda left,right: pd.merge(left,right,on=column_list, how='outer'), [transposed_df,df])\n else:\n try:\n result_df = reduce(lambda left,right: pd.merge(left,right,left_on= transposed_df_columns[0],right_on=new_df_columns[0],how='outer'), [transposed_df,df])\n except:\n result_df = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='outer'), [transposed_df,df])\n \n # #print(\"the df is \",result_df)\n result_df_rows = result_df.shape[0]\n df_rows = df.shape[0]\n #if there are new columns\n\n if len(new_columns)>0:\n column_conbine_obj = ColumnCombine()\n dfs=[result_df]\n data = column_conbine_obj.list_combine_with_datatype_integration(dfs,new_columns)\n try:\n error = data['Error']\n data = {'error_msg':all_columns['error']}\n # #print(\"error\",data)\n return JsonResponse(data,safe=False)\n except:\n data = column_conbine_obj.list_combine_with_datatype_integration(dfs,new_columns)\n data['api_pk']=api_data.pk\n #print(\"the data for metadata collection is\",data)\n return JsonResponse(data,safe=False)\n\n\n else:\n pass\n result_df.columns = result_df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '').str.replace('.', '_').str.replace(',','_').str.replace('/','_').str.replace(':','').str.replace(':','')\n\n result_df_columns = result_df.columns.tolist()\n fillna_obj = FillNan(result_df)\n delete_column_list = []\n custom_column_list = {}\n today = datetime.now()\n metadata = ProjectMetaData. 
objects.get(project=project)\n meta_data_obj = metadata.meta_data\n\n\n\n for key, value in metadata.meta_data.items():\n\n if key in result_df_columns:\n #print(\"key present in metadata \",key)\n if result_df.dtypes[key] == np.int64:\n dtype = 'int'\n elif result_df.dtypes[key] == np.float64:\n dtype = 'float'\n elif result_df.dtypes[key] == np.object:\n dtype = 'object'\n if value['dtype'] == dtype:\n if value['handle_missing_data']== 'drop':\n l = [key]\n result_df[l].dropna(subset=l)\n # result_df = fillna_obj.drop_row(l)\n elif value['handle_missing_data'] == 0:\n result_df[column].fillna(0, inplace=True)\n # result_df = fillna_obj.fillnan_with_0(key)\n #print(\"after filling nan\",result_df)\n elif value['handle_missing_data'] == 'None':\n\n # result_df = fillna_obj.fillnan_with_None_value(key)\n result_df[key].fillna('None', inplace=True)\n #print(\"after filling nan\",result_df)\n\n elif value['handle_missing_data'] == 'previous':\n result_df[column].fillna(method='ffill', inplace=True)\n # result_df = fillna_obj.fillnan_with_previous_value(key)\n #print(\"after filling nan\",result_df)\n\n\n\n elif value['handle_missing_data'] == 'delete_column':\n delete_column_list.append(key)\n else:\n try:\n\n custom_column_list[key]=int(value['handle_missing_data'])\n except:\n custom_column_list[key]=value['handle_missing_data']\n else:\n if value['handle_missing_data']== 'drop':\n l = [key]\n result_df[l].dropna(subset=l)\n # result_df = fillna_obj.drop_row(l)\n elif value['handle_missing_data'] == 0:\n result_df[column].fillna(0, inplace=True)\n # result_df = fillna_obj.fillnan_with_0(key)\n #print(\"after filling nan\",result_df)\n elif value['handle_missing_data'] == 'None':\n\n # result_df = fillna_obj.fillnan_with_None_value(key)\n result_df[key].fillna('None', inplace=True)\n #print(\"after filling nan\",result_df)\n\n elif value['handle_missing_data'] == 'previous':\n result_df[key].fillna(method='ffill', inplace=True)\n # result_df = fillna_obj.fillnan_with_previous_value(key)\n #print(\"after filling nan\",result_df)\n\n\n\n elif value['handle_missing_data'] == 'delete_column':\n delete_column_list.append(key)\n else:\n try:\n\n custom_column_list[key]=int(value['handle_missing_data'])\n except:\n custom_column_list[key]=value['handle_missing_data']\n\n #print(\"the final new_columns\",new_columns)\n for c in new_columns:\n d= {}\n if result_df.dtypes[c] == np.int64:\n\n d['dtype'] = 'int'\n\n elif result_df.dtypes[c] == np.float64:\n\n d['dtype'] = 'float'\n elif result_df.dtypes[c] == np.object:\n\n d['dtype'] = 'object'\n\n elif df.dtypes[c] == np.bool:\n d['dtype']= 'bool'\n elif np.issubdtype(result_df[c].dtype, np.datetime64):\n d['dtype'] = \"DateTime\"\n df[c] = df[c].astype('str')\n\n\n\n missing_data = None\n missing_data_input=None\n try:\n # reading mising data handling\n ###print(\"the request post is\",request.POST)\n cu = c+'_select'\n missing_data = request.POST[cu]\n except:\n pass\n try:\n # reading mising data handling\n ###print(\"the request post is\",request.POST)\n cu = c+'_input'\n missing_data_input = request.POST[cu]\n except:\n pass\n if missing_data and missing_data == 'zero':\n d['handle_missing_data']= 0\n today = str(datetime.now().date())\n ####print(\"type of date\",type(today))\n d['start_date']= today\n d['end_date']= ''\n result_df = fillna_obj.fillnan_with_0(c)\n #int(\"nan filled df 0 \",c)\n elif missing_data and missing_data == 'None':\n d['handle_missing_data']= 'None'\n result_df = fillna_obj.fillnan_with_None_value(c)\n ####print(\"nan 
filled df None \",c)\n today = str(datetime.now().date())\n ####print(\"type of date\",type(today))\n d['start_date']= today\n d['end_date']= ''\n d['column_deleted'] = False\n elif missing_data and missing_data == 'previous':\n d['handle_missing_data']= 'previous'\n result_df = fillna_obj.fillnan_with_previous_value(c)\n ####print(\"nan filled df \",result_df)\n today = str(datetime.now().date())\n ####print(\"type of date\",type(today))\n d['start_date']= today\n d['end_date']= ''\n d['column_deleted'] = False\n elif missing_data and missing_data == 'drop':\n d['handle_missing_data']= 'drop'\n l = [c]\n result_df = fillna_obj.drop_row(l)\n ####print(\"nan filled df \",result_df)\n today = str(datetime.now().date())\n ####print(\"type of date\",type(today))\n d['start_date']= today\n d['end_date']= ''\n d['column_deleted'] = False\n elif missing_data and missing_data == 'delete_column':\n d['handle_missing_data']= missing_data\n today = str(datetime.now().date())\n ####print(\"type of date\",type(today))\n d['start_date']= today\n d['end_date']= today\n d['column_deleted'] = True\n\n co = c.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n delete_column_list.append(co)\n ##print(\"the columns are \",delete_column_list)\n elif missing_data_input:\n d['handle_missing_data']= missing_data_input\n today = str(datetime.now().date())\n ####print(\"type of date\",type(tod\n d['start_date']= today\n d['end_date']= ''\n d['column_deleted'] = False\n custom_column_list[c]=missing_data_input\n\n c_name = c.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n meta_data_obj[c_name]=d\n\n\n\n\n #print(\"the cu\",custom_column_list)\n if len(custom_column_list)>0:\n #print(\"the cu\",custom_column_list)\n\n result_df = result_df.fillna(value=custom_column_list)\n #print(\"the result after nan fill\",result_df,custom_column_list)\n\n if len(delete_column_list)>0:\n\n #print(\"the value \",delete_column_list)\n column_delete_obj = DeleteColumn(result_df)\n result_df = column_delete_obj.delete_column(delete_column_list)\n #print(\"columns \",result_df.columns.tolist())\n\n\n\n result_columns = result_df.columns.tolist()\n c = {'columns':result_columns}\n res_json=result_df.to_json(orient='index')\n rows = result_df.shape[0]\n columns = result_df.shape[1]\n df_head=result_df.head(5)\n df_tail = result_df.tail(5)\n df_head_json = df_head.to_json(orient='index')\n df_tail_json = df_tail.to_json(orient='index')\n project_json = ProjectJsonStorage.objects.filter(project=project).update(js=res_json,columns=c)\n project_json = ProjectJsonStorage.objects.filter(project=project).order_by('id')[0]\n c_list = {'columns':result_df_columns}\n #print(\"the \")\n update = ProjectMetaData.objects.filter(project=project).update(meta_data=meta_data_obj,columns=c_list)\n project_json_metadata = ProjectJsonStorageMetadata.objects.filter(project_json=project_json).update(rows=rows,columns=columns,head_json=df_head_json,tail_json=df_tail_json)\n pk = str(project.pk)\n\n return redirect('/single-project/'+pk+'/')\n pk = str(pk)\n api-data\n\n\n return redirect('/single-project/'+pk+'/')\n\nclass ApiNnaFillView(CreateView):\n def get(self,request,pk):\n template_name='sheet_details.html'\n sheet_details_form=SheetDetailsForm()\n context={\n 'sheet_details_form':sheet_details_form\n }\n return render(request,template_name,context)\n def post(self,request,pk):\n project= 
Project.objects.get(pk=pk)\n form = ApiDataForm(request.POST)\n if form.is_valid():\n\n api = form.cleaned_data['api']\n basic_token = form.cleaned_data['basic_token']\n frequency = form.cleaned_data['frequency']\n name = request.POST['api_name']\n api_pk = request.POST['api_pk']\n api_data = CustomerAPIDetails.objects.get(pk=int(api_pk))\n \n if api_data.token:\n\n\n headers = {'content-type' : 'application/json',\n 'Authorization':api_data.token}\n\n JSONContent = requests.get(api_data.api,\n\n headers=headers, verify=True)\n else:\n #print(\"the api\",api)\n headers = {'content-type' : 'application/json',\n }\n\n JSONContent = requests.get(api_data.api,\n headers=headers, verify=True)\n if 'error' not in JSONContent:\n data_str = JSONContent.text\n data_json =json.loads(data_str)\n\n try:\n df = pd.json_normalize(data_json['results'])\n except:\n try:\n\n df = pd.json_normalize(data_json['data'])\n except:\n df = pd.json_normalize(data_json)\n\n df = df.replace('',np.nan)\n # #print(\"the original df \",df)\n df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '').str.replace('.', '_').str.replace(',','_').str.replace('/','_').str.replace(':','').str.replace(':','')\n new_df_columns = df.columns.tolist()\n #print(\"new df columns\",new_df_columns)\n project_json = ProjectJsonStorage.objects.filter(project=project).order_by('id')[0]\n json_string = json.loads(project_json.js)\n json_df = pd.DataFrame(json_string)\n # #print(type(json_df.head()))\n\n transposed_df = json_df.transpose()\n rows = transposed_df.shape[0]\n\n metadata = ProjectMetaData. objects.get(project=project)\n columns = metadata.columns['columns']\n\n\n transposed_df_columns = transposed_df.columns.tolist()\n\n column_list = [x for x in transposed_df_columns if x in new_df_columns]\n\n new_columns = [x for x in new_df_columns if x not in columns ]\n #print(\"new columns are\",new_columns)\n\n for key, value in metadata.meta_data.items():\n if key in transposed_df_columns:\n if value['dtype'] == 'int':\n transposed_df[key] =pd.to_numeric(transposed_df[key])\n if key in new_df_columns:\n df[key] = pd.to_numeric(df[key])\n elif value['dtype'] == 'float':\n #print(\"final key\",key,transposed_df[key],transposed_df[key].dtypes)\n transposed_df[key] = pd.to_numeric(transposed_df[key])\n if key in new_df_columns:\n df[key] = pd.to_numeric(df[key])\n elif value['dtype'] == 'object':\n transposed_df[key] = transposed_df[key].astype(str)\n if key in new_df_columns:\n df[key] = df[key].astype(str)\n elif value['dtype'] == 'bool':\n transposed_df[key] = transposed_df[key].astype(bool)\n if key in new_df_columns:\n df[key] = df[key].astype(bool)\n elif value['dtype'] == 'DateTime':\n transposed_df[key] =pd.to_datetime(transposed_df[key])\n #print(\"the data key\",key)\n if key in new_df_columns:\n df[key] = pd.to_datetime(df[key])\n\n if len(column_list)>0:\n result_df = reduce(lambda left,right: pd.merge(left,right,on=column_list, how='outer'), [transposed_df,df])\n else:\n try:\n result_df = reduce(lambda left,right: pd.merge(left,right,left_on= transposed_df_columns[0],right_on=new_df_columns[0],how='outer'), [transposed_df,df])\n except:\n result_df = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='outer'), [transposed_df,df])\n # #print(\"the df is \",result_df) erge the mother df with new rows ofss data\n\n # result_df = result_df.fillna(0)\n #print(\"the result df \",result_df.head(70))\n result_df_rows = result_df.shape[0]\n df_rows = 
df.shape[0]\n # if there are new columns\n # if len(new_columns)>0:\n # result_df_rows = result_df.shape[0]\n # df_rows = df.shape[0]\n # #print(\"the aded df, and result_df rows\",df_rows,result_df_rows)\n\n # if result_df_rows == df_rows:\n # for column in new_columns:\n # #print(\"column is\",column)\n # result_df[column] = df[column]\n # #print(\"the result df is \",result_df)\n # else:\n # pass\n\n result_df.columns = result_df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '').str.replace('.', '_').str.replace(',','_').str.replace('/','_').str.replace(':','').str.replace(':','')\n # #print(\"the final data frame\",result_df)\n result_df_columns = result_df.columns.tolist()\n delete_column_list = []\n custom_column_list = {}\n today = datetime.now()\n metadata = ProjectMetaData. objects.get(project=project)\n\n # #print(\"the shappe before duplicate\",result_df.shape[0],transposed_df.shape[0],df.shape[0])\n\n result_df.drop_duplicates(subset = transposed_df_columns, inplace = True)\n\n # result_df.fillna(0,inplace=True)\n # #print(\"the shappe aftrer duplicate\",result_df.tail(50),result_df.shape[0],transposed_df.shape[0],df.shape[0])\n\n # if len(new_columns)>0:\n # column_conbine_obj = ColumnCombine()\n # data = column_conbine_obj.list_combine_with_datatype_integration(dfs,new_columns)\n # try:\n # error = data['Error']\n # data = {'error_msg':all_columns['error']}\n # # #print(\"error\",data)\n # return JsonResponse(data,safe=False)\n # except:\n # data = column_conbine_obj.list_combine_with_datatype_integration(dfs,new_columns)\n # #print(\"the data for metadata collection is\",data)\n # return JsonResponse(data,safe=False)\n\n\n fillna_obj = FillNan(result_df)\n for key, value in metadata.meta_data.items():\n\n if key in result_df_columns:\n #print(\"key present in metadata \",key)\n if value['handle_missing_data'] == 0:\n fillna_obj.fillnan_with_0(key)\n elif value['handle_missing_data'] == 'None':\n fillna_obj.fillnan_with_None_value(key)\n elif value['handle_missing_data'] == 'previous':\n fillna_obj.fillnan_with_previous_value(key)\n elif value['handle_missing_data'] == 'drop':\n l = [key]\n result_df = fillna_obj.drop_row(l)\n elif value['handle_missing_data'] == 'delete_column':\n co = key.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n delete_column_list.append(co)\n else:\n try:\n\n custom_column_list[key]=int(value['handle_missing_data'])\n except:\n custom_column_list[key]=value['handle_missing_data']\n # if value['dtype'] == dtype:\n # if value['handle_missing_data']== 'drop':\n # l = [key]\n # result_df[l].dropna(subset=l,inplace=True)\n # # result_df = fillna_obj.drop_row(l)\n # elif value['handle_missing_data'] == 0:\n # result_df[key].fillna(0, inplace=True)\n # # result_df = fillna_obj.fillnan_with_0(key)\n # #print(\"after filling nan\",key,result_df)\n # elif value['handle_missing_data'] == 'None':\n\n # # result_df = fillna_obj.fillnan_with_None_value(key)\n # result_df[key].fillna('None', inplace=True)\n # #print(\"after filling nan\",key,result_df)\n\n # elif value['handle_missing_data'] == 'previous':\n # result_df[key].fillna(method='ffill', inplace=True)\n # # result_df = fillna_obj.fillnan_with_previous_value(key)\n # #print(\"after filling nan\",key,result_df)\n\n\n\n # elif value['handle_missing_data'] == 'delete_column':\n # delete_column_list.append(key)\n # else:\n # try:\n\n # 
custom_column_list[key]=int(value['handle_missing_data'])\n # except:\n # custom_column_list[key]=value['handle_missing_data']\n # else:\n # if value['handle_missing_data']== 'drop':\n # l = [key]\n # result_df[l].dropna(subset=l,inplace=True)\n # # result_df = fillna_obj.drop_row(l)\n # elif value['handle_missing_data'] == 0:\n # result_df[key].fillna(0,inplace=True)\n # # result_df = fillna_obj.fillnan_with_0(key)\n # #print(\"after filling nan\",key,result_df)\n # elif value['handle_missing_data'] == 'None':\n\n # # result_df = fillna_obj.fillnan_with_None_value(key)\n # result_df[key].fillna('None', inplace=True)\n # #print(\"after filling nan\",key,result_df)\n\n # elif value['handle_missing_data'] == 'previous':\n # result_df[key].fillna(method='ffill', inplace=True)\n # # result_df = fillna_obj.fillnan_with_previous_value(key)\n # #print(\"after filling nan\",key,result_df)\n\n\n\n # elif value['handle_missing_data'] == 'delete_column':\n # delete_column_list.append(key)\n # else:\n # try:\n\n # custom_column_list[key]=int(value['handle_missing_data'])\n # except:\n # custom_column_list[key]=value['handle_missing_data']\n\n if len(custom_column_list)>0:\n result_df.fillna(custom_column_list)\n\n\n if len(delete_column_list)>0:\n\n #print(\"the value \",delete_column_list)\n column_delete_obj = DeleteColumn(result_df)\n result_df = column_delete_obj.delete_column(delete_column_list)\n #print(\"columns \",result_df.columns.tolist())\n\n meta_data_obj = metadata.meta_data\n today = str(datetime.now())\n delete_column_list = []\n\n\n\n for c in new_columns:\n column = {}\n custom_column_list = {}\n ##print(\"the dataframe\",c)\n if df.dtypes[c] == np.int64:\n column['dtype'] = 'int'\n elif df.dtypes[c] == np.float64:\n column['dtype'] = 'float'\n elif df.dtypes[c] == np.object:\n column['dtype'] = 'object'\n elif df.dtypes[c] == np.bool:\n column['dtype']= 'bool'\n elif np.issubdtype(df[c].dtype, np.datetime64):\n column['dtype'] = \"DateTime\"\n df[c] = df[c].astype('str')\n\n missing_data = None\n missing_data_input=None\n try:\n\n cu = c+'_select'\n missing_data = request.POST[cu]\n except:\n pass\n try:\n cu = c+'_input'\n missing_data_input = request.POST[cu]\n except:\n pass\n if missing_data and missing_data == 'zero':\n column['handle_missing_data']= 0\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n fillna_obj.fillnan_with_0(c)\n ##print(\"nan filled df 0 \",c)\n elif missing_data and missing_data == 'None':\n column['handle_missing_data']= 'None'\n fillna_obj.fillnan_with_None_value(c)\n ##print(\"nan filled df None \",c)\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n elif missing_data and missing_data == 'previous':\n column['handle_missing_data']= 'previous'\n result_df = fillna_obj.fillnan_with_previous_value(c)\n ##print(\"nan filled df \",result_df)\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n elif missing_data and missing_data == 'drop':\n column['handle_missing_data']= 'drop'\n l = [c]\n fillna_obj.drop_row(l)\n ##print(\"nan filled df \",result_df)\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n elif missing_data and 
missing_data == 'delete_column':\n column['handle_missing_data']= missing_data\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= today\n column['column_deleted'] = True\n\n co = c.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n delete_column_list.append(co)\n #print(\"the columns are \",delete_column_list)\n elif missing_data_input:\n column['handle_missing_data']= missing_data_input\n today = str(datetime.now().date())\n ##print(\"type of date\",type(today))\n column['start_date']= today\n column['end_date']= ''\n column['column_deleted'] = False\n co = c.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n try:\n custom_column_list[co]=int(missing_data_input)\n except:\n custom_column_list[co]=missing_data_input\n result_df = result_df.fillna(custom_column_list)\n #print(\"in for \",result_df[co])\n\n\n\n c_name = c.lower().replace(' ', '_').replace('(', '').replace(')', '').replace('.', '_').replace(',','_').replace('/','_').replace(':','')\n meta_data_obj[c_name]=column\n #print(\"custom columns list\",custom_column_list)\n if len(custom_column_list)>0:\n result_df = result_df.fillna(custom_column_list)\n #print(\"the result after nan fill\",result_df,delete_column_list)\n\n if len(delete_column_list)>0:\n\n ###print(\"the value \",delete_column_list)\n column_delete_obj = DeleteColumn(result_df)\n result_df = column_delete_obj.delete_column(delete_column_list)\n #print(\"the result after nan fill\",result_df,delete_column_list)\n\n # d= {}\n # if result_df.dtypes[c] == np.int64:\n # #print(\"its int type\",c)\n # d['dtype'] = 'int'\n # d['handle_missing_data'] = 0\n # d['start_date']= str(today)\n # d['end_date']= ''\n # d['column_deleted'] = False\n # result_df[c].fillna(0, inplace=True)\n # #print(\"after nan fill\",c,result_df,c)\n\n\n # elif result_df.dtypes[c] == np.flm oat64:\n # #print(\"its float type\",c)\n # d['dtype'] = 'float'\n # d['handle_missing_data'] =0\n # d['start_date']= str(today)\n # d['end_date']= ''\n # d['column_deleted'] = False\n # result_df[c].fillna(0, inplace=True)\n # #print(\"after nan fill\",c,result_df)\n # elif result_df.dtypes[c] == np.object:\n # #print(\"its object type\",c)\n # d['dtype'] = 'object'\n # d['handle_missing_data'] = 'None'\n # d['start_date']= str(today)\n # d['end_date']= ''\n # d['column_deleted'] = False\n # result_df[c].fillna(\"None\", inplace=True)\n # #print(\"result df\",c,result_df)\n # meta_data_obj[c]=d\n\n\n\n result_columns = result_df.columns.tolist()\n c = {'columns':result_columns}\n if metadata.date_column_name:\n result_df[metadata.date_column_name] = result_df[metadata.date_column_name].astype(str)\n res_json=result_df.to_json(orient='index')\n rows = result_df.shape[0]\n columns = result_df.shape[1]\n df_head=result_df.head(5)\n df_tail = result_df.tail(5)\n df_head_json = df_head.to_json(orient='index')\n df_tail_json = df_tail.to_json(orient='index')\n project_json = ProjectJsonStorage.objects.filter(project=project).update(js=res_json,columns=c)\n project_json = ProjectJsonStorage.objects.filter(project=project).order_by('id')[0]\n c_list = {'columns':result_df_columns}\n #print(\"the \")\n update = ProjectMetaData.objects.filter(project=project).update(meta_data=meta_data_obj,columns=c_list)\n project_json_metadata = 
ProjectJsonStorageMetadata.objects.filter(project_json=project_json).update(rows=rows,columns=columns,head_json=df_head_json,tail_json=df_tail_json)\n pk = str(project.pk)\n data = {'pk':pk}\n\n return JsonResponse(data, safe=False)\n\n # #print(\"the entered file is \",credential)\n # spread_sheet_id=sheet_details_form.cleaned_data[\"spreadsheet_id\"]\n # spread_sheet_id_encrypted = encrypt(spread_sheet_id)\n # sheet_details,created = CustomerAPIDetails.objects.get_or_create(project=project,integration_choice='Google Sheets',credentials=credential,file_id=spread_sheet_id_encrypted)\n # #print(\"created\")\n # sheet_details.credentials.open('r')\n # lines = sheet_details.credentials.read()\n # sheet_details.credentials.close()\n # file_lines_encrypted=encrypt(lines)\n # #print('file_lines_encrypted',file_lines_encrypted)\n # sheet_details.credentials.open('w')\n # sheet_details.credentials.write(file_lines_encrypted)\n # pk= sheet_details.pk\n # sheet_details_update = CustomerAPIDetails.objects.filter(pk=pk).update(credentials = sheet_details.credentials)\n # sheet_details.credentials.close()\n # updated_credential_data = CustomerAPIDetails.objects.get(pk=pk)\n # #print('updated_credential_data.token_file',updated_credential_data.token_file)\n # updated_credential_data.credentials.open('r')\n # lines = updated_credential_data.credentials.read()\n # updated_credential_data.credentials.close()\n # file_lines_decrypted=decrypt(lines)\n # encrypted_file = open('decrypted_credentials.json','w')\n # encrypted_file.write(file_lines_decrypted)\n # encrypted_file.close()\n # credentials = os.path.abspath(\"decrypted_credentials.json\")\n\n\n SCOPES = ['https://www.googleapis.com/auth/spreadsheets']\n\n # The ID and range of a sample spreadsheet.\n\n SAMPLE_RANGE_NAME = 'Sheet1'\n\n\n \"\"\"Shows basic usage of the Sheets API.\n Prints values from a sample spreadsheet.\n \"\"\"\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle_sheet'+request.user.username):\n with open('token.pickle_sheet'+request.user.username, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n credentials, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle_sheet'+request.user.username, 'wb') as token:\n pickle.dump(creds, token)\n os.remove('decrypted_credentials.json')\n service = build('sheets', 'v4', credentials=creds)\n spread_sheet_id_decrypted = decrypt(spread_sheet_id_encrypted)\n # Call the Sheets API\n result = service.spreadsheets().values().get(\n spreadsheetId=spread_sheet_id_decrypted, range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values')\n if not values:\n pass\n else:\n for row in values:\n str_value=''\n rows = str_value.join(row)\n #print(rows)\n file = open('googlesheets.csv','a')\n file.write(rows)\n file.close()\n df_file = pd.read_csv('googlesheets.csv', encoding = \"ISO-8859-1\")\n #print(df_file)\n res_df =reduce(lambda left,right: pd.merge(left,right,on=df_file, how='outer'), [df_file])\n #print(res_df.columns.to_list())\n res_json = df_file.to_json(orient='index')\n projectjson, created = 
ProjectJsonStorage.objects.get_or_create(project=project, js = res_json)\n #print(projectjson)\n os.remove('token.pickle')\n os.remove('googlesheets.csv')\n return redirect('/dashboard/')\n\nclass ProjectGoogleSheetsView(View):\n def post(self,request,pk):\n form = SheetDetailsForm(request.POST,request.FILES)\n url_form = SheetUrlForm(request.POST,request.FILES)\n # #print(\"the form \",url_form)\n if form.is_valid():\n project = Project.objects.get(pk=pk)\n credential=request.FILES[\"credential\"]\n\n spread_sheet_id=request.POST[\"spreadsheet_id\"]\n name = request.POST['sheet_name']\n data_range = request.POST[\"data_range\"]\n sheet_details,created = CustomerAPIDetails.objects.get_or_create(name= name,project=project,integration_choice='Google Sheets',credentials=credential,file_id=spread_sheet_id,range=data_range)\n try:\n\n scope=['htps://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\n file = sheet_details.credentials.open()\n file_content = file.read()\n js_str = json.loads(file_content.decode('utf-8'))\n creadentials = ServiceAccountCredentials.from_json_keyfile_dict(js_str)\n gc = gspread.authorize(creadentials)\n wks = gc.open(spread_sheet_id).sheet1\n data = wks.get_all_records()\n df = pd.DataFrame(data)\n except:\n data={'error':\"There is a Error in Reading Sheet\"}\n return JsonResponse(data, safe=False)\n data = df_readline(df,0)\n return JsonResponse(data,safe=False)\n\n \n elif url_form.is_valid():\n #print(\"in if\")\n project = Project.objects.get(pk=pk)\n url = url_form.cleaned_data['url']\n name = request.POST['sheet_url_name']\n cron_frequency = request.POST['cron_frequency']\n header =request.POST.get('header',None)\n if header:\n sheet_details,created = CustomerAPIDetails.objects.get_or_create(name= name,project=project,integration_choice='Google Sheets',sheet_url=url ,range=cron_frequency,sheet_header=int(header))\n else:\n sheet_details,created = CustomerAPIDetails.objects.get_or_create(name= name,project=project,integration_choice='Google Sheets',sheet_url=url ,range=cron_frequency)\n try:\n if header:\n df = pd.read_html(url,encoding='utf8',index_col=0,header=int(header))\n else:\n df = pd.read_html(url,encoding='utf8',index_col=0,header=1)\n\n except:\n data={'error':\"There is a Error in Reading Sheet\"}\n return JsonResponse(data, safe=False)\n # if header:\n # df = pd.read_html(url,encoding='utf8',index_col=0,header=int(header))\n # else:\n # df = pd.read_html(url,encoding='utf8',index_col=0,header=1)\n if header:\n\n data = df_readline(df[0],int(header)+1)\n else:\n data = df_readline(df[0],1)\n\n return JsonResponse(data,safe=False)\n\n else:\n #print(\"the errors\", url_form.errors)\n data={'error':\"There is a Error in Reading Sheet\"}\n return JsonResponse(data, safe=False)\n\ndef df_readline(df,i):\n columns = df.columns.tolist()\n # #print(\"the columns are \",columns)\n data = {}\n lines = []\n index =len(df.index)\n c_str = ' '\n #print(\"the read df is\",type(df),columns )\n for column in columns:\n c_str = c_str+','+column\n column_list = c_str.replace(' ','',1)\n if index>5:\n for ind in range(i,i+5):\n one= \" \"\n for col in columns:\n #print(\"the col\",col,ind)\n try:\n tst = df[col][ind]\n except:\n tst = df[col][ind+1]\n one = one+','+str(tst)\n\n lines.append(one.replace(\" \", \"\", 1))\n # #print(\"the lines are\", lines)\n line_1 =lines[0]\n line_2 = lines[1]\n line_3 = lines[2]\n line_4 = lines[3]\n line_5= lines[4]\n data = {'line_1': line_1, 'line_2': line_2, 'line_3': line_3, 'line_4': line_4, 'line_5': 
line_5,'column_list':column_list}\n\n elif (index>=4):\n for ind in range(i,i+4):\n one= \" \"\n for col in columns:\n # #print(\"the col\",df[col][ind])\n tst = df[col][ind]\n one = one+','+str(tst)\n lines.append(one.replace(\" \", \"\", 1))\n line_1 = lines[0]\n line_2 = lines[1]\n line_3 = lines[2]\n line_4 = lines[3]\n line_5 = ''\n data = {'line_1': line_1, 'line_2': line_2, 'line_3': line_3, 'line_4': line_4, 'line_5': line_5,'column_list':column_list}\n elif (index>=3):\n for ind in range(i,i+3):\n one= \" \"\n for col in columns:\n # #print(\"the col\",df[col][ind])\n tst = df[col][ind]\n one = one+','+str(tst)\n lines.append(one.replace(\" \", \"\", 1))\n # #print(\"the lines are\", lines)\n line_1 = lines[0]\n line_2 = lines[1]\n line_3 = lines[2]\n line_4 = ''\n line_5 = ''\n data = {'line_1': line_1, 'line_2': line_2, 'line_3': line_3, 'line_4': line_4, 'line_5': line_5,'column_list':column_list}\n elif (index>=2):\n for ind in range(i,i+2):\n one= \" \"\n for col in columns:\n # #print(\"the col\",df[col][ind])\n tst = df[col][ind]\n one = one+','+str(tst)\n lines.append(one.replace(\" \", \"\", 1))\n # #print(\"the lines are\", lines)\n line_1 = lines[0]\n line_2 = lines[1]\n line_3 = ''\n line_4 = ''\n line_5 = ''\n data = {'line_1': line_1, 'line_2': line_2, 'line_3': line_3, 'line_4': line_4, 'line_5': line_5,'column_list':column_list}\n\n else:\n for ind in range(i,i+1):\n one= \" \"\n for col in columns:\n # #print(\"the col\",df[col][ind])\n tst = df[col][ind]\n one = one+','+str(tst)\n lines.append(one.replace(\" \", \"\", 1))\n line_1 = lines[0]\n line_2 = ''\n line_3 = ''\n line_4 = ''\n line_5 = ''\n data ={'line_1':line_1,'line_2':line_2,'line_3':line_3,'line_4':line_4,'line_5':line_5,'column_list':column_list}\n # #print(\"the lines are\",data)\n\n data={'data':data}\n return data\n\n\n\n\ndef identify_user_permission(project,user):\n if Project.objects.filter(pk=project.pk,admin_user=user).exists():\n permission=\"Admin\"\n elif ProjectUser.objects.filter(project=project,project_user=user).exists():\n user_group = User.objects.get(pk=user.pk)\n\n # # vl.fullbari(\"ProjectPermissionMixin::identify_user_permission user_groups=\"+str(vl.one_string(user_group.groups.all())))\n for g in user_group.groups.all():\n if g.name == str(project.pk)+\"_Read\":\n permission=\"Read\"\n elif g.name == str(project.pk)+\"_Write\":\n permission=\"Write\"\n elif g.name == str(project.pk)+\"_Delete\":\n permission=\"Delete\"\n elif g.name == str(project.pk)+\"_Admin\":\n permission=\"Admin\"\n\n else:\n permission=None\n return permission\n"
]
| [
[
"pandas.to_datetime",
"pandas.merge",
"pandas.json_normalize",
"pandas.DataFrame",
"pandas.read_excel",
"numpy.issubdtype",
"pandas.read_csv",
"pandas.to_numeric",
"pandas.read_html"
]
]
|
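The row above merges freshly fetched API rows into the project's stored frame with an outer `pandas.merge` on the shared columns, then applies per-column missing-data rules (`zero`, `None`, `previous`, `drop`, or a custom fill). Below is a minimal sketch of that pattern; the column names and the fill rule are illustrative only, not taken from the project:

```python
# Minimal sketch (not the project's actual code) of the merge-and-fill
# pattern in the views above: new rows are outer-joined onto the stored
# frame on their shared columns, then per-column fill rules are applied.
import pandas as pd

stored = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
fetched = pd.DataFrame({"id": [2, 3], "name": ["b", "c"], "score": [0.5, 0.9]})

shared = [c for c in stored.columns if c in fetched.columns]
merged = pd.merge(stored, fetched, on=shared, how="outer")

# per-column handling, mirroring the 'zero'-style choice for one column
rules = {"score": 0}          # hypothetical rule: fill numeric gaps with 0
merged = merged.fillna(value=rules)
print(merged)
```

The outer join keeps rows unique to either side, which is why the per-column fill rules have to run afterwards, as they do in the view code.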
HimariO/VideoSum | [
"3a81276df3b429c24ebf9a1841b5a9168c0c3ccf"
]
| [
"tensorflow_toolbox/moreLSTM.py"
]
| [
"'''\nsupercell\nhttps://github.com/hardmaru/supercell/\ninspired by http://supercell.jp/\n'''\n\nimport tensorflow as tf\nimport numpy as np\n\n# Orthogonal Initializer from\n# https://github.com/OlavHN/bnlstm\ndef orthogonal(shape):\n flat_shape = (shape[0], np.prod(shape[1:]))\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v\n return q.reshape(shape)\n\ndef lstm_ortho_initializer(scale=1.0):\n def _initializer(shape, dtype=tf.float32, partition_info=None):\n size_x = shape[0]\n size_h = shape[1]/4 # assumes lstm.\n t = np.zeros(shape)\n t[:, :size_h] = orthogonal([size_x, size_h])*scale\n t[:, size_h:size_h*2] = orthogonal([size_x, size_h])*scale\n t[:, size_h*2:size_h*3] = orthogonal([size_x, size_h])*scale\n t[:, size_h*3:] = orthogonal([size_x, size_h])*scale\n return tf.constant(t, dtype)\n return _initializer\n\ndef layer_norm_all(h, batch_size, base, num_units, scope=\"layer_norm\", reuse=False, gamma_start=1.0, epsilon = 1e-3, use_bias=True):\n # Layer Norm (faster version, but not using defun)\n #\n # Performas layer norm on multiple base at once (ie, i, g, j, o for lstm)\n #\n # Reshapes h in to perform layer norm in parallel\n h_reshape = tf.reshape(h, [batch_size, base, num_units])\n mean = tf.reduce_mean(h_reshape, [2], keep_dims=True)\n var = tf.reduce_mean(tf.square(h_reshape - mean), [2], keep_dims=True)\n epsilon = tf.constant(epsilon)\n rstd = tf.rsqrt(var + epsilon)\n h_reshape = (h_reshape - mean) * rstd\n # reshape back to original\n h = tf.reshape(h_reshape, [batch_size, base * num_units])\n with tf.variable_scope(scope):\n if reuse == True:\n tf.get_variable_scope().reuse_variables()\n gamma = tf.get_variable('ln_gamma', [4*num_units], initializer=tf.constant_initializer(gamma_start))\n if use_bias:\n beta = tf.get_variable('ln_beta', [4*num_units], initializer=tf.constant_initializer(0.0))\n if use_bias:\n return gamma*h + beta\n return gamma * h\n\ndef layer_norm(x, num_units, scope=\"layer_norm\", reuse=False, gamma_start=1.0, epsilon = 1e-3, use_bias=True):\n axes = [1]\n mean = tf.reduce_mean(x, axes, keep_dims=True)\n x_shifted = x-mean\n var = tf.reduce_mean(tf.square(x_shifted), axes, keep_dims=True)\n inv_std = tf.rsqrt(var + epsilon)\n with tf.variable_scope(scope):\n if reuse == True:\n tf.get_variable_scope().reuse_variables()\n gamma = tf.get_variable('ln_gamma', [num_units], initializer=tf.constant_initializer(gamma_start))\n if use_bias:\n beta = tf.get_variable('ln_beta', [num_units], initializer=tf.constant_initializer(0.0))\n output = gamma*(x_shifted)*inv_std\n if use_bias:\n output = output + beta\n return output\n\ndef super_linear(x, output_size, scope=None, reuse=False,\n init_w=\"ortho\", weight_start=0.0, use_bias=True, bias_start=0.0, input_size=None):\n # support function doing linear operation. 
uses ortho initializer defined earlier.\n shape = x.get_shape().as_list()\n with tf.variable_scope(scope or \"linear\"):\n if reuse == True:\n tf.get_variable_scope().reuse_variables()\n\n w_init = None # uniform\n if input_size == None:\n x_size = shape[1]\n else:\n x_size = input_size\n h_size = output_size\n if init_w == \"zeros\":\n w_init=tf.constant_initializer(0.0)\n elif init_w == \"constant\":\n w_init=tf.constant_initializer(weight_start)\n elif init_w == \"gaussian\":\n w_init=tf.random_normal_initializer(stddev=weight_start)\n elif init_w == \"ortho\":\n w_init=lstm_ortho_initializer(1.0)\n\n w = tf.get_variable(\"super_linear_w\",\n [x_size, output_size], tf.float32, initializer=w_init)\n if use_bias:\n b = tf.get_variable(\"super_linear_b\", [output_size], tf.float32,\n initializer=tf.constant_initializer(bias_start))\n return tf.matmul(x, w) + b\n return tf.matmul(x, w)\n\ndef hyper_norm(layer, hyper_output, embedding_size, num_units,\n scope=\"hyper\", use_bias=True):\n '''\n HyperNetwork norm operator\n\n provides context-dependent weights\n layer: layer to apply operation on\n hyper_output: output of the hypernetwork cell at time t\n embedding_size: embedding size of the output vector (see paper)\n num_units: number of hidden units in main rnn\n '''\n # recurrent batch norm init trick (https://arxiv.org/abs/1603.09025).\n init_gamma = 0.10 # cooijmans' da man.\n with tf.variable_scope(scope):\n zw = super_linear(hyper_output, embedding_size, init_w=\"constant\",\n weight_start=0.00, use_bias=True, bias_start=1.0, scope=\"zw\")\n alpha = super_linear(zw, num_units, init_w=\"constant\",\n weight_start=init_gamma / embedding_size, use_bias=False, scope=\"alpha\")\n result = tf.mul(alpha, layer)\n return result\n\ndef hyper_bias(layer, hyper_output, embedding_size, num_units,\n scope=\"hyper\"):\n '''\n HyperNetwork norm operator\n\n provides context-dependent bias\n layer: layer to apply operation on\n hyper_output: output of the hypernetwork cell at time t\n embedding_size: embedding size of the output vector (see paper)\n num_units: number of hidden units in main rnn\n '''\n\n with tf.variable_scope(scope):\n zb = super_linear(hyper_output, embedding_size, init_w=\"gaussian\",\n weight_start=0.01, use_bias=False, bias_start=0.0, scope=\"zb\")\n beta = super_linear(zb, num_units, init_w=\"constant\",\n weight_start=0.00, use_bias=False, scope=\"beta\")\n return layer + beta\n\nclass LSTMCell(tf.contrib.rnn.RNNCell):\n \"\"\"\n Layer-Norm, with Ortho Initialization and\n Recurrent Dropout without Memory Loss.\n https://arxiv.org/abs/1607.06450 - Layer Norm\n https://arxiv.org/abs/1603.05118 - Recurrent Dropout without Memory Loss\n derived from\n https://github.com/OlavHN/bnlstm\n https://github.com/LeavesBreathe/tensorflow_with_latest_papers\n \"\"\"\n\n def __init__(self, num_units, forget_bias=1.0, use_layer_norm=False,\n use_recurrent_dropout=False, dropout_keep_prob=0.90):\n \"\"\"Initialize the Layer Norm LSTM cell.\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (default 1.0).\n use_recurrent_dropout: float, Whether to use Recurrent Dropout (default False)\n dropout_keep_prob: float, dropout keep probability (default 0.90)\n \"\"\"\n self.num_units = num_units\n self.forget_bias = forget_bias\n self.use_layer_norm = use_layer_norm\n self.use_recurrent_dropout = use_recurrent_dropout\n self.dropout_keep_prob = dropout_keep_prob\n\n @property\n def output_size(self):\n return self.num_units\n\n 
@property\n def state_size(self):\n return tf.contrib.rnn.LSTMStateTuple(self.num_units, self.num_units)\n\n def __call__(self, x, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__): # \"BasicLSTMCell\"\n c, h = state\n\n h_size = self.num_units\n\n batch_size = x.get_shape().as_list()[0]\n x_size = x.get_shape().as_list()[1]\n\n w_init=None # uniform\n\n h_init=lstm_ortho_initializer()\n\n W_xh = tf.get_variable('W_xh',\n [x_size, 4 * self.num_units], initializer=w_init)\n\n W_hh = tf.get_variable('W_hh_i',\n [self.num_units, 4*self.num_units], initializer=h_init)\n\n W_full = tf.concat([W_xh, W_hh], 0)\n\n bias = tf.get_variable('bias',\n [4 * self.num_units], initializer=tf.constant_initializer(0.0))\n\n concat = tf.concat([x, h], 1) # concat for speed.\n concat = tf.matmul(concat, W_full) + bias\n\n # new way of doing layer norm (faster)\n if self.use_layer_norm:\n concat = layer_norm_all(concat, batch_size, 4, self.num_units, 'ln')\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = tf.split(concat, 4, 1)\n\n if self.use_recurrent_dropout:\n g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)\n else:\n g = tf.tanh(j)\n\n new_c = c*tf.sigmoid(f+self.forget_bias) + tf.sigmoid(i)*g\n if self.use_layer_norm:\n new_h = tf.tanh(layer_norm(new_c, self.num_units, 'ln_c')) * tf.sigmoid(o)\n else:\n new_h = tf.tanh(new_c) * tf.sigmoid(o)\n\n return new_h, tf.contrib.rnn.LSTMStateTuple(new_c, new_h)\n\nclass HyperLSTMCell(tf.contrib.rnn.RNNCell):\n '''\n HyperLSTM, with Ortho Initialization,\n Layer Norm and Recurrent Dropout without Memory Loss.\n\n https://arxiv.org/abs/1609.09106\n '''\n\n def __init__(self, num_units, forget_bias=1.0,\n use_recurrent_dropout=False, dropout_keep_prob=0.90, use_layer_norm=True,\n hyper_num_units=128, hyper_embedding_size=16,\n hyper_use_recurrent_dropout=False):\n '''Initialize the Layer Norm HyperLSTM cell.\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (default 1.0).\n use_recurrent_dropout: float, Whether to use Recurrent Dropout (default False)\n dropout_keep_prob: float, dropout keep probability (default 0.90)\n use_layer_norm: boolean. (default True)\n Controls whether we use LayerNorm layers in main LSTM and HyperLSTM cell.\n hyper_num_units: int, number of units in HyperLSTM cell.\n (default is 128, recommend experimenting with 256 for larger tasks)\n hyper_embedding_size: int, size of signals emitted from HyperLSTM cell.\n (default is 4, recommend trying larger values but larger is not always better)\n hyper_use_recurrent_dropout: boolean. (default False)\n Controls whether HyperLSTM cell also uses recurrent dropout. 
(Not in Paper.)\n Recommend turning this on only if hyper_num_units becomes very large (>= 512)\n '''\n self.num_units = num_units\n self.forget_bias = forget_bias\n self.use_recurrent_dropout = use_recurrent_dropout\n self.dropout_keep_prob = dropout_keep_prob\n self.use_layer_norm = use_layer_norm\n self.hyper_num_units = hyper_num_units\n self.hyper_embedding_size = hyper_embedding_size\n self.hyper_use_recurrent_dropout = hyper_use_recurrent_dropout\n\n self.total_num_units = self.num_units + self.hyper_num_units\n\n self.hyper_cell=LSTMCell(hyper_num_units,\n use_recurrent_dropout=hyper_use_recurrent_dropout,\n use_layer_norm=use_layer_norm,\n dropout_keep_prob=dropout_keep_prob)\n\n @property\n def output_size(self):\n return self.num_units\n\n @property\n def state_size(self):\n return tf.contrib.rnn.LSTMStateTuple(self.num_units+self.hyper_num_units,\n self.num_units+self.hyper_num_units)\n\n def __call__(self, x, state, timestep = 0, scope=None):\n with tf.variable_scope(scope or type(self).__name__):\n total_c, total_h = state\n c = total_c[:, 0:self.num_units]\n h = total_h[:, 0:self.num_units]\n hyper_state = tf.contrib.rnn.LSTMStateTuple(total_c[:,self.num_units:],\n total_h[:,self.num_units:])\n\n w_init=None # uniform\n\n h_init=lstm_ortho_initializer(1.0)\n\n x_size = x.get_shape().as_list()[1]\n embedding_size = self.hyper_embedding_size\n num_units = self.num_units\n batch_size = x.get_shape().as_list()[0]\n\n W_xh = tf.get_variable('W_xh',\n [x_size, 4*num_units], initializer=w_init)\n W_hh = tf.get_variable('W_hh',\n [num_units, 4*num_units], initializer=h_init)\n bias = tf.get_variable('bias',\n [4*num_units], initializer=tf.constant_initializer(0.0))\n\n # concatenate the input and hidden states for hyperlstm input\n hyper_input = tf.concat([x, h], 1)\n hyper_output, hyper_new_state = self.hyper_cell(hyper_input, hyper_state)\n\n xh = tf.matmul(x, W_xh)\n hh = tf.matmul(h, W_hh)\n\n # split Wxh contributions\n ix, jx, fx, ox = tf.split(xh, 4, 1)\n ix = hyper_norm(ix, hyper_output, embedding_size, num_units, 'hyper_ix')\n jx = hyper_norm(jx, hyper_output, embedding_size, num_units, 'hyper_jx')\n fx = hyper_norm(fx, hyper_output, embedding_size, num_units, 'hyper_fx')\n ox = hyper_norm(ox, hyper_output, embedding_size, num_units, 'hyper_ox')\n\n # split Whh contributions\n ih, jh, fh, oh = tf.split(hh, 4, 1)\n ih = hyper_norm(ih, hyper_output, embedding_size, num_units, 'hyper_ih')\n jh = hyper_norm(jh, hyper_output, embedding_size, num_units, 'hyper_jh')\n fh = hyper_norm(fh, hyper_output, embedding_size, num_units, 'hyper_fh')\n oh = hyper_norm(oh, hyper_output, embedding_size, num_units, 'hyper_oh')\n\n # split bias\n ib, jb, fb, ob = tf.split(bias, 4, 0) # bias is to be broadcasted.\n ib = hyper_bias(ib, hyper_output, embedding_size, num_units, 'hyper_ib')\n jb = hyper_bias(jb, hyper_output, embedding_size, num_units, 'hyper_jb')\n fb = hyper_bias(fb, hyper_output, embedding_size, num_units, 'hyper_fb')\n ob = hyper_bias(ob, hyper_output, embedding_size, num_units, 'hyper_ob')\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i = ix + ih + ib\n j = jx + jh + jb\n f = fx + fh + fb\n o = ox + oh + ob\n\n if self.use_layer_norm:\n concat = tf.concat([i, j, f, o], 1)\n concat = layer_norm_all(concat, batch_size, 4, num_units, 'ln_all')\n i, j, f, o = tf.split(concat, 4, 1)\n\n if self.use_recurrent_dropout:\n g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)\n else:\n g = tf.tanh(j)\n\n new_c = c*tf.sigmoid(f+self.forget_bias) + 
tf.sigmoid(i)*g\n if self.use_layer_norm:\n new_h = tf.tanh(layer_norm(new_c, num_units, 'ln_c')) * tf.sigmoid(o)\n else:\n new_h = tf.tanh(new_c) * tf.sigmoid(o)\n\n hyper_c, hyper_h = hyper_new_state\n new_total_c = tf.concat([new_c, hyper_c], 1)\n new_total_h = tf.concat([new_h, hyper_h], 1)\n\n return new_h, tf.contrib.rnn.LSTMStateTuple(new_total_c, new_total_h)\n"
]
| [
[
"tensorflow.constant_initializer",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.mul",
"tensorflow.tanh",
"tensorflow.random_normal_initializer",
"numpy.random.normal",
"tensorflow.concat",
"tensorflow.sigmoid",
"tensorflow.contrib.rnn.LSTMStateTuple",
"tensorflow.constant",
"tensorflow.variable_scope",
"numpy.prod",
"tensorflow.get_variable_scope",
"tensorflow.split",
"numpy.zeros",
"tensorflow.rsqrt",
"tensorflow.get_variable",
"numpy.linalg.svd",
"tensorflow.reduce_mean",
"tensorflow.square"
]
]
|
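`moreLSTM.py` above builds its LSTM weight matrices with an SVD-based orthogonal initializer. The helper below is lifted from that row, with a small self-check added; the `[64, 256]` shape is an arbitrary example:

```python
# SVD-based orthogonal initializer from the row above, plus a check that
# the resulting matrix really is orthonormal along its shorter dimension.
import numpy as np

def orthogonal(shape):
    flat = (shape[0], int(np.prod(shape[1:])))
    a = np.random.normal(0.0, 1.0, flat)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == flat else v
    return q.reshape(shape)

w = orthogonal([64, 256])
# for the "wide" case the rows are orthonormal: W W^T ~ I
gram = w.T @ w if w.shape[0] >= w.shape[1] else w @ w.T
print(np.allclose(gram, np.eye(min(w.shape)), atol=1e-8))  # True
```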
douch/Paddle | [
"dbd6e2df9d074973b7ee177e2d6b96ed2318008e"
]
| [
"python/paddle/fluid/tests/unittests/process_group_gloo.py"
]
| [
"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport random\nimport numpy as np\nimport os\nimport shutil\n\nimport paddle\nfrom paddle.fluid import core\nimport datetime\nfrom datetime import timedelta\nimport paddle.fluid.core as core\nfrom paddle.fluid.framework import _test_eager_guard\nfrom paddle.fluid.dygraph.parallel import ParallelEnv\n\n\nclass TestProcessGroupFp32(unittest.TestCase):\n def setUp(self):\n paddle.seed(2022)\n random.seed(2022)\n np.random.seed(2022)\n self.config()\n\n def config(self):\n self.dtype = \"float32\"\n self.shape = (2, 10, 5)\n\n def test_create_process_group_gloo(self):\n with _test_eager_guard():\n nranks = ParallelEnv().nranks\n rank = ParallelEnv().local_rank\n is_master = True if rank == 0 else False\n store = paddle.fluid.core.TCPStore(\"127.0.0.1\", 6272, is_master,\n nranks, datetime.timedelta(0))\n pg = paddle.fluid.core.ProcessGroupGloo(store, rank, nranks)\n\n # test allreduce sum\n # rank 0\n paddle.device.set_device('cpu')\n x = np.random.random(self.shape).astype(self.dtype)\n tensor_x = paddle.to_tensor(x)\n # rank 1\n y = np.random.random(self.shape).astype(self.dtype)\n tensor_y = paddle.to_tensor(y)\n\n sum_result = x + y\n if rank == 0:\n task = pg.allreduce(tensor_x)\n task.wait()\n assert np.array_equal(tensor_x, sum_result)\n else:\n task = pg.allreduce(tensor_y)\n task.wait()\n assert np.array_equal(tensor_y, sum_result)\n\n print(\"test allreduce sum api ok\")\n\n # test allreduce max\n # rank 0\n x = np.random.random(self.shape).astype(self.dtype)\n tensor_x = paddle.to_tensor(x)\n # rank 1\n y = np.random.random(self.shape).astype(self.dtype)\n tensor_y = paddle.to_tensor(y)\n\n max_result = paddle.maximum(tensor_x, tensor_y)\n\n if rank == 0:\n task = pg.allreduce(tensor_x, core.ReduceOp.MAX)\n task.wait()\n assert np.array_equal(tensor_x, max_result)\n else:\n task = pg.allreduce(tensor_y, core.ReduceOp.MAX)\n task.wait()\n assert np.array_equal(tensor_y, max_result)\n\n print(\"test allreduce max api ok\")\n\n # test broadcast\n # rank 0\n x = np.random.random(self.shape).astype(self.dtype)\n tensor_x = paddle.to_tensor(x)\n # rank 1\n y = np.random.random(self.shape).astype(self.dtype)\n tensor_y = paddle.to_tensor(y)\n\n broadcast_result = paddle.assign(tensor_x)\n if rank == 0:\n task = pg.broadcast(tensor_x, 0)\n assert np.array_equal(broadcast_result, tensor_x)\n else:\n task = pg.broadcast(tensor_y, 0)\n assert np.array_equal(broadcast_result, tensor_y)\n print(\"test broadcast api ok\")\n\n # test barrier\n # rank 0\n if pg.rank() == 0:\n task = pg.barrier()\n task.wait()\n # rank 1\n else:\n task = pg.barrier()\n task.wait()\n\n print(\"test barrier api ok\\n\")\n\n # test allgather\n # rank 0\n x = np.random.random(self.shape).astype(self.dtype)\n y = np.random.random(self.shape).astype(self.dtype)\n tensor_x = paddle.to_tensor(x)\n tensor_y = paddle.to_tensor(y)\n out_shape = 
list(self.shape)\n out_shape[0] *= 2\n out = np.random.random(out_shape).astype(self.dtype)\n tensor_out = paddle.to_tensor(out)\n if pg.rank() == 0:\n task = pg.all_gather(tensor_x, tensor_out)\n task.wait()\n paddle.device.cuda.synchronize()\n # rank 1\n else:\n task = pg.all_gather(tensor_y, tensor_out)\n task.wait()\n out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])\n out_2 = paddle.slice(tensor_out, [0], [out_shape[0] // 2],\n [out_shape[0]])\n assert np.array_equal(tensor_x, out_1)\n assert np.array_equal(tensor_y, out_2)\n print(\"test allgather api ok\\n\")\n\n # test Reduce\n # rank 0\n x = np.random.random(self.shape).astype(self.dtype)\n y = np.random.random(self.shape).astype(self.dtype)\n tensor_x = paddle.to_tensor(x)\n tensor_y = paddle.to_tensor(y)\n sum_result = tensor_x + tensor_y\n if pg.rank() == 0:\n task = pg.reduce(tensor_x, 0)\n task.wait()\n # rank 1\n else:\n task = pg.reduce(tensor_y, 0)\n task.wait()\n if pg.rank() == 0:\n assert np.array_equal(tensor_x, sum_result)\n print(\"test reduce sum api ok\\n\")\n\n # test Scatter\n # rank 0\n in_shape = list(self.shape)\n in_shape[0] *= 2\n x = np.random.random(in_shape).astype(self.dtype)\n y = np.random.random(self.shape).astype(self.dtype)\n tensor_x = paddle.to_tensor(x)\n tensor_y = paddle.to_tensor(y)\n if pg.rank() == 0:\n task = pg.scatter(tensor_x, tensor_y, 0)\n task.wait()\n # rank 1\n else:\n task = pg.scatter(tensor_x, tensor_y, 0)\n task.wait()\n out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]])\n out2 = paddle.slice(tensor_x, [0], [self.shape[0]],\n [self.shape[0] * 2])\n if pg.rank() == 0:\n assert np.array_equal(tensor_y, out1)\n else:\n assert np.array_equal(tensor_y, out2)\n print(\"test scatter api ok\\n\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
]
| [
[
"numpy.random.seed",
"numpy.random.random",
"numpy.array_equal"
]
]
|
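`process_group_gloo.py` above asserts the usual collective semantics: after an allreduce every rank holds the reduction of all inputs, and allgather stacks the per-rank tensors along axis 0. A plain-NumPy illustration of those expected results follows (two hypothetical ranks, no real process group involved):

```python
# NumPy-only illustration of the values the Gloo test above checks for.
import numpy as np

x = np.random.random((2, 10, 5)).astype("float32")  # rank 0's tensor
y = np.random.random((2, 10, 5)).astype("float32")  # rank 1's tensor

allreduce_sum = x + y                # every rank ends with the sum
allreduce_max = np.maximum(x, y)     # ... or the elementwise max
allgather = np.concatenate([x, y])   # output stacks ranks on axis 0

assert allgather.shape[0] == 2 * x.shape[0]
assert np.array_equal(allgather[: x.shape[0]], x)
```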
juan-carlos-calvo/navigate | [
"b804638aa3a51f9c9db2ffea75ad6f0519365105"
]
| [
"navigate/dqn_agent.py"
]
| [
"import random\nfrom collections import deque, namedtuple\nfrom typing import Any\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom config import settings\nfrom navigate.utils import load_obj\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Agent:\n def __init__(self, env: Any, seed: int = 0):\n \"\"\"Interacts with and learns from the environment.\n\n Args:\n env (Any): environment. Must have step and reset methods,\n as well action_size and space_size properties.\n seed (int, optional): random seed. Defaults to 0.\n \"\"\"\n self.env = env\n self.seed = random.seed(seed)\n\n # Q-Network\n qmodel_class = load_obj(settings.qmodel.class_name)\n self.qnetwork_local = qmodel_class(\n self.env.state_size, self.env.action_size, **settings.qmodel.kwargs\n ).to(device)\n self.qnetwork_target = qmodel_class(\n self.env.state_size, self.env.action_size, **settings.qmodel.kwargs\n ).to(device)\n self.optimizer = optim.Adam(\n self.qnetwork_local.parameters(), lr=settings.agent.lr\n )\n\n # Replay memory\n self.memory = ReplayBuffer(self.env.action_size, **settings.buffer.kwargs)\n # Initialize time step (for updating every UPDATE_EVERY steps)\n self.t_step = 0\n\n def step(self, state, action, reward, next_state, done):\n \"\"\"add experience to buffer and train on a batch if\n the internal step counter is 0 mod `update_every`\n\n Args:\n state ([type]): [description]\n action ([type]): [description]\n reward ([type]): [description]\n next_state ([type]): [description]\n done (function): [description]\n \"\"\"\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % settings.agent.update_every\n if self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > self.memory.batch_size:\n experiences = self.memory.sample()\n self.train(experiences, settings.agent.gamma)\n\n def act(self, state, eps=0.0):\n \"\"\"Returns actions for given state as per current policy.\n\n Params\n ======\n state (array_like): current state\n eps (float): epsilon, for epsilon-greedy action selection\n \"\"\"\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.env.action_size))\n\n def train(self, experiences, gamma):\n \"\"\"Update value parameters using given batch of experience tuples.\n\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n \"\"\"\n states, actions, rewards, next_states, dones = experiences\n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = (\n self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n )\n # Compute Q targets for current states\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network 
------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, settings.agent.tau)\n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n tau (float): interpolation parameter\n \"\"\"\n for target_param, local_param in zip(\n target_model.parameters(), local_model.parameters()\n ):\n target_param.data.copy_(\n tau * local_param.data + (1.0 - tau) * target_param.data\n )\n\n def play(self, n_episodes: int = 3, mat_t: int = 1000):\n eps_start = 0\n eps_end = 0\n train_mode = False\n self._interact(n_episodes, mat_t, eps_start, eps_end, train_mode=train_mode)\n\n def _interact(\n self,\n n_episodes=2000,\n max_t=1000,\n eps_start=1.0,\n eps_end=0.01,\n eps_decay=0.995,\n model_save_path: str = \"checkpoint.pth\",\n train_mode: bool = True,\n ):\n \"\"\"Deep Q-Learning.\n\n Params\n ======\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timesteps per episode\n eps_start (float): starting value of epsilon, for epsilon-greedy action selection\n eps_end (float): minimum value of epsilon\n eps_decay (float): multiplicative factor (per episode) for decreasing epsilon\n \"\"\"\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(1, n_episodes + 1):\n state = self.env.reset(train_mode=train_mode)\n score = 0\n for t in range(max_t):\n action = self.act(state, eps)\n next_state, reward, done, _ = self.env.step(action)\n if train_mode:\n self.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print(\n \"\\rEpisode {}\\tAverage Score: {:.2f}\".format(\n i_episode, np.mean(scores_window)\n ),\n end=\"\",\n )\n if i_episode % 100 == 0:\n print(\n \"\\rEpisode {}\\tAverage Score: {:.2f}\".format(\n i_episode, np.mean(scores_window)\n )\n )\n if train_mode:\n torch.save(self.qnetwork_local.state_dict(), model_save_path)\n return score\n\n def learn(\n self,\n n_episodes=2000,\n max_t=1000,\n eps_start=1.0,\n eps_end=0.01,\n eps_decay=0.995,\n model_save_path: str = \"checkpoint.pth\",\n ):\n \"\"\"Deep Q-Learning.\n\n Params\n ======\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timesteps per episode\n eps_start (float): starting value of epsilon, for epsilon-greedy action selection\n eps_end (float): minimum value of epsilon\n eps_decay (float): multiplicative factor (per episode) for decreasing epsilon\n \"\"\"\n self._interact(\n n_episodes,\n max_t,\n eps_start,\n eps_end,\n eps_decay,\n model_save_path,\n train_mode=True,\n )\n\n\nclass ReplayBuffer:\n \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n def __init__(\n self, action_size, buffer_size: int = 1e5, batch_size: int = 64, seed: int = 0\n ):\n \"\"\"Initialize a ReplayBuffer object.\n\n Params\n ======\n action_size (int): dimension of each action\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n seed (int): random seed\n \"\"\"\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n 
self.experience = namedtuple(\n \"Experience\",\n field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"],\n )\n self.seed = random.seed(seed)\n\n def add(self, state, action, reward, next_state, done):\n \"\"\"Add a new experience to memory.\"\"\"\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n\n def sample(self):\n \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n experiences = random.sample(self.memory, k=self.batch_size)\n fields = experiences[0]._fields\n return tuple(map(iter2tensor, zip(fields, zip(*experiences))))\n\n def __len__(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)\n\n\ndef iter2tensor(iter):\n field, arr = iter\n dtype = \"long\" if field == \"action\" else \"float\"\n return getattr(torch.from_numpy(np.vstack(arr)), dtype)().to(device)\n"
]
| [
[
"torch.no_grad",
"numpy.mean",
"torch.from_numpy",
"torch.nn.functional.mse_loss",
"torch.cuda.is_available",
"numpy.arange",
"numpy.vstack"
]
]
|
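The `soft_update` method in the row above implements the Polyak averaging rule theta_target = tau * theta_local + (1 - tau) * theta_target. A minimal sketch of that rule on plain tensors, assuming nothing beyond PyTorch itself (this snippet is illustrative and not part of the dataset row):

```python
import torch

tau = 1e-3  # small interpolation factor; tau = 1.0 would copy the local net outright
local_param = torch.tensor([1.0, 2.0, 3.0])
target_param = torch.zeros(3)

# one Polyak step: the target drifts slowly toward the local parameters
target_param = tau * local_param + (1.0 - tau) * target_param
print(target_param)  # tensor([0.0010, 0.0020, 0.0030])
```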
cthorey/CraterInspector | [
"f71de6dfddd3d538d76da229b4b9605c40f3fbac"
]
| [
"pdsimage/PDS_Extractor.py"
]
| [
"# Import library\nfrom __future__ import print_function\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\nfrom distutils.util import strtobool\n# Library to help read header in binary WAC FILE\nimport pvl\nfrom pvl import load as load_label\n\n# Helper to catch images from URLs\nimport urllib\nimport requests\n\n\nclass BinaryTable(object):\n \"\"\" Class to read image binary file from the LRO experiment \n\n For the moment, it can gather information about the topography\n (from LRO LOLA experiment) and texture (from the LRO WAC\n experiment). More information about the Lunar Reconnaissance\n Orbiter mission (LRO) can be found `here`_\n\n LRO LOLA - Informations can be found at `LRO/LOLA website`_.\n In particular, the header, located in a separate file .LBL file,\n contains all the informations.\n\n LROC WAC - Informations can be found at `LROC/WAC website`_.\n In particular, HEADER in the binary file that contain all the information -\n Read with the module `pvl module`_ for informations about how the header is\n extracted directly from the file.\n\n This class has a method able to download the images,\n though it might be better to download them before\n as it takes a lot of time, especially for large resolution.\n\n Both are NASA PDS FILE - Meaning, they are binary table whose format depends on\n the file. All the information can be found in the Header whose\n reference are above. Line usualy index latitude while sample on the line\n refers to longitude.\n\n THIS CLASS SUPPORT ONLY CYLINDRICAL PROJECTION FOR THE MOMENT.\n PROJECTION : [WAC : 'EQUIRECTANGULAR', LOLA : '\"SIMPLE\"']\n FURTHER WORK IS NEEDED FOR IT TO BECOMES MORE GENERAL.\n\n Args:\n fname (str): Name of the image.\n path_pdsfiles (Optional[str]): Path where the pds files are stored.\n Defaults, the path is set to the folder ``PDS_FILES`` next to\n the module files where the library is install.\n\n See ``defaut_pdsfile`` variable of the class\n\n Attributes:\n fname (str): Name of the image.\n path_pdsfiles (str): path where the pds files are stored.\n lolapath (str): path for LOLA images\n wacpath (str): path for WAC images\n grid (str): WAC or LOLA\n img (str): name of the image\n lbl (str): name of the lbl file, where information are stored. Empty for WAC.\n\n Note:\n It is important to respect the structure of the PDS_FILES folder. It\n should contain 2 subfolder called ``LOLA`` and ``LROC_WAC`` where the\n corresponding images should be download.\n\n I also integrate all the specification of the image contained in\n the header or the .LBL file as attribute of the class. However,\n the list is long and I do not introduce them into the\n documentation. See the file directly for details.\n\n The abreaviations correspond to:\n\n - **LRO** Lunar Reconnaissance Orbiter\n - **LOLA** Lunar Orbiter Laser Altimeter\n - **LROC** Lunar Reconnaissance Orbiter Camera\n - **WAC** Wide Angle Camera\n\n .. _here:\n http://www.nasa.gov/mission_pages/LRO/spacecraft/#.VpOMDpMrKL4\n\n .. _LRO/LOLA website:\n http://pds-geosciences.wustl.edu/lro/lro-l-lola-3-rdr-v1/lrolol_1xxx/aareadme.txt\n\n .. _LROC/WAC website:\n http://lroc.sese.asu.edu/data/LRO-L-LROC-5-RDR-V1.0/LROLRC_2001/AAREADME.TXT\n\n .. 
_pvl module:\n http://pvl.readthedocs.org/en/latest/\n\n \"\"\"\n\n defaut_pdsfile = os.path.join(\n '/'.join(os.path.abspath(__file__).split('/')[:-1]), 'PDS_FILES')\n\n def __init__(self, fname, path_pdsfile=defaut_pdsfile):\n\n self.fname = fname.upper()\n self.path_pdsfiles = path_pdsfile\n if not os.path.isdir(self.path_pdsfiles):\n print('% s: The directory were PDS_FILES should be do\\\n not exist. Creation of the directory.' % (self.path_pdsfiles))\n try:\n os.mkdir(self.path_pdsfiles)\n except:\n raise BaseException('The creation of %s abort.\\\n Might be a permission problem if you\\\n do not provide any path and you install\\\n the library in a read - only directory. Please\\\n provide a valid path.')\n elif not os.access(self.path_pdsfiles, os.W_OK):\n raise BaseException(\"% s: The directory where the PDS file are\\\n is read only. It might be the defaut\\\n path if you install in a directory\\\n without any rights. Please change it\\\n for a path with more permission to\\\n store PDS_FILES\" % (self.path_pdsfiles))\n else:\n print('PDS FILES used are in: %s' % (self.path_pdsfiles))\n\n self.lolapath = os.path.join(self.path_pdsfiles, 'LOLA')\n self.wacpath = os.path.join(self.path_pdsfiles, 'LROC_WAC')\n if not os.path.isdir(self.lolapath):\n print('Creating a directory LOLA under %s' % (self.lolapath))\n os.mkdir(self.lolapath)\n if not os.path.isdir(self.wacpath):\n print('Creating a directory WAC_LROC under %s' % (self.wacpath))\n os.mkdir(self.wacpath)\n self._category()\n self._maybe_download()\n self._load_info_lbl()\n\n assert self.MAP_PROJECTION_TYPE in [\n '\"SIMPLE', 'EQUIRECTANGULAR'], \"Only cylindrical projection is possible - %s NOT IMPLEMENTED\" % (self.MAP_PROJECTION_TYPE)\n\n def _category(self):\n \"\"\" Type of the image: LOLA or WAC\n\n Note: Specify the attribute ``grid``, ``img`` and ``lbl`\n \"\"\"\n\n if self.fname.split('_')[0] == 'WAC':\n self.grid = 'WAC'\n self.img = os.path.join(self.wacpath, self.fname + '.IMG')\n self.lbl = ''\n elif self.fname.split('_')[0] == 'LDEM':\n self.grid = 'LOLA'\n self.img = os.path.join(self.lolapath, self.fname + '.IMG')\n self.lbl = os.path.join(self.lolapath, self.fname + '.LBL')\n else:\n raise ValueError(\"%s : This type of image is not recognized. 
Possible\\\n images are from %s only\" % (self.fname, ', '.join(('WAC', 'LOLA'))))\n\n def _report(self, blocknr, blocksize, size):\n ''' helper for downloading the file '''\n\n current = blocknr * blocksize\n sys.stdout.write(\"\\r{0:.2f}%\".format(100.0 * current / size))\n\n def _downloadfile(self, url, fname):\n ''' Download the image '''\n\n print(\"The file %s need to be download - Wait\\n \" %\n (fname.split('/')[-1]))\n urllib.urlretrieve(url, fname, self._report)\n print(\"\\n The download of the file %s has succeded \\n \" %\n (fname.split('/')[-1]))\n\n def _user_yes_no_query(self, question):\n \"\"\" Helper asking if the user want to download the file\n\n Note:\n Dowloading huge file can take a while\n\n \"\"\"\n sys.stdout.write('%s [y/n]\\n' % question)\n while True:\n try:\n return strtobool(raw_input().lower())\n except ValueError:\n sys.stdout.write('Please respond with \\'y\\' or \\'n\\'.\\n')\n\n def _detect_size(self, url):\n \"\"\" Helper that detect the size of the image to be download\"\"\"\n\n site = urllib.urlopen(url)\n meta = site.info()\n return float(meta.getheaders(\"Content-Length\")[0]) / 1e6\n\n def _maybe_download(self):\n \"\"\" Helper to downlaod the image if not in path \"\"\"\n if self.grid == 'WAC':\n urlpath = 'http://lroc.sese.asu.edu/data/LRO-L-LROC-5-RDR-V1.0/LROLRC_2001/DATA/BDR/WAC_GLOBAL/'\n r = requests.get(urlpath) # List file in the cloud\n images = [elt.split('\"')[7].split('.')[0]\n for elt in r.iter_lines() if len(elt.split('\"')) > 7]\n if self.fname not in images:\n raise ValueError(\"%s : Image does not exist\\n.\\\n Possible images are:\\n %s\" % (self.fname, '\\n, '.join(images[2:])))\n elif not os.path.isfile(self.img):\n urlname = os.path.join(urlpath, self.img.split('/')[-1])\n print(\"The size is ?: %.1f Mo \\n\\n\" %\n (self._detect_size(urlname)))\n download = self._user_yes_no_query(\n 'Do you really want to download %s ?\\n\\n' % (self.fname))\n if download:\n self._downloadfile(urlname, self.img)\n else:\n raise ValueError(\"You need to download the file somehow\")\n\n elif self.grid == 'LOLA':\n urlpath = 'http://imbrium.mit.edu/DATA/LOLA_GDR/CYLINDRICAL/IMG/'\n r = requests.get(urlpath) # List file in this server\n images = [elt.split('\"')[7].split('.')[0]\n for elt in r.iter_lines() if len(elt.split('\"')) > 7]\n if self.fname not in images:\n raise ValueError(\"%s : Image does not exist\\n.\\\n Possible images are:\\n %s\" % (self.fname, '\\n, '.join(images[2:])))\n\n elif (not os.path.isfile(self.img)) and (self.fname in images):\n urlname = os.path.join(urlpath, self.img.split('/')[-1])\n print(\"The size is ?: %.1f Mo \\n\\n\" %\n (self._detect_size(urlname)))\n download = self._user_yes_no_query(\n 'Do you really want to download %s ?\\n\\n' % (self.fname))\n if download:\n self._downloadfile(urlname, self.img)\n else:\n raise ValueError(\"You need to download the file somehow\")\n\n urlname = os.path.join(urlpath, self.lbl.split('/')[-1])\n self._downloadfile(urlname, self.lbl)\n\n def _load_info_lbl(self):\n \"\"\" Load info on the image\n\n Note:\n If the image is from LOLA, the .LBL is parsed and the\n information is returned.\n If the image is from WAC, the .IMG file is parsed using\n the library `pvl`_ which provide nice method to extract\n the information in the header of the image.\n\n .. 
_pvl: http://pvl.readthedocs.org/en/latest/\n\n \"\"\"\n if self.grid == 'WAC':\n label = load_label(self.img)\n for key, val in label.iteritems():\n if type(val) == pvl._collections.PVLObject:\n for key, value in val.iteritems():\n try:\n setattr(self, key, value.value)\n except:\n setattr(self, key, value)\n else:\n setattr(self, key, val)\n self.start_byte = self.RECORD_BYTES\n self.bytesize = 4\n self.projection = str(label['IMAGE_MAP_PROJECTION'][\n 'MAP_PROJECTION_TYPE'])\n self.dtype = np.float32\n else:\n with open(self.lbl, 'r') as f:\n for line in f:\n attr = [f.strip() for f in line.split('=')]\n if len(attr) == 2:\n setattr(self, attr[0], attr[1].split(' ')[0])\n self.start_byte = 0\n self.bytesize = 2\n self.projection = ''\n self.dtype = np.int16\n\n def lat_id(self, line):\n ''' Return the corresponding latitude\n\n Args:\n line (int): Line number\n\n Returns:\n Correponding latitude in degree\n '''\n if self.grid == 'WAC':\n lat = ((1 + self.LINE_PROJECTION_OFFSET - line) *\n self.MAP_SCALE * 1e-3 / self.A_AXIS_RADIUS)\n return lat * 180 / np.pi\n else:\n lat = float(self.CENTER_LATITUDE) - \\\n (line - float(self.LINE_PROJECTION_OFFSET) - 1)\\\n / float(self.MAP_RESOLUTION)\n return lat\n\n def long_id(self, sample):\n ''' Return the corresponding longitude\n\n Args:\n sample (int): sample number on a line\n\n Returns:\n Correponding longidude in degree\n '''\n if self.grid == 'WAC':\n lon = self.CENTER_LONGITUDE + (sample - self.SAMPLE_PROJECTION_OFFSET - 1)\\\n * self.MAP_SCALE * 1e-3 / (self.A_AXIS_RADIUS * np.cos(self.CENTER_LATITUDE * np.pi / 180.0))\n return lon * 180 / np.pi\n else:\n lon = float(self.CENTER_LONGITUDE) + \\\n (sample - float(self.SAMPLE_PROJECTION_OFFSET) - 1)\\\n / float(self.MAP_RESOLUTION)\n return lon\n\n def _control_sample(self, sample):\n ''' Control the asked sample is ok '''\n if sample > float(self.SAMPLE_LAST_PIXEL):\n return int(self.SAMPLE_LAST_PIXEL)\n elif sample < float(self.SAMPLE_FIRST_PIXEL):\n return int(self.SAMPLE_FIRST_PIXEL)\n else:\n return sample\n\n def sample_id(self, lon):\n ''' Return the corresponding sample\n\n Args:\n lon (int): longidute in degree\n\n Returns:\n Correponding sample\n\n '''\n if self.grid == 'WAC':\n sample = np.rint(float(self.SAMPLE_PROJECTION_OFFSET) + 1.0 +\n (lon * np.pi / 180.0 - float(self.CENTER_LONGITUDE)) *\n self.A_AXIS_RADIUS *\n np.cos(self.CENTER_LATITUDE * np.pi / 180.0)\n / (self.MAP_SCALE * 1e-3))\n else:\n sample = np.rint(float(self.SAMPLE_PROJECTION_OFFSET) + float(self.MAP_RESOLUTION)\n * (lon - float(self.CENTER_LONGITUDE))) + 1\n return self._control_sample(sample)\n\n def _control_line(self, line):\n ''' Control the asked line is ok '''\n if line > float(self.LINE_LAST_PIXEL):\n return int(self.LINE_LAST_PIXEL)\n elif line < float(self.LINE_FIRST_PIXEL):\n return int(self.LINE_FIRST_PIXEL)\n else:\n return line\n\n def line_id(self, lat):\n ''' Return the corresponding line\n\n Args:\n lat (int): latitude in degree\n\n Returns:\n Correponding line\n\n '''\n if self.grid == 'WAC':\n line = np.rint(1.0 + self.LINE_PROJECTION_OFFSET -\n self.A_AXIS_RADIUS * np.pi * lat / (self.MAP_SCALE * 1e-3 * 180))\n else:\n line = np.rint(float(self.LINE_PROJECTION_OFFSET) - float(self.MAP_RESOLUTION)\n * (lat - float(self.CENTER_LATITUDE))) + 1\n return self._control_line(line)\n\n def array(self, size_chunk, start, bytesize):\n ''' Read part of the binary file\n\n Args:\n size_chunk (int) : Size of the chunk to read\n start (int): Starting byte\n bytesize (int): Ending byte\n\n Returns:\n 
(np.array): array of the corresponding values\n '''\n\n with open(self.img, 'rb') as f1:\n f1.seek(self.start_byte + start * self.bytesize)\n data = f1.read(size_chunk * self.bytesize)\n Z = np.fromstring(data, dtype=self.dtype, count=size_chunk)\n if self.grid == 'LOLA':\n return Z * float(self.SCALING_FACTOR)\n else:\n return Z\n\n def extract_all(self):\n ''' Extract all the image\n\n Returns:\n A tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the\n longitudes, ``Y`` contains the latitude and ``Z`` the values\n extracted from the image.\n\n Note:\n All return arrays have the same size.\n\n All coordinate are in degree.\n\n '''\n\n longmin, longmax, latmin, latmax = self.Boundary()\n sample_min, sample_max = map(\n int, (self.SAMPLE_FIRST_PIXEL, self.SAMPLE_LAST_PIXEL))\n line_min, line_max = map(\n int, (self.LINE_FIRST_PIXEL, self.LINE_LAST_PIXEL))\n\n X = np.array(map(self.long_id, (range(sample_min, sample_max + 1, 1))))\n Y = np.array(map(self.lat_id, (range(line_min, line_max + 1, 1))))\n for i, line in enumerate(range(int(line_min), int(line_max) + 1)):\n start = (line - 1) * int(self.SAMPLE_LAST_PIXEL) + sample_min\n chunk_size = int(sample_max - sample_min)\n Za = self.array(chunk_size, start, self.bytesize)\n if i == 0:\n Z = Za\n else:\n Z = np.vstack((Z, Za))\n\n X, Y = np.meshgrid(X, Y)\n\n return X, Y, Z\n\n def extract_grid(self, longmin, longmax, latmin, latmax):\n ''' Extract part of the image ``img``\n\n Args:\n longmin (float): Minimum longitude of the window\n longmax (float): Maximum longitude of the window\n latmin (float): Minimum latitude of the window\n latmax (float): Maximum latitude of the window\n\n Returns:\n A tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the\n longitudes, ``Y`` contains the latitude and ``Z`` the values\n extracted from the window.\n\n Note:\n All return arrays have the same size.\n\n All coordinate are in degree.\n\n '''\n\n sample_min, sample_max = map(\n int, map(self.sample_id, [longmin, longmax]))\n line_min, line_max = map(int, map(self.line_id, [latmax, latmin]))\n X = np.array(map(self.long_id, (range(sample_min, sample_max, 1))))\n Y = np.array(map(self.lat_id, (range(line_min, line_max + 1, 1))))\n\n for i, line in enumerate(range(int(line_min), int(line_max) + 1)):\n start = (line - 1) * int(self.SAMPLE_LAST_PIXEL) + sample_min\n chunk_size = int(sample_max - sample_min)\n Za = self.array(chunk_size, start, self.bytesize)\n if i == 0:\n Z = Za\n else:\n Z = np.vstack((Z, Za))\n\n X, Y = np.meshgrid(X, Y)\n\n return X, Y, Z\n\n def boundary(self):\n \"\"\" Get the image boundary\n\n Returns:\n A tupple composed by the westernmost_longitude,\n the westernmost_longitude, the minimum_latitude and\n the maximum_latitude.\n\n \"\"\"\n\n return (int(self.WESTERNMOST_LONGITUDE),\n int(self.EASTERNMOST_LONGITUDE),\n int(self.MINIMUM_LATITUDE),\n int(self.MAXIMUM_LATITUDE))\n\n def _kp_func(self, lat, lon, lat0, long0):\n\n kp = float(1.0) + np.sin(lat0) * np.sin(lat) + \\\n np.cos(lat0) * np.cos(lat) * np.cos(lon - long0)\n kp = np.sqrt(float(2) / kp)\n return kp\n\n def lambert_window(self, radius, lat0, long0):\n ''' Square Lambert Azimuthal equal area projection of\n a window centered at (lat0, long0) with a given radius (km).\n\n Args:\n radius(float): Radius of the window (km).\n lat0(float): Latitude at the center (degree).\n long0(float): Longitude at the center (degree).\n\n Returns:\n A tuple ``(longll, longtr, latll, lattr)` with ``longll``\n the longitude of the lower left corner, ``longtr`` the\n longitude of 
the top right corner, ``latll`` the latitude\n of the lower left corner and ``lattr`` the latitude of the\n top right corner.\n\n Note:\n All return coordinates are in degree\n\n '''\n\n radius = radius * 360.0 / (np.pi * 2 * 1734.4)\n radius = radius * np.pi / 180.0\n lat0 = lat0 * np.pi / 180.0\n long0 = long0 * np.pi / 180.0\n\n bot = self._kp_func(lat0 - radius, long0, lat0, long0)\n bot = bot * (np.cos(lat0) * np.sin(lat0 - radius) -\n np.sin(lat0) * np.cos(lat0 - radius))\n x = bot\n y = bot\n rho = np.sqrt(x**2 + y**2)\n c = 2.0 * np.arcsin(rho / float(2.0))\n latll = np.arcsin(np.cos(c) * np.sin(lat0) + y * np.sin(c)\n * np.cos(lat0) / rho) * float(180.0) / np.pi\n lon = long0 + np.arctan2(x * np.sin(c), rho * np.cos(lat0)\n * np.cos(c) - y * np.sin(lat0) * np.sin(c))\n longll = lon * 180.0 / np.pi\n\n x = -bot\n y = -bot\n rho = np.sqrt(x**2 + y**2)\n c = 2.0 * np.arcsin(rho / 2.0)\n lattr = np.arcsin(np.cos(c) * np.sin(lat0) + y * np.sin(c)\n * np.cos(lat0) / rho) * float(180.0) / np.pi\n lon = long0 + np.arctan2(x * np.sin(c), rho * np.cos(lat0)\n * np.cos(c) - y * np.sin(lat0) * np.sin(c))\n longtr = lon * 180.0 / np.pi\n\n return longll, longtr, latll, lattr\n\n def cylindrical_window(self, radius, lat0, long0):\n ''' Cylindrical projection of a window centered\n at (lat0, long0) with a given radius (km).\n\n Args:\n radius(float): Radius of the window (km).\n lat0(float): Latitude at the center (degree).\n long0(float): Longitude at the center (degree).\n\n Returns:\n A tuple ``(longll, longtr, latll, lattr)`` with ``longll``\n the longitude of the lower left corner, ``longtr`` the\n longitude of the top right corner, ``latll`` the latitude\n of the lower left corner and ``lattr`` the latitude of the\n top right corner.\n\n Note:\n All return coordinates are in degree \n '''\n\n # Passage en radian\n radi = radius * 2 * np.pi / (2 * 1734.4 * np.pi)\n lamb0 = long0 * np.pi / 180.0\n phi0 = lat0 * np.pi / 180.0\n\n # Long/lat min (voir wikipedia)\n longll = -radi / np.cos(phi0) + lamb0\n latll = np.arcsin((-radi + np.sin(phi0) / np.cos(phi0)) * np.cos(phi0))\n if np.isnan(latll):\n latll = -90 * np.pi / 180.0\n # Long/lat max (voir wikipedia)\n longtr = radi / np.cos(phi0) + lamb0\n lattr = np.arcsin((radi + np.tan(phi0)) * np.cos(phi0))\n\n return longll * 180 / np.pi, longtr * 180 / np.pi, latll * 180 / np.pi, lattr * 180 / np.pi\n\n\nclass WacMap(object):\n '''Class to handle the creation of LROC WAC GLOBAL images\n\n This class is specifically designed to handle the creation of image\n from LROC WAC images. It is able to identify the image (or the groupe\n of images) necessary to extract an array over a given window.\n Four cases are possible and taken care of:\n\n 1. The desired structure is entirely contained into one image.\n 2. The span in latitude of the image is ok but not longitudes(2 images).\n 3. The span in longitude of the image is ok but not latitudes (2 images).\n 4. 
Both latitude and longitude are not contained in one image(4 images).\n\n Args:\n ppd (int): Required resolution\n lonm (float): Lower left window longitude (degree)\n lonM (float): Upper right window longitude (degree)\n latm (float): Lower left window latitude (degree)\n latM (float): Upper right window latitude (degree)\n path_pdsfiles (Optional[str]): Path where the pds files are stored.\n Defaults, the path is set to the folder ``PDS_FILES`` next to\n the module files where the library is install.\n\n See ``defaut_pdsfile`` variable of the class\n\n\n Attributes:\n ppd (int): Required resolution\n lonm (float): Lower left window longitude (degree)\n lonM (float): Upper right window longitude (degree)\n latm (float): Lower left window latitude (degree)\n latM (float): Upper right window latitude (degree)\n path_pdsfiles (str): Path where the pds_files are stored.\n\n Note:\n It is important to respect the structure of the PDS_FILES folder. WAC\n images should be contained within a subfolder called ``LROC_WAC``.\n\n Possible resolution are stored in the class variable ``implemented_res``.\n Longitude in the code spans 0 to 360.\n\n The abreaviations correspond to:\n\n - **LROC** Lunar Reconnaissance Orbiter Camera\n - **WAC** Wide Angle Camera\n\n Example:\n This class allows to simply gather three arrays X, Y, Z given a specific\n window. For instance, if we want to gather the data for a window which\n spans 10 to 20 degree in longitude and the same in latitude, simply ask.\n\n >>> X, Y, Z = WacMap(512,10,20,10,20).image()\n\n '''\n\n implemented_res = [4, 8, 16, 32, 64, 128, 256]\n defaut_pdsfile = os.path.join(\n '/'.join(os.path.abspath(__file__).split('/')[:-1]), 'PDS_FILES')\n\n def __init__(self, ppd, lonm, lonM, latm, latM, path_pdsfile=defaut_pdsfile):\n self.path_pdsfiles = path_pdsfile\n self.ppd = ppd\n self.lonm = lonm\n self.lonM = lonM\n self.latm = latm\n self.latM = latM\n self._control_longitude()\n self._confirm_resolution(WacMap.implemented_res)\n\n def _control_longitude(self):\n ''' Control on longitude values '''\n\n if self.lonm < 0.0:\n self.lonm = 360.0 + self.lonm\n if self.lonM < 0.0:\n self.lonM = 360.0 + self.lonM\n if self.lonm > 360.0:\n self.lonm = self.lonm - 360.0\n if self.lonM > 360.0:\n self.lonM = self.lonM - 360.0\n\n def _confirm_resolution(self, implemented_res):\n ''' Control on resolution '''\n\n assert self.ppd in implemented_res, \\\n ' Resolution %d ppd not implemented yet\\n.\\\n Consider using one of the implemented resolutions %s'\\\n % (self.ppd, ', '.join([f + ' ppd' for f in map(str, implemented_res)]))\n\n if self.ppd == 256:\n assert (np.abs(self.latM) < 60) and (np.abs(self.latm) < 60),\\\n 'This resolution is available in\\n \\\n in cylindrical geometry only for -60<latitude<60 '\n\n def _map_center(self, coord, val):\n ''' Identitify the center of the Image correspond to one coordinate. 
'''\n\n if self.ppd in [4, 8, 16, 32, 64]:\n res = {'lat': 0, 'long': 360}\n return res[coord] / 2.0\n elif self.ppd in [128]:\n res = {'lat': 90, 'long': 90}\n return (val // res[coord] + 1) * res[coord] - res[coord] / 2.0\n elif self.ppd in [256]:\n res = {'lat': 60, 'long': 90}\n return (val // res[coord] + 1) * res[coord] - res[coord] / 2.0\n\n def _define_case(self):\n ''' Identify case '''\n\n lonBool = self._map_center(\n 'long', self.lonM) != self._map_center('long', self.lonm)\n latBool = self._map_center(\n 'lat', self.latM) != self._map_center('lat', self.latm)\n\n if not lonBool and not latBool:\n print('No overlap - Processing should be quick')\n return self._cas_1()\n elif lonBool and not latBool:\n print('Longitude overlap - 2 images have to be proceded \\n \\\n Processing could take a few seconds')\n return self._cas_2()\n elif not lonBool and latBool:\n print('Latitude overlap - 2 images have to be proceded \\n\\\n Processing could take a few seconds')\n return self._cas_3()\n else:\n print('Latitude/Longidude overlaps - 4 images have to be proceded \\n\\\n Processing could take a few seconds')\n return self._cas_4()\n\n def _format_lon(self, lon):\n ''' Format longitude to fit the image name '''\n\n lonf = self._map_center('long', lon)\n st = str(lonf).split('.')\n loncenter = ''.join((\"{0:0>3}\".format(st[0]), st[1]))\n return loncenter\n\n def _format_lat(self, lat):\n ''' Format latitude to fit the image name '''\n\n if self.ppd in [4, 8, 16, 32, 64]:\n latcenter = '000N'\n elif self.ppd in [128]:\n if lat < 0:\n latcenter = '450S'\n else:\n latcenter = '450N'\n\n return latcenter\n\n def _format_name_map(self, lonc, latc):\n ''' Return the name of the map in the good format '''\n\n return '_'.join(['WAC', 'GLOBAL'] +\n ['E' + latc + lonc, \"{0:0>3}\".format(self.ppd) + 'P'])\n\n def _cas_1(self):\n '''1 - The desired structure is entirely contained into one image.'''\n\n lonc = self._format_lon(self.lonm)\n latc = self._format_lat(self.latm)\n img = self._format_name_map(lonc, latc)\n img_map = BinaryTable(img, self.path_pdsfiles)\n\n return img_map.extract_grid(self.lonm, self.lonM, self.latm, self.latM)\n\n def _cas_2(self):\n ''' Longitude overlap (2 images). '''\n\n lonc_left = self._format_lon(self.lonm)\n lonc_right = self._format_lon(self.lonM)\n latc = self._format_lat(self.latm)\n\n print(lonc_left, lonc_right, self.lonm, self.lonM)\n img_name_left = self._format_name_map(lonc_left, latc)\n print(img_name_left)\n img_left = BinaryTable(img_name_left, self.path_pdsfiles)\n X_left, Y_left, Z_left = img_left.extract_grid(self.lonm,\n float(\n img_left.EASTERNMOST_LONGITUDE),\n self.latm,\n self.latM)\n\n img_name_right = self._format_name_map(lonc_right, latc)\n img_right = BinaryTable(img_name_right, self.path_pdsfiles)\n X_right, Y_right, Z_right = img_right.extract_grid(float(img_right.WESTERNMOST_LONGITUDE),\n self.lonM,\n self.latm,\n self.latM)\n\n X_new = np.hstack((X_left, X_right))\n Y_new = np.hstack((Y_left, Y_right))\n Z_new = np.hstack((Z_left, Z_right))\n\n return X_new, Y_new, Z_new\n\n def _cas_3(self):\n ''' Latitude overlap (2 images). 
'''\n\n lonc = self._format_lon(self.lonm)\n latc_top = self._format_lat(self.latM)\n latc_bot = self._format_lat(self.latm)\n\n img_name_top = self._format_name_map(lonc, latc_top)\n print(img_name_top)\n img_top = BinaryTable(img_name_top, self.path_pdsfiles)\n print(self.lonm, self.lonM, float(img_top.MINIMUM_LATITUDE), self.latM)\n X_top, Y_top, Z_top = img_top.extract_grid(self.lonm,\n self.lonM,\n float(\n img_top.MINIMUM_LATITUDE),\n self.latM)\n\n img_name_bottom = self._format_name_map(lonc, latc_bot)\n print(img_name_bottom)\n img_bottom = BinaryTable(img_name_bottom, self.path_pdsfiles)\n X_bottom, Y_bottom, Z_bottom = img_bottom.extract_grid(self.lonm,\n self.lonM,\n self.latm,\n float(img_bottom.MAXIMUM_LATITUDE))\n\n X_new = np.vstack((X_top, X_bottom))\n Y_new = np.vstack((Y_top, Y_bottom))\n Z_new = np.vstack((Z_top, Z_bottom))\n\n return X_new, Y_new, Z_new\n\n def _cas_4(self):\n ''' Longitude/Lagitude overlap (4 images) '''\n\n lonc_left = self._format_lon(self.lonm)\n lonc_right = self._format_lon(self.lonM)\n latc_top = self._format_lat(self.latM)\n latc_bot = self._format_lat(self.latm)\n\n img_name_00 = self._format_name_map(lonc_left, latc_top)\n img_00 = BinaryTable(img_name_00, self.path_pdsfiles)\n X_00, Y_00, Z_00 = img_00.extract_grid(self.lonm,\n float(\n img_00.EASTERNMOST_LONGITUDE),\n float(img_00.MINIMUM_LATITUDE),\n self.latM)\n\n img_name_01 = self._format_name_map(lonc_right, latc_top)\n img_01 = BinaryTable(img_name_01, self.path_pdsfiles)\n X_01, Y_01, Z_01 = img_01.extract_grid(float(img_01.WESTERNMOST_LONGITUDE),\n self.lonM,\n float(img_01.MINIMUM_LATITUDE),\n self.latM)\n\n img_name_10 = self._format_name_map(lonc_left, latc_bot)\n img_10 = BinaryTable(img_name_10, self.path_pdsfiles)\n X_10, Y_10, Z_10 = img_10.extract_grid(self.lonm,\n float(\n img_10.EASTERNMOST_LONGITUDE),\n self.latm,\n float(img_10.MAXIMUM_LATITUDE))\n\n img_name_11 = self._format_name_map(lonc_right, latc_bot)\n img_11 = BinaryTable(img_name_11, self.path_pdsfiles)\n X_11, Y_11, Z_11 = img_11.extract_grid(float(img_11.WESTERNMOST_LONGITUDE),\n self.lonM,\n self.latm,\n float(img_11.MAXIMUM_LATITUDE))\n\n X_new_top = np.hstack((X_00, X_01))\n X_new_bot = np.hstack((X_10, X_11))\n X_new = np.vstack((X_new_top, X_new_bot))\n\n Y_new_top = np.hstack((Y_00, Y_01))\n Y_new_bot = np.hstack((Y_10, Y_11))\n Y_new = np.vstack((Y_new_top, Y_new_bot))\n\n Z_new_top = np.hstack((Z_00, Z_01))\n Z_new_bot = np.hstack((Z_10, Z_11))\n Z_new = np.vstack((Z_new_top, Z_new_bot))\n\n return X_new, Y_new, Z_new\n\n def image(self):\n ''' Return the values over the required window\n\n Returns:\n A tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the\n longitudes, ``Y`` contains the latitude and ``Z`` the values\n extracted over the window.\n\n Note:\n All return arrays have the same size.\n\n All coordinates are in degree.\n\n '''\n return self._define_case()\n\n\nclass LolaMap(WacMap):\n '''Class to handle the creation of LRO LOLA images\n\n This class is specifically designed to handle the creation of image\n from LOLA images. It is able to identify the image (or the groupe\n of images) necessary to extract an array over a given window.\n Four cases are possible and taken care of:\n\n 1. The desired structure is entirely contained into one image.\n 2. The span in latitude of the image is ok but not longitudes(2 images).\n 3. The span in longitude of the image is ok but not latitudes (2 images).\n 4. 
Both latitude and longitude are not contained in one image(4 images).\n\n Args:\n ppd (int): Required resolution\n lonm (float): Lower left window longitude (degree)\n lonM (float): Upper right window longitude (degree)\n latm (float): Lower left window latitude (degree)\n latM (float): Upper right window latitude (degree)\n path_pdsfiles (Optional[str]): Path where the pds files are stored.\n Defaults, the path is set to the folder ``PDS_FILES`` next to\n the module files where the library is install.\n\n See ``defaut_pdsfile`` variable of the class\n\n\n Attributes:\n ppd (int): Required resolution\n lonm (float): Lower left window longitude (degree)\n lonM (float): Upper right window longitude (degree)\n latm (float): Lower left window latitude (degree)\n latM (float): Upper right window latitude (degree)\n path_pdsfiles (str): Path where the pds_files are stored.\n\n Note:\n It is important to respect the structure of the PDS_FILES folder. WAC\n images should be contained within a subfolder called ``LROC_WAC``.\n\n Possible resolution are stored in the class variable ``implemented_res``.\n Longitude in the code spans 0 to 360.\n\n The abreviations correspond to:\n\n - **LRO** Lunar Reconnaissance Orbiter\n - **LOLA** Lunar Orbiter Laser Altimeter\n\n Example:\n This class allows to simply gather three arrays X, Y, Z given a specific\n window. For instance, if we want to gather the data for a window which\n span 10 to 20 degree in longitude and the same in latitude, simply ask.\n\n >>> X, Y, Z = LolaMap(512,10,20,10,20).image()\n\n '''\n\n implemented_res = [4, 16, 64, 128, 256, 512, 1024]\n\n def __init__(self, ppd, lonm, lonM, latm, latM, path_pdsfile=WacMap.defaut_pdsfile):\n if path_pdsfile == 'base':\n self.path_pdsfiles = LolaMap.defaut_pdsfile\n else:\n self.path_pdsfiles = path_pdsfile\n self.ppd = ppd\n self.lonm = lonm\n self.lonM = lonM\n self.latm = latm\n self.latM = latM\n self._control_longitude()\n self._confirm_resolution(LolaMap.implemented_res)\n\n def _map_center(self, coord, val):\n ''' Identitify the center of the Image correspond to one coordinate. '''\n\n if self.ppd in [4, 16, 64, 128]:\n res = {'lat': 0, 'long': 360}\n return res[coord] / 2.0\n elif self.ppd in [256]:\n res = {'lat': 90, 'long': 180}\n c = (val // res[coord] + 1) * res[coord]\n return c - res[coord], c\n elif self.ppd in [512]:\n res = {'lat': 45, 'long': 90}\n c = (val // res[coord] + 1) * res[coord]\n return c - res[coord], c\n elif self.ppd in [1024]:\n res = {'lat': 15, 'long': 30}\n c = (val // res[coord] + 1) * res[coord]\n return c - res[coord], c\n\n def _format_lon(self, lon):\n ''' Returned a formated longitude format for the file '''\n if self.ppd in [4, 16, 64, 128]:\n return None\n else:\n return map(lambda x: \"{0:0>3}\".format(int(x)), self._map_center('long', lon))\n\n def _format_lat(self, lat):\n ''' Returned a formated latitude format for the file '''\n if self.ppd in [4, 16, 64, 128]:\n return None\n else:\n if lat < 0:\n return map(lambda x: \"{0:0>2}\"\n .format(int(np.abs(x))) + 'S', self._map_center('lat', lat))\n else:\n return map(lambda x: \"{0:0>2}\"\n .format(int(x)) + 'N', self._map_center('lat', lat))\n\n def _format_name_map(self, lon, lat):\n ''' Return the name of the map in the good format '''\n\n if self.ppd in [4, 16, 64, 128]:\n lolaname = '_'.join(['LDEM', str(self.ppd)])\n elif self.ppd in [512]:\n lolaname = '_'.join(\n ['LDEM', str(self.ppd), lat[0], lat[1], lon[0], lon[1]])\n return lolaname\n"
]
| [
[
"numpy.sin",
"numpy.isnan",
"numpy.arcsin",
"numpy.rint",
"numpy.tan",
"numpy.sqrt",
"numpy.cos",
"numpy.abs",
"numpy.hstack",
"numpy.fromstring",
"numpy.meshgrid",
"numpy.vstack"
]
]
|
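The LOLA branch of `lat_id`/`line_id` in the row above is a plain affine map between latitude and line number. A small round-trip sketch with made-up header values (a hypothetical 4 pixel-per-degree grid, not values from any real .LBL file):

```python
import numpy as np

CENTER_LATITUDE = 0.0          # hypothetical header values for the sketch
LINE_PROJECTION_OFFSET = 359.5
MAP_RESOLUTION = 4.0           # pixels per degree

def line_id(lat):
    # latitude -> nearest line number, as in the LOLA branch above
    return np.rint(LINE_PROJECTION_OFFSET - MAP_RESOLUTION * (lat - CENTER_LATITUDE)) + 1

def lat_id(line):
    # line number -> latitude, the inverse affine map
    return CENTER_LATITUDE - (line - LINE_PROJECTION_OFFSET - 1) / MAP_RESOLUTION

line = line_id(45.0)
print(line, lat_id(line))  # recovers 45 degrees up to the pixel discretization
```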
alm818/epipy | [
"4d861c27f70f9fc62c561c56950017046de7e164"
]
| [
"epipy/sparse/csr.py"
]
| [
"import numpy as np\nfrom scipy.sparse import csr_matrix\nfrom numba import jit, prange\n\n# Deprecated\n# @jit(nopython=True, parallel=True)\n# def coo_tocsr(M, N, data_, row_ind, col_ind):\n# \"\"\"\n# Numba parallel version of https://github.com/scipy/scipy/blob/3b36a57/scipy/sparse/sparsetools/coo.h#L34\n# and https://github.com/scipy/scipy/blob/3b36a574dc657d1ca116f6e230be694f3de31afc/scipy/sparse/sparsetools/csr.h#L319\n# \"\"\"\n# # coo_tocsr\n# nnz = len(data_)\n# data = np.zeros(nnz)\n# indices = np.zeros(nnz, dtype=np.int32)\n# indptr = np.zeros(M+1, dtype=np.int32)\n#\n# for i in prange(nnz):\n# indptr[row_ind[i]] += 1\n#\n# cumsum = 0\n# for i in range(M):\n# temp = indptr[i]\n# indptr[i] = cumsum\n# cumsum += temp\n# indptr[M] = nnz\n#\n# for i in prange(nnz):\n# row = int(row_ind[i])\n# dest = indptr[row]\n#\n# indices[dest] = col_ind[i]\n# data[dest] = data_[i]\n#\n# indptr[row] += 1\n#\n# last = 0\n# for i in range(M+1):\n# temp = indptr[i]\n# indptr[i] = last\n# last = temp\n#\n# # csr_sort_indices\n# for i in prange(M):\n# row_start = indptr[i]\n# row_end = indptr[i+1]\n#\n# temp2 = np.zeros((row_end - row_start, 2))\n# temp2[:,0] = indices[row_start:row_end]\n# temp2[:,1] = data[row_start:row_end]\n#\n# sorted_ind = temp2[:,0].argsort()\n# temp2 = temp2[sorted_ind]\n#\n# indices[row_start:row_end] = temp2[:,0]\n# data[row_start:row_end] = temp2[:,1]\n#\n# return data, indices, indptr\n\n@jit(nopython=True, parallel=True)\ndef coo_sparse_copy(data, row, col):\n nnz = len(data)\n data_, row_, col_ = np.zeros((3, nnz))\n data_ = np.zeros(nnz)\n row_ = np.zeros(nnz)\n col_ = np.zeros(nnz)\n for i in prange(nnz):\n data_[i] = data[i]\n row_[i] = row[i]\n col_[i] = col[i]\n return data_, row_, col_\n\n@jit(nopython=True, parallel=True)\ndef csr_sparse_copy(data, indices, indptr):\n M = len(indptr)-1\n nnz = len(data)\n data_ = np.zeros(nnz)\n indices_ = np.zeros(nnz)\n indptr_ = np.zeros(M+1)\n for i in prange(nnz):\n data_[i] = data[i]\n indices_[i] = indices[i]\n for i in prange(M+1):\n indptr_[i] = indptr[i]\n return data_, indices_, indptr_\n\n@jit(nopython=True, parallel=True)\ndef sparse_vec_multiplication(data, indices, indptr, b):\n M = len(indptr)-1\n res = np.zeros(len(b))\n for row in prange(M):\n for i in prange(indptr[row], indptr[row+1]):\n res[row] += data[i]*b[indices[i]]\n return res\n\n@jit(nopython=True, parallel=True)\ndef sparse_right_scale(data, indices, indptr, b):\n data_, indices_, indptr_ = csr_sparse_copy(data, indices, indptr)\n M = len(indptr_)-1\n for row in prange(M):\n for i in prange(indptr_[row], indptr_[row+1]):\n data_[i] *= b[int(indices_[i])]\n return data_, indices_, indptr_\n\n@jit(nopython=True, parallel=True)\ndef sparse_transform(data, indices, indptr, values, row_ind, col_ind, t):\n data_, indices_, indptr_ = csr_sparse_copy(data, indices, indptr)\n for i in prange(len(values)):\n row = int(row_ind[i])\n col = int(col_ind[i])\n left = int(indptr_[row])\n right = int(indptr_[row+1]-1)\n while left <= right:\n mid = int((left+right) / 2)\n if indices_[mid] < col:\n left = mid + 1\n elif indices_[mid] > col:\n right = mid - 1\n else:\n # indices_[mid] == col\n data_[mid] += (values[i]-data_[mid])*t\n break\n return data_, indices_, indptr_\n\nclass rigid_csr_matrix:\n \"\"\"\n Rigid Compressed Sparse Row matrix:\n - a csr matrix that does not allow new nz entries after initialization\n - fast matrix-vector multiplication, matrix-matrix addition, matrix-scaling by utilizing parallelism\n\n This can be instantiated in several ways:\n\n 
rigid_csr_matrix((data, (row_ind, col_ind)), shape=(M, N))\n where ``data``, ``row_ind`` and ``col_ind`` satisfy the\n relationship ``a[row_ind[k], col_ind[k]] = data[k]``.\n\n Input: row_ind, col_ind should not be duplicated\n\n rigid_csr_matrix((data, indices, indptr), shape=(M, N))\n is the standard CSR representation where the column indices for\n row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their\n corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.\n If the shape parameter is not supplied, the matrix dimensions\n are inferred from the index arrays.\n\n Attributes\n ----------\n shape : 2-tuple\n Shape of the matrix\n nnz\n Number of stored values, including explicit zeros\n data\n CSR format data array of the matrix\n indices\n CSR format index array of the matrix\n indptr\n CSR format index pointer array of the matrix\n \"\"\"\n\n\n def __init__(self, args, shape):\n self.shape = shape\n if len(args) == 3:\n self.data, self.indices, self.indptr = args\n else:\n wrapper = csr_matrix(args, shape=shape)\n self.data, self.indices, self.indptr = wrapper.data, wrapper.indices, wrapper.indptr\n self.nnz = len(self.data)\n\n def right_scale(self, vec):\n \"\"\"sparse matrix multiply with a diagonal matrix with entries vec\n Parameters\n ----------\n vec\n ndarray of shape (n,)\n Returns\n -------\n rigid_csr_matrix\n \"\"\"\n assert self.shape[1] == vec.shape[0], \"Bad dimension\"\n data_, indices_, indptr_ = sparse_right_scale(self.data, self.indices, self.indptr, vec)\n return rigid_csr_matrix((data_, indices_, indptr_), shape=self.shape)\n\n def mul_vec(self, vec):\n \"\"\"sparse matrix multiply with a vector vec\n Parameters\n ----------\n vec\n ndarray of shape (n,)\n Returns\n -------\n ndarray of shape (n,)\n \"\"\"\n assert self.shape[1] == vec.shape[0], \"Bad dimension\"\n return sparse_vec_multiplication(self.data, self.indices, self.indptr, vec)\n\n def transform(self, values, row_ind, col_ind, t):\n \"\"\"sparse matrix transform values at row_ind[i],col_ind[i] to data[i] after one unit time for t time\n Note: the row_ind[i], col_ind[i] has to be a nnz entry of the sparse matrix\n values, row_ind, col_ind have to be ndarray\n row_ind, col_ind cannot be repeated\n Examples\n --------\n [[4, 0, 9, 0],\n [0, 7, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 5]]\n values = [6], row_ind = [0], col_ind = [0], t = 0.75\n Returns\n -------\n [[5.5, 0, 9, 0],\n [0, 7, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 5]]\n \"\"\"\n data_, indices_, indptr_ = sparse_transform(self.data, self.indices, self.indptr, values, row_ind, col_ind, t)\n return rigid_csr_matrix((data_, indices_, indptr_), shape=self.shape)\n\n def get_csr_matrix(self):\n \"\"\"return scipy csr_matrix\n \"\"\"\n return csr_matrix((self.data, self.indices, self.indptr), shape=self.shape)\n\n def get_transpose(self):\n \"\"\"return a transpose rigid_csr_matrix\n \"\"\"\n mat = self.get_csr_matrix().tocoo()\n return rigid_csr_matrix((mat.data, (mat.col, mat.row)), shape=(self.shape[1], self.shape[0]))\n"
]
| [
[
"scipy.sparse.csr_matrix",
"numpy.zeros"
]
]
|
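`sparse_vec_multiplication` in the row above is the textbook CSR matrix-vector product: for each row, accumulate `data[i] * b[indices[i]]` over that row's slice of the index arrays. The same loop in plain Python (no numba), checked against scipy's own product on a tiny matrix:

```python
import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[4., 0., 9.],
                         [0., 7., 0.],
                         [0., 0., 5.]]))
b = np.array([1., 2., 3.])

# CSR matvec: row i owns the slice indptr[i]:indptr[i+1] of data/indices
res = np.zeros(A.shape[0])
for row in range(A.shape[0]):
    for i in range(A.indptr[row], A.indptr[row + 1]):
        res[row] += A.data[i] * b[A.indices[i]]

assert np.allclose(res, A @ b)  # both give [31., 14., 15.]
```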
meganhfowler/optimal-ph | [
"faa9cfcc3b7262cb5757b2f72a17209b3baea577"
]
| [
"src/predict.py"
]
| [
"#!/usr/bin/env python3\nimport argparse\nimport pandas as pd\nfrom model import NeuralNet, BaselineModel\nimport torch\nimport config\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input_csv', default='submission/input.csv')\nargs = parser.parse_args()\n\n# Config\noutput_file_path = 'test/predictions.csv'\n\n# Load input.csv\nwith open(args.input_csv) as input_csv:\n df = pd.read_csv(input_csv)\n\ndef neural_predict():\n # Run predictions\n y_predictions = NeuralNet(model_file_path='src/model.pickle').predict(df)\n # Save predictions to file\n y_predictions = y_predictions.detach().numpy()\n df_predictions = pd.DataFrame(y_predictions)\n df_predictions.columns = ['prediction']\n df_predictions.to_csv(output_file_path, index=False)\n\n print(f'{len(y_predictions)} predictions saved to a csv file')\n\n\ndef encoded_predict():\n # old version:\n # Run predictions\n y_predictions = BaselineModel(model_file_path='src/model.pickle').predict(df)\n\n # Save predictions to file\n df_predictions = pd.DataFrame({'prediction': y_predictions})\n df_predictions.to_csv(output_file_path, index=False)\n\n print(f'{len(y_predictions)} predictions saved to a csv file')\n\n\nif config.use_neural_net:\n neural_predict()\nelse:\n encoded_predict()\n"
]
| [
[
"pandas.DataFrame",
"pandas.read_csv"
]
]
|
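Both branches of `predict.py` above end the same way: a 1-D array of predictions is wrapped in a one-column `DataFrame` and written to CSV. A minimal sketch of that save path, with dummy values standing in for real model output:

```python
import pandas as pd

y_predictions = [6.8, 7.2, 5.5]  # dummy pH predictions, not real model output
df_predictions = pd.DataFrame({'prediction': y_predictions})
df_predictions.to_csv('predictions.csv', index=False)

print(f'{len(y_predictions)} predictions saved to a csv file')
```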
binder-oilgains/py-pde | [
"d76977095f1e915c63230e6895391f063d0778d8"
]
| [
"pde/grids/boundaries/axis.py"
]
| [
"r\"\"\"\n.. codeauthor:: David Zwicker <[email protected]>\n\nThis module handles the boundaries of a single axis of a grid. There are\ngenerally only two options, depending on whether the axis of the underlying\ngrid is defined as periodic or not. If it is periodic, the class \n:class:`~pde.grids.boundaries.axis.BoundaryPeriodic` should be used, while\nnon-periodic axes have more option, which are represented by\n:class:`~pde.grids.boundaries.axis.BoundaryPair`.\n\"\"\"\n\nfrom typing import Callable, Dict, Tuple, Union\n\nimport numpy as np\nfrom numba.extending import register_jitable\n\nfrom ..base import DomainError, GridBase\nfrom .local import BCBase, BoundaryData, NeumannBC, _make_get_arr_1d\n\nBoundaryPairData = Union[\n Dict[str, BoundaryData], BoundaryData, Tuple[BoundaryData, BoundaryData]\n]\n\n\nclass BoundaryAxisBase:\n \"\"\" base class for defining boundaries of a single axis in a grid \"\"\"\n\n grid: GridBase\n \"\"\" :class:`~pde.grids.base.GridBase`:\n The grid for which the boundaries are defined \"\"\"\n axis: int\n \"\"\" int: The axis along which the boundaries are defined \"\"\"\n\n\nclass BoundaryPair(BoundaryAxisBase):\n \"\"\" represents the two boundaries of an axis along a single dimension \"\"\"\n\n periodic = False\n\n def __init__(self, low: BCBase, high: BCBase):\n \"\"\"\n Args:\n low (:class:`~pde.grids.boundaries.local.BCBase`):\n Instance describing the lower boundary\n high (:class:`~pde.grids.boundaries.local.BCBase`):\n Instance describing the upper boundary\n \"\"\"\n # check data consistency\n assert low.grid == high.grid\n assert low.axis == high.axis\n assert low.rank == high.rank\n assert high.upper and not low.upper\n\n self.low = low\n self.high = high\n self.grid = low.grid\n self.axis = low.axis\n\n def __iter__(self):\n yield self.low\n yield self.high\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.low!r}, {self.high!r})\"\n\n def __str__(self):\n if self.low == self.high:\n return str(self.low)\n else:\n return f\"({self.low}, {self.high})\"\n\n def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return NotImplemented\n return (\n self.__class__ == other.__class__\n and self.low == other.low\n and self.high == other.high\n )\n\n def __ne__(self, other):\n if not isinstance(other, self.__class__):\n return NotImplemented\n return (\n self.__class__ != other.__class__\n or self.low != other.low\n or self.high != other.high\n )\n\n def _cache_hash(self) -> int:\n \"\"\" returns a value to determine when a cache needs to be updated \"\"\"\n return hash((self.low._cache_hash(), self.high._cache_hash()))\n\n def copy(self) -> \"BoundaryPair\":\n \"\"\" return a copy of itself, but with a reference to the same grid \"\"\"\n return self.__class__(self.low.copy(), self.high.copy())\n\n def __getitem__(self, index: Union[int, bool]) -> BCBase:\n \"\"\" returns one of the sides \"\"\"\n if index == 0 or index is False:\n return self.low\n elif index == 1 or index is True:\n return self.high\n else:\n raise IndexError(\"Index can be either 0/False or 1/True\")\n\n def set_value(self, value=0):\n \"\"\"set the value of both boundary conditions\n\n Args:\n value (float or array):\n Sets the value stored with the boundary conditions. 
The\n interpretation of this value depends on the type of boundary\n condition.\n \"\"\"\n self.low.value = value\n self.high.value = value\n\n def scale_value(self, factor: float = 1):\n \"\"\"scales the value of the boundary condition with the given factor\n\n Args:\n value (float):\n Scales the value associated with the boundary condition by the factor\n \"\"\"\n self.low.value = factor * self.low.value # type: ignore\n self.high.value = factor * self.high.value # type: ignore\n\n @classmethod\n def get_help(cls) -> str:\n \"\"\" Return information on how boundary conditions can be set \"\"\"\n return (\n \"Boundary conditions for each side can be set using a tuple: \"\n f\"(lower_bc, upper_bc). {BCBase.get_help()}\"\n )\n\n @classmethod\n def from_data(\n cls, grid: GridBase, axis: int, data, rank: int = 0\n ) -> \"BoundaryPair\":\n \"\"\"create boundary pair from some data\n\n Args:\n grid (:class:`~pde.grids.GridBase`):\n The grid for which the boundary conditions are defined\n axis (int):\n The axis to which this boundary condition is associated\n data (str or dict):\n Data that describes the boundary pair\n rank (int):\n The tensorial rank of the value associated with the boundary\n conditions.\n\n Returns:\n :class:`~pde.grids.boundaries.axis.BoundaryPair`:\n the instance created from the data\n\n Throws:\n ValueError if `data` cannot be interpreted as a boundary pair\n \"\"\"\n # handle the simple cases\n if isinstance(data, dict):\n if \"low\" in data or \"high\" in data:\n # separate conditions for low and high\n data_copy = data.copy()\n low = BCBase.from_data(\n grid, axis, upper=False, data=data_copy.pop(\"low\"), rank=rank\n )\n high = BCBase.from_data(\n grid, axis, upper=True, data=data_copy.pop(\"high\"), rank=rank\n )\n if data_copy:\n raise ValueError(f\"Data items {data_copy.keys()} were not used.\")\n else:\n # one condition for both sides\n low = BCBase.from_data(grid, axis, upper=False, data=data, rank=rank)\n high = BCBase.from_data(grid, axis, upper=True, data=data, rank=rank)\n\n elif isinstance(data, (str, BCBase)):\n # a type for both boundaries\n low = BCBase.from_data(grid, axis, upper=False, data=data, rank=rank)\n high = BCBase.from_data(grid, axis, upper=True, data=data, rank=rank)\n\n else:\n # the only remaining valid format is a list of conditions for the\n # lower and upper boundary\n try:\n # try obtaining the length\n data_len = len(data)\n except TypeError:\n # if len is not supported, the format must be wrong\n raise ValueError(\n f\"Unsupported boundary format: `{data}`. \" + cls.get_help()\n )\n else:\n if data_len == 2:\n # assume that data is given for each boundary\n low = BCBase.from_data(\n grid, axis, upper=False, data=data[0], rank=rank\n )\n high = BCBase.from_data(\n grid, axis, upper=True, data=data[1], rank=rank\n )\n else:\n # if the length is strange, the format must be wrong\n raise ValueError(\n \"Expected two conditions for the two sides of the axis, but \"\n f\"got `{data}`. 
\" + cls.get_help()\n )\n\n return cls(low, high)\n\n @property\n def _scipy_border_mode(self) -> dict:\n \"\"\"dict: a dictionary that can be used in scipy functions\n\n This returns arguments that can be passed to functions of the\n scipy.ndimage module to specify border conditions.\n\n Raise:\n RuntimeError if the boundary cannot be represented\n \"\"\"\n if self.low != self.high:\n raise RuntimeError(\"Incompatible boundaries\")\n\n # check whether both sides have vanishing derivative conditions\n zero_neumann_bcs = all(\n isinstance(b, NeumannBC) and np.all(b.value == 0)\n for b in [self.low, self.high]\n )\n if zero_neumann_bcs:\n return {\"mode\": \"reflect\"}\n else:\n # BoundaryCondition.value cannot be supported since the scipy value\n # mode='constant' applies the boundary conditions at a different\n # position then we would\n raise RuntimeError(\"Unsupported boundaries\")\n\n def extract_component(self, *indices):\n \"\"\"extracts the boundary pair of the given index.\n\n Args:\n *indices:\n One or two indices for vector or tensor fields, respectively\n \"\"\"\n bc_sub_low = self.low.extract_component(*indices)\n bc_sub_high = self.high.extract_component(*indices)\n return self.__class__(bc_sub_low, bc_sub_high)\n\n def check_value_rank(self, rank: int):\n \"\"\"check whether the values at the boundaries have the correct rank\n\n Args:\n rank (int): The rank of the value that is stored with this\n boundary condition\n\n Throws:\n RuntimeError: if the value does not have rank `rank`\n \"\"\"\n self.low.check_value_rank(rank)\n self.high.check_value_rank(rank)\n\n def get_data(self, idx: Tuple[int, ...]) -> Tuple[float, Dict[int, float]]:\n \"\"\"sets the elements of the sparse representation of this condition\n\n Args:\n idx (tuple):\n The index of the point that must lie on the boundary condition\n\n Returns:\n float, dict: A constant value and a dictionary with indices and\n factors that can be used to calculate this virtual point\n \"\"\"\n axis_coord = idx[self.axis]\n if axis_coord == -1:\n # the virtual point on the lower side\n return self.low.get_data(idx)\n elif axis_coord == self.grid.shape[self.axis]:\n # the virtual point on the upper side\n return self.high.get_data(idx)\n else:\n # the normal case of an interior point\n return 0, {axis_coord: 1}\n\n def make_virtual_point_evaluators(self) -> Tuple[Callable, Callable]:\n \"\"\"returns two functions evaluating the value at virtual support points\n\n Args:\n size (int): Number of support points along the axis\n dx (float): Discretization, i.e., distance between support points\n\n Returns:\n tuple: Two functions that each take a 1d array as an argument and\n return the associated value at the virtual support point outside the\n lower and upper boundary, respectively.\n \"\"\"\n eval_low = self.low.make_virtual_point_evaluator()\n eval_high = self.high.make_virtual_point_evaluator()\n return (eval_low, eval_high)\n\n @property\n def differentiated(self) -> \"BoundaryPair\":\n \"\"\" BoundaryPair: differentiated version of this boundary condition \"\"\"\n return self.__class__(self.low.differentiated, self.high.differentiated)\n\n def get_point_evaluator(self, fill: np.array = None) -> Callable:\n \"\"\"return a function to evaluate values at a given point\n\n The point can either be a point inside the domain or a virtual point\n right outside the domain\n\n Args:\n fill (:class:`numpy.ndarray`, optional):\n Determines how values out of bounds are handled. 
If `None`, a\n `DomainError` is raised when out-of-bounds points are requested.\n Otherwise, the given value is returned.\n\n Returns:\n function: A function taking a 1d array and an index as an argument,\n returning the value of the array at this index.\n \"\"\"\n size = self.low.grid.shape[self.low.axis]\n get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)\n\n eval_low = self.low.make_virtual_point_evaluator()\n eval_high = self.high.make_virtual_point_evaluator()\n\n @register_jitable\n def evaluate(arr, idx):\n \"\"\" evaluate values of the 1d array `arr_1d` at an index `i` \"\"\"\n arr_1d, i, _ = get_arr_1d(arr, idx)\n\n if i == -1:\n # virtual point on the lower side of the axis\n return eval_low(arr, idx)\n\n elif i == size:\n # virtual point on the upper side of the axis\n return eval_high(arr, idx)\n\n elif 0 <= i < size:\n # inner point of the axis\n return arr_1d[..., i]\n\n elif fill is None:\n # point is outside the domain and no fill value is specified\n raise DomainError(\"Point index lies outside bounds\")\n\n else:\n # Point is outside the domain, but fill value is specified. Note\n # that fill value needs to be given with the correct shape.\n return fill\n\n return evaluate # type: ignore\n\n def make_region_evaluator(self) -> Callable:\n \"\"\"return a function to evaluate values in a neighborhood of a point\n\n Returns:\n function: A function that can be called with the data array and a\n tuple indicating around what point the region is evaluated. The\n function returns the data values left of the point, at the point,\n and right of the point along the axis associated with this boundary\n condition. The function takes boundary conditions into account if\n the point lies on the boundary.\n \"\"\"\n get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)\n ap_low = self.low.make_adjacent_evaluator()\n ap_high = self.high.make_adjacent_evaluator()\n\n @register_jitable\n def region_evaluator(arr, idx: Tuple[int, ...]) -> Tuple[float, float, float]:\n \"\"\" compiled function return the values in the region \"\"\"\n # extract the 1d array along axis\n arr_1d, i_point, bc_idx = get_arr_1d(arr, idx)\n return (\n ap_low(arr_1d, i_point, bc_idx),\n arr_1d[..., i_point],\n ap_high(arr_1d, i_point, bc_idx),\n )\n\n return region_evaluator # type: ignore\n\n\nclass BoundaryPeriodic(BoundaryAxisBase):\n \"\"\" represent a periodic axis \"\"\"\n\n periodic = True\n _scipy_border_mode = {\"mode\": \"wrap\"}\n\n def __init__(self, grid: GridBase, axis: int):\n \"\"\"\n Args:\n grid (:class:`~pde.grids.GridBase`):\n The grid for which the boundary conditions are defined\n axis (int):\n The axis to which this boundary condition is associated\n \"\"\"\n self.grid = grid\n self.axis = axis\n\n def __iter__(self):\n return iter(()) # there are no sub-boundaries to iterate over\n\n def __repr__(self):\n return f\"{self.__class__.__name__}(grid={self.grid}, axis={self.axis})\"\n\n def __str__(self):\n return '\"periodic\"'\n\n def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return NotImplemented\n return (\n self.__class__ == other.__class__\n and self.grid == other.grid\n and self.axis == other.axis\n )\n\n def __ne__(self, other):\n if not isinstance(other, self.__class__):\n return NotImplemented\n return (\n self.__class__ != other.__class__\n or self.grid != other.grid\n or self.axis != other.axis\n )\n\n def _cache_hash(self) -> int:\n \"\"\" returns a value to determine when a cache needs to be updated \"\"\"\n return 
hash((self.grid._cache_hash(), self.axis))\n\n def copy(self) -> \"BoundaryPeriodic\":\n \"\"\" return a copy of itself, but with a reference to the same grid \"\"\"\n return self.__class__(grid=self.grid, axis=self.axis)\n\n def extract_component(self, *indices):\n \"\"\"extracts the boundary pair of the given extract_component.\n\n Args:\n *indices:\n One or two indices for vector or tensor fields, respectively\n \"\"\"\n return self\n\n def check_value_rank(self, rank: int):\n \"\"\"check whether the values at the boundaries have the correct rank\n\n Args:\n rank (int): The rank of the value that is stored with this\n boundary condition\n \"\"\"\n return True\n\n def make_virtual_point_evaluators(self) -> Tuple[Callable, Callable]:\n \"\"\"returns two functions evaluating the value at virtual support points\n\n Returns:\n tuple: Two functions that each take a 1d array as an argument and\n return the associated value at the virtual support point outside the\n lower and upper boundary, respectively.\n \"\"\"\n size = self.grid.shape[self.axis]\n\n @register_jitable\n def value_low(arr):\n \"\"\" evaluate the virtual point using the data array `arr` \"\"\"\n return arr[size - 1]\n\n @register_jitable\n def value_high(arr):\n \"\"\" evaluate the virtual point using the data array `arr` \"\"\"\n return arr[0]\n\n return (value_low, value_high)\n\n def get_data(self, idx: Tuple[int, ...]) -> Tuple[float, Dict[int, float]]:\n \"\"\"sets the elements of the sparse representation of this condition\n\n Args:\n idx (tuple):\n The index of the point that must lie on the boundary condition\n\n Returns:\n float, dict: A constant value and a dictionary with indices and\n factors that can be used to calculate this virtual point\n \"\"\"\n axis_coord = idx[self.axis]\n size = self.grid.shape[self.axis]\n if axis_coord == -1:\n # the virtual point on the lower side\n return 0, {size - 1: 1}\n elif axis_coord == size:\n # the virtual point on the upper side\n return 0, {0: 1}\n else:\n # the normal case of an interior point\n return 0, {axis_coord: 1}\n\n @property\n def differentiated(self) -> \"BoundaryPeriodic\":\n \"\"\" BoundaryPeriodic: differentiated boundary condition \"\"\"\n return self\n\n def get_point_evaluator(self, fill: float = None) -> Callable:\n \"\"\"return a function to evaluate values at a given point\n\n The point can either be a point inside the domain or a virtual point\n right outside the domain.\n\n Args:\n fill: This argument is ignored.\n\n Returns:\n function: A function taking a 1d array and an index as an argument,\n returning the value of the array at this index.\n \"\"\"\n size = self.grid.shape[self.axis]\n\n get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)\n\n @register_jitable\n def evaluate(arr, idx):\n \"\"\" evaluate values of the array `arr` at an index `idx` \"\"\"\n arr_1d, i, _ = get_arr_1d(arr, idx)\n return arr_1d[..., i % size] # wrap around for periodic boundaries\n\n return evaluate # type: ignore\n\n def make_region_evaluator(self) -> Callable:\n \"\"\"return a function to evaluate values in a neighborhood of a point\n\n Returns:\n function: A function that can be called with the data array and a\n tuple indicating around what point the region is evaluated. The\n function returns the data values left of the point, at the point,\n and right of the point along the axis associated with this boundary\n condition. 
The function takes boundary conditions into account if\n the point lies on the boundary.\n \"\"\"\n size = self.grid.shape[self.axis]\n get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)\n\n @register_jitable\n def region_evaluator(arr, idx: Tuple[int, ...]) -> Tuple[float, float, float]:\n \"\"\" compiled function return the values in the region \"\"\"\n # extract the 1d array along axis\n arr_1d, i, _ = get_arr_1d(arr, idx)\n\n # determine the indices in the vicinity\n im = size - 1 if i == 0 else i - 1\n ip = 0 if i == size - 1 else i + 1\n\n # return the values in the region around the point\n return arr_1d[..., im], arr_1d[..., i], arr_1d[..., ip]\n\n return region_evaluator # type: ignore\n\n\ndef get_boundary_axis(\n grid: GridBase, axis: int, data, rank: int = 0\n) -> BoundaryAxisBase:\n \"\"\"return object representing the boundary condition for a single axis\n\n Args:\n grid (:class:`~pde.grids.GridBase`):\n The grid for which the boundary conditions are defined\n axis (int):\n The axis to which this boundary condition is associated\n data (str or tuple or dict):\n Data describing the boundary conditions for this axis\n rank (int):\n The tensorial rank of the value associated with the boundary\n conditions.\n\n Returns:\n BoundaryAxisBase: The boundary condition for the axis\n \"\"\"\n # handle special constructs that describe boundary conditions\n if data == \"natural\" or data == \"auto_periodic_neumann\":\n # automatic choice between periodic and Neumann condition\n data = \"periodic\" if grid.periodic[axis] else \"derivative\"\n elif data == \"auto_periodic_dirichlet\":\n # automatic choice between periodic and Dirichlet condition\n data = \"periodic\" if grid.periodic[axis] else \"value\"\n\n # handle different types of data that specify boundary conditions\n if isinstance(data, BoundaryAxisBase):\n # boundary is already in the correct format\n return data\n elif data == \"periodic\" or data == (\"periodic\", \"periodic\"):\n # initialize a periodic boundary condition\n return BoundaryPeriodic(grid, axis)\n elif isinstance(data, dict) and data.get(\"type\") == \"periodic\":\n # initialize a periodic boundary condition\n return BoundaryPeriodic(grid, axis)\n else:\n # initialize independent boundary conditions for the two sides\n return BoundaryPair.from_data(grid, axis, data, rank=rank)\n"
]
| [
[
"numpy.all"
]
]
|
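The `get_boundary_axis` dispatcher in the py-pde row above resolves string shorthands such as `"natural"` into concrete boundary objects per axis. A minimal usage sketch follows; the `pde.grids.boundaries.axes` import path and the `UnitGrid` constructor arguments are assumptions about the surrounding py-pde package, not shown in this row.

```python
# Sketch: resolving the "natural" shorthand per axis with the
# get_boundary_axis function defined above. Import paths are assumed.
from pde import UnitGrid
from pde.grids.boundaries.axes import get_boundary_axis  # assumed module path

grid = UnitGrid([8, 8], periodic=[True, False])  # x-axis periodic, y-axis not

bc_x = get_boundary_axis(grid, axis=0, data="natural")
bc_y = get_boundary_axis(grid, axis=1, data="natural")

print(bc_x)  # BoundaryPeriodic; its __str__ prints "periodic"
print(bc_y)  # BoundaryPair of zero-derivative (Neumann) conditions
```

Dispatching on plain strings and dicts keeps user-facing boundary specifications serializable, while `BoundaryPeriodic` and `BoundaryPair` share the `BoundaryAxisBase` interface so downstream evaluators never need to branch on the boundary type.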
hieu1999210/image_compression | [
"3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c"
]
| [
"test/test_gdn.py"
]
| [
"# Copyright 2020 Hieu Nguyen\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom modelling.layers.gdn import GDN\nimport torch\n\n\ndef test_output():\n inputs = torch.rand(2,3,4,5)\n outputs = GDN(3, inverse=False, relu=False)(inputs)\n expected_outputs = inputs/torch.sqrt(1+ 0.1*(inputs**2))\n diff = torch.abs(expected_outputs-outputs)\n error = diff.max()\n # print(error)\n assert (error <= 1e-6), \"failed gdn output test\"\n\n\ndef test_igdn_output():\n inputs = torch.rand(2,3,4,5)\n outputs = GDN(3, inverse=True, relu=False)(inputs)\n expected_outputs = inputs*torch.sqrt(1+ 0.1*(inputs**2))\n diff = torch.abs(expected_outputs-outputs)\n error = diff.max()\n # print(error)\n assert (error <= 1e-6), \"failed igdn output test\"\n\n\ndef test_rgdn_output():\n inputs = torch.rand(2,3,4,5)-0.5\n outputs = GDN(3, inverse=False, relu=True)(inputs)\n inputs = torch.max(inputs, torch.tensor(0.))\n expected_outputs = inputs/torch.sqrt(1+ 0.1*(inputs**2))\n diff = torch.abs(expected_outputs-outputs)\n error = diff.max()\n # print(error)\n assert (error <= 1e-6), \"failed rgdn output test\"\n\n\ndef test_has_grad():\n inputs = torch.rand(2,3,4,5)\n layer = GDN(3, inverse=False, relu=False)\n outputs = layer(inputs)\n x = outputs.mean()\n x.backward()\n for name, param in layer.named_parameters():\n # print(name, param.grad)\n assert param.grad is not None\n \n\nif __name__ == \"__main__\":\n test_output()\n test_igdn_output()\n test_rgdn_output()\n test_has_grad()"
]
| [
[
"torch.abs",
"torch.rand",
"torch.sqrt",
"torch.tensor"
]
]
|
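The expected outputs in `test_gdn.py` above pin down the GDN forward pass at initialization: divisive normalization `y = x / sqrt(beta + gamma * x**2)` with `beta = 1` and `gamma = 0.1`, multiplied instead of divided when `inverse=True`, and preceded by a ReLU when `relu=True`. Below is a self-contained reference for that formula, inferred from the tests rather than from the `modelling.layers.gdn` source; the real layer learns `beta` and `gamma` during training.

```python
# Reference for the behaviour asserted in test_gdn.py: GDN at its
# initial parameters (beta=1, diagonal gamma=0.1), inferred from the tests.
import torch

def gdn_reference(x: torch.Tensor, inverse: bool = False,
                  relu: bool = False) -> torch.Tensor:
    if relu:
        x = torch.max(x, torch.tensor(0.0))  # clamp negatives, as in test_rgdn
    norm = torch.sqrt(1.0 + 0.1 * x ** 2)    # beta=1, gamma=0.1 at init
    return x * norm if inverse else x / norm

x = torch.rand(2, 3, 4, 5)
assert torch.allclose(gdn_reference(x), x / torch.sqrt(1 + 0.1 * x ** 2))
assert torch.allclose(gdn_reference(x, inverse=True),
                      x * torch.sqrt(1 + 0.1 * x ** 2))
```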
Omerside/dfcx-scrapi | [
"33845a26ffc59684478869503b290cf26baeb666"
]
| [
"src/dfcx_scrapi/tools/dataframe_functions.py"
]
| [
"\"\"\"Utility file for dataframe functions in support of Dialogflow CX.\"\"\"\n\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport time\nfrom typing import Dict, List\nimport gspread\nimport pandas as pd\nimport numpy as np\nfrom pyasn1.type.univ import Boolean\nfrom tabulate import tabulate\nfrom gspread_dataframe import set_with_dataframe\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nimport google.cloud.dialogflowcx_v3beta1.types as types\n\nfrom dfcx_scrapi.core import (\n scrapi_base,\n intents,\n entity_types,\n flows,\n pages,\n transition_route_groups,\n)\n\ng_drive_scope = [\n \"https://spreadsheets.google.com/feeds\",\n \"https://www.googleapis.com/auth/drive\",\n]\n\n# logging config\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\n\n\nclass DataframeFunctions(scrapi_base.ScrapiBase):\n \"\"\"Class that supports dataframe functions in DFCX.\"\"\"\n\n def __init__(\n self,\n creds_path: str = None,\n creds_dict: dict = None,\n creds=None,\n scope=False,\n ):\n super().__init__(\n creds_path=creds_path,\n creds_dict=creds_dict,\n creds=creds,\n scope=scope,\n )\n\n logging.info(\"create dfcx creds %s\", creds_path)\n self.entities = entity_types.EntityTypes(creds_path, creds_dict)\n self.intents = intents.Intents(creds_path, creds_dict)\n self.flows = flows.Flows(creds_path, creds_dict)\n self.pages = pages.Pages(creds_path, creds_dict)\n self.route_groups = transition_route_groups.TransitionRouteGroups(\n creds_path, creds_dict\n )\n self.creds_path = creds_path\n\n @staticmethod\n def progress_bar(current, total, bar_length=50, type_=\"Progress\"):\n \"\"\"Display progress bar for processing.\"\"\"\n percent = float(current) * 100 / total\n arrow = \"-\" * int(percent / 100 * bar_length - 1) + \">\"\n spaces = \" \" * (bar_length - len(arrow))\n print(\n \"{2}({0}/{1})\".format(current, total, type_)\n + \"[%s%s] %d %%\" % (arrow, spaces, percent),\n end=\"\\r\",\n )\n\n @staticmethod\n def _coerce_to_string(dataframe: pd.DataFrame, fields: List[str]):\n \"\"\"Coerce incoming object type to string\"\"\"\n for field in fields:\n dataframe = dataframe.astype({field: \"string\"})\n\n return dataframe\n\n @staticmethod\n def _coerce_to_int(dataframe: pd.DataFrame, fields: List[str]):\n \"\"\"Coerce incoming object type to int\"\"\"\n for field in fields:\n dataframe = dataframe.astype({field: \"int32\"})\n\n return dataframe\n\n @staticmethod\n def _make_schema(columns: List[str]) -> pd.DataFrame:\n\n dataframe = pd.DataFrame(columns=columns)\n\n type_map = {\n \"display_name\": \"string\",\n \"text\": \"string\",\n \"parameter_id\": \"string\",\n \"training_phrase\": \"int32\",\n \"part\": \"int32\",\n \"id\": \"string\",\n \"entity_type\": \"string\",\n }\n\n temp_data = {}\n for column in dataframe.columns:\n dataframe = dataframe.astype({column: type_map[column]})\n temp_data[column] = 
type_map[column]\n\n dataframe = dataframe.append(temp_data, ignore_index=True)\n\n return dataframe\n\n @staticmethod\n def _remap_intent_values(original_intent: types.Intent) -> types.Intent:\n\n new_intent = types.intent.Intent()\n new_intent.name = original_intent.name\n new_intent.display_name = original_intent.display_name\n new_intent.priority = original_intent.priority\n new_intent.is_fallback = original_intent.is_fallback\n new_intent.labels = dict(original_intent.labels)\n new_intent.description = original_intent.description\n\n return new_intent\n\n def _update_intent_from_dataframe(\n self,\n intent_id: str,\n train_phrases: pd.DataFrame,\n params=None,\n mode: str = \"basic\",\n ):\n \"\"\"Make an Updated Intent Object based on already existing Intent.\n\n The intent must exist in the agent.\n This method will modify the existing Intent object based on the\n incoming dataframe parameters.\n *Note* this is an internal method and should not be used on its own to\n update the Intent object.\n\n Args:\n intent_id: name parameter of the intent to update\n train_phrases: dataframe of training phrases in advanced have\n training_phrase and parts column to track the build\n params(optional): dataframe of parameters\n mode: basic - build assuming one row is one training phrase no\n entities, advance - build keeping track of training phrases and\n parts with the training_phrase and parts column.\n\n Returns:\n intent_pb: the new intents protobuf object\n \"\"\"\n\n if mode == \"basic\":\n if hasattr(train_phrases, \"text\"):\n train_phrases = train_phrases[[\"text\"]]\n train_phrases = self._coerce_to_string(train_phrases, [\"text\"])\n else:\n tp_schema = self._make_schema([\"text\", \"parameter_id\"])\n\n logging.error(\n \"%s mode train_phrases schema must be: \\n%s\",\n mode,\n tabulate(tp_schema, headers=\"keys\", tablefmt=\"psql\"),\n )\n raise KeyError(\"Missing column 'text' in DataFrame columns\")\n\n elif mode == \"advanced\":\n if all(\n k in train_phrases\n for k in [\"training_phrase\", \"part\", \"text\", \"parameter_id\"]\n ):\n\n train_phrases = train_phrases[\n [\"training_phrase\", \"part\", \"text\", \"parameter_id\"]\n ]\n train_phrases = self._coerce_to_int(\n train_phrases, [\"training_phrase\", \"part\"]\n )\n train_phrases = self._coerce_to_string(\n train_phrases, [\"text\", \"parameter_id\"]\n )\n\n if not params.empty:\n params = params[[\"id\", \"entity_type\"]]\n params = self._coerce_to_string(\n params, [\"id\", \"entity_type\"]\n )\n\n else:\n tp_schema = self._make_schema(\n [\"training_phrase\", \"part\", \"text\", \"parameter_id\"]\n )\n p_schema = self._make_schema([\"id\", \"entity_type\"])\n\n logging.error(\n \"%s mode train_phrases schema must be: \\n%s\",\n mode,\n tabulate(\n tp_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n )\n logging.error(\n \"%s mode parameter schema must be %s \\n\",\n mode,\n tabulate(\n p_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n )\n raise KeyError(\"Missing column name in DataFrame. 
See schema.\")\n\n else:\n raise ValueError(\"Mode must be 'basic' or 'advanced'\")\n\n original = self.intents.get_intent(intent_id=intent_id)\n intent = self._remap_intent_values(original)\n\n # training phrases\n if mode == \"advanced\":\n training_phrases = []\n for phrase in list(set(train_phrases[\"training_phrase\"])):\n tp_parts = train_phrases[\n train_phrases[\"training_phrase\"].astype(int) == int(phrase)\n ]\n parts = []\n for _, row in tp_parts.iterrows():\n part = {\n \"text\": row[\"text\"],\n \"parameter_id\": row[\"parameter_id\"],\n }\n parts.append(part)\n\n training_phrase = {\"parts\": parts, \"repeat_count\": 1, \"id\": \"\"}\n training_phrases.append(training_phrase)\n\n intent.training_phrases = training_phrases\n parameters = []\n for _, row in params.iterrows():\n parameter = {\n \"id\": row[\"id\"],\n \"entity_type\": row[\"entity_type\"],\n \"is_list\": False,\n \"redact\": False,\n }\n parameters.append(parameter)\n\n if parameters:\n intent.parameters = parameters\n\n elif mode == \"basic\":\n training_phrases = []\n for _, row in train_phrases.iterrows():\n part = {\"text\": row[\"text\"], \"parameter_id\": None}\n parts = [part]\n training_phrase = {\"parts\": parts, \"repeat_count\": 1, \"id\": \"\"}\n training_phrases.append(training_phrase)\n intent.training_phrases = training_phrases\n else:\n raise ValueError(\"mode must be basic or advanced\")\n\n # json_intent = json.dumps(intent)\n # intent_pb = types.Intent.from_json(json_intent)\n return intent\n\n def bulk_update_intents_from_dataframe(\n self,\n agent_id: str,\n tp_df: pd.DataFrame,\n params_df: pd.DataFrame = None,\n mode: str = \"basic\",\n update_flag: Boolean = False,\n rate_limiter: int = 5,\n language_code: str = None\n ):\n \"\"\"Update existing Intent, TPs and Parameters from a Dataframe.\n\n Args:\n agent_id: name parameter of the agent to update_flag - full path to\n agent\n tp_df: dataframe of bulk training phrases required columns:\n text, display_name in advanced mode have training_phrase and parts\n column to track the build\n params_df(optional): dataframe of bulk parameters\n mode: basic|advanced\n basic, build assuming one row is one training phrase no entities\n advanced, build keeping track of training phrases and parts with the\n training_phrase and parts column.\n update_flag: True to update_flag the intents in the agent\n rate_limiter: seconds to sleep between operations.\n\n Returns:\n modified_intents: dictionary with intent display names as keys and\n the new intent protobufs as values\n \"\"\"\n\n if mode == \"basic\":\n if all(k in tp_df for k in [\"display_name\", \"text\"]):\n tp_df = tp_df[[\"display_name\", \"text\"]]\n tp_df = self._coerce_to_string(tp_df, [\"display_name\", \"text\"])\n\n else:\n tp_schema = pd.DataFrame(\n index=[\"display_name\", \"text\", \"parameter_id\"],\n columns=[0],\n data=[\"string\", \"string\", \"string\"],\n ).astype({0: \"string\"})\n logging.error(\n \"%s mode train_phrases schema must be %s \\n\",\n mode,\n tabulate(\n tp_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n )\n\n elif mode == \"advanced\":\n if all(\n k in tp_df\n for k in [\n \"display_name\",\n \"training_phrase\",\n \"part\",\n \"text\",\n \"parameter_id\",\n ]\n ):\n\n tp_df = tp_df[\n [\n \"display_name\",\n \"training_phrase\",\n \"part\",\n \"text\",\n \"parameter_id\",\n ]\n ]\n\n\n tp_df = self._coerce_to_string(\n tp_df, [\"display_name\", \"text\", \"parameter_id\"]\n )\n\n tp_df = self._coerce_to_int(tp_df, [\"training_phrase\", 
\"part\"])\n\n if not params_df.empty:\n params_df = params_df[[\"display_name\", \"id\", \"entity_type\"]]\n params_df = params_df.astype(\n {\n \"display_name\": \"string\",\n \"id\": \"string\",\n \"entity_type\": \"string\",\n }\n )\n\n else:\n tp_schema = pd.DataFrame(\n index=[\n \"display_name\",\n \"training_phrase\",\n \"part\",\n \"text\",\n \"parameter_id\",\n ],\n columns=[0],\n data=[\"string\", \"int32\", \"int32\", \"string\", \"string\"],\n ).astype({0: \"string\"})\n p_schema = pd.DataFrame(\n index=[\"display_name\", \"id\", \"entity_type\"],\n columns=[0],\n data=[\"string\", \"string\", \"string\"],\n ).astype({0: \"string\"})\n logging.error(\n \"%s mode train_phrases schema must be %s \\n\",\n mode,\n tabulate(\n tp_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n )\n logging.error(\n \"%s mode parameter schema must be %s \\n\",\n mode,\n tabulate(\n p_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n )\n\n else:\n raise ValueError(\"mode must be basic or advanced\")\n\n intents_map = self.intents.get_intents_map(\n agent_id=agent_id, reverse=True\n )\n\n intent_names = list(set(tp_df[\"display_name\"]))\n\n new_intents = {}\n i = 0\n for intent_name in intent_names:\n if intent_name in ([\"\", np.nan, None]):\n logging.warning(\"empty intent_name\")\n continue\n\n tps = tp_df.copy()[tp_df[\"display_name\"] == intent_name].drop(\n columns=\"display_name\"\n )\n params = pd.DataFrame()\n if mode == \"advanced\":\n params = params_df.copy()[\n params_df[\"display_name\"] == intent_name\n ].drop(columns=\"display_name\")\n\n if intent_name not in intents_map.keys():\n logging.error(\n \"FAIL to update - intent not found: [%s]\", intent_name\n )\n continue\n\n new_intent = self._update_intent_from_dataframe(\n intent_id=intents_map[intent_name],\n train_phrases=tps,\n params=params,\n mode=mode,\n )\n new_intents[intent_name] = new_intent\n i += 1\n self.progress_bar(i, len(intent_names))\n if update_flag:\n self.intents.update_intent(\n intent_id=new_intent.name,\n obj=new_intent,\n language_code=language_code\n )\n time.sleep(rate_limiter)\n\n return new_intents\n\n def _create_intent_from_dataframe(\n self,\n display_name: str,\n tp_df: pd.DataFrame,\n params_df: pd.DataFrame = None,\n meta: Dict[str, str] = None,\n mode: str = \"basic\",\n ):\n \"\"\"Create an intent from a DataFrame.\n\n Args:\n display_name: display_name parameter of the intent to create\n train_phrases: dataframe of training phrases in advanced have\n training_phrase and parts column to track the build\n params(optional): dataframe of parameters\n meta: dictionary\n mode: basic - build assuming one row is one training phrase no\n entities, advance - build keeping track of training phrases\n and parts with the training_phrase and parts column.\n\n Returns:\n intent_pb: the new intents protobuf object\n \"\"\"\n if mode == \"basic\":\n if all(k in tp_df for k in [\"text\"]):\n tp_df = tp_df[[\"text\"]]\n tp_df = self._coerce_to_string(tp_df, [\"text\"])\n\n else:\n tp_schema = self._make_schema([\"text\", \"parameter_id\"])\n\n logging.error(\n \"%s mode train_phrases schema must be %s \\n\",\n mode,\n tabulate(\n tp_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n )\n\n elif mode == \"advanced\":\n if all(\n k in tp_df\n for k in [\"training_phrase\", \"part\", \"text\", \"parameter_id\"]\n ):\n tp_df = tp_df[\n [\"training_phrase\", \"part\", \"text\", \"parameter_id\"]\n ]\n tp_df = self._coerce_to_string(tp_df, [\"text\", 
\"parameter_id\"])\n tp_df = self._coerce_to_int(tp_df, [\"training_phrase\", \"part\"])\n\n if not params_df.empty:\n params_df = params_df[[\"id\", \"entity_type\"]]\n params_df = params_df.astype(\n {\"id\": \"string\", \"entity_type\": \"string\"}\n )\n else:\n tp_schema = self._make_schema(\n [\"training_phrase\", \"part\", \"text\", \"parameter_id\"]\n )\n p_schema = self._make_schema([\"id\", \"entity_type\"])\n\n logging.error(\n \"%s mode train_phrases schema must be %s \\n\",\n mode,\n tabulate(\n tp_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n )\n logging.error(\n \"%s mode parameter schema must be %s \\n\",\n mode,\n tabulate(\n p_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n )\n\n else:\n raise ValueError(\"mode must be basic or advanced\")\n\n intent = {}\n intent[\"display_name\"] = display_name\n\n if meta:\n intent[\"priority\"] = meta.get(\"priority\", 500000)\n intent[\"is_fallback\"] = meta.get(\"is_fallback\", False)\n intent[\"labels\"] = meta.get(\"labels\", {})\n intent[\"description\"] = meta.get(\"description\", \"\")\n\n # training phrases\n if mode == \"advanced\":\n training_phrases = []\n for phrase in list(set(tp_df[\"training_phrase\"])):\n tp_parts = tp_df[\n tp_df[\"training_phrase\"].astype(int) == int(phrase)\n ]\n parts = []\n for _, row in tp_parts.iterrows():\n part = {\n \"text\": row[\"text\"],\n \"parameter_id\": row[\"parameter_id\"],\n }\n parts.append(part)\n\n training_phrase = {\"parts\": parts, \"repeat_count\": 1, \"id\": \"\"}\n training_phrases.append(training_phrase)\n\n intent[\"training_phrases\"] = training_phrases\n parameters = []\n for _, row in params_df.iterrows():\n parameter = {\n \"id\": row[\"id\"],\n \"entity_type\": row[\"entity_type\"],\n \"is_list\": False,\n \"redact\": False,\n }\n parameters.append(parameter)\n\n if parameters:\n intent[\"parameters\"] = parameters\n\n elif mode == \"basic\":\n training_phrases = []\n for _, row in tp_df.iterrows():\n part = {\"text\": row[\"text\"], \"parameter_id\": None}\n parts = [part]\n training_phrase = {\"parts\": parts, \"repeat_count\": 1, \"id\": \"\"}\n training_phrases.append(training_phrase)\n intent[\"training_phrases\"] = training_phrases\n else:\n raise ValueError(\"mode must be basic or advanced\")\n\n json_intent = json.dumps(intent)\n intent_pb = types.Intent.from_json(json_intent)\n\n return intent_pb\n\n def bulk_create_intent_from_dataframe(\n self,\n agent_id: str,\n tp_df: pd.DataFrame,\n params_df: pd.DataFrame = None,\n mode: str = \"basic\",\n update_flag: Boolean = False,\n rate_limiter: int = 5,\n meta: Dict[str, str] = None,\n ):\n \"\"\"Create Intents in DFCX from a DataFrame.\n\n Args:\n agent_id: name parameter of the agent to update_flag - full path to\n agent\n train_phrases_df: dataframe of bulk training phrases required\n columns of text, display_name in advanced mode have training_phrase\n and parts column to track the build\n params_df(optional): dataframe of bulk parameters\n mode: basic|advanced\n basic - build assuming one row is one training phrase no entities\n advanced - build keeping track of training phrases and parts with\n the training_phrase and parts column.\n update_flag: True to update_flag the intents in the agent\n rate_limiter: number of seconds to wait between calls\n meta: dictionary\n\n Returns:\n new_intents: dictionary with intent display names as keys and the new\n intent protobufs as values\n\n \"\"\"\n if mode == \"basic\":\n if all(k in tp_df for k in [\"display_name\", 
\"text\"]):\n tp_df = tp_df[[\"display_name\", \"text\"]]\n tp_df = self._coerce_to_string(tp_df, [\"display_name\", \"text\"])\n\n else:\n tp_schema = self._make_schema(\n [\"display_name\", \"text\", \"parameter_id\"]\n )\n\n raise ValueError(\n \"%s mode train_phrases schema must be %s\" % mode,\n tabulate(\n tp_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n )\n\n elif mode == \"advanced\":\n if all(\n k in tp_df\n for k in [\n \"display_name\",\n \"training_phrase\",\n \"part\",\n \"text\",\n \"parameter_id\",\n ]\n ):\n if \"meta\" not in tp_df.columns:\n tp_df[\"meta\"] = [dict()] * len(tp_df)\n\n tp_df = tp_df[\n [\n \"display_name\",\n \"training_phrase\",\n \"part\",\n \"text\",\n \"parameter_id\",\n \"meta\",\n ]\n ]\n tp_df = self._coerce_to_string(\n tp_df, [\"display_name\", \"text\", \"parameter_id\"]\n )\n tp_df = self._coerce_to_int(tp_df, [\"training_phrase\", \"part\"])\n\n if not params_df.empty:\n params_df = params_df[[\"display_name\", \"id\", \"entity_type\"]]\n params_df = self._coerce_to_string(\n params_df, [\"display_name\", \"id\", \"entity_type\"]\n )\n\n else:\n tp_schema = self._make_schema(\n [\n \"display_name\",\n \"training_phrase\",\n \"part\",\n \"text\",\n \"parameter_id\",\n ]\n )\n\n p_schema = self._make_schema(\n [\"display_name\", \"id\", \"entity_type\"]\n )\n\n raise ValueError(\n \"%s mode train_phrases schema must be %s \\n parameter\\\n schema must be %s\"\n % mode,\n tabulate(\n tp_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n tabulate(\n p_schema.transpose(),\n headers=\"keys\",\n tablefmt=\"psql\",\n ),\n )\n\n else:\n raise ValueError(\"mode must be basic or advanced\")\n\n temp_intents = list(set(tp_df[\"display_name\"]))\n new_intents = {}\n i = 0\n for intent in temp_intents:\n tps = tp_df.copy()[tp_df[\"display_name\"] == intent].drop(\n columns=\"display_name\"\n )\n params = pd.DataFrame()\n if mode == \"advanced\":\n params = params_df.copy()[\n params_df[\"display_name\"] == intent\n ].drop(columns=\"display_name\")\n\n new_intent = self._create_intent_from_dataframe(\n display_name=intent,\n tp_df=tps,\n params_df=params,\n meta=meta,\n mode=mode,\n )\n new_intents[intent] = new_intent\n i += 1\n self.progress_bar(i, len(temp_intents))\n if update_flag:\n time.sleep(rate_limiter)\n self.intents.create_intent(agent_id=agent_id, obj=new_intent)\n\n return new_intents\n\n def create_entity_from_dataframe(\n self,\n display_name: str,\n entity_df: pd.DataFrame,\n meta: Dict[str, str] = None,\n ):\n \"\"\"Create an entity.\n\n Args:\n display_name: display_name parameter of the entity to update\n entity_df: dataframe values and synonyms\n meta: dictionary\n\n Returns:\n entity_pb: the new entity protobuf object\n \"\"\"\n\n entity_obj = {}\n entity_obj[\"display_name\"] = display_name\n entity_obj[\"kind\"] = meta.get(\"kind\", 1)\n entity_obj[\"auto_expansion_mode\"] = meta.get(\"auto_expansion_mode\", 0)\n entity_obj[\"excluded_phrases\"] = meta.get(\"excluded_phrases\", [])\n entity_obj[\"enable_fuzzy_extraction\"] = meta.get(\n \"enable_fuzzy_extraction\", False\n )\n\n values = []\n for _, row in entity_df.iterrows():\n value = row[\"value\"]\n synonyms = json.loads(row[\"synonyms\"])\n\n part = {\"value\": value, \"synonyms\": synonyms}\n values.append(part)\n\n entity_obj[\"entities\"] = values\n entity_pb = types.EntityType.from_json(json.dumps(entity_obj))\n\n return entity_pb\n\n def bulk_create_entity_from_dataframe(\n self, agent_id, entities_df, update_flag=False, 
rate_limiter=5\n ):\n \"\"\"Bulk create entities from a dataframe.\n\n Args:\n agent_id: name parameter of the agent to update_flag - full path to\n agent\n entities_df: dataframe of bulk entities;\n required columns: display_name, value, synonyms\n update_flag: True to update_flag the entities in the agent\n rate_limiter: seconds to sleep between operations.\n\n Returns:\n new_entities: dictionary with entity display names as keys and the\n new entity protobufs as values\n \"\"\"\n\n if \"meta\" in entities_df.columns:\n meta = (\n entities_df.copy()[[\"display_name\", \"meta\"]]\n .drop_duplicates()\n .reset_index()\n )\n\n i, custom_entities = 0, {}\n for entity in list(set(entities_df[\"display_name\"])):\n one_entity = entities_df[entities_df[\"display_name\"] == entity]\n if \"meta\" in locals():\n meta_ = meta[meta[\"display_name\"] == entity][\"meta\"].iloc[0]\n meta_ = json.loads(meta_)\n new_entity = self.create_entity_from_dataframe(\n display_name=entity, entity_df=one_entity, meta=meta_\n )\n\n else:\n new_entity = self.create_entity_from_dataframe(\n display_name=entity, entity_df=one_entity\n )\n\n custom_entities[entity] = new_entity\n i += 1\n\n if update_flag:\n self.entities.create_entity_type(\n agent_id=agent_id, obj=new_entity\n )\n time.sleep(rate_limiter)\n\n self.progress_bar(\n i, len(list(set(\n entities_df[\"display_name\"]))), type_=\"entities\"\n )\n return custom_entities\n\n def create_transition_route_from_dataframe(self, route_df):\n \"\"\"Create transition route.\n\n Args:\n route_df: dataframe with a singular routes data. Should only be one\n row\n intent: intent id\n condition: string condition. ex.\n $session.params.dtmf_diy_opt_in = 1 AND\n $session.params.dtmf_2_techinternet = 2\n target_page: page id\n target_flow: flow id\n webhook: webhook id\n webhook_tag: string webhook tag\n custom_payload: a singular payload or list of payloads ex. 
[{}, {}]\n fulfillment_text: = list of text [\"yo\", \"hi\"]\n parameter_presets: = dictionary of parameter presets ex.\n {\"param1\":\"value\",\"param2\":\"othervalues\"}\n rate_limiter: seconds to sleep between operations.\n\n Returns:\n transitionRoute: transition route protobuf\n \"\"\"\n\n transition_route = types.TransitionRoute()\n\n route_dict = route_df.to_dict()\n transition_route.intent = route_dict.get(\"intent\", None)\n transition_route.condition = route_dict.get(\"condition\", None)\n transition_route.target_page = route_dict.get(\"target_page\", None)\n transition_route.target_flow = route_dict.get(\"target_flow\", None)\n\n # fulfillment\n fulfillment = types.Fulfillment()\n fulfillment.webhook = route_dict.get(\"webhook\", None)\n fulfillment.tag = route_dict.get(\"webhook_tag\", None)\n\n custom_payload = route_dict.get(\"custom_payload\", None)\n custom_payload_list = []\n if custom_payload:\n custom_payload = json.loads(custom_payload)\n if not isinstance(custom_payload, list):\n custom_payload = [custom_payload]\n for single_payload in custom_payload:\n custom_payload_list.append({\"payload\": single_payload})\n\n fulfillment_text = route_dict.get(\"fulfillment_text\", None)\n\n # custom payloads and text\n payload = {\n \"messages\": custom_payload_list\n + [{\"text\": {\"text\": fulfillment_text}}]\n }\n\n payload_json = json.dumps(payload)\n fulfillment = types.Fulfillment.from_json(payload_json)\n\n # parameter - presets\n set_param_actions = []\n parameter_presets = route_dict.get(\"parameter_presets\", None)\n if parameter_presets:\n parameter_presets = json.loads(parameter_presets)\n for param in parameter_presets.keys():\n set_param_action = types.Fulfillment.SetParameterAction()\n set_param_action.parameter = param\n set_param_action.value = parameter_presets[param]\n set_param_actions.append(set_param_action)\n fulfillment.set_parameter_actions = set_param_actions\n transition_route.trigger_fulfillment = fulfillment\n\n return transition_route\n\n def bulk_create_route_group_from_dataframe(\n self, display_name, agent_id, flow_id, route_group_df, update_flag=False\n ):\n \"\"\"create transition route - no support for end_session / just end flow.\n\n Args:\n display_name: name for the route group\n agent_id: agent id of target agent\n flow_id: flow id where to create route group\n route_group_df: dataframe with a routes data\n intent: intent id\n condition: string condition. ex.\n $session.params.dtmf_diy_opt_in = 1 AND\n $session.params.dtmf_2_techinternet = 2\n target_page: page id\n target_flow: flow id\n webhook: webhook id\n webhook_tag: string webhook tag\n custom_payload: a singular payload or list of payloads ex. 
[{}, {}]\n fulfillment_text: = list of text [\"yo\", \"hi\"]\n parameter_presets: = dictionary of parameter presets ex.\n {\"param1\":\"value\",\"param2\":\"othervalues\"}\n update_flag: True to create the route group in the provided\n flow id\n\n Returns:\n rg: route group protobuf\n \"\"\"\n if \"intent\" in route_group_df.columns:\n intents_map = self.intents.get_intents_map(\n agent_id=agent_id, reverse=True\n )\n route_group_df[\"intent\"] = route_group_df.apply(\n lambda x: intents_map[x[\"intent\"]], axis=1\n )\n\n if \"target_flow\" in route_group_df.columns:\n flows_map = self.flows.get_flows_map(\n agent_id=agent_id, reverse=True\n )\n route_group_df[\"target_flow\"] = route_group_df.apply(\n lambda x: flows_map[x[\"target_flow\"]], axis=1\n )\n\n if \"target_page\" in route_group_df.columns:\n pages_map = self.pages.get_pages_map(flow_id=flow_id, reverse=True)\n pages_map[\"End Flow\"] = flow_id + \"/pages/END_FLOW\"\n route_group_df[\"target_page\"] = route_group_df.apply(\n lambda x: pages_map[x[\"target_page\"]], axis=1\n )\n\n transition_routes = []\n for _, row in route_group_df.iterrows():\n transition_route = self.create_transition_route_from_dataframe(row)\n transition_routes.append(transition_route)\n\n route_group = types.TransitionRouteGroup()\n route_group.display_name = display_name\n route_group.transition_routes = transition_routes\n\n if update_flag:\n self.route_groups.create_transition_route_group(\n flow_id=flow_id, obj=route_group\n )\n\n return route_group\n\n def sheets_to_dataframe(self, sheet_name, worksheet_name):\n \"\"\"Move Intent/TP data from Google Sheets to a DataFrame.\"\"\"\n scope = [\n \"https://spreadsheets.google.com/feeds\",\n \"https://www.googleapis.com/auth/drive\",\n ]\n creds_gdrive = ServiceAccountCredentials.from_json_keyfile_name(\n self.creds_path, scope\n )\n client = gspread.authorize(creds_gdrive)\n g_sheets = client.open(sheet_name)\n sheet = g_sheets.worksheet(worksheet_name)\n data_pull = sheet.get_all_values()\n return pd.DataFrame(columns=data_pull[0], data=data_pull[1:])\n\n def dataframe_to_sheets(self, sheet_name, worksheet_name, dataframe):\n \"\"\"Move Intent/TP data from a DataFrame to Google Sheets.\"\"\"\n scope = [\n \"https://spreadsheets.google.com/feeds\",\n \"https://www.googleapis.com/auth/drive\",\n ]\n creds_gdrive = ServiceAccountCredentials.from_json_keyfile_name(\n self.creds_path, scope\n )\n client = gspread.authorize(creds_gdrive)\n g_sheets = client.open(sheet_name)\n worksheet = g_sheets.worksheet(worksheet_name)\n set_with_dataframe(worksheet, dataframe)\n"
]
| [
[
"pandas.DataFrame"
]
]
|
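For reference, the `basic` mode of `bulk_update_intents_from_dataframe` in the dfcx-scrapi row above only requires `display_name` and `text` columns, one training phrase per row. A minimal input sketch follows; the intent names, phrase texts, agent path, and credentials file are illustrative placeholders, not taken from the source.

```python
# Minimal "basic" mode input for bulk_update_intents_from_dataframe:
# each row is one training phrase attached to an intent display_name.
import pandas as pd

tp_df = pd.DataFrame(
    {
        "display_name": ["billing_question", "billing_question", "cancel_service"],
        "text": [
            "I have a question about my bill",
            "why was I charged twice",
            "please cancel my subscription",
        ],
    }
)

# Hypothetical invocation (agent_id and creds path are placeholders):
# dffx = DataframeFunctions(creds_path="service_account.json")
# dffx.bulk_update_intents_from_dataframe(
#     agent_id="projects/.../locations/.../agents/...", tp_df=tp_df,
#     mode="basic", update_flag=False,
# )
```

With `update_flag=False` the method returns the rebuilt intent protobufs without writing to the agent, which matches how the rate-limited update loop is gated in the source.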
Tina-Rezaei/A-learning-model-to-detect-maliciousness-of-portable-executable-using-integrated-feature-set | [
"14e984d78d5ededd8f85d55600f1a90addab8561"
]
| [
"main.py"
]
| [
"import random\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import tree\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_validate\nimport numpy as np\nimport time\nimport click\nimport feature_extraction\n\nstart_time = time.time()\nX = []\nY = []\n\ndef store_features(path):\n # saving final extracted features for probabilistic future use\n file = open(path, 'w')\n for i, x in enumerate(X):\n for item in x:\n file.write('{},'.format(str(item)))\n file.write(str(Y[i][0]) + '\\n')\n file.close()\n\n\ndef load_features(path):\n X = []\n Y = []\n file = open(path, 'r')\n lines = file.readlines()\n for i, line in enumerate(lines):\n X.append([float(x) for x in line.split(',')[0:-1]])\n Y.append(int(line.split(',')[-1]))\n file.close()\n\n return X, Y\n\n\ndef load_data(malwarepath, benignpath, benignheaderfieldspath, malwareheaderfieldspath, malwaresectionnamespath,\n benignsectionnamespath):\n file = open(malwareheaderfieldspath, 'r')\n malware_header_fields = file.readlines()\n file.close()\n\n file = open(malwaresectionnamespath, 'r')\n malware_section_names = file.readlines()\n file.close()\n\n file = open(benignheaderfieldspath, 'r')\n benign_header_fields = file.readlines()\n file.close()\n\n file = open(benignsectionnamespath, 'r')\n benign_section_names = file.readlines()\n file.close()\n\n return malwarepath, benignpath, benign_header_fields, malware_header_fields, benign_section_names, malware_section_names\n\ndef log(message):\n print(message)\n\n\ndef final_features_extraction(path, header_fields, section_names, label):\n for i, row in enumerate(header_fields):\n\n final_features = []\n Y.append([label])\n row = row.split('\\t,')\n sample_name = row[-1].strip('\\n')\n\n # derived features\n entropies = feature_extraction.entropy(sample_name, path)\n final_features.append(entropies[0])\n final_features.append(entropies[1])\n final_features.append(entropies[2])\n\n sectionnames = section_names[i]\n sectionnames = sectionnames.split(',')\n sectionnames.remove(sectionnames[-1])\n section_name_features = feature_extraction.section_name_checker(sectionnames)\n final_features.append(section_name_features[0])\n final_features.append(section_name_features[1])\n\n final_features.append(feature_extraction.compilation_time(row[21]))\n\n final_features.append(feature_extraction.extract_file_size(sample_name, path))\n\n final_features.append(feature_extraction.extract_file_info(sample_name, path))\n\n final_features.append(feature_extraction.Image_Base_checker(row[34]))\n\n final_features.append(feature_extraction.sectionalignment_checker(int(row[35]), int(row[36])))\n\n final_features.append(feature_extraction.filealignment_checker(int(row[35]), int(row[36])))\n\n final_features.append(feature_extraction.sizeofimage_checker(int(row[44]), int(row[35])))\n\n final_features.append(feature_extraction.size_of_header_checker(sample_name, path))\n\n # Expanded features\n zerofill = bin(int(row[25]))[2:].zfill(16)\n characteristics = zerofill[0:6] + zerofill[7:]\n for c in characteristics:\n final_features.append(c)\n\n Dllzerofill = bin(int(row[48]))[2:].zfill(16)\n dllcharacteristics = Dllzerofill[5:]\n for d in dllcharacteristics:\n final_features.append(d)\n\n # raw features\n final_features.append(row[0])\n final_features.append(row[1])\n final_features.append(row[2])\n final_features.append(row[3])\n final_features.append(row[4])\n final_features.append(row[5])\n final_features.append(row[19])\n 
final_features.append(row[26])\n final_features.append(row[27])\n final_features.append(row[28])\n final_features.append(row[29])\n final_features.append(row[30])\n final_features.append(row[31])\n final_features.append(row[32])\n final_features.append(row[33])\n final_features.append(row[34])\n final_features.append(row[35])\n final_features.append(row[36])\n final_features.append(row[37])\n final_features.append(row[38])\n final_features.append(row[39])\n final_features.append(row[40])\n final_features.append(row[41])\n final_features.append(row[42])\n final_features.append(row[43])\n final_features.append(row[44])\n final_features.append(row[45])\n final_features.append(row[46])\n\n X.append(final_features)\n\n return X, Y\n\n\ndef learning(X, Y):\n algorithms = {\n \"RandomForest\": RandomForestClassifier(),\n \"SVM\": svm.SVC(),\n \"Knn\": KNeighborsClassifier(n_neighbors=5),\n \"DecisionTree\": tree.DecisionTreeClassifier(),\n }\n\n for algo in algorithms:\n print('{} results'.format(algo))\n start_time = time.time()\n clf = algorithms[algo]\n scores = cross_validate(clf, X, Y, cv=10, scoring=('accuracy', 'f1', 'recall', 'precision'))\n for score_name in ['test_accuracy', 'test_precision', 'test_recall', 'test_f1']:\n print('{} : {}'.format(score_name, np.mean(scores[score_name])))\n end_time = time.time()\n execution_time = end_time - start_time\n print('{} execution time {} \\n'.format(algo, execution_time))\n\n\[email protected]()\[email protected](\"--malwarepath\", required=True, help=\"path of malware samples\")\[email protected](\"--benignpath\", required=True, help=\"path of benign samples\")\[email protected](\"--benignheaderfieldspath\", required=True, help=\"path of stored header fields file for benign samples\")\[email protected](\"--malwareheaderfieldspath\", required=True, help=\"path of stored header fields file for malware samples\")\[email protected](\"--malwaresectionnamespath\", required=True, help=\"path of stored header fields file for malware samples\")\[email protected](\"--benignsectionnamespath\", required=True, help=\"path of stored header fields file for malware samples\")\ndef main(malwarepath, benignpath, benignheaderfieldspath, malwareheaderfieldspath, malwaresectionnamespath,\n benignsectionnamespath):\n\n malware_path, benign_path, benign_header_fields, malware_header_fields, benign_section_names, malware_section_names = \\\n load_data(malwarepath, benignpath, benignheaderfieldspath, malwareheaderfieldspath, malwaresectionnamespath,\n benignsectionnamespath)\n log(\"processing malwares for extracting features\")\n X, Y = final_features_extraction(malware_path, malware_header_fields, malware_section_names, 1)\n\n log(\"processing benign samples for extracting features\")\n X, Y = final_features_extraction(benign_path, benign_header_fields, benign_section_names, 0)\n\n global start_time\n end_time = time.time()\n feature_extraction_time = end_time - start_time\n print('feature extraction time {}'.format(feature_extraction_time))\n\n # saving final extracted features for probabilistic future use\n store_features('final_features.txt')\n\n # extracted features loading\n X, Y = load_features('final_features.txt')\n\n # shuffle\n start_time = time.time()\n features_label = list(zip(X, Y))\n random.shuffle(features_label)\n X, Y = zip(*features_label)\n\n # learning\n learning(X, Y)\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.cross_validate",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.mean",
"sklearn.svm.SVC",
"sklearn.tree.DecisionTreeClassifier"
]
]
|
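The `learning` function in the row above scores four classifiers with 10-fold cross-validation over accuracy, F1, recall, and precision. Below is a condensed, self-contained version of that loop; `X_demo` and `y_demo` are synthetic stand-ins for the PE header feature matrix the real pipeline extracts.

```python
# Condensed sketch of learning(): 10-fold cross-validation of one
# classifier, reporting the same four metrics as the source.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate

rng = np.random.default_rng(0)
X_demo = rng.random((100, 20))            # placeholder feature vectors
y_demo = rng.integers(0, 2, size=100)     # placeholder malware/benign labels

scores = cross_validate(
    RandomForestClassifier(), X_demo, y_demo, cv=10,
    scoring=("accuracy", "f1", "recall", "precision"),
)
for name in ("test_accuracy", "test_precision", "test_recall", "test_f1"):
    print(name, np.mean(scores[name]))
```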
ragibson/ModularityPruning | [
"683e85d8612bb7b6d3aa6aa5c26d161d78e41be9",
"683e85d8612bb7b6d3aa6aa5c26d161d78e41be9"
]
| [
"utilities/parameter_estimation_utilities.py",
"experiments/miscellaneous_tests/runtime_comparison_with_louvain.py"
]
| [
"from .louvain_utilities import louvain_part_with_membership, sorted_tuple, check_multilayer_louvain_capabilities\nfrom .champ_utilities import CHAMP_2D, CHAMP_3D\nfrom .partition_utilities import num_communities\nimport igraph as ig\nimport louvain\nfrom math import log\nimport numpy as np\nfrom scipy.optimize import fsolve\nimport warnings\n\n\ndef estimate_singlelayer_SBM_parameters(G, partition, m=None):\n r\"\"\"Estimate singlelayer SBM parameters from a graph and a partition.\n\n See https://doi.org/10.1103/PhysRevE.94.052315 for more details.\n\n :param G: graph of interest\n :type G: igraph.Graph\n :param partition: partition of interest\n :type partition: louvain.RBConfigurationVertexPartition\n :param m: total edge weight of graph (if None, will be computed)\n :type m: float\n :return: SBM parameter estimates :math:`(\\omega_{in}, \\omega_{out})`\n :rtype: tuple[float, float]\n \"\"\"\n\n if m is None:\n m = sum(G.es['weight'])\n\n assert isinstance(partition, louvain.RBConfigurationVertexPartition)\n community = partition.membership\n\n m_in = sum(e['weight'] * (community[e.source] == community[e.target]) for e in G.es)\n kappa_r_list = [0] * len(partition)\n for e in G.es:\n kappa_r_list[community[e.source]] += e['weight']\n kappa_r_list[community[e.target]] += e['weight']\n sum_kappa_sqr = sum(x ** 2 for x in kappa_r_list)\n\n omega_in = (2 * m_in) / (sum_kappa_sqr / (2 * m))\n # guard for div by zero with single community partition\n omega_out = (2 * m - 2 * m_in) / (2 * m - sum_kappa_sqr / (2 * m)) if len(partition) > 1 else 0\n\n # return estimates for omega_in, omega_out\n return omega_in, omega_out\n\n\ndef estimate_multilayer_SBM_parameters(G_intralayer, G_interlayer, layer_vec, partition, model, N=None, T=None,\n Nt=None, m_t=None):\n \"\"\"Estimates multilayer SBM parameters from a graph and a partition\n\n :param G_intralayer: intralayer graph of interest\n :type G_intralayer: igraph.Graph\n :param G_interlayer: interlayer graph of interest\n :type G_interlayer: igraph.Graph\n :param layer_vec: list of each vertex's layer membership\n :type layer_vec: list[int]\n :param partition: partition of interest\n :type partition: louvain.RBConfigurationVertexPartitionWeightedLayers\n :param model: network layer topology (temporal, multilevel, multiplex)\n :type model: str\n :param N: number of nodes per layer (automatically computed if None)\n :type N: int\n :param T: number of layers in input graph (automatically computed if None)\n :type T: int\n :param Nt: vector of nodes per layer (automatically computed if None)\n :type Nt: int\n :param m_t: vector of total edge weights per layer (automatically computed if None)\n :type m_t: int\n :return: theta_in, theta_out, p, K\n :rtype: float, float, float, int\n \"\"\"\n\n if 'weight' not in G_intralayer.es:\n G_intralayer.es['weight'] = [1.0] * G_intralayer.ecount()\n\n # TODO: check if these None parameters and potentially caching calculate_persistence helps performance\n if T is None:\n T = max(layer_vec) + 1 # layer count\n\n if N is None:\n N = G_intralayer.vcount() // T\n\n if m_t is None: # compute total edge weights per layer\n m_t = [0] * T\n for e in G_intralayer.es:\n m_t[layer_vec[e.source]] += e['weight']\n\n if Nt is None: # compute total node counts per layer\n Nt = [0] * T\n for layer in layer_vec:\n Nt[layer] += 1\n\n K = len(partition)\n\n community = partition.membership\n m_t_in = [0] * T\n for e in G_intralayer.es:\n if community[e.source] == community[e.target] and layer_vec[e.source] == layer_vec[e.target]:\n 
m_t_in[layer_vec[e.source]] += e['weight']\n\n kappa_t_r_list = [[0] * K for _ in range(T)]\n for e in G_intralayer.es:\n layer = layer_vec[e.source]\n kappa_t_r_list[layer][community[e.source]] += e['weight']\n kappa_t_r_list[layer][community[e.target]] += e['weight']\n sum_kappa_t_sqr = [sum(x ** 2 for x in kappa_t_r_list[t]) for t in range(T)]\n\n theta_in = sum(2 * m_t_in[t] for t in range(T)) / sum(sum_kappa_t_sqr[t] / (2 * m_t[t]) for t in range(T))\n\n # guard for div by zero with e.g. a single community partition\n theta_out_numerator = sum(2 * m_t[t] - 2 * m_t_in[t] for t in range(T))\n theta_out_denominator = sum(2 * m_t[t] - sum_kappa_t_sqr[t] / (2 * m_t[t]) for t in range(T))\n if theta_out_denominator == 0:\n theta_out = 0\n else:\n theta_out = theta_out_numerator / theta_out_denominator\n\n calculate_persistence = persistence_function_from_model(model, G_interlayer, layer_vec=layer_vec, N=N, T=T, Nt=Nt)\n pers = calculate_persistence(community)\n if model == 'multiplex':\n # estimate p by solving polynomial root-finding problem with starting estimate p=0.5\n def f(x):\n coeff = 2 * (1 - 1 / K) / (T * (T - 1))\n return coeff * sum((T - n) * x ** n for n in range(1, T)) + 1 / K - pers\n\n # guard for div by zero with single community partition\n # (in this case, all community assignments persist across layers)\n p = fsolve(f, np.array([0.5]))[0] if pers < 1.0 and K > 1 else 1.0\n if p < 0:\n p = 0\n else:\n # guard for div by zero with single community partition\n # (in this case, all community assignments persist across layers)\n p = max((K * pers - 1) / (K - 1), 0) if pers < 1.0 and K > 1 else 1.0\n\n return theta_in, theta_out, p, K\n\n\ndef gamma_estimate(G, partition):\n \"\"\"Compute the \"correct\" value of gamma where modularity maximization becomes equivalent to maximum likelihood\n methods on a degree-corrected, planted partition stochastic block model.\n\n See https://doi.org/10.1103/PhysRevE.94.052315 for more details.\n\n :param G: graph of interest\n :type G: igraph.Graph\n :param partition: partition of interest\n :type partition: tuple[int] or louvain.RBConfigurationVertexPartition\n :return: gamma estimate\n :rtype: float\n \"\"\"\n\n if 'weight' not in G.es:\n G.es['weight'] = [1.0] * G.ecount()\n\n if not isinstance(partition, louvain.RBConfigurationVertexPartition):\n partition = louvain_part_with_membership(G, partition)\n\n omega_in, omega_out = estimate_singlelayer_SBM_parameters(G, partition)\n return gamma_estimate_from_parameters(omega_in, omega_out)\n\n\ndef gamma_estimate_from_parameters(omega_in, omega_out):\n \"\"\"Compute the \"correct\" value of gamma as in :meth:`~modularitypruning.parameter_estimation_utilities.gamma_estimate` from SBM parameters.\n\n :param omega_in: within-community edge propensity of a degree-corrected, planted partition SBM\n :type omega_in: float\n :param omega_out: between-community edge propensity of a degree-corrected, planted partition SBM\n :type omega_out: float\n :return: gamma estimate\n :rtype: float\n \"\"\"\n\n if omega_in == 0 or omega_out == 0:\n return None # degenerate partition, this could reasonably be taken to be 0\n\n return (omega_in - omega_out) / (np.log(omega_in) - np.log(omega_out))\n\n\ndef multiplex_omega_estimate_from_parameters(theta_in, theta_out, p, K, T, omega_max=1000):\n \"\"\"Returns the omega estimate for a multiplex multilayer model\n\n :param theta_in: SBM parameter\n :type theta_in: float\n :param theta_out: SBM parameter\n :type theta_out: float\n :param p: SBM parameter\n :type p: 
float\n :param K: number of blocks in SBM\n :type K: int\n :param T: number of layers in SBM\n :type T: int\n :param omega_max: maximum allowed value for omega\n :type omega_max: float\n :return: omega estimate\n :rtype: float\n \"\"\"\n\n if p == 0:\n return 0\n\n # if p is 1, the optimal omega is infinite (here, omega_max)\n if p >= 1.0 or theta_in == 1.0:\n return omega_max\n\n if theta_out == 0:\n return log(1 + p * K / (1 - p)) / (T * log(theta_in))\n return log(1 + p * K / (1 - p)) / (T * (log(theta_in) - log(theta_out)))\n\n\ndef temporal_multilevel_omega_estimate_from_parameters(theta_in, theta_out, p, K, omega_max=1000):\n \"\"\"Returns the omega estimate for a temporal or multilevel multilayer model\n\n :param theta_in: SBM parameter\n :type theta_in: float\n :param theta_out: SBM parameter\n :type theta_out: float\n :param p: SBM parameter\n :type p: float\n :param K: number of blocks in SBM\n :type K: int\n :param omega_max: maximum allowed value for omega\n :type omega_max: float\n :return: omega estimate\n :rtype: float\n \"\"\"\n\n if p == 0:\n return 0\n\n if theta_out == 0:\n return log(1 + p * K / (1 - p)) / (2 * log(theta_in)) if p < 1.0 else omega_max\n # if p is 1, the optimal omega is infinite (here, omega_max)\n return log(1 + p * K / (1 - p)) / (2 * (log(theta_in) - log(theta_out))) if p < 1.0 else omega_max\n\n\ndef ordinal_persistence(G_interlayer, community, N, T):\n # ordinal persistence (temporal model)\n return sum(community[e.source] == community[e.target] for e in G_interlayer.es) / (N * (T - 1))\n\n\ndef multilevel_persistence(G_interlayer, community, layer_vec, Nt, T):\n pers_per_layer = [0] * T\n for e in G_interlayer.es:\n pers_per_layer[layer_vec[e.target]] += (community[e.source] == community[e.target])\n\n pers_per_layer = [pers_per_layer[layer] / Nt[layer] for layer in range(T)]\n return sum(pers_per_layer) / (T - 1)\n\n\ndef categorical_persistence(G_interlayer, community, N, T):\n # categorical persistence (multiplex model)\n return sum(community[e.source] == community[e.target] for e in G_interlayer.es) / (N * T * (T - 1))\n\n\ndef omega_function_from_model(model, omega_max, T):\n \"\"\"Returns an appropriate function (depending on the model) for computing omega from multilayer SBM parameters\n\n Specifically, it will return versions of\n :meth:`temporal_multilevel_omega_estimate_from_parameters` or :meth:`multiplex_omega_estimate_from_parameters`.\n\n :param model: network layer topology (temporal, multilevel, multiplex)\n :type model: str\n :param omega_max: maximum allowed value for omega\n :type omega_max: float\n :param T: number of layers in input graph (automatically computed if None)\n :type T: int\n :return: an \"update_omega\" function\n :rtype: function\n \"\"\"\n if model == 'multiplex':\n def update_omega(theta_in, theta_out, p, K):\n return multiplex_omega_estimate_from_parameters(theta_in, theta_out, p, K, T, omega_max=omega_max)\n elif model == 'temporal' or model == 'multilevel':\n def update_omega(theta_in, theta_out, p, K):\n return temporal_multilevel_omega_estimate_from_parameters(theta_in, theta_out, p, K, omega_max=omega_max)\n else:\n raise ValueError(f\"Model {model} is not temporal, multilevel, or multiplex\")\n\n return update_omega\n\n\ndef persistence_function_from_model(model, G_interlayer, layer_vec=None, N=None, T=None, Nt=None):\n \"\"\"\n Returns a function to calculate persistence according to a given multilayer model\n\n :param model: network layer topology (temporal, multilevel, multiplex)\n :param 
G_interlayer: input graph containing all inter-layer edges\n :param layer_vec: vector of each vertex's layer membership\n :param N: number of nodes per layer\n :param T: number of layers in input graph\n :param Nt: vector of nodes per layer\n :return: calculate_persistence function\n \"\"\"\n\n # Note: non-uniform cases are not implemented\n if model == 'temporal':\n if N is None or T is None:\n raise ValueError(\"Parameters N and T cannot be None for temporal persistence calculation\")\n\n def calculate_persistence(community):\n return ordinal_persistence(G_interlayer, community, N, T)\n elif model == 'multilevel':\n if Nt is None or T is None or layer_vec is None:\n raise ValueError(\"Parameters layer_vec, Nt, T cannot be None for multilevel persistence calculation\")\n\n def calculate_persistence(community):\n return multilevel_persistence(G_interlayer, community, layer_vec, Nt, T)\n elif model == 'multiplex':\n if N is None or T is None:\n raise ValueError(\"Parameters N and T cannot be None for multiplex persistence calculation\")\n\n def calculate_persistence(community):\n return categorical_persistence(G_interlayer, community, N, T)\n else:\n raise ValueError(f\"Model {model} is not temporal, multilevel, or multiplex\")\n\n return calculate_persistence\n\n\ndef gamma_omega_estimate(G_intralayer, G_interlayer, layer_vec, membership, omega_max=1000, model='temporal',\n N=None, T=None, Nt=None, m_t=None):\n \"\"\"Returns the (gamma, omega) estimate for a multilayer network and a partition\n\n :param G_intralayer: intralayer graph of interest\n :type G_intralayer: igraph.Graph\n :param G_interlayer: interlayer graph of interest\n :type G_interlayer: igraph.Graph\n :param layer_vec: list of each vertex's layer membership\n :type layer_vec: list[int]\n :param membership: partition membership vector\n :type membership: tuple[int]\n :param omega_max: maximum allowed value for omega\n :type omega_max: float\n :param model: network layer topology (temporal, multilevel, multiplex)\n :type model: str\n :param N: number of nodes per layer (automatically computed if None)\n :type N: int\n :param T: number of layers in input graph (automatically computed if None)\n :type T: int\n :param Nt: vector of nodes per layer (automatically computed if None)\n :type Nt: int\n :param m_t: vector of total edge weights per layer (automatically computed if None)\n :type m_t: int\n :return: gamma estimate, omega estimate\n :rtype: float, float\n \"\"\"\n if T is None:\n T = max(layer_vec) + 1 # layer count\n\n partition = louvain_part_with_membership(G_intralayer, membership)\n theta_in, theta_out, p, K = estimate_multilayer_SBM_parameters(G_intralayer, G_interlayer, layer_vec, partition,\n model, N=N, T=T, Nt=Nt, m_t=m_t)\n update_omega = omega_function_from_model(model, omega_max, T=T)\n update_gamma = gamma_estimate_from_parameters\n\n gamma = update_gamma(theta_in, theta_out)\n omega = update_omega(theta_in, theta_out, p, K)\n return gamma, omega\n\n\ndef ranges_to_gamma_estimates(G, ranges):\n \"\"\"Compute gamma estimates as in :meth:`~modularitypruning.parameter_estimation_utilities.gamma_estimate`, given domains of optimality from :meth:`~modularitypruning.champ_utilities.CHAMP_2D`.\n\n :param G: graph of interest\n :type G: igraph.Graph\n :param ranges: list of ``(gamma_start, gamma_end, membership)`` tuples as returned from :meth:`~modularitypruning.champ_utilities.CHAMP_2D`\n :type ranges: list of tuple[float, float, tuple[int]]\n :return: a copy of input ranges with the corresponding gamma estimate 
appended to each tuple\n :rtype: list of tuple[float, float, tuple[int], float]\n \"\"\"\n\n return [(gamma_start, gamma_end, part, gamma_estimate(G, part)) for\n gamma_start, gamma_end, part in ranges]\n\n\ndef gamma_estimates_to_stable_partitions(gamma_estimates):\n \"\"\"Computes the stable partitions (i.e. those whose gamma estimates are within their domains of optimality), given\n domains of optimality and gamma estimates from :meth:`ranges_to_gamma_estimates`.\n\n See **[CITATION FORTHCOMING]** for more details.\n\n :param gamma_estimates: list of ``(gamma_start, gamma_end, membership, gamma_estimate)`` tuples as returned from\n :meth:`~modularitypruning.champ_utilities.CHAMP_2D`\n :type gamma_estimates: list[tuple]\n :return: list of community membership tuples of the stable partitions\n :rtype: list of tuple[int]\n \"\"\"\n\n return [membership for gamma_start, gamma_end, membership, gamma_estimate in gamma_estimates\n if gamma_estimate is not None and gamma_start <= gamma_estimate <= gamma_end]\n\n\ndef domains_to_gamma_omega_estimates(G_intralayer, G_interlayer, layer_vec, domains, model='temporal'):\n \"\"\"Compute (gamma, omega) estimates as in :meth:`~modularitypruning.parameter_estimation_utilities.gamma_omega_estimate`, given domains of optimality from :meth:`~modularitypruning.champ_utilities.CHAMP_3D`.\n\n :param G_intralayer: intralayer graph of interest\n :type G_intralayer: igraph.Graph\n :param G_interlayer: interlayer graph of interest\n :type G_interlayer: igraph.Graph\n :param layer_vec: list of each vertex's layer membership\n :type layer_vec: list[int]\n :param domains: list of ``(domain_vertices, membership)`` tuples as returned from :meth:`~modularitypruning.champ_utilities.CHAMP_3D`\n :type domains: list of tuple[list[float], tuple[int]]\n :param model: network layer topology (temporal, multilevel, multiplex)\n :type model: str\n :return: a copy of input domains with the corresponding gamma and omega estimates appended to each tuple\n :rtype: list of tuple[list[float], tuple[int], float, float]\n \"\"\"\n\n domains_with_estimates = []\n for polyverts, membership in domains:\n gamma_est, omega_est = gamma_omega_estimate(G_intralayer, G_interlayer, layer_vec, membership,\n model=model)\n domains_with_estimates.append((polyverts, membership, gamma_est, omega_est))\n return domains_with_estimates\n\n\ndef gamma_omega_estimates_to_stable_partitions(domains_with_estimates, return_membership_only=False):\n \"\"\"Computes the stable partitions (i.e. 
those whose resolution parameter estimates are within their domains of\n optimality), given domains of optimality and (gamma, omega) estimates from\n :meth:`domains_to_gamma_omega_estimates`.\n\n See **[CITATION FORTHCOMING]** for more details.\n\n :param domains_with_estimates: list of ``(domain_vertices, membership, gamma_estimate, omega_estimate)`` tuples as\n returned from :meth:`~modularitypruning.champ_utilities.CHAMP_3D`\n :type domains_with_estimates: list[tuple]\n :return: list of community membership tuples of the stable partitions\n :rtype: list of tuple[int]\n \"\"\"\n\n def left_or_right(x1, y1, x2, y2, x, y):\n \"\"\"Returns whether the point (x,y) is to the left or right of the line between (x1, y1) and (x2, y2).\"\"\"\n return (x - x1) * (y2 - y1) - (y - y1) * (x2 - x1) >= 0\n\n stable_partitions = []\n\n for polyverts, membership, gamma_est, omega_est in domains_with_estimates:\n if gamma_est is None or omega_est is None:\n print(gamma_est, omega_est)\n continue\n\n centroid_x = np.mean([x[0] for x in polyverts])\n centroid_y = np.mean([x[1] for x in polyverts])\n polygon_edges = []\n for i in range(len(polyverts)):\n p1, p2 = polyverts[i], polyverts[(i + 1) % len(polyverts)]\n if left_or_right(p1[0], p1[1], p2[0], p2[1], centroid_x, centroid_y):\n p1, p2 = p2, p1\n polygon_edges.append((p1, p2))\n\n left_or_rights = []\n for p1, p2 in polygon_edges:\n left_or_rights.append(left_or_right(p1[0], p1[1], p2[0], p2[1], gamma_est, omega_est))\n\n if all(x for x in left_or_rights) or all(not x for x in left_or_rights):\n # if the (gamma, omega) estimate is on the same side of all polygon edges, it lies within the domain\n if return_membership_only:\n stable_partitions.append(membership)\n else:\n stable_partitions.append((polyverts, membership, gamma_est, omega_est))\n\n return stable_partitions\n\n\ndef prune_to_stable_partitions(G, parts, gamma_start, gamma_end, restrict_num_communities=None,\n single_threaded=False):\n \"\"\"Runs our full pruning pipeline on a singlelayer network. Returns the pruned list of stable partitions.\n\n See **[CITATION FORTHCOMING]** for more details.\n\n :param G: graph of interest\n :type G: igraph.Graph\n :param parts: partitions to prune\n :type parts: iterable[tuple]\n :param gamma_start: starting gamma value for CHAMP\n :type gamma_start: float\n :param gamma_end: ending gamma value for CHAMP\n :type gamma_end: float\n :param restrict_num_communities: if not None, only use input partitions of this many communities\n :type restrict_num_communities: int or None\n :param single_threaded: if True, run the CHAMP step in serial\n :type single_threaded: bool\n :return: list of community membership tuples\n :rtype: list[tuple[int]]\n \"\"\"\n\n if not G.is_connected():\n warnings.warn(\"The pruning pipeline has not been thoroughly tested on disconnected graphs. 
If you run into \"\n \"problems, consider using the largest connected component of your graph.\")\n\n if G.is_weighted() and any(x != 1.0 for x in G.es['weight']):\n warnings.warn(\"The pruning pipeline does not fully handle weighted graphs and will proceed as though the input \"\n \"graph is unweighted.\")\n\n if isinstance(parts, louvain.RBConfigurationVertexPartition):\n # convert to (canonically represented) membership vectors if necessary\n parts = {sorted_tuple(part.membership) for part in parts}\n else:\n # assume parts contains membership vectors\n parts = {sorted_tuple(part) for part in parts}\n\n if restrict_num_communities is not None:\n parts = {part for part in parts if num_communities(part) == restrict_num_communities}\n\n if len(parts) == 0:\n return []\n\n ranges = CHAMP_2D(G, parts, gamma_start, gamma_end, single_threaded=single_threaded)\n gamma_estimates = ranges_to_gamma_estimates(G, ranges)\n stable_parts = gamma_estimates_to_stable_partitions(gamma_estimates)\n\n return stable_parts\n\n\ndef prune_to_multilayer_stable_partitions(G_intralayer, G_interlayer, layer_vec, model,\n parts, gamma_start, gamma_end, omega_start, omega_end,\n restrict_num_communities=None, single_threaded=False):\n \"\"\"Runs our full pruning pipeline on a multilayer network. Returns the pruned list of stable partitions.\n\n See **[CITATION FORTHCOMING]** for more details.\n\n NOTE: This method truncates omega estimates to ``omega_end - 1e-3`` in order to properly identify stable partitions\n with infinite interlayer coupling estimates (e.g. when all membership labels persist across layers). If\n ``omega_end`` is set too low, such partitions may be incorrectly identified as stable. As such, you should be\n somewhat wary of the returned partitions with zero community structure differences across layers.\n\n :param G_intralayer: intralayer graph of interest\n :type G_intralayer: igraph.Graph\n :param G_interlayer: interlayer graph of interest\n :type G_interlayer: igraph.Graph\n :param layer_vec: list of each vertex's layer membership\n :type layer_vec: list[int]\n :param model: network layer topology (temporal, multilevel, multiplex)\n :type model: str\n :param parts: partitions to prune\n :type parts: iterable[tuple]\n :param gamma_start: starting gamma value for CHAMP\n :type gamma_start: float\n :param gamma_end: ending gamma value for CHAMP\n :type gamma_end: float\n :param omega_start: starting omega value for CHAMP\n :type omega_start: float\n :param omega_end: ending omega value for CHAMP\n :type omega_end: float\n :param restrict_num_communities: if not None, only use input partitions of this many communities\n :type restrict_num_communities: int or None\n :param single_threaded: if True, run the CHAMP step in serial\n :type single_threaded: bool\n :return: list of community membership tuples\n :rtype: list[tuple[int]]\n \"\"\"\n check_multilayer_louvain_capabilities()\n\n if single_threaded:\n raise NotImplementedError(\"Single-threaded multilayer CHAMP was never implemented. This would be fairly easy\"\n \"to add, so please raise an issue if this feature is desired.\")\n\n # check graph is connected (cannot just check intra/interlayer graphs since they will both be disconnected)\n G_combined = ig.Graph(edges=[(e.source, e.target) for e in G_intralayer.es] +\n [(e.source, e.target) for e in G_interlayer.es])\n if not G_combined.is_connected() or G_combined.vcount() != G_interlayer.vcount():\n warnings.warn(\"The pruning pipeline has not been thoroughly tested on disconnected graphs. 
If you run into \"\n \"problems, consider using the largest connected component of your graph.\")\n\n if (G_intralayer.is_weighted() and any(x != 1.0 for x in G_intralayer.es['weight'])) or (\n G_interlayer.is_weighted() and any(x != 1.0 for x in G_interlayer.es['weight'])):\n warnings.warn(\"The pruning pipeline does not fully handle weighted graphs and will proceed as though the input \"\n \"graph is unweighted.\")\n\n if isinstance(parts, louvain.RBConfigurationVertexPartitionWeightedLayers):\n # convert to (canonically represented) membership vectors if necessary\n parts = {sorted_tuple(part.membership) for part in parts}\n else:\n # assume parts contains membership vectors\n parts = {sorted_tuple(part) for part in parts}\n\n if restrict_num_communities is not None:\n parts = {part for part in parts if num_communities(part) == restrict_num_communities}\n\n if len(parts) == 0:\n return []\n\n domains = CHAMP_3D(G_intralayer, G_interlayer, layer_vec, parts, gamma_start, gamma_end,\n omega_start, omega_end)\n domains_with_estimates = domains_to_gamma_omega_estimates(G_intralayer, G_interlayer, layer_vec, domains, model)\n\n # Truncate infinite omega solutions to our maximum omega\n domains_with_estimates = [(polyverts, membership, g_est, min(o_est, omega_end - 1e-3))\n for polyverts, membership, g_est, o_est in domains_with_estimates\n if g_est is not None]\n\n stable_parts = gamma_omega_estimates_to_stable_partitions(domains_with_estimates, return_membership_only=True)\n\n return stable_parts\n",
"# Runs a simple comparision between the runtime of our pruning method and the time required to run Louvain\n# In this example, our method runs in ~5% of the time required for Louvain, though we expect our method will be\n# (relatively) faster for larger graphs and those with stronger community structure.\n\nimport igraph as ig\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom modularitypruning import prune_to_stable_partitions\nfrom modularitypruning.louvain_utilities import repeated_parallel_louvain_from_gammas\nimport pickle\nimport os\nfrom time import time\n\nGAMMA_START = 0\nGAMMA_END = 3\n\n\ndef generate_runtimes(G, num_gammas):\n louvain_durations = []\n pruning_durations = []\n num_unique_partitions = []\n\n for num_louvain_iterations in num_gammas:\n gammas = np.linspace(GAMMA_START, GAMMA_END, num_louvain_iterations)\n louvain_start = time()\n parts = repeated_parallel_louvain_from_gammas(G, gammas, show_progress=False, chunk_dispatch=False)\n louvain_duration = time() - louvain_start\n\n pruning_start = time()\n _ = prune_to_stable_partitions(G, parts, GAMMA_START, GAMMA_END)\n pruning_duration = time() - pruning_start\n\n louvain_durations.append(louvain_duration)\n pruning_durations.append(pruning_duration)\n num_unique_partitions.append(len(parts))\n\n return louvain_durations, pruning_durations, num_unique_partitions\n\n\nif __name__ == \"__main__\":\n num_gammas = range(0, 25001, 1000)\n G = ig.Graph.Erdos_Renyi(n=1000, m=5000, directed=False)\n while not G.is_connected():\n G = ig.Graph.Erdos_Renyi(n=1000, m=5000, directed=False)\n\n if not os.path.exists(\"runtime_comparison_results.p\"):\n pickle.dump(generate_runtimes(G, num_gammas), open(\"runtime_comparison_results.p\", \"wb\"))\n\n louvain_durations, pruning_durations, num_unique_partitions = pickle.load(open(\"runtime_comparison_results.p\",\n \"rb\"))\n\n plt.figure()\n plt.plot(num_gammas, louvain_durations, linestyle='--', marker='o', label=\"Louvain\")\n plt.plot(num_gammas, pruning_durations, linestyle='--', marker='o', label=\"ModularityPruning\")\n plt.title(\"Runtime of Louvain and ModularityPruning\", fontsize=14)\n plt.xlabel(\"Number of Louvain iterations\", fontsize=14)\n plt.ylabel(\"Runtime (s)\", fontsize=14)\n plt.legend(fontsize=14)\n plt.tight_layout()\n plt.savefig(\"ER_louvain_pruning_runtime.pdf\")\n\n plt.figure()\n plt.plot(num_gammas, num_unique_partitions, linestyle='--', marker='o')\n plt.title(\"Number of Unique Partitions Returned by Louvain\", fontsize=14)\n plt.xlabel(\"Number of Louvain iterations\", fontsize=14)\n plt.ylabel(\"Number of unique partitions\", fontsize=14)\n plt.tight_layout()\n plt.savefig(\"ER_unique_partition_count.pdf\")\n"
]
| [
[
"numpy.log",
"numpy.array",
"numpy.mean"
],
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"numpy.linspace"
]
]
|
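
The stability test in the `modularitypruning` sources above reduces to checking whether a (gamma, omega) estimate falls on the same side of every edge of a convex polygon. A minimal, self-contained sketch of that test, using an illustrative square rather than real CHAMP domains:

```python
def point_in_convex_polygon(polyverts, x, y):
    """Same-side test: (x, y) lies inside a convex polygon iff the cross
    product against every (consistently ordered) edge has the same sign."""
    def left_or_right(x1, y1, x2, y2, px, py):
        # sign of the 2D cross product (p2 - p1) x (p - p1)
        return (px - x1) * (y2 - y1) - (py - y1) * (x2 - x1) >= 0

    sides = [left_or_right(*polyverts[i], *polyverts[(i + 1) % len(polyverts)], x, y)
             for i in range(len(polyverts))]
    return all(sides) or not any(sides)


square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
print(point_in_convex_polygon(square, 0.5, 0.5))  # True: inside
print(point_in_convex_polygon(square, 2.0, 0.5))  # False: outside
```
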
NaiboWang/Federated-Learning-PyTorch | [
"6f811ebbb783b9d279e5462789ff242968e17bc0"
]
| [
"src/Federated_distributed_order.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\n\"\"\"\n有序的从1到num_users训练model,第i个model初始化为第i-1个model训练后的模型\n\n与所有用户把数据交给center分批训练的唯一区别就在于互相看不到对方的数据\n\n\"\"\"\n\nimport os\nimport copy\nimport time\nimport pickle\nimport numpy as np\nimport torch.nn as nn\nfrom tqdm import tqdm\n\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom options import args_parser\nfrom update import LocalUpdate, test_inference\nfrom models import MLP, CNNMnist, CNNFashion_Mnist, CNNCifar, modelC, CNNCifaro, ModerateCNN\nfrom utils import get_dataset, average_weights, exp_details\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0,1'\n\nif __name__ == '__main__':\n start_time = time.time()\n\n # define paths\n path_project = os.path.abspath('..')\n logger = SummaryWriter('../logs')\n\n args = args_parser()\n exp_details(args)\n\n if args.gpu:\n print(\"args.gpu\",args.gpu)\n torch.cuda.set_device(int(args.gpu))\n device = 'cuda' if args.gpu else 'cpu'\n # load dataset and user groups\n train_dataset, test_dataset, user_groups = get_dataset(args)\n # user_groups: dict, 100个user,key是0-100,value为一个数组,组内有600个索引值(对于mnist来说),索引值对应mnist数组中的数据,根据non-iid或iid的不同来得到不同的索引\n # BUILD MODEL\n if args.model == 'cnn':\n # Convolutional neural netork\n if args.dataset == 'mnist':\n global_model = CNNMnist(args=args)\n elif args.dataset == 'fmnist':\n global_model = CNNFashion_Mnist(args=args)\n elif args.dataset == 'cifar':\n global_model = ModerateCNN(args=args)\n # global_model = CNNCifaro(args=args)\n\n elif args.model == 'mlp':\n # Multi-layer preceptron\n img_size = train_dataset[0][0].shape\n len_in = 1\n for x in img_size:\n len_in *= x\n global_model = MLP(dim_in=len_in, dim_hidden=64,\n dim_out=args.num_classes)\n else:\n exit('Error: unrecognized model')\n\n if args.parallel:\n global_model = torch.nn.DataParallel(global_model)\n global_model = global_model.to(device)\n # Set the model to train and send it to device.\n # global_model.to(device)\n # Set model to training mode\n global_model.train()\n print(global_model)\n\n # copy weights\n # state_dict() returns a dictionary containing a whole state of the module\n global_weights = global_model.state_dict()\n\n # Training\n train_loss, train_accuracy = [], []\n val_acc_list, net_list = [], []\n cv_loss, cv_acc = [], []\n print_every = 5\n val_loss_pre, counter = 0, 0\n # tqdm进度条功能 progress bar\n for epoch in tqdm(range(args.epochs)):\n print(f'\\n | Global Training Round : {epoch+1} |\\n')\n\n global_model.train() # 设置成训练模式\n idxs_users = range(args.num_users)\n\n for idx in idxs_users:\n print(\"Training at user %d/%d.\" % (idx+1,args.num_users))\n local_model = LocalUpdate(args=args, dataset=train_dataset,\n idxs=user_groups[idx], logger=logger)\n w, loss, global_model = local_model.update_weights(\n model=global_model, global_round=epoch)\n\n # update global weights将下个模型要用的模型改成上一个模型的初始值\n # global_model.load_state_dict(w)\n\n # loss_avg = sum(local_losses) / len(local_losses)\n # train_loss.append(loss_avg)\n\n # Calculate avg training accuracy over all users at every epoch\n list_acc, list_loss = [], []\n global_model.eval()\n # for c in range(args.num_users):\n # local_model = LocalUpdate(args=args, dataset=train_dataset,\n # idxs=user_groups[idx], logger=logger) # 只是返回了local_model的类\n # acc, loss = local_model.inference(model=global_model) # 这一步只是用了local_model的数据集,即用global_model在training dataset上做测试\n # list_acc.append(acc)\n # list_loss.append(loss)\n # train_accuracy.append(sum(list_acc)/len(list_acc))\n\n # print global training loss 
after every 'i' rounds\n if (epoch+1) % print_every == 0:\n print(f' \\nAvg Training Stats after {epoch+1} global rounds:')\n # print(f'Training Loss : {np.mean(np.array(train_loss))}')\n # print('Train Accuracy: {:.2f}% \\n'.format(100*train_accuracy[-1]))\n test_acc, test_loss = test_inference(args, global_model, train_dataset)\n print(\"test accuracy for training set: {} after {} epochs\\n\".format(test_acc, epoch + 1))\n test_acc, test_loss = test_inference(args, global_model, test_dataset)\n print(\"test accuracy for test set: {} after {} epochs\\n\".format(test_acc, epoch + 1))\n\n # Test inference after completion of training\n test_acc, test_loss = test_inference(args, global_model, test_dataset)\n\n print(f' \\n Results after {args.epochs} global rounds of training:')\n print(\"|---- Avg Train Accuracy: {:.2f}%\".format(100*train_accuracy[-1]))\n print(\"|---- Test Accuracy: {:.2f}%\".format(100*test_acc))\n\n # Saving the objects train_loss and train_accuracy:\n file_name = '../save/objects/{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}].pkl'.\\\n format(args.dataset, args.model, args.epochs, args.frac, args.iid,\n args.local_ep, args.local_bs)\n print(\"file_name:\",file_name)\n with open(file_name, 'wb') as f:\n pickle.dump([train_loss, train_accuracy], f)\n\n print('\\n Total Run Time: {0:0.4f}'.format(time.time()-start_time))\n\n # # PLOTTING (optional)\n # import matplotlib\n # import matplotlib.pyplot as plt\n # matplotlib.use('Agg')\n # print(\"Start Plot\")\n # # Plot Loss curve\n # plt.figure()\n # plt.title('Training Loss vs Communication rounds')\n # plt.plot(range(len(train_loss)), train_loss, color='r')\n # plt.ylabel('Training loss')\n # plt.xlabel('Communication Rounds')\n # plt.savefig('../save/fed_{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}]_loss.png'.\n # format(args.dataset, args.model, args.epochs, args.frac,\n # args.iid, args.local_ep, args.local_bs))\n # plt.show()\n # # # Plot Average Accuracy vs Communication rounds\n # plt.figure()\n # plt.title('Average Accuracy vs Communication rounds')\n # plt.plot(range(len(train_accuracy)), train_accuracy, color='k')\n # plt.ylabel('Average Accuracy')\n # plt.xlabel('Communication Rounds')\n # plt.savefig('../save/fed_{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}]_acc.png'.\n # format(args.dataset, args.model, args.epochs, args.frac,\n # args.iid, args.local_ep, args.local_bs))\n # plt.show()\n"
]
| [
[
"torch.nn.DataParallel"
]
]
|
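
The lone API flagged for this row, `torch.nn.DataParallel`, is only engaged above when `args.parallel` is set. A minimal sketch of the wrap-then-move pattern and its one common pitfall (wrapped parameter names gain a `module.` prefix); the model and shapes here are placeholders:

```python
import torch
import torch.nn as nn

device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Linear(10, 2)
if torch.cuda.device_count() > 1:
    # replicates the module across visible GPUs and splits the batch dim
    model = nn.DataParallel(model)
model = model.to(device)

out = model(torch.randn(4, 10).to(device))
print(out.shape)
# When wrapped, state_dict keys start with "module." (e.g. "module.weight"),
# which matters when loading checkpoints across single/multi-GPU setups.
print(next(iter(model.state_dict())))
```
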
asprasan/unified_framework | [
"45f9c20e4c66b0f0b9199c4a0bdaf8ecd1e82ad8"
]
| [
"train_unet.py"
]
| [
"'''\r\n-----------------------------------\r\nTRAINING CODE - SHIFTVARCONV + UNET\r\n-----------------------------------\r\n'''\r\nimport os \r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport logging\r\nimport glob\r\nimport argparse\r\nimport time\r\nfrom torch.utils import data\r\n\r\n\r\n## set random seed\r\ntorch.manual_seed(12)\r\nnp.random.seed(12)\r\n\r\n\r\nfrom logger import Logger\r\nfrom dataloader import Dataset_load\r\nfrom sensor import C2B\r\nfrom unet import UNet\r\nimport utils\r\n\r\n\r\n## parse arguments\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--expt', type=str, required=True, help='expt name')\r\nparser.add_argument('--epochs', type=int, default=500, help='num epochs to train')\r\nparser.add_argument('--batch', type=int, required=True, help='batch size for training and validation')\r\nparser.add_argument('--lr', type=float, default=1e-4, help='learning rate')\r\nparser.add_argument('--blocksize', type=int, default=8, help='tile size for code default 3x3')\r\nparser.add_argument('--subframes', type=int, default=16, help='num sub frames')\r\nparser.add_argument('--ckpt', type=str, default=None, help='checkpoint to load')\r\nparser.add_argument('--mask', type=str, default='random', help='\"impulse\" or \"random\" or \"opt\"')\r\nparser.add_argument('--two_bucket', action='store_true', help='1 bucket or 2 buckets')\r\nparser.add_argument('--gpu', type=str, required=True, help='GPU ID')\r\nargs = parser.parse_args()\r\n# print(args)\r\n\r\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\r\n\r\n## params for DataLoader\r\ntrain_params = {'batch_size': args.batch,\r\n 'shuffle': True,\r\n 'num_workers': 20,\r\n 'pin_memory': True}\r\nval_params = {'batch_size': args.batch,\r\n 'shuffle': False,\r\n 'num_workers': 20,\r\n 'pin_memory': True}\r\n\r\n\r\nlr = args.lr\r\nnum_epochs = args.epochs\r\n\r\nsave_path = os.path.join('/data/prasan/anupama/', args.expt)\r\nutils.create_dirs(save_path)\r\n\r\n\r\n## tensorboard summary logger\r\nlogger = Logger(os.path.join(save_path, 'logs'))\r\n\r\n\r\n## configure runtime logging\r\nlogging.basicConfig(level=logging.INFO,\r\n filename=os.path.join(save_path, 'logs', 'logfile.log'), \r\n format='%(asctime)s - %(message)s', \r\n filemode='w' if not args.ckpt else 'a')\r\n# logger=logging.getLogger()#.setLevel(logging.INFO)\r\nconsole = logging.StreamHandler()\r\nconsole.setLevel(logging.INFO)\r\nconsole.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))\r\nlogging.getLogger('').addHandler(console)\r\nlogging.info(args)\r\n\r\n\r\n\r\n## dataloaders using hdf5 file\r\n# data_path = '/data/prasan/anupama/dataset/GoPro_patches_ds2_s16-8_p64-32.hdf5'\r\ndata_path = '/data/prasan/anupama/dataset/GoPro_patches_ds2_s7-7_p64-32.hdf5'\r\n\r\n## initializing training and validation data generators\r\ntraining_set = Dataset_load(data_path, dataset='train', num_samples='all')\r\ntraining_generator = data.DataLoader(training_set, **train_params)\r\nlogging.info('Loaded training set: %d videos'%(len(training_set)))\r\n\r\nvalidation_set = Dataset_load(data_path, dataset='test', num_samples=60000)\r\nvalidation_generator = data.DataLoader(validation_set, **val_params)\r\nlogging.info('Loaded validation set: %d videos'%(len(validation_set)))\r\n\r\n\r\n\r\n## initialize nets\r\n# c2b = C2B(block_size=args.blocksize, sub_frames=args.subframes, mask=args.mask, two_bucket=args.two_bucket).cuda()\r\nif not args.two_bucket:\r\n uNet = UNet(in_channel=1, out_channel=args.subframes, 
instance_norm=False).cuda()\r\nelse:\r\n uNet = UNet(in_channel=2, out_channel=args.subframes, instance_norm=False).cuda() \r\n# uNet = UNet(n_channels=1, n_classes=16).cuda()\r\n\r\n## optimizer\r\noptimizer = torch.optim.Adam(list(uNet.parameters()), lr=lr, weight_decay=1e-5)\r\nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.9, \r\n patience=5, min_lr=1e-6, verbose=True)\r\n\r\n## load checkpoint\r\nif args.ckpt is None:\r\n start_epoch = 0\r\n logging.info('No checkpoint, initialized net')\r\nelse:\r\n ckpt = torch.load(os.path.join(save_path, 'model', args.ckpt))\r\n # c2b.load_state_dict(ckpt['c2b_state_dict'])\r\n uNet.load_state_dict(ckpt['unet_state_dict'])\r\n optimizer.load_state_dict(ckpt['opt_state_dict'])\r\n start_epoch = ckpt['epoch'] + 1\r\n uNet.train()\r\n logging.info('Loaded checkpoint from epoch %d'%(start_epoch-1))\r\n# torch.save(c2b.code, os.path.join(save_path, 'model', 'exposure_code.pth'))\r\n\r\n\r\nlogging.info('Starting training')\r\nfor i in range(start_epoch, start_epoch+num_epochs): \r\n ## TRAINING\r\n train_iter = 0\r\n final_loss_sum = 0.\r\n tv_loss_sum = 0.\r\n loss_sum = 0.\r\n psnr_sum = 0.\r\n\r\n for gt_vid in training_generator: \r\n gt_vid = gt_vid.cuda()\r\n if not args.two_bucket:\r\n # b1 = c2b(gt_vid) # (N,1,H,W)\r\n b1 = torch.mean(gt_vid, dim=1, keepdim=True)\r\n # interm_vid = utils.impulse_inverse(b1, block_size=args.blocksize)\r\n # assert interm_vid.shape == gt_vid.shape \r\n highres_vid = uNet(b1) # (N,16,H,W)\r\n else:\r\n b1, b0 = c2b(gt_vid)\r\n b_stack = torch.cat([b1,b0], dim=1)\r\n highres_vid = uNet(b_stack)\r\n\r\n psnr_sum += utils.compute_psnr(highres_vid, gt_vid).item()\r\n\r\n ## LOSSES\r\n final_loss = utils.weighted_L1loss(highres_vid, gt_vid)\r\n final_loss_sum += final_loss.item()\r\n\r\n tv_loss = utils.gradx(highres_vid).abs().mean() + utils.grady(highres_vid).abs().mean()\r\n tv_loss_sum += tv_loss.item()\r\n\r\n loss = final_loss + 0.1*tv_loss\r\n loss_sum += loss.item()\r\n\r\n ## BACKPROP\r\n optimizer.zero_grad()\r\n loss.backward() \r\n optimizer.step()\r\n\r\n if train_iter % 1000 == 0:\r\n logging.info('epoch: %3d \\t iter: %5d \\t loss: %.4f'%(i, train_iter, loss.item()))\r\n\r\n train_iter += 1\r\n\r\n\r\n logging.info('Total train iterations: %d'%(train_iter))\r\n logging.info('Finished epoch %3d with loss: %.4f psnr: %.4f'\r\n %(i, loss_sum/train_iter, psnr_sum/len(training_set)))\r\n\r\n\r\n ## dump tensorboard summaries\r\n logger.scalar_summary(tag='training/loss', value=loss_sum/train_iter, step=i)\r\n logger.scalar_summary(tag='training/final_loss', value=final_loss_sum/train_iter, step=i)\r\n logger.scalar_summary(tag='training/tv_loss', value=tv_loss_sum/train_iter, step=i)\r\n logger.scalar_summary(tag='training/psnr', value=psnr_sum/len(training_set), step=i)\r\n logging.info('Dumped tensorboard summaries for epoch %4d'%(i))\r\n\r\n\r\n ## VALIDATION\r\n if ((i+1) % 2 == 0) or ((i+1) == (start_epoch+num_epochs)): \r\n logging.info('Starting validation')\r\n val_iter = 0\r\n val_loss_sum = 0.\r\n val_psnr_sum = 0.\r\n val_ssim_sum = 0.\r\n uNet.eval()\r\n\r\n with torch.no_grad():\r\n for gt_vid in validation_generator:\r\n \r\n gt_vid = gt_vid.cuda()\r\n if not args.two_bucket:\r\n # b1 = c2b(gt_vid) # (N,1,H,W)\r\n b1 = torch.mean(gt_vid, dim=1, keepdim=True)\r\n # interm_vid = utils.impulse_inverse(b1, block_size=args.blocksize)\r\n highres_vid = uNet(b1) # (N,16,H,W)\r\n else:\r\n b1, b0 = c2b(gt_vid)\r\n b_stack = torch.cat([b1,b0], dim=1)\r\n 
highres_vid = uNet(b_stack)\r\n\r\n                val_psnr_sum += utils.compute_psnr(highres_vid, gt_vid).item()\r\n                val_ssim_sum += utils.compute_ssim(highres_vid, gt_vid).item()\r\n                \r\n                ## loss\r\n                final_loss = utils.weighted_L1loss(highres_vid, gt_vid)\r\n                tv_loss = utils.gradx(highres_vid).abs().mean() + utils.grady(highres_vid).abs().mean()\r\n                val_loss_sum += (final_loss + 0.1*tv_loss).item()\r\n\r\n                if val_iter % 1000 == 0:\r\n                    print('In val iter %d'%(val_iter))\r\n\r\n                val_iter += 1\r\n\r\n        logging.info('Total val iterations: %d'%(val_iter))\r\n        logging.info('Finished validation with loss: %.4f psnr: %.4f ssim: %.4f'\r\n                     %(val_loss_sum/val_iter, val_psnr_sum/len(validation_set), val_ssim_sum/len(validation_set)))\r\n\r\n        scheduler.step(val_loss_sum/val_iter)\r\n        uNet.train()\r\n\r\n        ## dump tensorboard summaries\r\n        logger.scalar_summary(tag='validation/loss', value=val_loss_sum/val_iter, step=i)\r\n        logger.scalar_summary(tag='validation/psnr', value=val_psnr_sum/len(validation_set), step=i)\r\n        logger.scalar_summary(tag='validation/ssim', value=val_ssim_sum/len(validation_set), step=i)\r\n\r\n    ## CHECKPOINT\r\n    if ((i+1) % 10 == 0) or ((i+1) == (start_epoch+num_epochs)):\r\n        utils.save_checkpoint(state={'epoch': i, \r\n                                     'unet_state_dict': uNet.state_dict(),\r\n                                     # 'c2b_state_dict': c2b.state_dict(),\r\n                                     'opt_state_dict': optimizer.state_dict()},\r\n                              save_path=os.path.join(save_path, 'model'),\r\n                              filename='model_%.6d.pth'%(i))\r\n        logging.info('Saved checkpoint for epoch {}'.format(i))\r\n\r\nlogger.writer.flush()\r\nlogging.info('Finished training')"
]
| [
[
"torch.cat",
"numpy.random.seed",
"torch.no_grad",
"torch.manual_seed",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.utils.data.DataLoader",
"torch.mean"
]
]
|
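
This training script drives `torch.optim.lr_scheduler.ReduceLROnPlateau` from the validation loss. A minimal sketch of the pattern with a dummy model and loss; note that `step()` belongs exactly once per validation pass, since every extra call advances the plateau (patience) counter:

```python
import torch

net = torch.nn.Linear(8, 1)
opt = torch.optim.Adam(net.parameters(), lr=1e-4, weight_decay=1e-5)
sched = torch.optim.lr_scheduler.ReduceLROnPlateau(
    opt, mode='min', factor=0.9, patience=5, min_lr=1e-6)

for epoch in range(30):
    loss = net(torch.randn(16, 8)).pow(2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()

    val_loss = 1.0  # stand-in for a plateaued validation loss
    # one step() per validation pass; calling it twice halves the effective
    # patience and drops the learning rate twice as fast as configured
    sched.step(val_loss)

print(opt.param_groups[0]['lr'])  # reduced by factor 0.9 after each plateau
```
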
binary-signal/some-what-homomorphic-encryption | [
"861c752416e2669a4b9e1824f93b5593a8b4abd6"
]
| [
"demos/public_key_demo.py"
]
| [
"# -*- coding: utf-8 -*-\n\nfrom keys.public_key import PublicKey\nimport matplotlib.pyplot as plt\nfrom timeit import default_timer as timer\n\n\ndef test_key_size(L_low, L_max=20, units='mb', showFigure=False, file='sec_pam_vs_key_size.png'):\n \"\"\"\n test function for key size\n \"\"\"\n print(\"\\n\\nRunning test for security parameter vs key size | L in [{}, {}]\\n\".format(1, L_max))\n\n x_axis = [x for x in range(0, L_max + 1)]\n\n sizes = [0, 0]\n\n for L in range(2, L_max + 1):\n k = PublicKey(L)\n sizes.append(k.calc_key_size(units=units, verbose=True))\n\n fig = plt.figure()\n plt.plot(x_axis, sizes)\n plt.xlabel(\"Security parameter\")\n plt.ylabel(\"Public Key Size in {}\".format(units))\n fig.savefig('sec_pam_vs_key_size.png', dpi=fig.dpi)\n\n print(\"test end\")\n if showFigure:\n plt.show()\n\n\ndef test_key_time(L_low, L_max, units='sec', showFigure=False, file='keygen_vs_time.png'):\n \"\"\"\n test function for key generation time\n \"\"\"\n print(\"\\n\\nRunning test for security parameter vs key generation time | L in [{}, {}]\\n\".format(1, L_max))\n\n conversion = {\n 'sec': [1],\n 'hour': [60 ** 2],\n 'days': [24 * (60 ** 2)]\n }\n times = [0, 0] # quick fix for keygen with L=1 assume 0 time\n\n x_axis = [x for x in range(0, L_max + 1)]\n\n for L in range(2, L_max + 1):\n k = PublicKey(L)\n print(k)\n start = timer()\n sk, pk = k.keygen(save=False)\n end = timer()\n\n t = float(end - start) / conversion[units][0]\n times.append(t)\n print(\"L: {} -> time: {:.4f} {}\".format(L, t, units))\n\n fig = plt.figure()\n\n plt.plot(x_axis, times)\n plt.xlabel(\"Security parameter\")\n plt.ylabel(\"Time for Key Generation (pk,sk) in {}\".format(units))\n fig.savefig(file, dpi=fig.dpi)\n\n print(\"test end\")\n if showFigure:\n plt.show()\n\n\nif __name__ == \"__main__\":\n units_size = 'mb'\n units_time = 'sec'\n L_low = 2\n L_max = 10\n\n test_key_size(L_low, L_max=L_max, showFigure=False, units=units_size)\n\n test_key_time(L_low, L_max=L_max, showFigure=False, units=units_time)\n"
]
| [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
]
|
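
The demo above times key generation with `timeit.default_timer` and plots the result. The same measure-then-plot skeleton with a stand-in workload, so it runs without the `PublicKey` class (the backend choice and file name are illustrative):

```python
import matplotlib
matplotlib.use('Agg')  # headless backend, so savefig needs no display
import matplotlib.pyplot as plt
from timeit import default_timer as timer

params, times = [], []
for L in range(2, 11):
    start = timer()
    _ = sum(i * i for i in range(20000 * L))  # stand-in for keygen(L)
    times.append(timer() - start)
    params.append(L)

fig = plt.figure()
plt.plot(params, times)
plt.xlabel("Security parameter")
plt.ylabel("Time for key generation in sec")
fig.savefig("keygen_vs_time.png", dpi=fig.dpi)
```
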
AmorosTech/RP-R-CNN | [
"45557a69ae9789e2662e3b937feb7624319a3e73"
]
| [
"rcnn/modeling/mask_rcnn/maskiou/loss.py"
]
| [
"import torch\n\nfrom models.ops import l2_loss\nfrom rcnn.core.config import cfg\n\n\nclass MaskIoULossComputation(object):\n def __init__(self, loss_weight):\n self.loss_weight = loss_weight\n\n def __call__(self, labels, pred_maskiou, gt_maskiou):\n positive_inds = torch.nonzero(labels > 0).squeeze(1)\n labels_pos = labels[positive_inds]\n if labels_pos.numel() == 0:\n return pred_maskiou.sum() * 0\n gt_maskiou = gt_maskiou.detach()\n maskiou_loss = l2_loss(pred_maskiou[positive_inds, labels_pos], gt_maskiou)\n maskiou_loss = self.loss_weight * maskiou_loss\n\n return maskiou_loss\n\n\ndef maskiou_loss_evaluator():\n loss_weight = cfg.MRCNN.MASKIOU.LOSS_WEIGHT\n loss_evaluator = MaskIoULossComputation(loss_weight)\n\n return loss_evaluator\n"
]
| [
[
"torch.nonzero"
]
]
|
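
`torch.nonzero` is used above to pull the positive (non-background) RoI indices out of a label vector before pairwise indexing into per-class predictions. A small sketch with made-up labels and scores:

```python
import torch

labels = torch.tensor([0, 2, 0, 1, 3])  # 0 = background
pred = torch.randn(5, 4)                # per-RoI, per-class scores

positive_inds = torch.nonzero(labels > 0).squeeze(1)  # tensor([1, 3, 4])
labels_pos = labels[positive_inds]
if labels_pos.numel() == 0:
    # same guard as the loss class above: keep the graph alive, zero loss
    loss = pred.sum() * 0
else:
    # pairwise indexing: row i's score at its own class column
    loss = pred[positive_inds, labels_pos].pow(2).mean()
print(positive_inds, loss.item())
```
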
Joukahainen/finmarketpy | [
"59e340e1411edceba121a0943fb500d8bda2c6f2"
]
| [
"finmarketpy/economics/marketliquidity.py"
]
| [
"__author__ = 'saeedamen' # Saeed Amen\n\n#\n# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the\n# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and limitations under the License.\n#\n\nimport pandas\n\nfrom findatapy.util.loggermanager import LoggerManager\n\n\nclass MarketLiquidity(object):\n \"\"\"Calculates spread between bid/ask and also tick count.\n\n \"\"\"\n\n def __init__(self):\n self.logger = LoggerManager().getLogger(__name__)\n return\n\n def calculate_spreads(self, data_frame, asset, bid_field='bid', ask_field='ask'):\n if isinstance(asset, str): asset = [asset]\n\n cols = [x + '.spread' for x in asset]\n\n data_frame_spreads = pandas.DataFrame(index=data_frame.index, columns=cols)\n\n for a in asset:\n data_frame_spreads[a + '.spread'] = data_frame[a + \".\" + ask_field] - data_frame[a + \".\" + bid_field]\n\n return data_frame_spreads\n\n def calculate_tick_count(self, data_frame, asset, freq='1h'):\n if isinstance(asset, str): asset = [asset]\n\n data_frame_tick_count = data_frame.resample(freq, how='count').dropna()\n data_frame_tick_count = data_frame_tick_count[[0]]\n\n data_frame_tick_count.columns = [x + '.event' for x in asset]\n\n return data_frame_tick_count\n\n\nif __name__ == '__main__':\n # see examples\n pass\n"
]
| [
[
"pandas.DataFrame"
]
]
|
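
`calculate_tick_count` above relies on `resample(freq, how='count')`, a signature that later pandas versions removed; the chained method form computes the same thing. A sketch of both the spread and the tick count on synthetic ticks (symbol and prices are illustrative):

```python
import numpy as np
import pandas as pd

idx = pd.date_range("2021-01-04", periods=180, freq="min")
bid = 1.10 + 0.001 * np.random.rand(180)
ticks = pd.DataFrame({"EURUSD.bid": bid, "EURUSD.ask": bid + 0.0002}, index=idx)

spread = ticks["EURUSD.ask"] - ticks["EURUSD.bid"]

# resample(freq, how='count') was removed from pandas; the chained
# call is the current spelling of the same per-bucket tick count
tick_count = ticks.resample("1h").count()
print(spread.mean())
print(tick_count.head())
```
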
ojschumann/pyeospac | [
"db94a31866abd4bf311d82d4fea318ed5b9ee357"
]
| [
"setup_eospac.py"
]
| [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import Extension\nimport os.path\nimport re\nfrom datetime import datetime\nimport numpy\n\ndef setup_eospac(cfg):\n\n EOSPAC_INCLUDE = os.path.join(cfg['path'], \"include\", cfg['arch'], cfg['compiler'])\n EOSPAC_INCLUDE2 = os.path.join(cfg['path'], \"src\")\n EOSPAC_LIB = os.path.join(cfg['path'], \"lib\", cfg['arch'], cfg['compiler'])\n\n for test_path in [cfg['path'], EOSPAC_INCLUDE, EOSPAC_LIB]:\n if not os.path.exists(test_path):\n raise OSError(\"Path does not exist: '{0}'. Please edit setup.cfg !\".format(test_path))\n\n\n\n #===============================================================================#\n # Creating constants.py from C headers eos_Interface.h\n #===============================================================================#\n\n with open(os.path.join(EOSPAC_INCLUDE, \"eos_Interface.h\"), 'r') as f:\n header = f.readlines()\n\n sections = {'tables':\n { 'expr': r'/\\* table types: \\*/',\n 'begin': 0, \n 'previous': None},\n 'options': \n { 'expr': r'/\\* Table setup and interpolation option constants \\*/',\n 'previous': 'tables'},\n 'info':\n { 'expr': r'/\\* Data information constants \\*/',\n 'previous': 'options'},\n 'errors':\n { 'expr': r'/\\* Error code constants \\*/',\n 'previous': 'info',\n 'end': -1}\n }\n\n for idx, line in enumerate(header):\n for section_name, section_dict in sections.items():\n if re.match(section_dict['expr'], line):\n section_dict['begin'] = idx+1\n if section_dict['previous']:\n sections[section_dict['previous']]['end'] = idx-1\n\n with open('eospac/eospac/constants.py', 'w') as f:\n f.write(\"\"\"#!/usr/bin/python \n# -*- coding: utf-8 -*-\n\n# Warning! This file is automatically generated from the eos_Interface.h\n# header by the setup.py script. All manual changes will be overwritten\n# at the next install.\n# Created on: {0}\\n\\n\"\"\".format(datetime.now()))\n for section_name, section_dict in sections.items():\n f.write('{0} = dict(\\n'.format(section_name))\n out_txt = []\n for line in header[section_dict['begin']:section_dict['end']]:\n if re.match('^static const EOS_INTEGER EOS.*', line):\n txt = re.sub('^static const EOS_INTEGER EOS_', ' '*4, line)\n txt = re.sub('/\\*', '#', txt)\n txt = re.sub('\\*/', '', txt)\n txt = re.sub(';', ',', txt)\n if section_name == 'options':\n # convert options section keys to lowercase\n comma_idx = txt.find(',')\n txt = txt[:comma_idx].lower() + txt[comma_idx:]\n out_txt.append(txt)\n f.write(''.join(out_txt))\n f.write(')\\n\\n')\n\n return [Extension(\"eospac.eospac.libpyeospac\",\n sources=[\"eospac/eospac/libpyeospac.pyx\"],\n include_dirs=[numpy.get_include(), EOSPAC_INCLUDE],\n library_dirs=[EOSPAC_LIB],\n libraries=['eospac6']),\n Extension(\"eospac.eospac.libsesio\",\n sources=[\"eospac/eospac/libsesio.pyx\"],\n include_dirs=[numpy.get_include(), EOSPAC_INCLUDE],\n library_dirs=[EOSPAC_LIB],\n libraries=['eospac6']),\n Extension(\"eospac.eospac.libsesutils\",\n sources=[\"eospac/eospac/libsesutils.pyx\"],\n include_dirs=[numpy.get_include(), EOSPAC_INCLUDE, EOSPAC_INCLUDE2],\n library_dirs=[EOSPAC_LIB],\n libraries=['eospac6'])]\n\n\n\n\n\n"
]
| [
[
"numpy.get_include"
]
]
|
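
The build helper above compiles extensions against the NumPy C headers, which is what `numpy.get_include()` supplies. A generic sketch of that pattern; the package and source names are hypothetical:

```python
from setuptools import Extension, setup
import numpy

# the portable part is passing numpy.get_include() so the C compiler
# can locate the NumPy headers regardless of where NumPy is installed
ext = Extension(
    "mypkg._native",
    sources=["mypkg/_native.c"],
    include_dirs=[numpy.get_include()],
)

if __name__ == "__main__":
    setup(name="mypkg", version="0.1", ext_modules=[ext])
```
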
cherise215/Cooperative_Training_and_Latent_Space_Data_Augmentation | [
"f5a987fb4babb891a41116e934a9ce6432e0d803"
]
| [
"medseg/dataset_loader/acdc_preprocess.py"
]
| [
"import os\n\nimport numpy as np\nimport glob\nimport SimpleITK as sitk\nfrom os.path import join\nimport matplotlib.pyplot as plt\nimport SimpleITK as sitk\n\nimport time\n\n\nfrom medseg.dataset_loader.dataset_utils import resample_by_spacing\n\n\ndef normalize_minmax_data(image_data):\n \"\"\"\n # 3D MRI scan is normalized to range between 0 and 1 using min-max normalization.\n Here, the minimum and maximum values are used as 2nd and 98th percentiles respectively from the 3D MRI scan.\n We expect the outliers to be away from the range of [0,1].\n input params :\n image_data : 3D MRI scan to be normalized using min-max normalization\n returns:\n final_image_data : Normalized 3D MRI scan obtained via min-max normalization.\n \"\"\"\n min_val_2p = np.percentile(image_data, 2)\n max_val_98p = np.percentile(image_data, 98)\n final_image_data = np.zeros(\n (image_data.shape[0], image_data.shape[1], image_data.shape[2]), dtype=np.float32)\n # min-max norm on total 3D volume\n image_data[image_data < min_val_2p] = min_val_2p\n image_data[image_data > max_val_98p] = max_val_98p\n\n final_image_data = (image_data - min_val_2p) / (1e-10 + max_val_98p - min_val_2p)\n\n return final_image_data\n\n\ndef crop_or_pad_slice_to_size(img_slice, nx, ny):\n \"\"\"\n To crop the input 2D slice for the given dimensions\n input params :\n image_slice : 2D slice to be cropped\n nx : dimension in x\n ny : dimension in y\n returns:\n slice_cropped : cropped 2D slice\n \"\"\"\n slice_cropped = np.zeros((nx, ny))\n x, y = img_slice.shape\n\n x_s = (x - nx) // 2\n y_s = (y - ny) // 2\n x_c = (nx - x) // 2\n y_c = (ny - y) // 2\n\n if x > nx and y > ny:\n slice_cropped = img_slice[x_s:x_s + nx, y_s:y_s + ny]\n else:\n slice_cropped = np.zeros((nx, ny))\n if x <= nx and y > ny:\n slice_cropped[x_c:x_c + x, :] = img_slice[:, y_s:y_s + ny]\n elif x > nx and y <= ny:\n slice_cropped[:, y_c:y_c + y] = img_slice[x_s:x_s + nx, :]\n else:\n slice_cropped[x_c:x_c + x, y_c:y_c + y] = img_slice[:, :]\n\n return slice_cropped\n\n\ndef correct_image(sitkImage, threshhold=0.001):\n corrector = sitk.N4BiasFieldCorrectionImageFilter()\n start = time.time()\n\n maskImage = sitkImage > threshhold\n correctedImage = corrector.Execute(sitkImage, maskImage)\n end = time.time()\n\n #print ('debiasing costs {} s'.format(end-start))\n return correctedImage, end - start\n\n\ndef resample_np_array(normalized_array, old_spacing, interp=sitk.sitkLinear, keep_z_spacing=True, new_spacing=[1.367, 1.367, -1]):\n sitkImage = sitk.GetImageFromArray(normalized_array)\n sitkImage.SetSpacing(spacing=old_spacing)\n resampleImage = resample_by_spacing(\n sitkImage, new_spacing, keep_z_spacing=keep_z_spacing, interpolator=interp)\n resampleArray = sitk.GetArrayFromImage(resampleImage)\n new_spacing = resampleImage.GetSpacing()\n ##print (new_spacing)\n return resampleArray\n\n\nif __name__ == '__main__':\n total_bias_correct_time = 0.\n count = 0\n new_spacing = [1.36719, 1.36719, -1]\n pid_list = ['%03d' % (i + 1) for i in range(29, 100)]\n\n for pid in pid_list:\n for frame in ['ED', 'ES']:\n image_path_format = \"/vol/medic02/users/cc215/data/ACDC/dataset/all/patient{}/image_\" + frame + \".nii.gz\"\n label_path_format = \"/vol/medic02/users/cc215/data/ACDC/dataset/all/patient{}/label_\" + frame + \".nii.gz\"\n output_dir = '/vol/medic02/users/cc215/data/ACDC/dataset/preprocessed/' + frame\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n count += 1\n print(pid)\n # load image and label\n sitkImage = 
sitk.ReadImage(image_path_format.format(pid))\n sitkImage = sitk.Cast(sitkImage, sitk.sitkFloat32)\n sitkLabel = sitk.ReadImage(label_path_format.format(pid))\n sitkLabel = sitk.Cast(sitkLabel, sitk.sitkInt16)\n\n orig_spacing = sitkImage.GetSpacing()\n\n # correct bias on images\n #sitkImage, cost_time = correct_image(sitkImage, threshhold=0.001)\n #total_bias_correct_time += cost_time\n print(sitkImage.GetDirection())\n\n # intensity normalization on images\n imgArray = sitk.GetArrayFromImage(sitkImage)\n normalized_array = normalize_minmax_data(imgArray)\n\n # resample image and label\n resampled_image_array = resample_np_array(\n normalized_array, old_spacing=orig_spacing, interp=sitk.sitkLinear, keep_z_spacing=True, new_spacing=new_spacing)\n\n label_array = sitk.GetArrayFromImage(sitkLabel)\n label_array = np.uint8(label_array)\n resampled_label_array = resample_np_array(\n label_array, old_spacing=orig_spacing, interp=sitk.sitkNearestNeighbor, keep_z_spacing=True, new_spacing=new_spacing)\n\n # change RV labels from 1 to 3 and LV from 3 to 1\n resampled_label_array = (resampled_label_array == 3) * 1 + \\\n (resampled_label_array == 2) * 2 + (resampled_label_array == 1) * 3\n\n # save images as nrrd\n img_file_path = join(output_dir, '{}_img.nrrd'.format(pid))\n seg_file_path = join(output_dir, '{}_seg.nrrd'.format(pid))\n\n image = sitk.GetImageFromArray(resampled_image_array)\n image.SetSpacing((new_spacing[0], new_spacing[1], orig_spacing[2]))\n sitk.WriteImage(image, img_file_path)\n\n seg = sitk.GetImageFromArray(resampled_label_array)\n seg.SetSpacing((new_spacing[0], new_spacing[1], orig_spacing[2]))\n sitk.WriteImage(seg, seg_file_path)\n\n # all_images_list.append(cropped_image)\n print('average time:', np.round(total_bias_correct_time / count, 3))\n"
]
| [
[
"numpy.round",
"numpy.percentile",
"numpy.uint8",
"numpy.zeros"
]
]
|
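
`normalize_minmax_data` above clips a 3D volume at its 2nd and 98th percentiles before min-max scaling to [0, 1]. The same technique condensed with `np.clip`, exercised on a random stand-in volume:

```python
import numpy as np

def normalize_minmax_2_98(volume):
    """Clip to the 2nd/98th percentiles, then min-max scale to [0, 1]."""
    lo, hi = np.percentile(volume, 2), np.percentile(volume, 98)
    clipped = np.clip(volume, lo, hi)  # same effect as the two masked assignments
    return (clipped - lo) / (1e-10 + hi - lo)

vol = np.random.randn(8, 64, 64).astype(np.float32)
out = normalize_minmax_2_98(vol)
print(out.min(), out.max())  # ~0.0 and ~1.0
```
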
f1recracker/tensorflow-datasets | [
"d4e0e83a2c7b5ee8b807d036493ef0329e8f446e"
]
| [
"tensorflow_datasets/image/imagenet.py"
]
| [
"# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Imagenet datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport os\nimport tarfile\n\nimport tensorflow as tf\nimport tensorflow_datasets.public_api as tfds\n\n\n_DESCRIPTION = '''\\\nILSVRC 2012, aka ImageNet is an image dataset organized according to the\nWordNet hierarchy. Each meaningful concept in WordNet, possibly described by\nmultiple words or word phrases, is called a \"synonym set\" or \"synset\". There are\nmore than 100,000 synsets in WordNet, majority of them are nouns (80,000+). In\nImageNet, we aim to provide on average 1000 images to illustrate each synset.\nImages of each concept are quality-controlled and human-annotated. In its\ncompletion, we hope ImageNet will offer tens of millions of cleanly sorted\nimages for most of the concepts in the WordNet hierarchy.\n\nNote that labels were never publicly released for the test set, so we only\ninclude splits for the training and validation sets here.\n'''\n\n# Web-site is asking to cite paper from 2015.\n# http://www.image-net.org/challenges/LSVRC/2012/index#cite\n_CITATION = '''\\\n@article{ILSVRC15,\nAuthor = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. 
Berg and Li Fei-Fei},\nTitle = {{ImageNet Large Scale Visual Recognition Challenge}},\nYear = {2015},\njournal = {International Journal of Computer Vision (IJCV)},\ndoi = {10.1007/s11263-015-0816-y},\nvolume={115},\nnumber={3},\npages={211-252}\n}\n'''\n\n_LABELS_FNAME = 'image/imagenet2012_labels.txt'\n\n# This file contains the validation labels, in the alphabetic order of\n# corresponding image names (and not in the order they have been added to the\n# tar file).\n_VALIDATION_LABELS_FNAME = 'image/imagenet2012_validation_labels.txt'\n\n\n# From https://github.com/cytsai/ilsvrc-cmyk-image-list\nCMYK_IMAGES = [\n 'n01739381_1309.JPEG',\n 'n02077923_14822.JPEG',\n 'n02447366_23489.JPEG',\n 'n02492035_15739.JPEG',\n 'n02747177_10752.JPEG',\n 'n03018349_4028.JPEG',\n 'n03062245_4620.JPEG',\n 'n03347037_9675.JPEG',\n 'n03467068_12171.JPEG',\n 'n03529860_11437.JPEG',\n 'n03544143_17228.JPEG',\n 'n03633091_5218.JPEG',\n 'n03710637_5125.JPEG',\n 'n03961711_5286.JPEG',\n 'n04033995_2932.JPEG',\n 'n04258138_17003.JPEG',\n 'n04264628_27969.JPEG',\n 'n04336792_7448.JPEG',\n 'n04371774_5854.JPEG',\n 'n04596742_4225.JPEG',\n 'n07583066_647.JPEG',\n 'n13037406_4650.JPEG',\n]\n\nPNG_IMAGES = ['n02105855_2933.JPEG']\n\n\nclass Imagenet2012(tfds.core.GeneratorBasedBuilder):\n \"\"\"Imagenet 2012, aka ILSVRC 2012.\"\"\"\n\n VERSION = tfds.core.Version('2.0.1',\n experiments={tfds.core.Experiment.S3: False})\n SUPPORTED_VERSIONS = [\n tfds.core.Version(\n '5.0.0', 'New split API (https://tensorflow.org/datasets/splits)'),\n ]\n\n def _info(self):\n names_file = tfds.core.get_tfds_path(_LABELS_FNAME)\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'image': tfds.features.Image(),\n 'label': tfds.features.ClassLabel(names_file=names_file),\n 'file_name': tfds.features.Text(), # Eg: 'n15075141_54.JPEG'\n }),\n supervised_keys=('image', 'label'),\n homepage='http://image-net.org/',\n citation=_CITATION,\n )\n\n @staticmethod\n def _get_validation_labels(val_path):\n \"\"\"Returns labels for validation.\n\n Args:\n val_path: path to TAR file containing validation images. It is used to\n retrieve the name of pictures and associate them to labels.\n\n Returns:\n dict, mapping from image name (str) to label (str).\n \"\"\"\n labels_path = tfds.core.get_tfds_path(_VALIDATION_LABELS_FNAME)\n with tf.io.gfile.GFile(labels_path) as labels_f:\n labels = labels_f.read().strip().split('\\n')\n with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:\n tar = tarfile.open(mode='r:', fileobj=tar_f_obj)\n images = sorted(tar.getnames())\n return dict(zip(images, labels))\n\n def _split_generators(self, dl_manager):\n train_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_train.tar')\n val_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_val.tar')\n # We don't import the original test split, as it doesn't include labels.\n # These were never publicly released.\n if not tf.io.gfile.exists(train_path) or not tf.io.gfile.exists(val_path):\n raise AssertionError(\n 'ImageNet requires manual download of the data. 
Please download '\n 'the train and val set and place them into: {}, {}'.format(\n train_path, val_path))\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n num_shards=1000, # Ignored when using a version with S3 experiment.\n gen_kwargs={\n 'archive': dl_manager.iter_archive(train_path),\n },\n ),\n tfds.core.SplitGenerator(\n name=tfds.Split.VALIDATION,\n num_shards=5, # Ignored when using a version with S3 experiment.\n gen_kwargs={\n 'archive': dl_manager.iter_archive(val_path),\n 'validation_labels': self._get_validation_labels(val_path),\n },\n ),\n ]\n\n def _fix_image(self, image_fname, image):\n \"\"\"Fix image color system and format starting from v 3.0.0.\"\"\"\n if self.version < '3.0.0':\n return image\n if image_fname in CMYK_IMAGES:\n image = io.BytesIO(tfds.core.utils.jpeg_cmyk_to_rgb(image.read()))\n elif image_fname in PNG_IMAGES:\n image = io.BytesIO(tfds.core.utils.png_to_jpeg(image.read()))\n return image\n\n def _generate_examples(self, archive, validation_labels=None):\n \"\"\"Yields examples.\"\"\"\n if validation_labels: # Validation split\n for key, example in self._generate_examples_validation(archive,\n validation_labels):\n yield key, example\n # Training split. Main archive contains archives names after a synset noun.\n # Each sub-archive contains pictures associated to that synset.\n for fname, fobj in archive:\n label = fname[:-4] # fname is something like 'n01632458.tar'\n # TODO(b/117643231): in py3, the following lines trigger tarfile module\n # to call `fobj.seekable()`, which Gfile doesn't have. We should find an\n # alternative, as this loads ~150MB in RAM.\n fobj_mem = io.BytesIO(fobj.read())\n for image_fname, image in tfds.download.iter_archive(\n fobj_mem, tfds.download.ExtractMethod.TAR_STREAM):\n image = self._fix_image(image_fname, image)\n record = {\n 'file_name': image_fname,\n 'image': image,\n 'label': label,\n }\n yield image_fname, record\n\n def _generate_examples_validation(self, archive, labels):\n for fname, fobj in archive:\n record = {\n 'file_name': fname,\n 'image': fobj,\n 'label': labels[fname],\n }\n yield fname, record\n"
]
| [
[
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile"
]
]
|
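
The builder above does all file I/O through `tf.io.gfile`, so identical code reads local paths and remote (e.g. GCS) paths. A tiny write-then-read round trip with an illustrative path:

```python
import tensorflow as tf

path = "/tmp/example_labels.txt"  # illustrative; gs:// URLs work the same way
with tf.io.gfile.GFile(path, "w") as f:
    f.write("n01440764\nn01443537\n")

if tf.io.gfile.exists(path):
    with tf.io.gfile.GFile(path) as f:  # default mode is read
        labels = f.read().strip().split("\n")
    print(labels)
```
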
cpmpercussion/imps | [
"5707ca01d4004d11603a9969276591c8ab5c28a4"
]
| [
"utils/test_prediction_speed.py"
]
| [
"import logging\nimport time\nimport datetime\nimport numpy as np\nimport pandas as pd\n\n# Hack to get openMP working annoyingly.\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\nprint(\"Importing Keras and MDRNN.\")\nstart_import = time.time()\nimport empi_mdrnn\nimport tensorflow as tf\nfrom keras import backend as K\nfrom tensorflow.contrib.training.python.training.hparam import HParams\nprint(\"Done. That took\", time.time() - start_import, \"seconds.\")\n\ndef build_network(sess, compute_graph, net_config):\n \"\"\"Build the MDRNN.\"\"\"\n empi_mdrnn.MODEL_DIR = \"./models/\"\n K.set_session(sess)\n with compute_graph.as_default():\n net = empi_mdrnn.PredictiveMusicMDRNN(mode=empi_mdrnn.NET_MODE_RUN,\n dimension=net_config.dimension,\n n_hidden_units=net_config.units,\n n_mixtures=net_config.mixes,\n layers=net_config.layers)\n #net.pi_temp = net_config.pi_temp\n #net.sigma_temp = net_config.sigmatemp\n print(\"MDRNN Loaded.\")\n return net\n\n\ndef request_rnn_prediction(input_value, net):\n \"\"\" Accesses a single prediction from the RNN. \"\"\"\n start = time.time()\n output_value = net.generate_touch(input_value)\n time_delta = time.time() - start\n #print(\"Prediction took:\", time_delta)\n return output_value, time_delta\n\n\ndef run_test(tests, net_config):\n times = pd.DataFrame()\n compute_graph = tf.Graph()\n with compute_graph.as_default():\n sess = tf.Session()\n net = build_network(sess, compute_graph, net_config)\n for i in range(tests):\n ## Predictions.\n item = empi_mdrnn.random_sample(out_dim=net_config.dimension)\n K.set_session(sess)\n with compute_graph.as_default():\n rnn_output, t = request_rnn_prediction(item, net)\n out_dict = {\n 'time': t, \n 'mixes': net_config.mixes,\n 'layers': net_config.layers,\n 'units': net_config.units,\n 'dimension': net_config.dimension}\n times = times.append(out_dict, ignore_index=True)\n # clean up\n K.clear_session()\n sess.close()\n return times\n\n\nif __name__ == \"__main__\":\n experiment_frames = []\n # hparams = HParams(mixes=5, layers=2, units=64, dimension=2)\n mdrnn_units = [64, 128, 256, 512]\n dimensions = [2, 3, 4, 5, 6, 7, 8, 9]\n for un in mdrnn_units:\n for dim in dimensions:\n hparams = HParams(mixes=5, layers=2, units=un, dimension=dim)\n times = run_test(100, hparams)\n experiment_frames.append(times)\n total_experiment = pd.concat(experiment_frames, ignore_index=True)\n total_experiment.to_csv(\"total_exp.csv\")\n print(total_experiment.describe())\n\n\n# sysctl -n machdep.cpu.brand_string\n"
]
| [
[
"pandas.DataFrame",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.contrib.training.python.training.hparam.HParams",
"pandas.concat"
]
]
|
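
The benchmark above grows its results with `DataFrame.append`, which pandas 2.0 removed; accumulating dicts, building each frame once, and merging with `pd.concat` is the usual replacement. A sketch with a stand-in workload instead of the MDRNN:

```python
import time
import pandas as pd

frames = []
for units in (64, 128, 256):
    rows = []
    for _ in range(5):
        start = time.time()
        _ = sum(range(100000))  # stand-in for one RNN prediction
        rows.append({"units": units, "time": time.time() - start})
    # one DataFrame per experiment, built from a list of dicts
    frames.append(pd.DataFrame(rows))

total = pd.concat(frames, ignore_index=True)
print(total.groupby("units")["time"].describe())
```
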
dpcomp-org/ektelo | [
"7629fbf106f9b9568c66a0b97f6005280022c3d8"
]
| [
"test/unit/test_transformation.py"
]
| [
"from collections import OrderedDict\nfrom ektelo.data import Relation\nfrom ektelo.data import RelationHelper\nimport numpy as np\nimport os\nfrom ektelo.client.mapper import Grid\nfrom ektelo.private.transformation import Group\nfrom ektelo.private.transformation import Null\nfrom ektelo.private.transformation import ReduceByPartition\nfrom ektelo.private.transformation import Reshape\nfrom ektelo.private.transformation import Filter\nfrom ektelo.private.transformation import Where\nfrom ektelo.private.transformation import Project\nfrom ektelo.private.transformation import Vectorize\nimport unittest\nimport yaml\n\nCSV_PATH = os.environ['EKTELO_DATA']\nCONFIG_PATH = os.path.join(os.environ['EKTELO_HOME'], 'resources', 'config')\n\n\nclass TestTransformation(unittest.TestCase):\n\n def setUp(self):\n self.n = 8\n self.grid_shape = 2\n self.idxs = [1,3,5]\n self.X = np.random.rand(self.n)\n\n delimiter = ','\n self.reduced_domain = (10, 10, 7, 4, 2)\n config = yaml.load(open(os.path.join(CONFIG_PATH, 'cps.yml'), 'r').read())\n self.relation = RelationHelper('CPS').load()\n\n def test_vectorize_operator(self):\n vectorize = Vectorize('CPS-CSV', reduced_domain=self.reduced_domain)\n transformation = vectorize.transform(self.relation)\n X = transformation\n\n self.assertEqual(np.prod(self.reduced_domain), len(X)) \n\n def test_where_operator(self):\n where = Where('age >= 30')\n X = where.transform(self.relation)\n\n self.assertEqual(X._df.age.min(), 30) \n\n def test_project_operator(self):\n project = Project(['income'])\n X = project.transform(self.relation)\n\n np.testing.assert_array_equal(X.domains, [X.config['income']['domain']])\n\n def test_group_operator(self):\n group = Group(self.idxs) \n transformation = group.transform(self.X)\n\n self.assertEqual(transformation.shape, (3,))\n np.testing.assert_array_equal(transformation, self.X[self.idxs])\n\n def test_reduce_operator(self):\n grid = Grid(self.n, self.grid_shape, canonical_order=False)\n mapping = grid.mapping()\n reduction = ReduceByPartition(mapping)\n transformation = reduction.transform(self.X)\n\n for i in range(4):\n self.assertEqual(sum(self.X[2*i:2*i+2]), transformation[i])\n\n def test_reshape_operator(self):\n shape = (4, 2)\n reshaper = Reshape(shape)\n\n x_hat = reshaper.transform(self.X)\n\n self.assertEqual(x_hat.shape, shape)\n\n def test_filter_operator(self):\n vectorize = Vectorize('CPS-CSV', reduced_domain=self.reduced_domain)\n transformation = vectorize.transform(self.relation)\n X = transformation\n mask = np.ones(self.reduced_domain).flatten()\n\n filterer = Filter(mask)\n self.assertEqual(sum(filterer.transform(X)), sum(X))\n\n filterer = Filter(1-mask)\n self.assertEqual(sum(filterer.transform(X)), 0)\n\n def test_null_operator(self):\n null = Null()\n transformation = null.transform(self.X)\n\n np.testing.assert_array_equal(transformation, self.X)\n"
]
| [
[
"numpy.testing.assert_array_equal",
"numpy.prod",
"numpy.random.rand",
"numpy.ones"
]
]
|
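
The assertions above boil down to `numpy.testing.assert_array_equal` over transforms that are plain index and sum operations. A dependency-free sketch of the `Group` and `ReduceByPartition` checks (no ektelo required; the width-2 reduction mirrors the test's grid):

```python
import numpy as np

x = np.random.rand(8)

# Group: selecting a list of indices is just fancy indexing
idxs = [1, 3, 5]
np.testing.assert_array_equal(x[idxs], np.take(x, idxs))

# ReduceByPartition with a uniform grid of width 2: pairwise sums
reduced = x.reshape(4, 2).sum(axis=1)
for i in range(4):
    assert np.isclose(reduced[i], sum(x[2 * i:2 * i + 2]))
print(reduced)
```
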
K-A-R-T/DCL-Release | [
"44c6e1234af63daa1ae32302eef5981651a5a0aa"
]
| [
"scripts/trainval_tube_v2.py"
]
| [
"#! /usr/bin/env python3\n## -*- coding: utf-8 -*-\n\n\"\"\"\nTraining and evaulating the Neuro-Symbolic Concept Learner.\n\"\"\"\nimport torch\ntorch.manual_seed(0)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nimport numpy as np\nnp.random.seed(0)\n\nimport pdb\n\nimport time\nimport os.path as osp\n\nimport torch.backends.cudnn as cudnn\nimport torch.cuda as cuda\n\nfrom jacinle.cli.argument import JacArgumentParser\nfrom jacinle.logging import get_logger, set_output_file\nfrom jacinle.utils.imp import load_source\nfrom jacinle.utils.tqdm import tqdm_pbar\n\nfrom jactorch.cli import escape_desc_name, ensure_path, dump_metainfo\nfrom jactorch.cuda.copy import async_copy_to\nfrom jactorch.train import TrainerEnv\nfrom jactorch.utils.meta import as_float\n\nfrom nscl.datasets import get_available_datasets, initialize_dataset, get_dataset_builder\nfrom clevrer.dataset_clevrer import build_clevrer_dataset \n\nfrom clevrer.utils import set_debugger, prepare_data_for_testing, jsondump, keep_only_temporal_concept_learner \nfrom opts import load_param_parser \n\nlogger = get_logger(__file__)\n\nargs = load_param_parser()\n# filenames\nargs.series_name = args.dataset\nargs.desc_name = escape_desc_name(args.desc)\nargs.run_name = 'run-{}'.format(time.strftime('%Y-%m-%d-%H-%M-%S'))\n\n# directories\nif args.use_gpu:\n nr_devs = cuda.device_count()\n if args.force_gpu and nr_devs == 0:\n nr_devs = 1\n assert nr_devs > 0, 'No GPU device available'\n args.gpus = [i for i in range(nr_devs)]\n args.gpu_parallel = (nr_devs > 1)\n\ndesc = load_source(args.desc)\nconfigs = desc.configs\nargs.configs.apply(configs)\n\ndef main():\n args.dump_dir = ensure_path(osp.join(\n 'dumps', args.series_name, args.desc_name))\n if args.normalized_boxes:\n args.dump_dir = args.dump_dir + '_norm_box'\n if args.even_smp_flag:\n args.dump_dir = args.dump_dir + '_even_smp'+str(args.frm_img_num)\n if args.even_smp_flag:\n args.dump_dir = args.dump_dir + '_col_box_ftr'\n args.dump_dir += '_' + args.version + '_' + args.prefix\n\n #if args.debug:\n if not args.debug:\n args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))\n args.meta_dir = ensure_path(osp.join(args.dump_dir, 'meta'))\n args.meta_file = osp.join(args.meta_dir, args.run_name + '.json')\n args.log_file = osp.join(args.meta_dir, args.run_name + '.log')\n args.meter_file = osp.join(args.meta_dir, args.run_name + '.meter.json')\n\n logger.critical('Writing logs to file: \"{}\".'.format(args.log_file))\n set_output_file(args.log_file)\n\n logger.critical('Writing metainfo to file: \"{}\".'.format(args.meta_file))\n with open(args.meta_file, 'w') as f:\n f.write(dump_metainfo(args=args.__dict__, configs=configs))\n\n # Initialize the tensorboard.\n if args.use_tb:\n args.tb_dir_root = ensure_path(osp.join(args.dump_dir, 'tensorboard'))\n args.tb_dir = ensure_path(osp.join(args.tb_dir_root, args.run_name))\n\n initialize_dataset(args.dataset, args.version)\n #validation_dataset = extra_dataset \n if args.testing_flag==1 or args.dataset=='billiards':\n validation_dataset = build_clevrer_dataset(args, 'test')\n else:\n validation_dataset = build_clevrer_dataset(args, 'validation')\n train_dataset = build_clevrer_dataset(args, 'train')\n\n extra_dataset = None\n main_train(train_dataset, validation_dataset, extra_dataset)\n\ndef main_train(train_dataset, validation_dataset, extra_dataset=None):\n logger.critical('Building the model.')\n model = desc.make_model(args)\n if args.version=='v3':\n desc_pred = 
load_source(args.pred_model_path)\n model.build_temporal_prediction_model(args, desc_pred)\n elif args.version=='v4':\n desc_pred = load_source(args.pred_model_path)\n desc_spatial_pred = load_source(args.pred_spatial_model_path)\n model.build_temporal_prediction_model(args, desc_pred, desc_spatial_pred)\n\n elif args.version=='v2_1':\n model.make_relation_embedding_for_unseen_events(args) \n\n if args.use_gpu:\n model.cuda()\n # Disable the cudnn benchmark.\n cudnn.benchmark = False\n\n if hasattr(desc, 'make_optimizer'):\n logger.critical('Building customized optimizer.')\n optimizer = desc.make_optimizer(model, args.lr)\n else:\n from jactorch.optim import AdamW\n if args.freeze_learner_flag==1:\n if args.reconstruct_flag:\n parameters = list(model._model_pred.parameters())+list(model._decoder.parameters())\n trainable_parameters = filter(lambda x: x.requires_grad, parameters)\n elif args.version=='v4':\n trainable_parameters = filter(lambda x: x.requires_grad, model._model_pred.parameters())\n else:\n trainable_parameters = filter(lambda x: x.requires_grad, model.parameters())\n optimizer = AdamW(trainable_parameters, args.lr, weight_decay=configs.train.weight_decay)\n\n if args.acc_grad > 1:\n from jactorch.optim import AccumGrad\n optimizer = AccumGrad(optimizer, args.acc_grad)\n logger.warning('Use accumulated grad={:d}, effective iterations per epoch={:d}.'.format(args.acc_grad, int(args.iters_per_epoch / args.acc_grad)))\n\n trainer = TrainerEnv(model, optimizer)\n\n if args.resume:\n extra = trainer.load_checkpoint(args.resume)\n if extra:\n args.start_epoch = extra['epoch']\n logger.critical('Resume from epoch {}.'.format(args.start_epoch))\n elif args.load:\n if trainer.load_weights(args.load):\n logger.critical('Loaded weights from pretrained model: \"{}\".'.format(args.load))\n if args.version=='v3':\n if args.pretrain_pred_model_path:\n model._model_pred.load_state_dict(torch.load(args.pretrain_pred_model_path))\n logger.critical('Loaded weights from pretrained temporal model: \"{}\".'.format(args.pretrain_pred_model_path))\n elif args.version=='v4':\n if args.pretrain_pred_spatial_model_path:\n model._model_spatial_pred.load_state_dict(torch.load(args.pretrain_pred_spatial_model_path))\n logger.critical('Loaded spatial models from pretrained temporal model: \"{}\".'.format(args.pretrain_pred_spatial_model_path))\n if args.pretrain_pred_feature_model_path:\n model._model_pred.load_state_dict(torch.load(args.pretrain_pred_feature_model_path))\n logger.critical('Loaded feature models from pretrained temporal model: \"{}\".'.format(args.pretrain_pred_feature_model_path))\n #pdb.set_trace()\n if args.pretrain_pred_model_path:\n model._model_pred.load_state_dict(torch.load(args.pretrain_pred_model_path))\n logger.critical('Loaded weights from pretrained temporal model: \"{}\".'.format(args.pretrain_pred_model_path))\n elif args.version =='v2_1':\n model.reasoning.embedding_relation_future.load_state_dict(model.reasoning.embedding_relation.state_dict())\n model.reasoning.embedding_relation_counterfact.load_state_dict(model.reasoning.embedding_relation.state_dict())\n logger.critical('Copy original relation weights into counterfact and future relation.')\n if args.use_tb and not args.debug:\n from jactorch.train.tb import TBLogger, TBGroupMeters\n tb_logger = TBLogger(args.tb_dir)\n meters = TBGroupMeters(tb_logger)\n logger.critical('Writing tensorboard logs to: \"{}\".'.format(args.tb_dir))\n else:\n from jacinle.utils.meter import GroupMeters\n meters = GroupMeters()\n\n if not 
args.debug:\n logger.critical('Writing meter logs to file: \"{}\".'.format(args.meter_file))\n\n if args.clip_grad:\n logger.info('Registering the clip_grad hook: {}.'.format(args.clip_grad))\n def clip_grad(self, loss):\n from torch.nn.utils import clip_grad_norm_\n clip_grad_norm_(self.model.parameters(), max_norm=args.clip_grad)\n trainer.register_event('backward:after', clip_grad)\n\n if hasattr(desc, 'customize_trainer'):\n desc.customize_trainer(trainer)\n\n if args.embed:\n from IPython import embed; embed()\n\n if args.debug:\n shuffle_flag=False\n else:\n shuffle_flag=True\n\n logger.critical('Building the data loader.')\n validation_dataloader = validation_dataset.make_dataloader(args.batch_size, shuffle=False, drop_last=False, nr_workers=args.data_workers)\n if extra_dataset is not None:\n extra_dataloader = extra_dataset.make_dataloader(args.batch_size, shuffle=False, drop_last=False, nr_workers=args.data_workers)\n\n if args.evaluate:\n meters.reset()\n model.eval()\n validate_epoch(0, trainer, validation_dataloader, meters)\n if extra_dataset is not None:\n validate_epoch(0, trainer, extra_dataloader, meters, meter_prefix='validation_extra')\n logger.critical(meters.format_simple('Validation', {k: v for k, v in meters.avg.items() if v != 0}, compressed=False))\n return meters\n\n\n for epoch in range(args.start_epoch + 1, args.epochs + 1):\n meters.reset()\n\n model.train()\n\n this_train_dataset = train_dataset\n train_dataloader = this_train_dataset.make_dataloader(args.batch_size, shuffle=shuffle_flag, drop_last=True, nr_workers=args.data_workers)\n\n for enum_id in range(args.enums_per_epoch):\n train_epoch(epoch, trainer, train_dataloader, meters)\n\n if epoch % args.validation_interval == 0:\n model.eval()\n validate_epoch(epoch, trainer, validation_dataloader, meters)\n\n if not args.debug:\n meters.dump(args.meter_file)\n\n logger.critical(meters.format_simple(\n 'Epoch = {}'.format(epoch),\n {k: v for k, v in meters.avg.items() if epoch % args.validation_interval == 0 or not k.startswith('validation')},\n compressed=False\n ))\n\n if epoch % args.save_interval == 0 and not args.debug:\n fname = osp.join(args.ckpt_dir, 'epoch_{}.pth'.format(epoch))\n trainer.save_checkpoint(fname, dict(epoch=epoch, meta_file=args.meta_file))\n\n if epoch > int(args.epochs * 0.6):\n trainer.set_learning_rate(args.lr * 0.1)\n\n\ndef backward_check_nan(self, feed_dict, loss, monitors, output_dict):\n import torch\n for name, param in self.model.named_parameters():\n if param.grad is None:\n continue\n if torch.isnan(param.grad.data).any().item():\n print('Caught NAN in gradient.', name)\n from IPython import embed; embed()\n\n\ndef train_epoch(epoch, trainer, train_dataloader, meters):\n nr_iters = args.iters_per_epoch\n if nr_iters == 0:\n nr_iters = len(train_dataloader)\n\n meters.update(epoch=epoch)\n if args.dataset=='blocks' and epoch==6:\n keep_only_temporal_concept_learner(trainer, args, configs)\n\n trainer.trigger_event('epoch:before', trainer, epoch)\n train_iter = iter(train_dataloader)\n end = time.time()\n with tqdm_pbar(total=nr_iters) as pbar:\n for i in range(nr_iters):\n feed_dict = next(train_iter)\n if args.use_gpu:\n if not args.gpu_parallel:\n feed_dict = async_copy_to(feed_dict, 0)\n data_time = time.time() - end; end = time.time()\n loss, monitors, output_dict, extra_info = trainer.step(feed_dict, cast_tensor=False)\n step_time = time.time() - end; end = time.time()\n\n n = len(feed_dict)\n meters.update(loss=loss, n=n)\n\n for tmp_key, tmp_value in 
monitors.items(): \n if isinstance(tmp_value , list):\n for sub_idx, sub_value in enumerate(tmp_value):\n if sub_value[0]==-1:\n continue \n meters.update({tmp_key: sub_value[0]}, n=sub_value[1])\n elif tmp_value==-1:\n continue \n else:\n meters.update({tmp_key: tmp_value}, n=1)\n\n meters.update({'time/data': data_time, 'time/step': step_time})\n\n if args.use_tb:\n meters.flush()\n\n pbar.set_description(meters.format_simple(\n 'Epoch {}'.format(epoch),\n {k: v for k, v in meters.val.items() if not k.startswith('validation') and k != 'epoch' and k.count('/') <= 1},\n compressed=True\n ))\n pbar.update()\n\n end = time.time()\n\n trainer.trigger_event('epoch:after', trainer, epoch)\n\n\ndef validate_epoch(epoch, trainer, val_dataloader, meters, meter_prefix='validation'):\n if args.testing_flag:\n json_output_list = []\n \n end = time.time()\n with tqdm_pbar(total=len(val_dataloader)*args.batch_size) as pbar:\n for feed_dict in val_dataloader:\n if args.use_gpu:\n if not args.gpu_parallel:\n feed_dict = async_copy_to(feed_dict, 0)\n #pdb.set_trace()\n data_time = time.time() - end; end = time.time()\n output_dict_list, extra_info = trainer.evaluate(feed_dict, cast_tensor=False)\n if args.testing_flag:\n prepare_data_for_testing(output_dict_list, feed_dict, json_output_list)\n\n step_time = time.time() - end; end = time.time()\n for idx, mon_dict in enumerate(output_dict_list['monitors']): \n monitors = {meter_prefix + '/' + k: v for k, v in as_float(mon_dict).items()}\n # remove padding values\n for tmp_key, tmp_value in monitors.items(): \n if isinstance(tmp_value , list):\n for sub_idx, sub_value in enumerate(tmp_value):\n if sub_value[0]==-1:\n continue \n meters.update({tmp_key: sub_value[0]}, n=sub_value[1])\n elif tmp_value==-1:\n continue \n else:\n meters.update({tmp_key: tmp_value}, n=1)\n \n meters.update({'time/data': data_time, 'time/step': step_time})\n if args.use_tb:\n meters.flush()\n\n pbar.set_description(meters.format_simple(\n 'Epoch {} (validation)'.format(epoch),\n {k: v for k, v in meters.val.items() if k.startswith('validation') and k.count('/') <= 2},\n compressed=True\n ))\n pbar.update()\n\n end = time.time()\n if args.testing_flag==1:\n jsondump(args.test_result_path, json_output_list)\n\nif __name__ == '__main__':\n main()\n\n"
]
| [
[
"torch.isnan",
"numpy.random.seed",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.load"
]
]
|
c0redumb/bert | [
"8ccd1863cc9bc73f149224ad149673b9c9bb5196"
]
| [
"run_squad.py"
]
| [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run BERT on SQuAD 1.1 and SQuAD 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport math\nimport os\nimport random\nimport modeling\nimport optimization\nimport tokenization\nimport six\nimport tensorflow as tf\ntf.compat.v1.disable_eager_execution()\n\n# Add Horovod to run_squad\ntry:\n import horovod.tensorflow as hvd\nexcept:\n hvd = None\n\nflags = tf.compat.v1.flags\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\nflags.DEFINE_string(\"train_file\", None,\n \"SQuAD json for training. E.g., train-v1.1.json\")\n\nflags.DEFINE_string(\n \"predict_file\", None,\n \"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 384,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_integer(\n \"doc_stride\", 128,\n \"When splitting up a long document into chunks, how much stride to \"\n \"take between chunks.\")\n\nflags.DEFINE_integer(\n \"max_query_length\", 64,\n \"The maximum number of tokens for the question. Questions longer than \"\n \"this will be truncated to this length.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_predict\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8,\n \"Total batch size for predictions.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 3.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\n \"n_best_size\", 20,\n \"The total number of n-best predictions to generate in the \"\n \"nbest_predictions.json output file.\")\n\nflags.DEFINE_integer(\n \"max_answer_length\", 30,\n \"The maximum length of an answer that can be generated. This is needed \"\n \"because the start and end predictions are not conditioned on one another.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\nflags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\nflags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\nflags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\nflags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\nflags.DEFINE_bool(\n \"verbose_logging\", False,\n \"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\")\n\nflags.DEFINE_bool(\n \"version_2_with_negative\", False,\n \"If true, the SQuAD examples contain some that do not have an answer.\")\n\nflags.DEFINE_float(\n \"null_score_diff_threshold\", 0.0,\n \"If null_score - best_non_null is greater than the threshold predict null.\")\n\nflags.DEFINE_string(\n \"optimizer_type\", \"adam\", \"Optimizer used for training - adam (default), lamb, nadam and nlamb\")\n\nflags.DEFINE_bool(\"auto_mixed_precision\", False, \"Whether to enable AMP (Auto Mixed Precision).\")\n\nflags.DEFINE_bool(\"use_horovod\", False, \"Whether to use Horovod.\")\n\nflags.DEFINE_bool(\"enable_timeline\", False,\n \"Whether to enable generation of profiling data.\")\n\nflags.DEFINE_integer(\"num_timeline_steps\", 1,\n \"Only used if `enable_timeline` is True. 
\"\n \"Generate timeline for every Nth step.\")\n\n\nclass SquadExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n qas_id,\n question_text,\n doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False):\n self.qas_id = qas_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n s += \", question_text: %s\" % (\n tokenization.printable_text(self.question_text))\n s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n if self.start_position:\n s += \", start_position: %d\" % (self.start_position)\n if self.start_position:\n s += \", end_position: %d\" % (self.end_position)\n if self.start_position:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n tokens,\n token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n start_position=None,\n end_position=None,\n is_impossible=None):\n self.unique_id = unique_id\n self.example_index = example_index\n self.doc_span_index = doc_span_index\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n\ndef read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.io.gfile.GFile(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1]\n # Only add answers where the text can be exactly recovered from the\n # document. 
If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.compat.v1.logging.warning(\"Could not find answer: '%s' vs. '%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples\n\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, 
doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.compat.v1.logging.info(\"*** Example ***\")\n tf.compat.v1.logging.info(\"unique_id: %s\" % (unique_id))\n tf.compat.v1.logging.info(\"example_index: %s\" % (example_index))\n tf.compat.v1.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.compat.v1.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.compat.v1.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.compat.v1.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.compat.v1.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.compat.v1.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.compat.v1.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.compat.v1.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.compat.v1.logging.info(\"start_position: %d\" % (start_position))\n tf.compat.v1.logging.info(\"end_position: %d\" % (end_position))\n tf.compat.v1.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. 
We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. 
We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.compat.v1.get_variable(\n \"cls/squad/output_weights\", [2, hidden_size],\n initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.compat.v1.get_variable(\n \"cls/squad/output_bias\", [2], initializer=tf.compat.v1.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(a=logits, perm=[2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, end_logits)\n\n\ndef model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings, use_hvd, use_amp):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.compat.v1.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.compat.v1.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.compat.v1.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint and (hvd == None or hvd.rank() == 0):\n tf.compat.v1.logging.info(\"**** Init Checkpoint {} {} ****\".format(hvd.rank(), init_checkpoint))\n 
(assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.compat.v1.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.compat.v1.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.compat.v1.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n input_tensor=tf.reduce_sum(input_tensor=one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) / 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_hvd, FLAGS.optimizer_type, use_amp)\n\n output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.io.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.io.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.io.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.io.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.io.parse_single_example(serialized=record, features=name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, dtype=tf.int32)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.data.experimental.map_and_batch(\n lambda record: 
_decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n\n\ndef write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.compat.v1.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.compat.v1.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. 
We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.io.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.io.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.io.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n\ndef get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.compat.v1.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.compat.v1.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.compat.v1.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.compat.v1.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text\n\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, filename, is_training):\n self.filename = filename\n self.is_training = is_training\n self.num_features = 0\n self._writer = tf.io.TFRecordWriter(filename)\n\n def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n 
features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n self._writer.write(tf_example.SerializeToString())\n\n def close(self):\n self._writer.close()\n\n\ndef validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))\n\n\ndef main(_):\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\n\n use_amp = False\n if FLAGS.auto_mixed_precision:\n use_amp = True\n tf.compat.v1.logging.info(\"TF AMP (Auto Mixed Precision) is enabled\")\n \n use_hvd = False\n if FLAGS.use_horovod and hvd != None:\n use_hvd = True\n tf.compat.v1.logging.info(\"Horovod enabled and used\")\n\n if use_hvd:\n # [HVD] Initialize the library: basic bookkeeping, sets up communication between GPUs, allocates buffers etc.\n hvd.init()\n # [HVD] Use different output directories for different GPU's. 
\n FLAGS.output_dir = FLAGS.output_dir if hvd.rank() == 0 else os.path.join(FLAGS.output_dir, str(hvd.rank()))\n FLAGS.save_checkpoints_steps = FLAGS.save_checkpoints_steps if hvd.rank() == 0 else None\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n validate_flags_or_throw(bert_config)\n\n tf.io.gfile.makedirs(FLAGS.output_dir)\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V2\n\n config = None\n if use_hvd:\n # [HVD] Pin each worker to a GPU (make sure one worker uses only one GPU).\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.visible_device_list = str(hvd.local_rank())\n\n run_config = tf.compat.v1.estimator.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.compat.v1.estimator.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host),\n log_step_count_steps=100,\n session_config=config)\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = read_squad_examples(\n input_file=FLAGS.train_file, is_training=True)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n if use_hvd:\n # [HVD] The training_steps for each GPU is the total steps divided by the number of GPU's.\n num_train_steps = num_train_steps // hvd.size()\n num_warmup_steps = num_warmup_steps // hvd.size()\n\n # Pre-shuffle the input to avoid having to make a very large shuffle\n # buffer in in the `input_fn`.\n rng = random.Random(12345)\n rng.shuffle(train_examples)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu,\n use_amp=use_amp,\n use_hvd=use_hvd)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.compat.v1.estimator.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n # We write to a temporary file to avoid storing very large constant tensors\n # in memory.\n train_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"train.tf_record\"),\n is_training=True)\n convert_examples_to_features(\n examples=train_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=True,\n output_fn=train_writer.process_feature)\n train_writer.close()\n\n tf.compat.v1.logging.info(\"***** Running training *****\")\n tf.compat.v1.logging.info(\" Num orig examples = %d\", len(train_examples))\n tf.compat.v1.logging.info(\" Num split examples = %d\", train_writer.num_features)\n tf.compat.v1.logging.info(\" Batch size = %d\", 
FLAGS.train_batch_size)\n tf.compat.v1.logging.info(\" Num steps = %d\", num_train_steps)\n del train_examples\n\n train_input_fn = input_fn_builder(\n input_file=train_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n\n hooks = []\n if FLAGS.enable_timeline:\n profiler_hook = tf.estimator.ProfilerHook(\n save_steps=FLAGS.num_timeline_steps,\n output_dir=FLAGS.output_dir)\n hooks.append(profiler_hook)\n\n if use_hvd:\n # [HVD] Ensure all GPU's start with the same weights.\n hooks.append(hvd.BroadcastGlobalVariablesHook(0))\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=hooks)\n\n if FLAGS.do_predict:\n eval_examples = read_squad_examples(\n input_file=FLAGS.predict_file, is_training=False)\n\n eval_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"eval.tf_record\"),\n is_training=False)\n eval_features = []\n\n def append_feature(feature):\n eval_features.append(feature)\n eval_writer.process_feature(feature)\n\n convert_examples_to_features(\n examples=eval_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=False,\n output_fn=append_feature)\n eval_writer.close()\n\n tf.compat.v1.logging.info(\"***** Running predictions *****\")\n tf.compat.v1.logging.info(\" Num orig examples = %d\", len(eval_examples))\n tf.compat.v1.logging.info(\" Num split examples = %d\", len(eval_features))\n tf.compat.v1.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n all_results = []\n\n predict_input_fn = input_fn_builder(\n input_file=eval_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False)\n\n # If running eval on the TPU, you will need to specify the number of\n # steps.\n all_results = []\n for result in estimator.predict(\n predict_input_fn, yield_single_examples=True):\n if len(all_results) % 1000 == 0:\n tf.compat.v1.logging.info(\"Processing example: %d\" % (len(all_results)))\n unique_id = int(result[\"unique_ids\"])\n start_logits = [float(x) for x in result[\"start_logits\"].flat]\n end_logits = [float(x) for x in result[\"end_logits\"].flat]\n all_results.append(\n RawResult(\n unique_id=unique_id,\n start_logits=start_logits,\n end_logits=end_logits))\n\n output_prediction_file = os.path.join(FLAGS.output_dir, \"predictions.json\")\n output_nbest_file = os.path.join(FLAGS.output_dir, \"nbest_predictions.json\")\n output_null_log_odds_file = os.path.join(FLAGS.output_dir, \"null_odds.json\")\n\n write_predictions(eval_examples, eval_features, all_results,\n FLAGS.n_best_size, FLAGS.max_answer_length,\n FLAGS.do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.compat.v1.app.run()\n"
]
| [
[
"tensorflow.io.gfile.GFile",
"tensorflow.compat.v1.logging.info",
"tensorflow.data.TFRecordDataset",
"tensorflow.train.Features",
"tensorflow.compat.v1.estimator.tpu.TPUEstimator",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.train.init_from_checkpoint",
"tensorflow.one_hot",
"tensorflow.cast",
"tensorflow.io.FixedLenFeature",
"tensorflow.transpose",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.compat.v1.logging.warning",
"tensorflow.nn.bias_add",
"tensorflow.nn.log_softmax",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.train.Scaffold",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.io.gfile.makedirs",
"tensorflow.compat.v1.estimator.tpu.TPUConfig",
"tensorflow.reduce_sum",
"tensorflow.compat.v1.estimator.tpu.TPUEstimatorSpec",
"tensorflow.unstack",
"tensorflow.estimator.ProfilerHook",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.io.parse_single_example",
"tensorflow.io.TFRecordWriter",
"tensorflow.compat.v1.truncated_normal_initializer",
"tensorflow.compat.v1.disable_eager_execution"
]
]
|
rnwatanabe/silx | [
"b0395f4a06c048b7778dc04ada828edd195ef02d"
]
| [
"src/silx/gui/plot3d/items/core.py"
]
| [
"# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2017-2021 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This module provides the base class for items of the :class:`.SceneWidget`.\n\"\"\"\n\nfrom __future__ import absolute_import\n\n__authors__ = [\"T. Vincent\"]\n__license__ = \"MIT\"\n__date__ = \"15/11/2017\"\n\nfrom collections import defaultdict\nimport enum\n\nimport numpy\n\nfrom ... import qt\nfrom ...plot.items import ItemChangedType\nfrom .. import scene\nfrom ..scene import axes, primitives, transform\nfrom ._pick import PickContext\n\n\[email protected]\nclass Item3DChangedType(enum.Enum):\n \"\"\"Type of modification provided by :attr:`Item3D.sigItemChanged` signal.\"\"\"\n\n INTERPOLATION = 'interpolationChanged'\n \"\"\"Item3D image interpolation changed flag.\"\"\"\n\n TRANSFORM = 'transformChanged'\n \"\"\"Item3D transform changed flag.\"\"\"\n\n HEIGHT_MAP = 'heightMapChanged'\n \"\"\"Item3D height map changed flag.\"\"\"\n\n ISO_LEVEL = 'isoLevelChanged'\n \"\"\"Isosurface level changed flag.\"\"\"\n\n LABEL = 'labelChanged'\n \"\"\"Item's label changed flag.\"\"\"\n\n BOUNDING_BOX_VISIBLE = 'boundingBoxVisibleChanged'\n \"\"\"Item's bounding box visibility changed\"\"\"\n\n ROOT_ITEM = 'rootItemChanged'\n \"\"\"Item's root changed flag.\"\"\"\n\n\nclass Item3D(qt.QObject):\n \"\"\"Base class representing an item in the scene.\n\n :param parent: The View widget this item belongs to.\n :param primitive: An optional primitive to use as scene primitive\n \"\"\"\n\n _LABEL_INDICES = defaultdict(int)\n \"\"\"Store per class label indices\"\"\"\n\n sigItemChanged = qt.Signal(object)\n \"\"\"Signal emitted when an item's property has changed.\n\n It provides a flag describing which property of the item has changed.\n See :class:`ItemChangedType` and :class:`Item3DChangedType`\n for flags description.\n \"\"\"\n\n def __init__(self, parent, primitive=None):\n qt.QObject.__init__(self, parent)\n\n if primitive is None:\n primitive = scene.Group()\n\n self._primitive = primitive\n\n self.__syncForegroundColor()\n\n labelIndex = self._LABEL_INDICES[self.__class__]\n self._label = str(self.__class__.__name__)\n if labelIndex != 0:\n self._label += u' %d' % labelIndex\n self._LABEL_INDICES[self.__class__] += 1\n\n if isinstance(parent, Item3D):\n 
parent.sigItemChanged.connect(self.__parentItemChanged)\n\n def setParent(self, parent):\n \"\"\"Override set parent to handle root item change\"\"\"\n previousParent = self.parent()\n if isinstance(previousParent, Item3D):\n previousParent.sigItemChanged.disconnect(self.__parentItemChanged)\n\n super(Item3D, self).setParent(parent)\n\n if isinstance(parent, Item3D):\n parent.sigItemChanged.connect(self.__parentItemChanged)\n\n self._updated(Item3DChangedType.ROOT_ITEM)\n\n def __parentItemChanged(self, event):\n \"\"\"Handle updates of the parent if it is an Item3D\n\n :param Item3DChangedType event:\n \"\"\"\n if event == Item3DChangedType.ROOT_ITEM:\n self._updated(Item3DChangedType.ROOT_ITEM)\n\n def root(self):\n \"\"\"Returns the root of the scene this item belongs to.\n\n The root is the up-most Item3D in the scene tree hierarchy.\n\n :rtype: Union[Item3D, None]\n \"\"\"\n root = None\n ancestor = self.parent()\n while isinstance(ancestor, Item3D):\n root = ancestor\n ancestor = ancestor.parent()\n\n return root\n\n def _getScenePrimitive(self):\n \"\"\"Return the group containing the item rendering\"\"\"\n return self._primitive\n\n def _updated(self, event=None):\n \"\"\"Handle MixIn class updates.\n\n :param event: The event to send to :attr:`sigItemChanged` signal.\n \"\"\"\n if event == Item3DChangedType.ROOT_ITEM:\n self.__syncForegroundColor()\n\n if event is not None:\n self.sigItemChanged.emit(event)\n\n # Label\n\n def getLabel(self):\n \"\"\"Returns the label associated to this item.\n\n :rtype: str\n \"\"\"\n return self._label\n\n def setLabel(self, label):\n \"\"\"Set the label associated to this item.\n\n :param str label:\n \"\"\"\n label = str(label)\n if label != self._label:\n self._label = label\n self._updated(Item3DChangedType.LABEL)\n\n # Visibility\n\n def isVisible(self):\n \"\"\"Returns True if item is visible, else False\n\n :rtype: bool\n \"\"\"\n return self._getScenePrimitive().visible\n\n def setVisible(self, visible=True):\n \"\"\"Set the visibility of the item in the scene.\n\n :param bool visible: True (default) to show the item, False to hide\n \"\"\"\n visible = bool(visible)\n primitive = self._getScenePrimitive()\n if visible != primitive.visible:\n primitive.visible = visible\n self._updated(ItemChangedType.VISIBLE)\n\n # Foreground color\n\n def _setForegroundColor(self, color):\n \"\"\"Set the foreground color of the item.\n\n The default implementation does nothing, override it in subclass.\n\n :param color: RGBA color\n :type color: tuple of 4 float in [0., 1.]\n \"\"\"\n if hasattr(super(Item3D, self), '_setForegroundColor'):\n super(Item3D, self)._setForegroundColor(color)\n\n def __syncForegroundColor(self):\n \"\"\"Retrieve foreground color from parent and update this item\"\"\"\n # Look-up for SceneWidget to get its foreground color\n root = self.root()\n if root is not None:\n widget = root.parent()\n if isinstance(widget, qt.QWidget):\n self._setForegroundColor(\n widget.getForegroundColor().getRgbF())\n\n # picking\n\n def _pick(self, context):\n \"\"\"Implement picking on this item.\n\n :param PickContext context: Current picking context\n :return: Data indices at picked position or None\n :rtype: Union[None,PickingResult]\n \"\"\"\n if (self.isVisible() and\n context.isEnabled() and\n context.isItemPickable(self) and\n self._pickFastCheck(context)):\n return self._pickFull(context)\n return None\n\n def _pickFastCheck(self, context):\n \"\"\"Approximate item pick test (e.g., bounding box-based picking).\n\n :param PickContext 
context: Current picking context\n :return: True if item might be picked\n :rtype: bool\n \"\"\"\n primitive = self._getScenePrimitive()\n\n positionNdc = context.getNDCPosition()\n if positionNdc is None: # No picking outside viewport\n return False\n\n bounds = primitive.bounds(transformed=False, dataBounds=False)\n if bounds is None: # primitive has no bounds\n return False\n\n bounds = primitive.objectToNDCTransform.transformBounds(bounds)\n\n return (bounds[0, 0] <= positionNdc[0] <= bounds[1, 0] and\n bounds[0, 1] <= positionNdc[1] <= bounds[1, 1])\n\n def _pickFull(self, context):\n \"\"\"Perform precise picking in this item at given widget position.\n\n :param PickContext context: Current picking context\n :return: Object holding the results or None\n :rtype: Union[None,PickingResult]\n \"\"\"\n return None\n\n\nclass DataItem3D(Item3D):\n \"\"\"Base class representing a data item with transform in the scene.\n\n :param parent: The View widget this item belongs to.\n :param Union[GroupBBox, None] group:\n The scene group to use for rendering\n \"\"\"\n\n def __init__(self, parent, group=None):\n if group is None:\n group = primitives.GroupBBox()\n\n # Set-up bounding box\n group.boxVisible = False\n group.axesVisible = False\n else:\n assert isinstance(group, primitives.GroupBBox)\n\n Item3D.__init__(self, parent=parent, primitive=group)\n\n # Transformations\n self._translate = transform.Translate()\n self._rotateForwardTranslation = transform.Translate()\n self._rotate = transform.Rotate()\n self._rotateBackwardTranslation = transform.Translate()\n self._translateFromRotationCenter = transform.Translate()\n self._matrix = transform.Matrix()\n self._scale = transform.Scale()\n # Group transforms to do to data before rotation\n # This is useful to handle rotation center relative to bbox\n self._transformObjectToRotate = transform.TransformList(\n [self._matrix, self._scale])\n self._transformObjectToRotate.addListener(self._updateRotationCenter)\n\n self._rotationCenter = 0., 0., 0.\n\n self.__transforms = transform.TransformList([\n self._translate,\n self._rotateForwardTranslation,\n self._rotate,\n self._rotateBackwardTranslation,\n self._transformObjectToRotate])\n\n self._getScenePrimitive().transforms = self.__transforms\n\n def _updated(self, event=None):\n \"\"\"Handle MixIn class updates.\n\n :param event: The event to send to :attr:`sigItemChanged` signal.\n \"\"\"\n if event == ItemChangedType.DATA:\n self._updateRotationCenter()\n super(DataItem3D, self)._updated(event)\n\n # Transformations\n\n def _getSceneTransforms(self):\n \"\"\"Return TransformList corresponding to current transforms\n\n :rtype: TransformList\n \"\"\"\n return self.__transforms\n\n def setScale(self, sx=1., sy=1., sz=1.):\n \"\"\"Set the scale of the item in the scene.\n\n :param float sx: Scale factor along the X axis\n :param float sy: Scale factor along the Y axis\n :param float sz: Scale factor along the Z axis\n \"\"\"\n scale = numpy.array((sx, sy, sz), dtype=numpy.float32)\n if not numpy.all(numpy.equal(scale, self.getScale())):\n self._scale.scale = scale\n self._updated(Item3DChangedType.TRANSFORM)\n\n def getScale(self):\n \"\"\"Returns the scales provided by :meth:`setScale`.\n\n :rtype: numpy.ndarray\n \"\"\"\n return self._scale.scale\n\n def setTranslation(self, x=0., y=0., z=0.):\n \"\"\"Set the translation of the origin of the item in the scene.\n\n :param float x: Offset of the data origin on the X axis\n :param float y: Offset of the data origin on the Y axis\n :param float z: 
Offset of the data origin on the Z axis\n \"\"\"\n translation = numpy.array((x, y, z), dtype=numpy.float32)\n if not numpy.all(numpy.equal(translation, self.getTranslation())):\n self._translate.translation = translation\n self._updated(Item3DChangedType.TRANSFORM)\n\n def getTranslation(self):\n \"\"\"Returns the offset set by :meth:`setTranslation`.\n\n :rtype: numpy.ndarray\n \"\"\"\n return self._translate.translation\n\n _ROTATION_CENTER_TAGS = 'lower', 'center', 'upper'\n\n def _updateRotationCenter(self, *args, **kwargs):\n \"\"\"Update rotation center relative to bounding box\"\"\"\n center = []\n for index, position in enumerate(self.getRotationCenter()):\n # Patch position relative to bounding box\n if position in self._ROTATION_CENTER_TAGS:\n bounds = self._getScenePrimitive().bounds(\n transformed=False, dataBounds=True)\n bounds = self._transformObjectToRotate.transformBounds(bounds)\n\n if bounds is None:\n position = 0.\n elif position == 'lower':\n position = bounds[0, index]\n elif position == 'center':\n position = 0.5 * (bounds[0, index] + bounds[1, index])\n elif position == 'upper':\n position = bounds[1, index]\n\n center.append(position)\n\n if not numpy.all(numpy.equal(\n center, self._rotateForwardTranslation.translation)):\n self._rotateForwardTranslation.translation = center\n self._rotateBackwardTranslation.translation = \\\n - self._rotateForwardTranslation.translation\n self._updated(Item3DChangedType.TRANSFORM)\n\n def setRotationCenter(self, x=0., y=0., z=0.):\n \"\"\"Set the center of rotation of the item.\n\n Position of the rotation center is either a float\n for an absolute position or one of the following\n string to define a position relative to the item's bounding box:\n 'lower', 'center', 'upper'\n\n :param x: rotation center position on the X axis\n :rtype: float or str\n :param y: rotation center position on the Y axis\n :rtype: float or str\n :param z: rotation center position on the Z axis\n :rtype: float or str\n \"\"\"\n center = []\n for position in (x, y, z):\n if isinstance(position, str):\n assert position in self._ROTATION_CENTER_TAGS\n else:\n position = float(position)\n center.append(position)\n center = tuple(center)\n\n if center != self._rotationCenter:\n self._rotationCenter = center\n self._updateRotationCenter()\n\n def getRotationCenter(self):\n \"\"\"Returns the rotation center set by :meth:`setRotationCenter`.\n\n :rtype: 3-tuple of float or str\n \"\"\"\n return self._rotationCenter\n\n def setRotation(self, angle=0., axis=(0., 0., 1.)):\n \"\"\"Set the rotation of the item in the scene\n\n :param float angle: The rotation angle in degrees.\n :param axis: The (x, y, z) coordinates of the rotation axis.\n \"\"\"\n axis = numpy.array(axis, dtype=numpy.float32)\n assert axis.ndim == 1\n assert axis.size == 3\n if (self._rotate.angle != angle or\n not numpy.all(numpy.equal(axis, self._rotate.axis))):\n self._rotate.setAngleAxis(angle, axis)\n self._updated(Item3DChangedType.TRANSFORM)\n\n def getRotation(self):\n \"\"\"Returns the rotation set by :meth:`setRotation`.\n\n :return: (angle, axis)\n :rtype: 2-tuple (float, numpy.ndarray)\n \"\"\"\n return self._rotate.angle, self._rotate.axis\n\n def setMatrix(self, matrix=None):\n \"\"\"Set the transform matrix\n\n :param numpy.ndarray matrix: 3x3 transform matrix\n \"\"\"\n matrix4x4 = numpy.identity(4, dtype=numpy.float32)\n\n if matrix is not None:\n matrix = numpy.array(matrix, dtype=numpy.float32)\n assert matrix.shape == (3, 3)\n matrix4x4[:3, :3] = matrix\n\n if not 
numpy.all(numpy.equal(matrix4x4, self._matrix.getMatrix())):\n self._matrix.setMatrix(matrix4x4)\n self._updated(Item3DChangedType.TRANSFORM)\n\n def getMatrix(self):\n \"\"\"Returns the matrix set by :meth:`setMatrix`\n\n :return: 3x3 matrix\n :rtype: numpy.ndarray\"\"\"\n return self._matrix.getMatrix(copy=True)[:3, :3]\n\n # Bounding box\n\n def _setForegroundColor(self, color):\n \"\"\"Set the color of the bounding box\n\n :param color: RGBA color as 4 floats in [0, 1]\n \"\"\"\n self._getScenePrimitive().color = color\n super(DataItem3D, self)._setForegroundColor(color)\n\n def isBoundingBoxVisible(self):\n \"\"\"Returns item's bounding box visibility.\n\n :rtype: bool\n \"\"\"\n return self._getScenePrimitive().boxVisible\n\n def setBoundingBoxVisible(self, visible):\n \"\"\"Set item's bounding box visibility.\n\n :param bool visible:\n True to show the bounding box, False (default) to hide it\n \"\"\"\n visible = bool(visible)\n primitive = self._getScenePrimitive()\n if visible != primitive.boxVisible:\n primitive.boxVisible = visible\n self._updated(Item3DChangedType.BOUNDING_BOX_VISIBLE)\n\n\nclass BaseNodeItem(DataItem3D):\n \"\"\"Base class for data item having children (e.g., group, 3d volume).\"\"\"\n\n def __init__(self, parent=None, group=None):\n \"\"\"Base class representing a group of items in the scene.\n\n :param parent: The View widget this item belongs to.\n :param Union[GroupBBox, None] group:\n The scene group to use for rendering\n \"\"\"\n DataItem3D.__init__(self, parent=parent, group=group)\n\n def getItems(self):\n \"\"\"Returns the list of items currently present in the group.\n\n :rtype: tuple\n \"\"\"\n raise NotImplementedError('getItems must be implemented in subclass')\n\n def visit(self, included=True):\n \"\"\"Generator visiting the group content.\n\n It traverses the group sub-tree in a top-down left-to-right way.\n\n :param bool included: True (default) to include self in visit\n \"\"\"\n if included:\n yield self\n for child in self.getItems():\n yield child\n if hasattr(child, 'visit'):\n for item in child.visit(included=False):\n yield item\n\n def pickItems(self, x, y, condition=None):\n \"\"\"Iterator over picked items in the group at given position.\n\n Each picked item yield a :class:`PickingResult` object\n holding the picking information.\n\n It traverses the group sub-tree in a left-to-right top-down way.\n\n :param int x: X widget device pixel coordinate\n :param int y: Y widget device pixel coordinate\n :param callable condition: Optional test called for each item\n checking whether to process it or not.\n \"\"\"\n viewport = self._getScenePrimitive().viewport\n if viewport is None:\n raise RuntimeError(\n 'Cannot perform picking: Item not attached to a widget')\n\n context = PickContext(x, y, viewport, condition)\n for result in self._pickItems(context):\n yield result\n\n def _pickItems(self, context):\n \"\"\"Implement :meth:`pickItems`\n\n :param PickContext context: Current picking context\n \"\"\"\n if not self.isVisible() or not context.isEnabled():\n return # empty iterator\n\n # Use a copy to discard context changes once this returns\n context = context.copy()\n\n if not self._pickFastCheck(context):\n return # empty iterator\n\n result = self._pick(context)\n if result is not None:\n yield result\n\n for child in self.getItems():\n if isinstance(child, BaseNodeItem):\n for result in child._pickItems(context):\n yield result # Flatten result\n\n else:\n result = child._pick(context)\n if result is not None:\n yield 
result\n\n\nclass _BaseGroupItem(BaseNodeItem):\n \"\"\"Base class for group of items sharing a common transform.\"\"\"\n\n sigItemAdded = qt.Signal(object)\n \"\"\"Signal emitted when a new item is added to the group.\n\n The newly added item is provided by this signal\n \"\"\"\n\n sigItemRemoved = qt.Signal(object)\n \"\"\"Signal emitted when an item is removed from the group.\n\n The removed item is provided by this signal.\n \"\"\"\n\n def __init__(self, parent=None, group=None):\n \"\"\"Base class representing a group of items in the scene.\n\n :param parent: The View widget this item belongs to.\n :param Union[GroupBBox, None] group:\n The scene group to use for rendering\n \"\"\"\n BaseNodeItem.__init__(self, parent=parent, group=group)\n self._items = []\n\n def _getGroupPrimitive(self):\n \"\"\"Returns the group for which to handle children.\n\n This allows this group to be different from the primitive.\n \"\"\"\n return self._getScenePrimitive()\n\n def addItem(self, item, index=None):\n \"\"\"Add an item to the group\n\n :param Item3D item: The item to add\n :param int index: The index at which to place the item.\n By default it is appended to the end of the list.\n :raise ValueError: If the item is already in the group.\n \"\"\"\n assert isinstance(item, Item3D)\n assert item.parent() in (None, self)\n\n if item in self.getItems():\n raise ValueError(\"Item3D already in group: %s\" % item)\n\n item.setParent(self)\n if index is None:\n self._getGroupPrimitive().children.append(\n item._getScenePrimitive())\n self._items.append(item)\n else:\n self._getGroupPrimitive().children.insert(\n index, item._getScenePrimitive())\n self._items.insert(index, item)\n self.sigItemAdded.emit(item)\n\n def getItems(self):\n \"\"\"Returns the list of items currently present in the group.\n\n :rtype: tuple\n \"\"\"\n return tuple(self._items)\n\n def removeItem(self, item):\n \"\"\"Remove an item from the scene.\n\n :param Item3D item: The item to remove from the scene\n :raises ValueError: If the item does not belong to the group\n \"\"\"\n if item not in self.getItems():\n raise ValueError(\"Item3D not in group: %s\" % str(item))\n\n self._getGroupPrimitive().children.remove(item._getScenePrimitive())\n self._items.remove(item)\n item.setParent(None)\n self.sigItemRemoved.emit(item)\n\n def clearItems(self):\n \"\"\"Remove all item from the group.\"\"\"\n for item in self.getItems():\n self.removeItem(item)\n\n\nclass GroupItem(_BaseGroupItem):\n \"\"\"Group of items sharing a common transform.\"\"\"\n\n def __init__(self, parent=None):\n super(GroupItem, self).__init__(parent=parent)\n\n\nclass GroupWithAxesItem(_BaseGroupItem):\n \"\"\"\n Group of items sharing a common transform surrounded with labelled axes.\n \"\"\"\n\n def __init__(self, parent=None):\n \"\"\"Class representing a group of items in the scene with labelled axes.\n\n :param parent: The View widget this item belongs to.\n \"\"\"\n super(GroupWithAxesItem, self).__init__(parent=parent,\n group=axes.LabelledAxes())\n\n # Axes labels\n\n def setAxesLabels(self, xlabel=None, ylabel=None, zlabel=None):\n \"\"\"Set the text labels of the axes.\n\n :param str xlabel: Label of the X axis, None to leave unchanged.\n :param str ylabel: Label of the Y axis, None to leave unchanged.\n :param str zlabel: Label of the Z axis, None to leave unchanged.\n \"\"\"\n labelledAxes = self._getScenePrimitive()\n if xlabel is not None:\n labelledAxes.xlabel = xlabel\n\n if ylabel is not None:\n labelledAxes.ylabel = ylabel\n\n if zlabel is not 
None:\n labelledAxes.zlabel = zlabel\n\n class _Labels(tuple):\n \"\"\"Return type of :meth:`getAxesLabels`\"\"\"\n\n def getXLabel(self):\n \"\"\"Label of the X axis (str)\"\"\"\n return self[0]\n\n def getYLabel(self):\n \"\"\"Label of the Y axis (str)\"\"\"\n return self[1]\n\n def getZLabel(self):\n \"\"\"Label of the Z axis (str)\"\"\"\n return self[2]\n\n def getAxesLabels(self):\n \"\"\"Returns the text labels of the axes\n\n >>> group = GroupWithAxesItem()\n >>> group.setAxesLabels(xlabel='X')\n\n You can get the labels either as a 3-tuple:\n\n >>> xlabel, ylabel, zlabel = group.getAxesLabels()\n\n Or as an object with methods getXLabel, getYLabel and getZLabel:\n\n >>> labels = group.getAxesLabels()\n >>> labels.getXLabel()\n ... 'X'\n\n :return: object describing the labels\n \"\"\"\n labelledAxes = self._getScenePrimitive()\n return self._Labels((labelledAxes.xlabel,\n labelledAxes.ylabel,\n labelledAxes.zlabel))\n\n\nclass RootGroupWithAxesItem(GroupWithAxesItem):\n \"\"\"Special group with axes item for root of the scene.\n\n Uses 2 groups so that axes take transforms into account.\n \"\"\"\n\n def __init__(self, parent=None):\n super(RootGroupWithAxesItem, self).__init__(parent)\n self.__group = scene.Group()\n self.__group.transforms = self._getSceneTransforms()\n\n groupWithAxes = self._getScenePrimitive()\n groupWithAxes.transforms = [] # Do not apply transforms here\n groupWithAxes.children.append(self.__group)\n\n def _getGroupPrimitive(self):\n \"\"\"Returns the group for which to handle children.\n\n This allows this group to be different from the primitive.\n \"\"\"\n return self.__group\n"
]
| [
[
"numpy.identity",
"numpy.equal",
"numpy.array"
]
]
|
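
`DataItem3D.setMatrix` in the silx row above accepts a 3x3 linear transform and embeds it into a 4x4 homogeneous matrix before handing it to the scene graph. A standalone numpy sketch of that embedding; the helper name `to_matrix4x4` is ours, not silx's:

```python
# Embed a 3x3 linear part into a 4x4 identity, as setMatrix does.
import numpy as np

def to_matrix4x4(matrix3x3=None):
    m = np.identity(4, dtype=np.float32)
    if matrix3x3 is not None:
        m3 = np.array(matrix3x3, dtype=np.float32)
        assert m3.shape == (3, 3)
        m[:3, :3] = m3  # translation row/column stay at identity
    return m

# 90-degree rotation about Z as the linear part.
rot_z = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
print(to_matrix4x4(rot_z))
```
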
jRicciL/ml_and_ensemble_docking | [
"d2bf7010d6df34710e860b0c01f2746b4dc8e09a"
]
| [
"fxa/5_Machine_Learning/nested_cv_results/lr_nested_cv_lr.py"
]
| [
"# Nested vs non-nested cross-validation\n# Based on: https://scikit-learn.org/stable/auto_examples/model_selection/plot_nested_cross_validation_iris.html\n\n# Filename with the results\nfile_name = '../4_Ensemble_docking_results/df_DkSc_results_COCRYS_DEKOIS_DUD.pkl'\n\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import \\\n GridSearchCV, RandomizedSearchCV, \\\n cross_val_score, StratifiedKFold\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Random trials\nPROT_NAME = 'fxa'\nMODEL = 'LR'\nN_CPUS = 16\nNUM_TRIALS = 30\nk_CV_INNER = 4\nk_CV_OUTER = 4\nESTIMATOR = LogisticRegression\nOPTIMIZER = GridSearchCV \n\n# Import the data\ndf_dk_res = pd.read_pickle(file_name)\n\n# Extract the features columns: Docking scores\nX = df_dk_res.drop('activity', axis = 1).values\ny = df_dk_res['activity'].values\n\n# Previously optimized params \n# CDK2 - LogisticRegression\n# Estimator with pre-optimized hyprms\npre_optimized_hyparams = {\n 'C': 0.01, \n 'penalty': 'l2', \n 'max_iter':400\n}\n\n# GRID HYPERPARAMETERS\ngrid_hyprms = {\n 'C' : np.geomspace(1e-6, 1e2, 5),\n 'penalty' : ['l1', 'l2', None],\n 'max_iter': [400]\n}\n\n\n# Arrays to store the scores per repetition (means)\ndefault_hypms_scores = np.zeros(NUM_TRIALS)\npreOpt_hypms_scores = np.zeros(NUM_TRIALS)\nnested_scores = np.zeros(NUM_TRIALS)\nnon_nested_scores = np.zeros(NUM_TRIALS)\n\n\n# Arrays to stroe the scores per cv validation test inside repetition\nind_default_hypms_scores = np.zeros(NUM_TRIALS*k_CV_OUTER)\nind_preOpt_hypms_scores = np.zeros(NUM_TRIALS*k_CV_OUTER)\nind_nested_scores = np.zeros(NUM_TRIALS*k_CV_OUTER)\n\n\n# ********* Loop for the trial *********\nfor i in range(NUM_TRIALS):\n # Here different cross-validation techniques could be applied\n \n # ********** Perform the splittings **********\n # Outer splitting\n outer_cv = StratifiedKFold(n_splits = k_CV_OUTER, \n shuffle = True,\n random_state = i)\n # Inner splitting\n inner_cv = StratifiedKFold(n_splits = k_CV_INNER,\n shuffle = True,\n random_state = i)\n\n \n # ********** Estimator with default hyprms **********\n estimator_DEFAULT = ESTIMATOR(max_iter = 400) # Only for LR\n default_hypms_score = cross_val_score(\n estimator = estimator_DEFAULT,\n X = X, \n y = y,\n cv = outer_cv,\n scoring = 'roc_auc',\n n_jobs = N_CPUS\n )\n default_hypms_scores[i] = default_hypms_score.mean()\n ind_default_hypms_scores[i*k_CV_OUTER:(i+1)*k_CV_OUTER] = default_hypms_score\n \n \n # ********** Estimator with pre optimized hyprms **********\n estimator_PREOPTIMIZED = ESTIMATOR(\n **pre_optimized_hyparams)\n preOpt_hypms_score = cross_val_score(\n estimator = estimator_PREOPTIMIZED,\n X = X, \n y = y,\n cv = outer_cv,\n scoring = 'roc_auc',\n n_jobs = N_CPUS\n )\n preOpt_hypms_scores[i] = preOpt_hypms_score.mean()\n ind_preOpt_hypms_scores[i*k_CV_OUTER:(i+1)*k_CV_OUTER] = preOpt_hypms_score\n \n \n # ********** Estimator with optimized hyprms inside outer loop **********\n estimator = ESTIMATOR()\n clf_gs = OPTIMIZER(\n estimator = estimator,\n param_grid = grid_hyprms,\n cv = inner_cv,\n scoring = 'roc_auc',\n n_jobs = N_CPUS\n )\n clf_gs.fit(X, y)\n non_nested_scores[i] = clf_gs.best_score_\n\n\n # ********** Nested CV with parameter optimization **********\n # Inside each fold of the cross_val_score perform a GridSearch\n nested_score = cross_val_score(\n estimator = clf_gs,\n X = X, \n y = y,\n cv = outer_cv,\n scoring = 'roc_auc',\n 
n_jobs = N_CPUS\n )\n nested_scores[i] = nested_score.mean()\n ind_nested_scores[i*k_CV_OUTER:(i+1)*k_CV_OUTER] = nested_score\n\n # Save the results at each repetition\n\n # NUM_TRIALS*k_CV_OUTER rows dataframe\n df_1 = pd.DataFrame({\n 'nested': ind_nested_scores,\n 'non_nested': np.repeat(non_nested_scores, k_CV_OUTER),\n 'preOptHpm': ind_preOpt_hypms_scores,\n 'defHpm': ind_default_hypms_scores\n })\n df_1.to_csv(f'./DF1_{PROT_NAME}_{MODEL}_{NUM_TRIALS}reps_{k_CV_OUTER}x{k_CV_INNER}nCV_results.csv')\n\n\n df_2 = pd.DataFrame({\n 'nested': nested_scores,\n 'non_nested': non_nested_scores,\n 'pre_optimized_hyprms': preOpt_hypms_scores,\n 'default_hyprms': default_hypms_scores\n })\n df_2.to_csv(f'./DF2_MEANS_{PROT_NAME}_{MODEL}_{NUM_TRIALS}reps_{k_CV_OUTER}x{k_CV_INNER}nCV_results.csv')\n"
]
| [
[
"pandas.read_pickle",
"sklearn.model_selection.StratifiedKFold",
"numpy.zeros",
"pandas.DataFrame",
"numpy.geomspace",
"numpy.repeat",
"sklearn.model_selection.cross_val_score"
]
]
|
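
The script above contrasts nested and non-nested cross-validation on ensemble docking scores. Since the docking-score pickle it loads is not available here, a minimal runnable version of the same pattern on synthetic data; the fold counts and the `C` grid mirror the script, everything else is a stand-in:

```python
# Nested vs. non-nested CV sketch, assuming only scikit-learn and numpy.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import (GridSearchCV, StratifiedKFold,
                                     cross_val_score)

X, y = make_classification(n_samples=300, random_state=0)
inner_cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=0)
outer_cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=0)

clf = GridSearchCV(LogisticRegression(max_iter=400),
                   param_grid={"C": np.geomspace(1e-6, 1e2, 5)},
                   cv=inner_cv, scoring="roc_auc")

# Non-nested: best_score_ reuses the same folds that chose the hyperparameters,
# so it is optimistically biased.
clf.fit(X, y)
print("non-nested AUC:", clf.best_score_)

# Nested: the grid search is re-run inside every outer training fold,
# so the outer test folds never influence hyperparameter selection.
print("nested AUC:",
      cross_val_score(clf, X, y, cv=outer_cv, scoring="roc_auc").mean())
```
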
SilvinWillemsen/lorenz | [
"d76d3a7a8560622545644c6ed241781600d611f2"
]
| [
"lorenz/plotting.py"
]
| [
"\"\"\"\n\nThe main function plotLorenz plots the output of the result provided by \nsolver.solve. The output includes 3 2D plots and 1 3D plot. An internal \nfunction _plot2D handles the 2D plots.\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\n\ndef _plot2D (vecs, vel_vecs, num_hor, num_vert, idx_to_plot, scatter_size):\n \n \"\"\"\n Internal function to be used from plotting.plotLorenz(...). The function \n generates a 2D plot from two columns of the data in vecs determined by\n num_hor (x-axis) and num_vert (y-axis). Variables are called [hor]izontal \n and [vert]ical as to not confuse with the spatial coordinates x and y.\n\n\n INPUT:: \n \n vecs : tuple (3) of (Numpy) Arrays of floats\n The (x, y, z)-coordinate over time ((x, y, z)[n]). Columns 0, 1,\n and 2 contain x[n], y[n] and z[n] respectively.\n \n vel_vecs : tuple (3) of (Numpy) Arrays of floats\n The velocities of x, y and z respectively.\n \n num_hor : (Numpy) Arrays of floats\n What column of vecs and vel_vecs to use for the horizontal (x) \n axis of the plot.\n \n num_vert : (Numpy) Arrays of floats\n What column of vecs and vel_vecs to use for the vertical (y) axis\n of the plot.\n\n idx_to_plot : range\n Range object containing the temporal indices to plot.\n \n scatter_size : int\n Size of the plotted points.\n\n OUTPUT::\n \n None.\n\n \"\"\"\n \n # initialise figure\n fig = plt.figure(figsize=(7, 5))\n fig.add_axes([0.05, 0.1, 0.8, 0.8])\n axis_names = ['x', 'y', 'z']\n vel_vec = np.sqrt(vel_vecs[num_hor]**2 + vel_vecs[num_vert]**2);\n \n # plot state of the system with colours denoting velocity\n p = plt.scatter (vecs[num_hor][idx_to_plot], \n vecs[num_vert][idx_to_plot],\n s = scatter_size,\n c = vel_vec[idx_to_plot])\n \n # plot initial condition (in red)\n plt.scatter (vecs[num_hor][0], vecs[num_vert][0], s = scatter_size*2, c = 'red')\n \n # add axis labels\n plt.xlabel (axis_names[num_hor])\n plt.ylabel (axis_names[num_vert]) \n \n plt.title('2D plot of (' + axis_names[num_hor] + ',' + axis_names[num_vert] + ') view')\n\n # add grid\n plt.grid (True)\n \n # add colourbar\n cbaxes = fig.add_axes([0.88, 0.1, 0.03, 0.8]) \n plt.colorbar(p, cax = cbaxes) \n cbaxes.set_ylabel('velocity', rotation=0, position=(1,1.05))\n \n \n \ndef plotLorenz (vecs, dt, file_name = \"\", step_size = 5, scatter_size = 10):\n \n \"\"\"\n \n Function that plots the result of the ODE simulation lorenz.solver.solve(...)\n and saves plots to .pdf files if file_name is not empty (\"\").\n \n The plots use the matplotlib.pyplot functionality and the scatter function\n and colourcode the plots based on the velocity of the system. \n\n The initial condition of the ODE simulation :math:`(x_0, y_0, z_0)` -- is \n plotted in red, whereas the rest of the colours are determined by the\n velocity data stored in vel_vecs.\n \n\n Two plots are generated:\n A subplot containing the following three perspectives\n :math:`(x,y)`\n :math:`(x,z)`\n :math:`(y,z)`\n \n A 3D plot with all axes :math:`(x, y, z)`\n \n INPUT::\n \n vecs : tuple of (Numpy) Arrays of floats\n The (x, y, z)-coordinate over time ((x, y, z)[n]).\n \n dt : float\n Time step (s).\n \n file_name : string, optional\n Name of the file calling this function. Needed for storing files\n in the correct folder. 
The default is \"\" and no files will be\n saved if file_name = \"\".\n \n step_size : int, optional\n Determines the (discrete) temporal interval between two plotted points.\n The default is 5.\n \n scatter_size : int, optional\n Size of the points in the scatterplot. The default is 10.\n\n OUTPUT:: \n \n None.\n\n\n \"\"\"\n\n \n if file_name == \"\":\n save_files = False\n print ('Generating plots...')\n\n else:\n save_files = True\n print ('Generating plots and saving files...')\n\n \n tic = time.time();\n \n # Initialise velocity vectors for colouring the plots\n N = vecs[0].shape[0]\n x_vel = np.zeros(N)\n y_vel = np.zeros(N)\n z_vel = np.zeros(N)\n\n x_vel = (vecs[0][1:] - vecs[0][0:-1]) / dt\n y_vel = (vecs[1][1:] - vecs[1][0:-1]) / dt\n z_vel = (vecs[2][1:] - vecs[2][0:-1]) / dt\n\n vel_vecs = (x_vel, y_vel, z_vel)\n\n # calculate 3D velocity vector\n xyz_vel = np.sqrt (vel_vecs[0]**2 + vel_vecs[1]**2 + vel_vecs[2]**2)\n\n \n # PLOTTING 2D #\n \n idx_to_plot = range (step_size, N-1, step_size);\n \n # Generate 2D plots using plot2D function\n _plot2D (vecs, vel_vecs, 0, 1, idx_to_plot, scatter_size)\n if save_files:\n plt.savefig('output_files/' + file_name + '_output/xyPlot.pdf')\n \n _plot2D (vecs, vel_vecs, 0, 2, idx_to_plot, scatter_size)\n if save_files:\n plt.savefig('output_files/' + file_name + '_output/xzPlot.pdf')\n \n _plot2D (vecs, vel_vecs, 1, 2, idx_to_plot, scatter_size)\n if save_files:\n plt.savefig('output_files/' + file_name + '_output/yzPlot.pdf')\n\n\n # PLOTTING 3D #\n\n fig = plt.figure()\n ax = plt.subplot (projection='3d', position=[0.05, 0.1, 0.8, 0.8])\n ax.scatter (vecs[0][0],\n vecs[1][0],\n vecs[2][0],\n s = scatter_size*2,\n c = 'red')\n\n p = ax.scatter (vecs[0][idx_to_plot],\n vecs[1][idx_to_plot],\n vecs[2][idx_to_plot],\n s = scatter_size,\n c = xyz_vel[idx_to_plot])\n \n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.set_title('3D plot')\n\n cbaxes = fig.add_axes([0.85, 0.1, 0.03, 0.8]) \n plt.colorbar(p, cax = cbaxes) \n cbaxes.set_ylabel('velocity', rotation=0, position=(0, 0.99))\n \n if save_files:\n plt.savefig('output_files/' + file_name + '_output/xyzPlot.pdf')\n\n toc = time.time() - tic\n \n if save_files:\n print (f'Done generating plots and saving files! It took {toc:1.3} seconds to generate the plots and save the files.')\n else:\n print (f'Done generating plots! It took {toc:1.3} seconds to generate the plots.')\n"
]
| [
[
"matplotlib.pyplot.colorbar",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplot"
]
]
|
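
`plotLorenz` above colours each scatter point by the finite-difference speed of the trajectory and marks the initial condition in red. A compact sketch of that idea on a stand-in curve (the Lorenz solver itself is not reproduced here; only numpy and matplotlib are assumed):

```python
# Velocity-coloured scatter, following the _plot2D recipe above.
import numpy as np
import matplotlib.pyplot as plt

dt = 0.01
t = np.arange(0, 10, dt)
x, y = np.cos(t), np.sin(2 * t)  # stand-in trajectory

# Forward differences give one fewer sample than the trajectory itself,
# which is why plotLorenz only indexes up to N-1 when plotting.
x_vel = (x[1:] - x[:-1]) / dt
y_vel = (y[1:] - y[:-1]) / dt
speed = np.sqrt(x_vel**2 + y_vel**2)

idx = range(5, len(t) - 1, 5)            # step_size = 5
p = plt.scatter(x[idx], y[idx], s=10, c=speed[idx])
plt.scatter(x[0], y[0], s=20, c="red")   # initial condition in red
plt.colorbar(p, label="velocity")
plt.show()
```
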
neutrinoceros/astropy | [
"40ba5e4c609d2760152898b8d92a146e3e38c744"
]
| [
"astropy/io/ascii/tests/test_ecsv.py"
]
| [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nThis module tests some of the methods related to the ``ECSV``\nreader/writer.\n\"\"\"\nfrom astropy.table.column import MaskedColumn\nimport os\nimport copy\nimport sys\nfrom io import StringIO\nfrom contextlib import nullcontext\n\nimport pytest\nimport numpy as np\nimport yaml\n\nfrom astropy.table import Table, Column, QTable\nfrom astropy.table.table_helpers import simple_table\nfrom astropy.units import allclose as quantity_allclose\nfrom astropy.units import QuantityInfo\n\nfrom astropy.utils.exceptions import AstropyUserWarning\nfrom astropy.utils.compat import NUMPY_LT_1_19_1\n\nfrom astropy.io.ascii.ecsv import DELIMITERS\nfrom astropy.io import ascii\nfrom astropy import units as u\n\nfrom astropy.io.tests.mixin_columns import mixin_cols, compare_attrs\nfrom .common import TEST_DIR\n\nDTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',\n 'uint64', 'float16', 'float32', 'float64', 'float128',\n 'str']\nif not hasattr(np, 'float128') or os.name == 'nt' or sys.maxsize <= 2**32:\n DTYPES.remove('float128')\n\nT_DTYPES = Table()\n\nfor dtype in DTYPES:\n if dtype == 'bool':\n data = np.array([False, True, False])\n elif dtype == 'str':\n data = np.array(['ab 0', 'ab, 1', 'ab2'])\n else:\n data = np.arange(3, dtype=dtype)\n c = Column(data, unit='m / s', description='descr_' + dtype,\n meta={'meta ' + dtype: 1})\n T_DTYPES[dtype] = c\n\nT_DTYPES.meta['comments'] = ['comment1', 'comment2']\n\n# Corresponds to simple_table()\nSIMPLE_LINES = ['# %ECSV 1.0',\n '# ---',\n '# datatype:',\n '# - {name: a, datatype: int64}',\n '# - {name: b, datatype: float64}',\n '# - {name: c, datatype: string}',\n '# schema: astropy-2.0',\n 'a b c',\n '1 1.0 c',\n '2 2.0 d',\n '3 3.0 e']\n\n\ndef test_write_simple():\n \"\"\"\n Write a simple table with common types. This shows the compact version\n of serialization with one line per column.\n \"\"\"\n t = simple_table()\n\n out = StringIO()\n t.write(out, format='ascii.ecsv')\n assert out.getvalue().splitlines() == SIMPLE_LINES\n\n\ndef test_write_full():\n \"\"\"\n Write a full-featured table with common types and explicitly checkout output\n \"\"\"\n t = T_DTYPES['bool', 'int64', 'float64', 'str']\n lines = ['# %ECSV 1.0',\n '# ---',\n '# datatype:',\n '# - name: bool',\n '# unit: m / s',\n '# datatype: bool',\n '# description: descr_bool',\n '# meta: {meta bool: 1}',\n '# - name: int64',\n '# unit: m / s',\n '# datatype: int64',\n '# description: descr_int64',\n '# meta: {meta int64: 1}',\n '# - name: float64',\n '# unit: m / s',\n '# datatype: float64',\n '# description: descr_float64',\n '# meta: {meta float64: 1}',\n '# - name: str',\n '# unit: m / s',\n '# datatype: string',\n '# description: descr_str',\n '# meta: {meta str: 1}',\n '# meta: !!omap',\n '# - comments: [comment1, comment2]',\n '# schema: astropy-2.0',\n 'bool int64 float64 str',\n 'False 0 0.0 \"ab 0\"',\n 'True 1 1.0 \"ab, 1\"',\n 'False 2 2.0 ab2']\n\n out = StringIO()\n t.write(out, format='ascii.ecsv')\n assert out.getvalue().splitlines() == lines\n\n\ndef test_write_read_roundtrip():\n \"\"\"\n Write a full-featured table with all types and see that it round-trips on\n readback. 
Use both space and comma delimiters.\n \"\"\"\n t = T_DTYPES\n for delimiter in DELIMITERS:\n out = StringIO()\n t.write(out, format='ascii.ecsv', delimiter=delimiter)\n\n t2s = [Table.read(out.getvalue(), format='ascii.ecsv'),\n Table.read(out.getvalue(), format='ascii'),\n ascii.read(out.getvalue()),\n ascii.read(out.getvalue(), format='ecsv', guess=False),\n ascii.read(out.getvalue(), format='ecsv')]\n for t2 in t2s:\n assert t.meta == t2.meta\n for name in t.colnames:\n assert t[name].attrs_equal(t2[name])\n assert np.all(t[name] == t2[name])\n\n\ndef test_bad_delimiter():\n \"\"\"\n Passing a delimiter other than space or comma gives an exception\n \"\"\"\n out = StringIO()\n with pytest.raises(ValueError) as err:\n T_DTYPES.write(out, format='ascii.ecsv', delimiter='|')\n assert 'only space and comma are allowed' in str(err.value)\n\n\ndef test_bad_header_start():\n \"\"\"\n Bad header without initial # %ECSV x.x\n \"\"\"\n lines = copy.copy(SIMPLE_LINES)\n lines[0] = '# %ECV 0.9'\n with pytest.raises(ascii.InconsistentTableError):\n Table.read('\\n'.join(lines), format='ascii.ecsv', guess=False)\n\n\ndef test_bad_delimiter_input():\n \"\"\"\n Illegal delimiter in input\n \"\"\"\n lines = copy.copy(SIMPLE_LINES)\n lines.insert(2, '# delimiter: |')\n with pytest.raises(ValueError) as err:\n Table.read('\\n'.join(lines), format='ascii.ecsv', guess=False)\n assert 'only space and comma are allowed' in str(err.value)\n\n\ndef test_multidim_input():\n \"\"\"\n Multi-dimensional column in input\n \"\"\"\n t = Table()\n t['a'] = np.arange(24).reshape(2, 3, 4)\n t['a'].info.description = 'description'\n t['a'].info.meta = {1: 2}\n t['b'] = [1, 2]\n\n out = StringIO()\n t.write(out, format='ascii.ecsv')\n t2 = Table.read(out.getvalue(), format='ascii.ecsv')\n\n assert np.all(t2['a'] == t['a'])\n assert t2['a'].shape == t['a'].shape\n assert t2['a'].dtype == t['a'].dtype\n assert t2['a'].info.description == t['a'].info.description\n assert t2['a'].info.meta == t['a'].info.meta\n\n assert np.all(t2['b'] == t['b'])\n\n\ndef test_round_trip_empty_table():\n \"\"\"Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)\"\"\"\n t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c'])\n out = StringIO()\n t.write(out, format='ascii.ecsv')\n t2 = Table.read(out.getvalue(), format='ascii.ecsv')\n assert t.dtype == t2.dtype\n assert len(t2) == 0\n\n\ndef test_csv_ecsv_colnames_mismatch():\n \"\"\"\n Test that mismatch in column names from normal CSV header vs.\n ECSV YAML header raises the expected exception.\n \"\"\"\n lines = copy.copy(SIMPLE_LINES)\n header_index = lines.index('a b c')\n lines[header_index] = 'a b d'\n with pytest.raises(ValueError) as err:\n ascii.read(lines, format='ecsv')\n assert \"column names from ECSV header ['a', 'b', 'c']\" in str(err.value)\n\n\ndef test_regression_5604():\n \"\"\"\n See https://github.com/astropy/astropy/issues/5604 for more.\n \"\"\"\n t = Table()\n t.meta = {\"foo\": 5 * u.km, \"foo2\": u.s}\n t[\"bar\"] = [7] * u.km\n\n out = StringIO()\n t.write(out, format=\"ascii.ecsv\")\n\n assert '!astropy.units.Unit' in out.getvalue()\n assert '!astropy.units.Quantity' in out.getvalue()\n\n\ndef assert_objects_equal(obj1, obj2, attrs, compare_class=True):\n if compare_class:\n assert obj1.__class__ is obj2.__class__\n\n # For a column that is a native astropy Column, ignore the specified\n # `attrs`. 
This happens for a mixin like Quantity that is stored in a\n # `Table` (not QTable).\n if isinstance(obj1, Column):\n attrs = []\n\n assert obj1.shape == obj2.shape\n\n info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description']\n for attr in attrs + info_attrs:\n a1 = obj1\n a2 = obj2\n for subattr in attr.split('.'):\n try:\n a1 = getattr(a1, subattr)\n a2 = getattr(a2, subattr)\n except AttributeError:\n a1 = a1[subattr]\n a2 = a2[subattr]\n\n if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':\n assert quantity_allclose(a1, a2, rtol=1e-10)\n else:\n assert np.all(a1 == a2)\n\n # For no attrs that means we just compare directly.\n if not attrs:\n if isinstance(obj1, np.ndarray) and obj1.dtype.kind == 'f':\n assert quantity_allclose(obj1, obj2, rtol=1e-15)\n else:\n assert np.all(obj1 == obj2)\n\n\ndef test_ecsv_mixins_ascii_read_class():\n \"\"\"Ensure that ascii.read(ecsv_file) returns the correct class\n (QTable if any Quantity subclasses, Table otherwise).\n \"\"\"\n # Make a table with every mixin type except Quantities\n t = QTable({name: col for name, col in mixin_cols.items()\n if not isinstance(col.info, QuantityInfo)})\n out = StringIO()\n t.write(out, format=\"ascii.ecsv\")\n t2 = ascii.read(out.getvalue(), format='ecsv')\n assert type(t2) is Table\n\n # Add a single quantity column\n t['lon'] = mixin_cols['lon']\n\n out = StringIO()\n t.write(out, format=\"ascii.ecsv\")\n t2 = ascii.read(out.getvalue(), format='ecsv')\n assert type(t2) is QTable\n\n\ndef test_ecsv_mixins_qtable_to_table():\n \"\"\"Test writing as QTable and reading as Table. Ensure correct classes\n come out.\n \"\"\"\n names = sorted(mixin_cols)\n\n t = QTable([mixin_cols[name] for name in names], names=names)\n out = StringIO()\n t.write(out, format=\"ascii.ecsv\")\n t2 = Table.read(out.getvalue(), format='ascii.ecsv')\n\n assert t.colnames == t2.colnames\n\n for name, col in t.columns.items():\n col2 = t2[name]\n attrs = compare_attrs[name]\n compare_class = True\n\n if isinstance(col.info, QuantityInfo):\n # Downgrade Quantity to Column + unit\n assert type(col2) is Column\n # Class-specific attributes like `value` or `wrap_angle` are lost.\n attrs = ['unit']\n compare_class = False\n # Compare data values here (assert_objects_equal doesn't know how in this case)\n assert np.allclose(col.value, col2, rtol=1e-10)\n\n assert_objects_equal(col, col2, attrs, compare_class)\n\n\[email protected]('table_cls', (Table, QTable))\ndef test_ecsv_mixins_as_one(table_cls):\n \"\"\"Test write/read all cols at once and validate intermediate column names\"\"\"\n names = sorted(mixin_cols)\n\n serialized_names = ['ang',\n 'cr.x', 'cr.y', 'cr.z',\n 'dt',\n 'el.x', 'el.y', 'el.z',\n 'lat',\n 'lon',\n 'nd',\n 'obj',\n 'qdb',\n 'qdex',\n 'qmag',\n 'sc.ra', 'sc.dec',\n 'scd.ra', 'scd.dec', 'scd.distance',\n 'scd.obstime',\n 'scdc.x', 'scdc.y', 'scdc.z',\n 'scdc.obstime',\n 'scpm.ra', 'scpm.dec', 'scpm.distance',\n 'scpm.pm_ra_cosdec', 'scpm.pm_dec',\n 'scpmrv.ra', 'scpmrv.dec', 'scpmrv.distance',\n 'scpmrv.pm_ra_cosdec', 'scpmrv.pm_dec',\n 'scpmrv.radial_velocity',\n 'scrv.ra', 'scrv.dec', 'scrv.distance',\n 'scrv.radial_velocity',\n 'sd.d_lon_coslat', 'sd.d_lat', 'sd.d_distance',\n 'sr.lon', 'sr.lat', 'sr.distance',\n 'srd.lon', 'srd.lat', 'srd.distance',\n 'srd.differentials.s.d_lon_coslat',\n 'srd.differentials.s.d_lat',\n 'srd.differentials.s.d_distance',\n 'tm', # serialize_method is formatted_value\n 'tm2', # serialize_method is formatted_value\n 'tm3.jd1', 'tm3.jd2', # serialize is jd1_jd2\n 
'tm3.location.x', 'tm3.location.y', 'tm3.location.z',\n 'x']\n\n t = table_cls([mixin_cols[name] for name in names], names=names)\n\n out = StringIO()\n t.write(out, format=\"ascii.ecsv\")\n t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')\n\n assert t.colnames == t2.colnames\n\n # Read as a ascii.basic table (skip all the ECSV junk)\n t3 = table_cls.read(out.getvalue(), format='ascii.basic')\n assert t3.colnames == serialized_names\n\n\ndef make_multidim(col, ndim):\n \"\"\"Take a col with length=2 and make it N-d by repeating elements.\n\n For the special case of ndim==1 just return the original.\n\n The output has shape [3] * ndim. By using 3 we can be sure that repeating\n the two input elements gives an output that is sufficiently unique for\n the multidim tests.\n \"\"\"\n if ndim > 1:\n import itertools\n idxs = [idx for idx, _ in zip(itertools.cycle([0, 1]), range(3 ** ndim))]\n col = col[idxs].reshape([3] * ndim)\n return col\n\n\[email protected]('name_col', list(mixin_cols.items()))\[email protected]('table_cls', (Table, QTable))\[email protected]('ndim', (1, 2, 3))\ndef test_ecsv_mixins_per_column(table_cls, name_col, ndim):\n \"\"\"Test write/read one col at a time and do detailed validation.\n This tests every input column type as 1-d, 2-d and 3-d.\n \"\"\"\n name, col = name_col\n\n c = make_multidim(np.array([1.0, 2.0]), ndim)\n col = make_multidim(col, ndim)\n t = table_cls([c, col, c], names=['c1', name, 'c2'])\n t[name].info.description = 'description'\n\n out = StringIO()\n t.write(out, format=\"ascii.ecsv\")\n t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')\n\n assert t.colnames == t2.colnames\n\n for colname in t.colnames:\n assert len(t2[colname].shape) == ndim\n compare = ['data'] if colname in ('c1', 'c2') else compare_attrs[colname]\n assert_objects_equal(t[colname], t2[colname], compare)\n\n # Special case to make sure Column type doesn't leak into Time class data\n if name.startswith('tm'):\n assert t2[name]._time.jd1.__class__ is np.ndarray\n assert t2[name]._time.jd2.__class__ is np.ndarray\n\n\ndef test_round_trip_masked_table_default(tmpdir):\n \"\"\"Test (mostly) round-trip of MaskedColumn through ECSV using default serialization\n that uses an empty string \"\" to mark NULL values. Note:\n\n >>> simple_table(masked=True)\n <Table masked=True length=3>\n a b c\n int64 float64 str1\n ----- ------- ----\n -- 1.0 c\n 2 2.0 --\n 3 -- e\n \"\"\"\n filename = str(tmpdir.join('test.ecsv'))\n\n t = simple_table(masked=True) # int, float, and str cols with one masked element\n t.write(filename)\n\n t2 = Table.read(filename)\n assert t2.masked is False\n assert t2.colnames == t.colnames\n for name in t2.colnames:\n # From formal perspective the round-trip columns are the \"same\"\n assert np.all(t2[name].mask == t[name].mask)\n assert np.all(t2[name] == t[name])\n\n # But peeking under the mask shows that the underlying data are changed\n # because by default ECSV uses \"\" to represent masked elements.\n t[name].mask = False\n t2[name].mask = False\n assert not np.all(t2[name] == t[name]) # Expected diff\n\n\ndef test_round_trip_masked_table_serialize_mask(tmpdir):\n \"\"\"Same as prev but set the serialize_method to 'data_mask' so mask is written out\"\"\"\n filename = str(tmpdir.join('test.ecsv'))\n\n t = simple_table(masked=True) # int, float, and str cols with one masked element\n t['c'][0] = '' # This would come back as masked for default \"\" NULL marker\n\n # MaskedColumn with no masked elements. 
See table the MaskedColumnInfo class\n # _represent_as_dict() method for info about how we test a column with no masked elements.\n t['d'] = [1, 2, 3]\n\n t.write(filename, serialize_method='data_mask')\n\n t2 = Table.read(filename)\n assert t2.masked is False\n assert t2.colnames == t.colnames\n for name in t2.colnames:\n assert np.all(t2[name].mask == t[name].mask)\n assert np.all(t2[name] == t[name])\n\n # Data under the mask round-trips also (unmask data to show this).\n t[name].mask = False\n t2[name].mask = False\n assert np.all(t2[name] == t[name])\n\n\[email protected]('table_cls', (Table, QTable))\ndef test_ecsv_round_trip_user_defined_unit(table_cls, tmpdir):\n \"\"\"Ensure that we can read-back enabled user-defined units.\"\"\"\n\n # Test adapted from #8897, where it was noted that this works\n # but was not tested.\n filename = str(tmpdir.join('test.ecsv'))\n unit = u.def_unit('bandpass_sol_lum')\n t = table_cls()\n t['l'] = np.arange(5) * unit\n t.write(filename)\n # without the unit enabled, get UnrecognizedUnit\n if table_cls is QTable:\n ctx = pytest.warns(u.UnitsWarning, match=r\"'bandpass_sol_lum' did not parse .*\")\n else:\n ctx = nullcontext()\n # Note: The read might also generate ResourceWarning, in addition to UnitsWarning\n with ctx:\n t2 = table_cls.read(filename)\n assert isinstance(t2['l'].unit, u.UnrecognizedUnit)\n assert str(t2['l'].unit) == 'bandpass_sol_lum'\n if table_cls is QTable:\n assert np.all(t2['l'].value == t['l'].value)\n else:\n assert np.all(t2['l'] == t['l'])\n\n # But with it enabled, it works.\n with u.add_enabled_units(unit):\n t3 = table_cls.read(filename)\n assert t3['l'].unit is unit\n assert np.all(t3['l'] == t['l'])\n\n # Just to be sure, aloso try writing with unit enabled.\n filename2 = str(tmpdir.join('test2.ecsv'))\n t3.write(filename2)\n t4 = table_cls.read(filename)\n assert t4['l'].unit is unit\n assert np.all(t4['l'] == t['l'])\n\n\ndef test_read_masked_bool():\n txt = \"\"\"\\\n# %ECSV 1.0\n# ---\n# datatype:\n# - {name: col0, datatype: bool}\n# schema: astropy-2.0\ncol0\n1\n0\nTrue\n\"\"\nFalse\n\"\"\"\n dat = ascii.read(txt, format='ecsv')\n col = dat['col0']\n assert isinstance(col, MaskedColumn)\n assert np.all(col.mask == [False, False, False, True, False])\n assert np.all(col == [True, False, True, False, False])\n\n\[email protected]('serialize_method', ['null_value', 'data_mask'])\[email protected]('dtype', [np.int64, np.float64, bool, str])\[email protected]('delimiter', [',', ' '])\ndef test_roundtrip_multidim_masked_array(serialize_method, dtype, delimiter):\n # TODO also test empty string with null value\n t = Table()\n col = MaskedColumn(np.arange(12).reshape(2, 3, 2), dtype=dtype)\n if dtype is str:\n # np does something funny and gives a dtype of U21.\n col = col.astype('U2')\n col.mask[0, 0, 0] = True\n col.mask[1, 1, 1] = True\n t['a'] = col\n t['b'] = ['x', 'y'] # Add another column for kicks\n out = StringIO()\n t.write(out, format='ascii.ecsv', serialize_method=serialize_method)\n t2 = Table.read(out.getvalue(), format='ascii.ecsv')\n\n assert t2.masked is False\n assert t2.colnames == t.colnames\n for name in t2.colnames:\n assert t2[name].dtype == t[name].dtype\n if hasattr(t[name], 'mask'):\n assert np.all(t2[name].mask == t[name].mask)\n assert np.all(t2[name] == t[name])\n\n\[email protected]('subtype', ['some-user-type', 'complex'])\ndef test_multidim_unknown_subtype(subtype):\n \"\"\"Test an ECSV file with a string type but unknown subtype\"\"\"\n txt = f\"\"\"\\\n# %ECSV 1.0\n# ---\n# datatype:\n# - 
name: a\n# datatype: string\n# subtype: {subtype}\n# schema: astropy-2.0\na\n[1,2]\n[3,4]\"\"\"\n with pytest.warns(AstropyUserWarning,\n match=rf\"unexpected subtype '{subtype}' set for column 'a'\"):\n t = ascii.read(txt, format='ecsv')\n\n assert t['a'].dtype.kind == 'U'\n assert t['a'][0] == '[1,2]'\n\n\ndef test_multidim_bad_shape():\n \"\"\"Test a malformed ECSV file\"\"\"\n txt = \"\"\"\\\n# %ECSV 1.0\n# ---\n# datatype:\n# - name: a\n# datatype: string\n# subtype: int64[3]\n# schema: astropy-2.0\na\n[1,2]\n[3,4]\"\"\"\n with pytest.raises(ValueError, match=\"column 'a' failed to convert: shape mismatch\"):\n Table.read(txt, format='ascii.ecsv')\n\n\ndef test_write_not_json_serializable():\n t = Table()\n t['a'] = np.array([set([1, 2]), 1], dtype=object)\n match = \"could not convert column 'a' to string: Object of type set is not JSON serializable\"\n out = StringIO()\n with pytest.raises(TypeError, match=match):\n t.write(out, format='ascii.ecsv')\n\n\ndef test_read_not_json_serializable():\n \"\"\"Test a malformed ECSV file\"\"\"\n txt = \"\"\"\\\n# %ECSV 1.0\n# ---\n# datatype:\n# - {name: a, datatype: string, subtype: json}\n# schema: astropy-2.0\na\nfail\n[3,4]\"\"\"\n match = \"column 'a' failed to convert: column value is not valid JSON\"\n with pytest.raises(ValueError, match=match):\n Table.read(txt, format='ascii.ecsv')\n\n\ndef test_read_complex():\n \"\"\"Test an ECSV file with a complex column\"\"\"\n txt = \"\"\"\\\n# %ECSV 1.0\n# ---\n# datatype:\n# - {name: a, datatype: complex}\n# schema: astropy-2.0\na\n1+1j\n2+2j\"\"\"\n match = \"datatype 'complex' of column 'a' is not in allowed values\"\n with pytest.raises(ValueError, match=match):\n Table.read(txt, format='ascii.ecsv')\n\n\[email protected](NUMPY_LT_1_19_1,\n reason=\"numpy<=1.19.0 cannot parse 'complex' as string\")\ndef test_read_complex_v09():\n \"\"\"Test an ECSV file with a complex column for version 0.9\n Note: ECSV Version <=0.9 files should not raise ValueError\n for complex datatype to maintain backwards compatibility.\n \"\"\"\n txt = \"\"\"\\\n# %ECSV 0.9\n# ---\n# datatype:\n# - {name: a, datatype: complex}\n# schema: astropy-2.0\na\n1+1j\n2+2j\"\"\"\n t = Table.read(txt, format='ascii.ecsv')\n assert t['a'].dtype.type is np.complex128\n\n\ndef test_read_bad_datatype_for_object_subtype():\n \"\"\"Test a malformed ECSV file\"\"\"\n txt = \"\"\"\\\n# %ECSV 1.0\n# ---\n# datatype:\n# - {name: a, datatype: int64, subtype: json}\n# schema: astropy-2.0\na\nfail\n[3,4]\"\"\"\n match = \"column 'a' failed to convert: datatype of column 'a' must be \\\"string\\\"\"\n with pytest.raises(ValueError, match=match):\n Table.read(txt, format='ascii.ecsv')\n\n\ndef test_read_bad_datatype():\n \"\"\"Test a malformed ECSV file\"\"\"\n txt = \"\"\"\\\n# %ECSV 1.0\n# ---\n# datatype:\n# - {name: a, datatype: object}\n# schema: astropy-2.0\na\nfail\n[3,4]\"\"\"\n match = r\"column 'a' is not in allowed values \\('bool', 'int8', 'int16', 'int32'\"\n with pytest.raises(ValueError, match=match):\n Table.read(txt, format='ascii.ecsv')\n\n\ndef test_read_bad_datatype_v09():\n \"\"\"Test a malformed ECSV file for version 0.9\n Note: ECSV Version <=0.9 files should not raise ValueError\n for malformed datatypes to maintain backwards compatibility.\n \"\"\"\n txt = \"\"\"\\\n# %ECSV 0.9\n# ---\n# datatype:\n# - {name: a, datatype: object}\n# schema: astropy-2.0\na\nfail\n[3,4]\"\"\"\n t = Table.read(txt, format='ascii.ecsv')\n assert t['a'][0] == \"fail\"\n assert type(t['a'][1]) is str\n assert type(t['a'].dtype) == 
np.dtype(\"O\")\n\n\ndef test_full_repr_roundtrip():\n \"\"\"Test round-trip of float values to full precision even with format\n specified\"\"\"\n t = Table()\n t['a'] = np.array([np.pi, 1/7], dtype=np.float64)\n t['a'].info.format = '.2f'\n out = StringIO()\n t.write(out, format='ascii.ecsv')\n t2 = Table.read(out.getvalue(), format='ascii.ecsv')\n assert np.all(t['a'] == t2['a'])\n assert t2['a'].info.format == '.2f'\n\n\n#############################################################################\n# Define a number of specialized columns for testing and the expected values\n# of `datatype` for each column.\n#############################################################################\n\n# First here is some helper code used to make the expected outputs code.\ndef _get_ecsv_header_dict(text):\n lines = [line.strip() for line in text.splitlines()]\n lines = [line[2:] for line in lines if line.startswith('#')]\n lines = lines[2:] # Get rid of the header\n out = yaml.safe_load('\\n'.join(lines))\n return out\n\n\ndef _make_expected_values(cols):\n from pprint import pformat\n for name, col in cols.items():\n t = Table()\n t[name] = col\n out = StringIO()\n t.write(out, format='ascii.ecsv')\n hdr = _get_ecsv_header_dict(out.getvalue())\n fmt_hdr = pformat(hdr['datatype'])\n print(f'exps[{name!r}] =', fmt_hdr[:1])\n print(fmt_hdr[1:])\n print()\n\n\n# Expected values of `datatype` for each column\nexps = {}\ncols = {}\n\n# Run of the mill scalar for completeness\ncols['scalar'] = np.array([1, 2], dtype=np.int16)\nexps['scalar'] = [\n {'datatype': 'int16', 'name': 'scalar'}]\n\n# Array of lists that works as a 2-d variable array. This is just treated\n# as an object.\ncols['2-d variable array lists'] = c = np.empty(shape=(2,), dtype=object)\nc[0] = [[1, 2], [\"a\", 4]]\nc[1] = [[1, 2, 3], [4, 5.25, 6]]\nexps['2-d variable array lists'] = [\n {'datatype': 'string',\n 'name': '2-d variable array lists',\n 'subtype': 'json'}]\n\n# Array of numpy arrays that is a 2-d variable array\ncols['2-d variable array numpy'] = c = np.empty(shape=(2,), dtype=object)\nc[0] = np.array([[1, 2], [3, 4]], dtype=np.float32)\nc[1] = np.array([[1, 2, 3], [4, 5.5, 6]], dtype=np.float32)\nexps['2-d variable array numpy'] = [\n {'datatype': 'string',\n 'name': '2-d variable array numpy',\n 'subtype': 'float32[2,null]'}]\n\ncols['1-d variable array lists'] = np.array([[1, 2], [3, 4, 5]], dtype=object)\nexps['1-d variable array lists'] = [\n {'datatype': 'string',\n 'name': '1-d variable array lists',\n 'subtype': 'json'}]\n\n# Variable-length array\ncols['1-d variable array numpy'] = np.array(\n [np.array([1, 2], dtype=np.uint8),\n np.array([3, 4, 5], dtype=np.uint8)], dtype=object)\nexps['1-d variable array numpy'] = [\n {'datatype': 'string',\n 'name': '1-d variable array numpy',\n 'subtype': 'uint8[null]'}]\n\ncols['1-d variable array numpy str'] = np.array(\n [np.array(['a', 'b']),\n np.array(['c', 'd', 'e'])], dtype=object)\nexps['1-d variable array numpy str'] = [\n {'datatype': 'string',\n 'name': '1-d variable array numpy str',\n 'subtype': 'string[null]'}]\n\ncols['1-d variable array numpy bool'] = np.array(\n [np.array([True, False]),\n np.array([True, False, True])], dtype=object)\nexps['1-d variable array numpy bool'] = [\n {'datatype': 'string',\n 'name': '1-d variable array numpy bool',\n 'subtype': 'bool[null]'}]\n\ncols['1-d regular array'] = np.array([[1, 2], [3, 4]], dtype=np.int8)\nexps['1-d regular array'] = [\n {'datatype': 'string',\n 'name': '1-d regular array',\n 'subtype': 
'int8[2]'}]\n\ncols['2-d regular array'] = np.arange(8, dtype=np.float16).reshape(2, 2, 2)\nexps['2-d regular array'] = [\n {'datatype': 'string',\n 'name': '2-d regular array',\n 'subtype': 'float16[2,2]'}]\n\ncols['scalar object'] = np.array([{'a': 1}, {'b':2}], dtype=object)\nexps['scalar object'] = [\n {'datatype': 'string', 'name': 'scalar object', 'subtype': 'json'}]\n\ncols['1-d object'] = np.array(\n [[{'a': 1}, {'b':2}],\n [{'a': 1}, {'b':2}]], dtype=object)\nexps['1-d object'] = [\n {'datatype': 'string',\n 'name': '1-d object',\n 'subtype': 'json[2]'}]\n\n\[email protected]('name,col,exp',\n list(zip(cols, cols.values(), exps.values())))\ndef test_specialized_columns(name, col, exp):\n \"\"\"Test variable length lists, multidim columns, object columns.\n \"\"\"\n t = Table()\n t[name] = col\n out = StringIO()\n t.write(out, format='ascii.ecsv')\n hdr = _get_ecsv_header_dict(out.getvalue())\n assert hdr['datatype'] == exp\n t2 = Table.read(out.getvalue(), format='ascii.ecsv')\n assert t2.colnames == t.colnames\n for name in t2.colnames:\n assert t2[name].dtype == t[name].dtype\n for val1, val2 in zip(t2[name], t[name]):\n if isinstance(val1, np.ndarray):\n assert val1.dtype == val2.dtype\n assert np.all(val1 == val2)\n\n\ndef test_full_subtypes():\n \"\"\"Read ECSV file created by M. Taylor that includes scalar, fixed array,\n variable array for all datatypes. This file has missing values for all\n columns as both per-value null and blank entries for the entire column\n value.\n\n Note: original file was modified to include blank values in f_float and\n f_double columns.\n \"\"\"\n t = Table.read(os.path.join(TEST_DIR, 'data', 'subtypes.ecsv'))\n colnames = ('i_index,'\n 's_byte,s_short,s_int,s_long,s_float,s_double,s_string,s_boolean,'\n 'f_byte,f_short,f_int,f_long,f_float,f_double,f_string,f_boolean,'\n 'v_byte,v_short,v_int,v_long,v_float,v_double,v_string,v_boolean,'\n 'm_int,m_double').split(',')\n assert t.colnames == colnames\n\n type_map = {'byte': 'int8',\n 'short': 'int16',\n 'int': 'int32',\n 'long': 'int64',\n 'float': 'float32',\n 'double': 'float64',\n 'string': 'str',\n 'boolean': 'bool'}\n\n for col in t.itercols():\n info = col.info\n if info.name == 'i_index':\n continue\n\n assert isinstance(col, MaskedColumn)\n\n type_name = info.name[2:] # short, int, etc\n subtype = info.name[:1]\n\n if subtype == 's': # Scalar\n assert col.shape == (16,)\n\n if subtype == 'f': # Fixed array\n assert col.shape == (16, 3)\n\n if subtype == 'v': # Variable array\n assert col.shape == (16,)\n assert info.dtype.name == 'object'\n for val in col:\n assert isinstance(val, np.ndarray)\n assert val.dtype.name.startswith(type_map[type_name])\n assert len(val) in [0, 1, 2, 3]\n else:\n assert info.dtype.name.startswith(type_map[type_name])\n\n\ndef test_masked_empty_subtypes():\n \"\"\"Test blank field in subtypes. 
Similar to previous test but with explicit\n checks of values\"\"\"\n txt = \"\"\"\n # %ECSV 1.0\n # ---\n # datatype:\n # - {name: o, datatype: string, subtype: json}\n # - {name: f, datatype: string, subtype: 'int64[2]'}\n # - {name: v, datatype: string, subtype: 'int64[null]'}\n # schema: astropy-2.0\n o f v\n null [0,1] [1]\n \"\" \"\" \"\"\n [1,2] [2,3] [2,3]\n \"\"\"\n t = Table.read(txt, format='ascii.ecsv')\n assert np.all(t['o'] == np.array([None, -1, [1, 2]], dtype=object))\n assert np.all(t['o'].mask == [False, True, False])\n\n exp = np.ma.array([[0, 1], [-1, -1], [2, 3]], mask=[[0, 0], [1, 1], [0, 0]])\n assert np.all(t['f'] == exp)\n assert np.all(t['f'].mask == exp.mask)\n\n assert np.all(t['v'][0] == [1])\n assert np.all(t['v'][2] == [2, 3])\n assert np.all(t['v'].mask == [False, True, False])\n\n\ndef test_masked_vals_in_array_subtypes():\n \"\"\"Test null values in fixed and variable array subtypes.\"\"\"\n t = Table()\n t['f'] = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]], dtype=np.int64)\n t['v'] = np.empty(2, dtype=object)\n t['v'][0] = np.ma.array([1, 2], mask=[0, 1], dtype=np.int64)\n t['v'][1] = np.ma.array([3, 4, 5], mask=[1, 0, 0], dtype=np.int64)\n\n out = StringIO()\n t.write(out, format='ascii.ecsv')\n txt = \"\"\"\n # %ECSV 1.0\n # ---\n # datatype:\n # - {name: f, datatype: string, subtype: 'int64[2]'}\n # - {name: v, datatype: string, subtype: 'int64[null]'}\n # schema: astropy-2.0\n f v\n [1,null] [1,null]\n [null,4] [null,4,5]\n \"\"\"\n hdr = _get_ecsv_header_dict(out.getvalue())\n hdr_exp = _get_ecsv_header_dict(txt)\n assert hdr == hdr_exp\n t2 = Table.read(out.getvalue(), format='ascii.ecsv')\n assert t2.colnames == t.colnames\n for name in t2.colnames:\n assert t2[name].dtype == t[name].dtype\n assert type(t2[name]) is type(t[name])\n for val1, val2 in zip(t2[name], t[name]):\n if isinstance(val1, np.ndarray):\n assert val1.dtype == val2.dtype\n if isinstance(val1, np.ma.MaskedArray):\n assert np.all(val1.mask == val2.mask)\n assert np.all(val1 == val2)\n\n\ndef test_guess_ecsv_with_one_column():\n \"\"\"Except for ECSV, guessing always requires at least 2 columns\"\"\"\n txt = \"\"\"\n # %ECSV 1.0\n # ---\n # datatype:\n # - {name: col, datatype: string, description: hello}\n # schema: astropy-2.0\n col\n 1\n 2\n \"\"\"\n t = ascii.read(txt)\n assert t['col'].dtype.kind == 'U' # would be int with basic format\n assert t['col'].description == 'hello'\n"
]
| [
[
"numpy.array",
"numpy.empty",
"numpy.allclose",
"numpy.ma.array",
"numpy.arange",
"numpy.all",
"numpy.dtype"
]
]
|
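The astropy tests above revolve around ECSV 1.0's subtype mechanism: variable-length and multidimensional columns are serialized as JSON-encoded strings whose header carries a `subtype` annotation such as `int64[null]`. A minimal round-trip sketch of that behavior, using only the public API exercised by the tests (a sketch, assuming astropy >= 4.3, where ECSV 1.0 support landed):

import numpy as np
from io import StringIO
from astropy.table import Table

t = Table()
# A ragged column is stored as an object array of numpy arrays; on write,
# ECSV declares it as datatype 'string' with a variable-length subtype.
t['v'] = np.array([np.array([1, 2]), np.array([3, 4, 5])], dtype=object)

out = StringIO()
t.write(out, format='ascii.ecsv')

# Reading the text back restores the ragged structure element by element.
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert all(np.all(a == b) for a, b in zip(t['v'], t2['v']))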
dan-armstrong/amrrt | [
"84d063a45adafc317c141bbad7306aa1dbca9b69"
]
| [
"amrrt/planners.py"
]
| [
"#Copyright (c) 2020 Ocado. All Rights Reserved.\n\nimport time, math, random\nimport numpy as np\n\nfrom amrrt.tree import Tree\nfrom amrrt.metrics import EuclideanMetric\nfrom amrrt.cost import Cost\n\n\nclass RTSamplingPlanner:\n \"\"\"\n Real time sampling planner, parent class for RTRRT and AMRRT\n \"\"\"\n def __init__(self, space, agent_pos, assisting_metric=None, t_exp=0.15, t_root=0.003, t_steer=0.002):\n \"\"\"\n :param space: State space planner operates in\n :param agent_pos: Initial position of agent\n :param assisting_metric: Assisting metric for the planner, default None for Euclidean\n :param t_exp: Time spent expanding and rewiring\n :param t_root: Time spent rewiring at the root\n :param t_steer: Time spent steering\n \"\"\"\n self.space = space\n self.euclidean_metric = EuclideanMetric()\n self.assisting_metric = assisting_metric if assisting_metric is not None else self.euclidean_metric\n self.euclidean_distance = lambda a, b : self.euclidean_metric.distance(a, b)\n self.assisting_distance = lambda a, b : self.assisting_metric.distance(a, b)\n self.tree = Tree(self.space.create_state(agent_pos), self.space, assisting_dist_fn=self.assisting_distance)\n self.goal = None\n self.root_queue = []\n self.rewired_root = set()\n self.t_exp, self.t_root, self.t_steer = t_exp, t_root, t_steer\n\n def _nearest_node(self, node):\n \"\"\"\n Find nearest node, first by Euclidean metric if the path is free or fallback on the assisting metric\n \"\"\"\n euclidean = self.tree.euclidean_nearest(node)\n if self.space.free_path(node, euclidean) or self.euclidean_metric == self.assisting_metric:\n return euclidean\n return self.tree.nearest(node)\n\n def set_root(self, root):\n \"\"\"\n Set the root node for the planner tree, and reset rewiring queue\n \"\"\"\n self.tree.set_root(root)\n self.root_queue = [self.tree.root]\n self.rewired_root = {self.tree.root}\n\n def set_goal(self, pos):\n \"\"\"\n Set new goal from position, try to add goal to tree if within range & path free\n \"\"\"\n self.goal = self.space.create_state(pos)\n xnearest = self._nearest_node(self.goal)\n if self.euclidean_distance(xnearest, self.goal) < self.max_step and self.space.free_path(xnearest, self.goal):\n self.tree.add_node(self.goal, xnearest)\n\n def _add_node(self, xnew, xnearest, nearby):\n \"\"\"\n Add new node to the tree, connecting it to the node in nearby that minimises cost\n Also attempt to add the goal to the tree via xnew if within range & free path\n\n :param xnew: New state to be added\n :param xnearest: Nearest node to xnew\n :param nearby: Set of nodes in the neighbourhood of xnew\n \"\"\"\n xmin = xnearest\n cmin = self.cost(xnearest) + self.euclidean_distance(xnearest, xnew)\n for xnear in nearby:\n cnew = self.cost(xnear) + self.euclidean_distance(xnear, xnew)\n if cnew < cmin and self.space.free_path(xnear, xnew):\n xmin = xnear\n cmin = cnew\n self.tree.add_node(xnew, xmin)\n if self.goal is not None and self.euclidean_distance(xnew, self.goal) < self.max_step and self.space.free_path(xnew, self.goal):\n self.tree.add_node(self.goal, xnew)\n\n def goal_path(self):\n \"\"\"\n Return path to the goal if one exists, if not a path to the nearest node\n \"\"\"\n if self.goal is None:\n return []\n if self.goal in self.tree.nodes:\n return self.tree.path(self.goal)\n return self.tree.path(self._nearest_node(self.goal))\n\n def cost(self, node):\n \"\"\"\n Returns path cost to given node (as Cost object)\n \"\"\"\n if node == self.tree.root:\n return Cost(0, False)\n path = self.tree.path(node)\n 
running_cost = 0\n blocked = False\n for i in range(1, len(path)):\n if not self.space.dynamically_free_path(path[i-1], path[i]) : blocked = True\n running_cost += self.euclidean_distance(path[i-1], path[i])\n return Cost(running_cost, blocked)\n\n def add_dynamic_obstacle(self, pos, radius):\n \"\"\"\n Add circular dynamic obstacle, and reset goal queues\n\n :param pos: Position of dynamic obstacle\n :param radius: Radius of dynamic obstacle\n \"\"\"\n self.space.add_dynamic_obstacle(pos, radius)\n if self.goal is not None : self.set_goal(self.goal.pos)\n\n\nclass RTRRTPlanner(RTSamplingPlanner):\n def __init__(self, space, agent_pos, assisting_metric=None, t_exp=0.15, t_root=0.003, t_rand=0.003, t_steer=0.002):\n \"\"\"\n :param space: State space planner operates in\n :param agent_pos: Initial position of agent\n :param assisting_metric: Assisting metric for the planner, default None for Euclidean\n :param t_exp: Time spent expanding and rewiring\n :param t_root: Time spent rewiring at the root\n :param t_rand: Time spent rewiring random nodes\n :param t_steer: Time spent steering\n \"\"\"\n super().__init__(space, agent_pos, assisting_metric=assisting_metric, t_exp=t_exp, t_root=t_root, t_steer=t_steer)\n self.rand_queue = []\n self.t_rand = t_rand\n self.max_step = 5\n self.node_density = 12\n\n def plan(self, agent_pos):\n \"\"\"\n Run main body of RT-RRT algorithm, explore & rewire to plan a path\n\n :param agent_pos: Agent's current position\n \"\"\"\n agent = self.space.create_state(agent_pos)\n start = time.time()\n while time.time() - start < self.t_exp:\n self.expand()\n goal_path = self.goal_path()\n if self.euclidean_distance(agent, self.tree.root) < self.max_step/10 and len(goal_path) > 1:\n self.set_root(goal_path[1])\n return self.tree.root\n\n def expand(self):\n \"\"\"\n Expand the tree through sampling & rewiring\n \"\"\"\n xrand = self.sample_state()\n xnearest = self.tree.nearest(xrand)\n xnew = self.steer(xnearest, xrand)\n if xnew is not None:\n nearby = self.find_nodes_near(xnew)\n if len(nearby) < self.node_density or self.euclidean_distance(xnearest, xrand) > self.max_step or xnew == self.goal:\n self._add_node(xnew, xnearest, nearby)\n self.rand_queue.insert(0, xnew)\n else:\n self.rand_queue.insert(0, xnearest)\n self.rewire_rand()\n self.rewire_root()\n\n def find_nodes_near(self, node):\n radius = max(math.sqrt((self.space.area()*self.node_density) / (math.pi*self.tree.node_amount)), self.max_step)\n return self.tree.neighbourhood(node, radius)\n\n def rewire_root(self):\n \"\"\"\n Rewire unseen nodes out from the root, resetting rewired_root once all nodes have been visited\n \"\"\"\n if len(self.root_queue) == 0:\n self.root_queue.append(self.tree.root)\n self.rewired_root = {self.tree.root}\n start = time.time()\n while len(self.root_queue) > 0 and time.time() - start < self.t_root:\n xqueue = self.root_queue.pop()\n nearby = self.find_nodes_near(xqueue)\n for xnear in nearby:\n if self.cost(xnear) + self.euclidean_distance(xnear, xqueue) < self.cost(xqueue) and self.space.free_path(xnear, xqueue):\n self.tree.update_edge(xnear, xqueue)\n if xnear not in self.rewired_root:\n self.root_queue.insert(0, xnear)\n self.rewired_root.add(xnear)\n\n def rewire_rand(self):\n \"\"\"\n Rewire random portions of the graph via the rand_queue\n \"\"\"\n start = time.time()\n while len(self.rand_queue) > 0 and time.time() - start < self.t_rand:\n xqueue = self.rand_queue.pop()\n nearby = self.find_nodes_near(xqueue)\n for xnear in nearby:\n if self.cost(xqueue) + 
self.euclidean_distance(xqueue, xnear) < self.cost(xnear) and self.space.free_path(xqueue, xnear):\n self.tree.update_edge(xqueue, xnear)\n self.rand_queue.insert(0, xnear)\n\n def sample_state(self, a=0.3, b=1.5):\n \"\"\"\n Return a randomly sampled state from the state space\n\n :param a: Scales probability of sampling on the line between the goal and its current nearest neighbour\n :param b: Scales probability of sampling between entire state space and within rewire ellipse\n \"\"\"\n p = np.random.rand()\n if p > 1-a and self.goal is not None:\n r = np.random.rand()\n return self.space.create_state(r * self.goal.pos + (1-r) * self.tree.nearest(self.goal).pos)\n elif p < (1-a)/b or self.goal is None or self.goal not in self.tree.nodes:\n return self.space.choose_state_uniform()\n else:\n cbest = self.cost(self.goal).to_float()\n cmin = self.euclidean_distance(self.tree.root, self.goal)\n return self.space.choose_state_ellipse(self.tree.root.pos, self.goal.pos, cbest, max(0, cbest**2 - cmin**2)**0.5)\n\n def steer(self, start, end):\n \"\"\"\n Return a state that grows the tree towards end from start\n \"\"\"\n return self.euclidean_metric.steer(self.space, start, end, np.inf, self.t_steer)\n\n\nclass AMRRTPlanner(RTSamplingPlanner):\n def __init__(self, space, agent_pos, assisting_metric=None, t_exp=0.15, t_root=0.002, t_goal=0.004, t_steer=0.002):\n \"\"\"\n :param space: State space planner operates in\n :param agent_pos: Initial position of agent\n :param assisting_metric: Assisting metric for the planner, default None for Euclidean\n :param t_exp: Time spent expanding and rewiring\n :param t_root: Time spent rewiring at the root\n :param t_goal: Time spent rewiring at the goal\n :param t_steer: Time spent steering\n \"\"\"\n super().__init__(space, agent_pos, assisting_metric=assisting_metric, t_exp=t_exp, t_root=t_root, t_steer=t_steer)\n self.goal_stack = []\n self.goal_queue = []\n self.t_goal = t_goal\n self.max_step = (self.space.bounds[0,1] - self.space.bounds[0,0]) * 0.05\n self.node_density = 20\n\n def plan(self, agent_pos):\n \"\"\"\n Run main body of AM-RRT algorithm, explore & rewire to plan a path\n\n :param agent_pos: Agent's current position\n \"\"\"\n agent = self.space.create_state(agent_pos)\n start = time.time()\n while time.time() - start < self.t_exp:\n self.expand()\n goal_path = self.goal_path()\n if self.euclidean_distance(agent, self.tree.root) < self.max_step/10 and len(goal_path) > 1:\n self.set_root(goal_path[1])\n return self.tree.root\n\n def expand(self):\n \"\"\"\n Expand the tree through sampling & rewiring\n \"\"\"\n xrand = self.sample_state()\n xnearest = self._nearest_node(xrand)\n xnew = self.steer(xnearest, xrand)\n if xnew is not None and xnew not in self.tree.nodes:\n nearby = self.tree.neighbourhood(xnew, self.max_step)\n if len(nearby) < self.node_density or self.euclidean_distance(xnearest, xrand) > self.max_step or xnew == self.goal:\n self._add_node(xnew, xnearest, nearby)\n self.rewire_root()\n if self.goal is not None and self.goal in self.tree.nodes:\n self.rewire_goal()\n\n def sample_state(self, a=0.3, b=1.5):\n \"\"\"\n Return a randomly sampled state from the state space\n\n :param a: Scales probability of sampling on the line between the goal and its current nearest neighbour\n :param b: Scales probability of sampling between entire state space and within rewire ellipse\n \"\"\"\n p = np.random.rand()\n if p > 1-a and self.goal is not None and self.goal not in self.tree.nodes:\n return self.space.create_state(self.goal.pos)\n 
elif p < (1-a)/b or self.goal is None or self.goal not in self.tree.nodes:\n return self.space.choose_state_uniform()\n else:\n cbest = self.cost(self.goal).to_float()\n cmin = self.euclidean_distance(self.tree.root, self.goal)\n return self.space.choose_state_ellipse(self.tree.root.pos, self.goal.pos, cbest, max(0, cbest**2 - cmin**2)**0.5)\n\n def rewire_root(self):\n \"\"\"\n Rewire unseen nodes out from the root, resetting rewired_root once all nodes have been visited\n \"\"\"\n if len(self.root_queue) == 0:\n self.root_queue.append(self.tree.root)\n self.rewired_root = {self.tree.root}\n start = time.time()\n while len(self.root_queue) > 0 and time.time() - start < self.t_root:\n xrewire = self.root_queue.pop()\n nearby = self.tree.neighbourhood(xrewire, self.max_step)\n for xnear in nearby:\n if self.cost(xnear) + self.euclidean_distance(xnear, xrewire) < self.cost(xrewire) and self.space.free_path(xnear, xrewire):\n self.tree.update_edge(xnear, xrewire)\n if xnear not in self.rewired_root:\n self.root_queue.insert(0, xnear)\n self.rewired_root.add(xnear)\n\n def rewire_goal(self):\n \"\"\"\n Rewire unseen nodes out from the root towards the goal along offshoots\n \"\"\"\n if len(self.goal_stack) == 0 and len(self.goal_queue) == 0:\n self.goal_stack.append(self.tree.root)\n self.seen_goal = set()\n start = time.time()\n while time.time() - start < self.t_goal and (len(self.goal_stack) > 0 or len(self.goal_queue) > 0):\n if len(self.goal_stack) > 0 : xrewire = self.goal_stack.pop()\n else : xrewire = self.goal_queue.pop()\n cbest = self.cost(self.goal).to_float()\n cmin = self.euclidean_distance(self.tree.root, self.goal)\n if self._within_ellipse(xrewire.pos, self.tree.root.pos, self.goal.pos, cbest/2, max(0, cbest**2-cmin**2)**0.5/2):\n nearby = self.tree.neighbourhood(xrewire, self.max_step)\n rev_sorted_indexes = sorted([(-self.assisting_distance(self.goal, nearby[i]), i) for i in range(len(nearby))])\n nearby_sorted = [nearby[i] for _, i in rev_sorted_indexes]\n frontier = []\n for xnear in nearby_sorted:\n if self.cost(xrewire) + self.euclidean_distance(xrewire, xnear) < self.cost(xnear) and self.space.free_path(xrewire, xnear):\n self.tree.update_edge(xrewire, xnear)\n if xnear not in self.seen_goal and self.space.free_path(xrewire, xnear):\n frontier.append(xnear)\n self.seen_goal.add(xnear)\n self.goal_stack = self.goal_stack + frontier\n self.goal_queue = frontier + self.goal_queue\n if len(self.goal_stack) > 0 and self.assisting_distance(self.goal_stack[-1], self.goal) > self.assisting_distance(xrewire, self.goal):\n self.goal_stack = []\n\n def steer(self, start, end):\n \"\"\"\n Return a state that grows the tree towards end from start\n \"\"\"\n if self.space.free_path(start, end):\n p = min(1, self.max_step/self.euclidean_distance(start, end))\n return self.space.create_state((1-p)*start.pos + p*end.pos)\n return self.assisting_metric.steer(self.space, start, end, self.max_step, self.t_steer)\n\n def set_goal(self, pos):\n \"\"\"\n Set new goal from position, try to add goal to tree if within range & path free, and reset goal stack/queue\n \"\"\"\n super().set_goal(pos)\n self.goal_stack = [self.tree.root]\n self.goal_queue = []\n self.seen_goal = {self.tree.root}\n\n def _within_ellipse(self, pos, fa, fb, major, minor):\n \"\"\"\n Check if point falls within given ellipse\n\n :param pos: Position of point being checked\n :param fa: Position of first ellipse focus\n :param fb: Position of second ellipse focus\n :param major: Length of the semi-major axis\n :param minor: Length of the 
semi-minor axis\n \"\"\"\n c = (fa + fb) / 2\n r = np.arctan2((fa-fb)[1], (fa-fb)[0])\n return (((np.cos(r)*(pos[0]-c[0]) + np.sin(r)*(pos[1]-c[1]))/major)**2 +\n ((np.sin(r)*(pos[0]-c[0]) - np.cos(r)*(pos[1]-c[1]))/minor)**2 <= 1)\n"
]
| [
[
"numpy.sin",
"numpy.random.rand",
"numpy.arctan2",
"numpy.cos"
]
]
|
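`AMRRTPlanner.rewire_goal` above gates its work on `_within_ellipse`, the informed-sampling region whose foci are the tree root and the goal, and whose semi-major axis is half the current best path cost. A standalone sketch of the same rotated-ellipse membership test, with illustrative foci and semi-axis values (the numbers are not from the repo):

import numpy as np

def within_ellipse(pos, fa, fb, semi_major, semi_minor):
    # Centre is the midpoint of the foci; r rotates the major axis
    # onto the x-axis before the standard (u/a)^2 + (v/b)^2 <= 1 test.
    c = (fa + fb) / 2
    r = np.arctan2((fa - fb)[1], (fa - fb)[0])
    dx, dy = pos[0] - c[0], pos[1] - c[1]
    u = (np.cos(r) * dx + np.sin(r) * dy) / semi_major
    v = (np.sin(r) * dx - np.cos(r) * dy) / semi_minor
    return u**2 + v**2 <= 1.0

fa, fb = np.array([0.0, 0.0]), np.array([4.0, 0.0])
assert within_ellipse(np.array([2.0, 0.5]), fa, fb, 3.0, 1.0)
assert not within_ellipse(np.array([2.0, 1.5]), fa, fb, 3.0, 1.0)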
NileGraddis/pynwb | [
"85ef17dc5d820deeaf1c40e5ed22e22336b691ec"
]
| [
"docs/gallery/general/iterative_write.py"
]
| [
"\"\"\"\nIterative Data Write\n====================\n\nThis example demonstrate how to iteratively write data arrays with applications to\nwriting large arrays without loading all data into memory and streaming data write.\n\n\"\"\"\n\n####################\n# Introduction\n# --------------------------------------------\n\n\n####################\n# What is Iterative Data Write?\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# In the typical write process, datasets are created and written as a whole. In contrast,\n# iterative data write refers to the writing of the content of a dataset in an incremental,\n# iterative fashion.\n\n####################\n# Why Iterative Data Write?\n# ^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# The possible applications for iterative data write are broad. Here we list a few typical applications\n# for iterative data write in practice.\n#\n# * **Large data arrays** A central challenge when dealing with large data arrays is that it is often\n# not feasible to load all of the data into memory. Using an iterative data write process allows us\n# to avoid this problem by writing the data one-subblock-at-a-time, so that we only need to hold\n# a small subset of the array in memory at any given time.\n# * **Data streaming** In the context of streaming data we are faced with several issues:\n# **1)** data is not available in memory but arrives in subblocks as the stream progresses\n# **2)** caching the data of a stream in-memory is often prohibitively expensive and volatile\n# **3)** the total size of the data is often unknown ahead of time.\n# Iterative data write allows us to address issues 1) and 2) by enabling us to save data to\n# file incrementally as it arrives from the data stream. Issue 3) is addressed in the HDF5\n# storage backend via support for chunking, enabling the creation of resizable arrays.\n#\n# * **Data generators** Data generators are in many ways similar to data streams only that the\n# data is typically being generated locally and programmatically rather than from an external\n# data source.\n# * **Sparse data arrays** In order to reduce storage size of sparse arrays a challenge is that while\n# the data array (e.g., a matrix) may be large, only few values are set. To avoid storage overhead\n# for storing the full array we can employ (in HDF5) a combination of chunking, compression, and\n# and iterative data write to significantly reduce storage cost for sparse data.\n#\n\n####################\n# Iterating Over Data Arrays\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# In PyNWB the process of iterating over large data arrays is implemented via the concept of\n# :py:class:`~hdmf.data_utils.DataChunk` and :py:class:`~hdmf.data_utils.AbstractDataChunkIterator`.\n#\n# * :py:class:`~hdmf.data_utils.DataChunk` is a simple data structure used to describe\n# a subset of a larger data array (i.e., a data chunk), consisting of:\n#\n# * ``DataChunk.data`` : the array with the data value(s) of the chunk and\n# * ``DataChunk.selection`` : the NumPy index tuple describing the location of the chunk in the whole array.\n#\n# * :py:class:`~hdmf.data_utils.AbstractDataChunkIterator` then defines a class for iterating over large\n# data arrays one-:py:class:`~hdmf.data_utils.DataChunk`-at-a-time.\n#\n# * :py:class:`~hdmf.data_utils.DataChunkIterator` is a specific implementation of an\n# :py:class:`~hdmf.data_utils.AbstractDataChunkIterator` that accepts any iterable and assumes\n# that we iterate over the first dimension of the data array. 
:py:class:`~hdmf.data_utils.DataChunkIterator`\n# also supports buffered read, i.e., multiple values from the input iterator can be combined to a single chunk.\n# This is useful for buffered I/O operations, e.g., to improve performance by accumulating data in memory and\n# writing larger blocks at once.\n#\n\n####################\n# Iterative Data Write: API\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# On the front end, all a user needs to do is create or wrap their data in a\n# :py:class:`~hdmf.data_utils.AbstractDataChunkIterator`. The I/O backend (e.g.,\n# :py:class:`~hdmf.backends.hdf5.h5tools.HDF5IO` or :py:class:`~pynwb.NWBHDF5IO`) then\n# implements the iterative processing of the data chunk iterators. PyNWB also provides\n# :py:class:`~hdmf.data_utils.DataChunkIterator`, a specific implementation of a data chunk iterator\n# which we can use to wrap common iterable types (e.g., generators, lists, or numpy arrays).\n# For more advanced use cases we then need to implement our own derived class of\n# :py:class:`~hdmf.data_utils.AbstractDataChunkIterator`.\n#\n# .. tip::\n#\n# Currently the HDF5 I/O backend of PyNWB (:py:class:`~hdmf.backends.hdf5.h5tools.HDF5IO`,\n# :py:class:`~pynwb.NWBHDF5IO`) processes iterative data writes one-dataset-at-a-time. This means that\n# while you may have an arbitrary number of iterative data writes, the write is performed in order.\n# In the future we may use a queuing process to enable the simultaneous processing of multiple iterative writes.\n#\n# Preparations:\n# ^^^^^^^^^^^^^^^^^^^^\n#\n# The data write in our examples really does not change. We therefore first create a\n# simple helper function to write a simple NWBFile containing a single timeseries, to\n# avoid repetition of the same code and to allow us to focus on the important parts of this tutorial.\n\nfrom datetime import datetime\nfrom dateutil.tz import tzlocal\nfrom pynwb import NWBFile, TimeSeries\nfrom pynwb import NWBHDF5IO\n\n\ndef write_test_file(filename, data):\n \"\"\"\n Simple helper function to write an NWBFile with a single timeseries containing data\n :param filename: String with the name of the output file\n :param data: The data of the timeseries\n \"\"\"\n\n # Create a test NWBFile\n start_time = datetime(2017, 4, 3, 11, tzinfo=tzlocal())\n create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())\n nwbfile = NWBFile('demonstrate NWBFile basics',\n 'NWB123',\n start_time,\n file_create_date=create_date)\n\n # Create our time series\n test_ts = TimeSeries(name='synthetic_timeseries',\n data=data, # <---------\n unit='SIunit',\n rate=1.0,\n starting_time=0.0)\n nwbfile.add_acquisition(test_ts)\n\n # Write the data to file\n io = NWBHDF5IO(filename, 'w')\n io.write(nwbfile)\n io.close()\n\n\n####################\n# Example: Write Data from Generators and Streams\n# -----------------------------------------------------\n#\n# Here we use a simple data generator but PyNWB does not make any assumptions about what happens\n# inside the generator. Instead of creating data programmatically, you may hence, e.g., receive\n# data from an acquisition system (or other source). 
We can, hence, use the same approach to write streaming data.\n\n####################\n# Step 1: Define the data generator\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\nfrom math import sin, pi\nfrom random import random\nimport numpy as np\n\n\ndef iter_sin(chunk_length=10, max_chunks=100):\n \"\"\"\n Generator creating a random number of chunks (but at most max_chunks) of length chunk_length containing\n random samples of sin([0, 2pi]).\n \"\"\"\n x = 0\n num_chunks = 0\n while(x < 0.5 and num_chunks < max_chunks):\n val = np.asarray([sin(random() * 2 * pi) for i in range(chunk_length)])\n x = random()\n num_chunks += 1\n yield val\n return\n\n\n####################\n# Step 2: Wrap the generator in a DataChunkIterator\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n\nfrom hdmf.data_utils import DataChunkIterator\n\ndata = DataChunkIterator(data=iter_sin(10))\n\n####################\n# Step 3: Write the data as usual\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# Here we use our wrapped generator to create the data for a synthetic time series.\n\nwrite_test_file(filename='basic_iterwrite_example.nwb',\n data=data)\n\n####################\n# Discussion\n# ^^^^^^^^^^\n# Note that here we actually do not know how long our timeseries will be.\n\nprint(\"maxshape=%s, recommended_data_shape=%s, dtype=%s\" % (str(data.maxshape),\n str(data.recommended_data_shape()),\n str(data.dtype)))\n\n####################\n# ``[Out]:``\n#\n# .. code-block:: python\n#\n# maxshape=(None, 10), recommended_data_shape=(1, 10), dtype=float64\n#\n# As we can see :py:class:`~hdmf.data_utils.DataChunkIterator` automatically recommends\n# in its ``maxshape`` that the first dimension of our array should be unlimited (``None``) and the second\n# dimension be ``10`` (i.e., the length of our chunks). Since :py:class:`~hdmf.data_utils.DataChunkIterator`\n# has no way of knowing the minimum size of the array it automatically recommends the size of the first\n# chunk as the minimum size (i.e., ``(1, 10)``) and also infers the data type automatically from the first chunk.\n# To further customize this behavior we may also define the ``maxshape``, ``dtype``, and ``buffer_size`` when\n# we create the :py:class:`~hdmf.data_utils.DataChunkIterator`.\n#\n# .. tip::\n#\n# We here used :py:class:`~hdmf.data_utils.DataChunkIterator` to conveniently wrap our data stream.\n# :py:class:`~hdmf.data_utils.DataChunkIterator` assumes that our generator yields, in **consecutive order**, a\n# **single** complete element along the **first dimension** of our array (i.e., iterate over the first\n# axis and yield one-element-at-a-time). This behavior is useful in many practical cases. However, if\n# this strategy does not match your needs, then you can alternatively implement your own derived\n# :py:class:`~hdmf.data_utils.AbstractDataChunkIterator`. 
We show an example of this next.\n#\n\n\n####################\n# Example: Optimizing Sparse Data Array I/O and Storage\n# -------------------------------------------------------\n#\n# Step 1: Create a data chunk iterator for our sparse matrix\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nfrom hdmf.data_utils import AbstractDataChunkIterator, DataChunk\n\n\nclass SparseMatrixIterator(AbstractDataChunkIterator):\n\n def __init__(self, shape, num_chunks, chunk_shape):\n \"\"\"\n :param shape: 2D tuple with the shape of the matrix\n :param num_chunks: Number of data chunks to be created\n :param chunk_shape: The shape of each chunk to be created\n :return:\n \"\"\"\n self.shape, self.num_chunks, self.chunk_shape = shape, num_chunks, chunk_shape\n self.__chunks_created = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n \"\"\"\n Return in each iteration a fully occupied data chunk of self.chunk_shape values at a random\n location within the matrix. Chunks are non-overlapping. REMEMBER: h5py does not support all\n fancy indexing that numpy does so we need to make sure our selection can be\n handled by the backend.\n \"\"\"\n if self.__chunks_created < self.num_chunks:\n data = np.random.rand(np.prod(self.chunk_shape)).reshape(self.chunk_shape)\n xmin = np.random.randint(0, int(self.shape[0] / self.chunk_shape[0]), 1)[0] * self.chunk_shape[0]\n xmax = xmin + self.chunk_shape[0]\n ymin = np.random.randint(0, int(self.shape[1] / self.chunk_shape[1]), 1)[0] * self.chunk_shape[1]\n ymax = ymin + self.chunk_shape[1]\n self.__chunks_created += 1\n return DataChunk(data=data,\n selection=np.s_[xmin:xmax, ymin:ymax])\n else:\n raise StopIteration\n\n next = __next__\n\n def recommended_chunk_shape(self):\n # Here we can optionally recommend what a good chunking should be.\n return self.chunk_shape\n\n def recommended_data_shape(self):\n # We know the full size of the array. In cases where we don't know the full size\n # this should be the minimum size.\n return self.shape\n\n @property\n def dtype(self):\n # The data type of our array\n return np.dtype(float)\n\n @property\n def maxshape(self):\n # We know the full shape of the array. 
If we don't know the size of a dimension\n # beforehand we can set the dimension to None instead\n return self.shape\n\n\n#####################\n# Step 2: Instantiate our sparse matrix\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n\n# Setting for our random sparse matrix\nxsize = 1000000\nysize = 1000000\nnum_chunks = 1000\nchunk_shape = (10, 10)\nnum_values = num_chunks * np.prod(chunk_shape)\n\n# Create our sparse matrix data.\ndata = SparseMatrixIterator(shape=(xsize, ysize),\n num_chunks=num_chunks,\n chunk_shape=chunk_shape)\n\n#####################\n# In order to also enable compression and other advanced HDF5 dataset I/O features we can then also\n# wrap our data via :py:class:`~hdmf.backends.hdf5.h5_utils.H5DataIO`.\nfrom hdmf.backends.hdf5.h5_utils import H5DataIO\nmatrix2 = SparseMatrixIterator(shape=(xsize, ysize),\n num_chunks=num_chunks,\n chunk_shape=chunk_shape)\ndata2 = H5DataIO(data=matrix2,\n compression='gzip',\n compression_opts=4)\n\n######################\n# We can now also customize the chunking, fillvalue, and other settings\n#\nfrom hdmf.backends.hdf5.h5_utils import H5DataIO\n\n# Increase the chunk size and set a fill value (no compression)\nmatrix3 = SparseMatrixIterator(shape=(xsize, ysize),\n num_chunks=num_chunks,\n chunk_shape=chunk_shape)\ndata3 = H5DataIO(data=matrix3,\n chunks=(100, 100),\n fillvalue=np.nan)\n\n# Increase the chunk size and add compression\nmatrix4 = SparseMatrixIterator(shape=(xsize, ysize),\n num_chunks=num_chunks,\n chunk_shape=chunk_shape)\ndata4 = H5DataIO(data=matrix4,\n compression='gzip',\n compression_opts=4,\n chunks=(100, 100),\n fillvalue=np.nan\n )\n\n####################\n# Step 3: Write the data as usual\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# Here we simply use our ``SparseMatrixIterator`` as input for our ``TimeSeries``\n\nwrite_test_file(filename='basic_sparse_iterwrite_example.nwb',\n data=data)\nwrite_test_file(filename='basic_sparse_iterwrite_compressed_example.nwb',\n data=data2)\nwrite_test_file(filename='basic_sparse_iterwrite_largechunks_example.nwb',\n data=data3)\nwrite_test_file(filename='basic_sparse_iterwrite_largechunks_compressed_example.nwb',\n data=data4)\n\n####################\n# Check the results\n# ^^^^^^^^^^^^^^^^^\n#\n# Now let's check the size of our data file and compare it against the expected full size of our matrix\nimport os\n\nexpected_size = xsize * ysize * 8 # This is the full size of our matrix in bytes\noccupied_size = num_values * 8 # Size of the non-zero values in our matrix in bytes\nfile_size = os.stat('basic_sparse_iterwrite_example.nwb').st_size # Real size of the file\nfile_size_compressed = os.stat('basic_sparse_iterwrite_compressed_example.nwb').st_size\nfile_size_largechunks = os.stat('basic_sparse_iterwrite_largechunks_example.nwb').st_size\nfile_size_largechunks_compressed = os.stat('basic_sparse_iterwrite_largechunks_compressed_example.nwb').st_size\nmbfactor = 1000. 
* 1000 # Factor used to convert to MegaBytes\n\nprint(\"1) Sparse Matrix Size:\")\nprint(\" Expected Size : %.2f MB\" % (expected_size / mbfactor))\nprint(\" Occupied Size : %.5f MB\" % (occupied_size / mbfactor))\nprint(\"2) NWB:N HDF5 file (no compression):\")\nprint(\" File Size : %.2f MB\" % (file_size / mbfactor))\nprint(\" Reduction : %.2f x\" % (expected_size / file_size))\nprint(\"3) NWB:N HDF5 file (with GZIP compression):\")\nprint(\" File Size : %.5f MB\" % (file_size_compressed / mbfactor))\nprint(\" Reduction : %.2f x\" % (expected_size / file_size_compressed))\nprint(\"4) NWB:N HDF5 file (large chunks):\")\nprint(\" File Size : %.5f MB\" % (file_size_largechunks / mbfactor))\nprint(\" Reduction : %.2f x\" % (expected_size / file_size_largechunks))\nprint(\"5) NWB:N HDF5 file (large chunks with compression):\")\nprint(\" File Size : %.5f MB\" % (file_size_largechunks_compressed / mbfactor))\nprint(\" Reduction : %.2f x\" % (expected_size / file_size_largechunks_compressed))\n\n####################\n# ``[Out]:``\n#\n# .. code-block:: python\n#\n# 1) Sparse Matrix Size:\n# Expected Size : 8000000.00 MB\n# Occupied Size : 0.80000 MB\n# 2) NWB:N HDF5 file (no compression):\n# File Size : 0.89 MB\n# Reduction : 9035219.28 x\n# 3) NWB:N HDF5 file (with GZIP compression):\n# File Size : 0.88847 MB\n# Reduction : 9004283.79 x\n# 4) NWB:N HDF5 file (large chunks):\n# File Size : 80.08531 MB\n# Reduction : 99893.47 x\n# 5) NWB:N HDF5 file (large chunks with compression):\n# File Size : 1.14671 MB\n# Reduction : 6976450.12 x\n#\n# Discussion\n# ^^^^^^^^^^\n#\n# * **1) vs 2):** While the full matrix would have a size of ``8TB``, the HDF5 file is only ``0.88MB``. This is roughly\n# the same as the real occupied size of ``0.8MB``. When using chunking, HDF5 does not allocate the full dataset but\n# only allocates chunks that actually contain data. In (2) the size of our chunks aligns perfectly with the\n# occupied chunks of our sparse matrix, hence, only the minimal amount of storage needs to be allocated.\n# A slight overhead (here 0.08MB) is expected because our file also contains the additional objects from\n# the NWBFile, plus some overhead for managing all the HDF5 metadata for all objects.\n# * **3) vs 2):** Adding compression does not yield any improvement here. This is expected because, again, we\n# selected the chunking here in a way that we already allocated the minimum amount of storage to represent our data\n# and lossless compression of random data is not efficient.\n# * **4) vs 2):** When we increase our chunk size to ``(100,100)`` (i.e., ``100x`` larger than the chunks produced by\n# our matrix generator) we observe a corresponding roughly ``100x`` increase in file size. This is expected\n# since our chunks now do not align perfectly with the occupied data and each occupied chunk is allocated fully.\n# * **5) vs 4):** When using compression for the larger chunks we see a significant reduction\n# in file size (``1.14MB`` vs. ``80MB``). This is because the allocated chunks now contain, in addition to the random\n# values, large areas of constant fillvalues, which compress easily.\n#\n# **Advantages:**\n#\n# * We only need to hold one :py:class:`~hdmf.data_utils.DataChunk` in memory at any given time\n# * Only the data chunks in the HDF5 file that contain non-default values are ever being allocated\n# * The overall size of our file is reduced significantly\n# * Reduced I/O load\n# * On read users can use the array as usual\n#\n# .. 
tip::\n#\n# With great power comes great responsibility **!** I/O and storage cost will depend, among other things, on the chunk size,\n# compression options, and the write pattern, i.e., the number and structure of the\n# :py:class:`~hdmf.data_utils.DataChunk` objects written. For example, using ``(1,1)`` chunks and writing them\n# one value at a time would result in poor I/O performance in most practical cases, because of the large number of\n# chunks and large number of small I/O operations required.\n#\n# .. tip::\n#\n# A word of caution: while this approach helps optimize storage, the in-memory representation on read is\n# still a dense numpy array. This behavior is convenient for many user interactions with the data but\n# can be problematic with regard to performance/memory when accessing large data subsets.\n#\n# .. code-block:: python\n#\n# io = NWBHDF5IO('basic_sparse_iterwrite_example.nwb', 'r')\n# nwbfile = io.read()\n# data = nwbfile.get_acquisition('synthetic_timeseries').data # <-- PyNWB does lazy load; no problem\n# subset = data[10:100, 10:100] # <-- Loading a subset is fine too\n# alldata = data[:] # <-- !!!! This would load the complete (1000000 x 1000000) array !!!!\n#\n# .. tip::\n#\n# As we have seen here, our data chunk iterator may produce chunks in arbitrary order and locations within the\n# array. In the case of the HDF5 I/O backend we need to take care that the selection we yield can be understood\n# by h5py.\n\n####################\n# Example: Convert large binary data arrays\n# -----------------------------------------------------\n#\n# When converting large data files, a typical problem is that it is often too expensive to load all the data\n# into memory. This example is very similar to the data generator example, except that instead of generating\n# data on-the-fly in memory we are loading data from a file one-chunk-at-a-time in our generator.\n#\n\n####################\n# Create example data\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nimport numpy as np\n# Create the test data\ndatashape = (100, 10) # OK, this is not really large, but we just want to show how it works\nnum_values = np.prod(datashape)\narrdata = np.arange(num_values).reshape(datashape)\n# Write the test data to disk\ntemp = np.memmap('basic_sparse_iterwrite_testdata.npy', dtype='float64', mode='w+', shape=datashape)\ntemp[:] = arrdata\ndel temp # Flush to disk\n\n####################\n# Step 1: Create a generator for our array\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# Note, we here use a generator for simplicity but we could equally well also implement our own\n# :py:class:`~hdmf.data_utils.AbstractDataChunkIterator`.\n\n\ndef iter_largearray(filename, shape, dtype='float64'):\n \"\"\"\n Generator reading [chunk_size, :] elements from our array in each iteration.\n \"\"\"\n for i in range(shape[0]):\n # Open the file and read the next chunk\n newfp = np.memmap(filename, dtype=dtype, mode='r', shape=shape)\n curr_data = newfp[i:(i + 1), ...][0]\n del newfp # Reopen the file in each iteration to prevent accumulation of data in memory\n yield curr_data\n return\n\n\n####################\n# Step 2: Wrap the generator in a DataChunkIterator\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n\nfrom hdmf.data_utils import DataChunkIterator\n\ndata = DataChunkIterator(data=iter_largearray(filename='basic_sparse_iterwrite_testdata.npy',\n shape=datashape),\n maxshape=datashape,\n buffer_size=10) # Buffer 10 elements into a chunk, i.e., create chunks of shape 
(10,10)\n\n\n####################\n# Step 3: Write the data as usual\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n\nwrite_test_file(filename='basic_sparse_iterwrite_largearray.nwb',\n data=data)\n\n####################\n# .. tip::\n#\n# Again, if we want to explicitly control how our data will be chunked (compressed etc.)\n# in the HDF5 file then we need to wrap our :py:class:`~hdmf.data_utils.DataChunkIterator`\n# using :py:class:`~hdmf.backends.hdf5.h5_utils.H5DataIO`.\n\n####################\n# Discussion\n# ^^^^^^^^^^\n# Let's verify that our data was written correctly\n\n# Read the NWB file\nfrom pynwb import NWBHDF5IO # noqa\n\nio = NWBHDF5IO('basic_sparse_iterwrite_largearray.nwb', 'r')\nnwbfile = io.read()\ndata = nwbfile.get_acquisition('synthetic_timeseries').data\n# Compare all the data values of our two arrays\ndata_match = np.all(arrdata == data[:]) # Don't do this for very large arrays!\n# Print result message\nif data_match:\n print(\"Success: All data values match\")\nelse:\n print(\"ERROR: Mismatch between data\")\n\n\n####################\n# ``[Out]:``\n#\n# .. code-block:: python\n#\n# Success: All data values match\n\n\n####################\n# Example: Convert arrays stored in multiple files\n# -----------------------------------------------------\n#\n# In practice, data from recording devices may be distributed across many files, e.g., one file per time range\n# or one file per recording channel. Using iterative data write provides an elegant solution to this problem\n# as it allows us to process large arrays one-subarray-at-a-time. To make things more interesting we'll show\n# this for the case where each recording channel (i.e., the second dimension of our ``TimeSeries``) is broken up\n# across files.\n\n####################\n# Create example data\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nimport numpy as np\n# Create the test data\nnum_channels = 10\nnum_steps = 100\nchannel_files = ['basic_sparse_iterwrite_testdata_channel_%i.npy' % i for i in range(num_channels)]\nfor f in channel_files:\n temp = np.memmap(f, dtype='float64', mode='w+', shape=(num_steps,))\n temp[:] = np.arange(num_steps, dtype='float64')\n del temp # Flush to disk\n\n#####################\n# Step 1: Create a data chunk iterator for our multifile array\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nfrom hdmf.data_utils import AbstractDataChunkIterator, DataChunk # noqa\n\n\nclass MultiFileArrayIterator(AbstractDataChunkIterator):\n\n def __init__(self, channel_files, num_steps):\n \"\"\"\n :param channel_files: List of files with the channels\n :param num_steps: Number of timesteps per channel\n :return:\n \"\"\"\n self.shape = (num_steps, len(channel_files))\n self.channel_files = channel_files\n self.num_steps = num_steps\n self.__curr_index = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n \"\"\"\n Return in each iteration the data from a single file\n \"\"\"\n # Use the instance's file list rather than the module-level global\n if self.__curr_index < len(self.channel_files):\n newfp = np.memmap(self.channel_files[self.__curr_index],\n dtype='float64', mode='r', shape=(self.num_steps,))\n curr_data = newfp[:]\n i = self.__curr_index\n self.__curr_index += 1\n del newfp\n return DataChunk(data=curr_data,\n selection=np.s_[:, i])\n else:\n raise StopIteration\n\n next = __next__\n\n def recommended_chunk_shape(self):\n return None # Use autochunking\n\n def recommended_data_shape(self):\n return self.shape\n\n @property\n def dtype(self):\n return np.dtype('float64')\n\n @property\n 
def maxshape(self):\n return self.shape\n\n\n#####################\n# Step 2: Instantiate our multi file iterator\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n\ndata = MultiFileArrayIterator(channel_files, num_steps)\n\n####################\n# Step 3: Write the data as usual\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n\nwrite_test_file(filename='basic_sparse_iterwrite_multifile.nwb',\n data=data)\n\n####################\n# Discussion\n# ^^^^^^^^^^\n#\n# That's it ;-)\n#\n# .. tip::\n#\n# Common mistakes that will result in errors on write:\n#\n# * The size of a :py:class:`~hdmf.data_utils.DataChunk` does not match the selection.\n# * The selection for the :py:class:`~hdmf.data_utils.DataChunk` is not supported by h5py\n# (e.g., unordered lists etc.)\n#\n# Other common mistakes:\n#\n# * Choosing inappropriate chunk sizes. This typically means bad performance with regard to I/O and/or storage cost.\n# * Using auto chunking without supplying a good recommended_data_shape. h5py auto chunking can only make a good\n# guess of what the chunking should be if it (at least roughly) knows what the shape of the array will be.\n# * Trying to wrap a data generator using the default :py:class:`~hdmf.data_utils.DataChunkIterator`\n# when the generator does not comply with the assumptions of the default implementation (i.e., yield\n# individual, complete elements along the first dimension of the array one-at-a-time). Depending on the generator,\n# this may or may not result in an error on write, but the array you are generating will probably end up\n# at least not having the intended shape.\n#\n"
]
| [
[
"numpy.memmap",
"numpy.prod",
"numpy.arange",
"numpy.all",
"numpy.dtype"
]
]
|
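Every custom iterator in the tutorial above reduces to the same contract: each `DataChunk` pairs a block of values with a numpy-style `selection` that locates the block inside the full dataset. A dependency-free sketch of what the write backend does with each chunk, using a dense in-memory numpy array in place of the HDF5 dataset (h5py accepts the same kind of regular slice assignment):

import numpy as np

full = np.zeros((6, 4))

# Each pair mimics DataChunk(data=..., selection=...): the selection is
# a plain tuple of slices built with np.s_, exactly as in the tutorial.
chunks = [
    (np.ones((2, 2)), np.s_[0:2, 0:2]),
    (2 * np.ones((2, 2)), np.s_[4:6, 2:4]),
]

for data, selection in chunks:
    # The backend applies each selection to the target dataset in turn;
    # chunks may arrive in any order and at any location.
    full[selection] = data

assert full[0, 0] == 1.0 and full[5, 3] == 2.0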
Northengard/graphx-conv | [
"051c1bb3e9d0f7086acf54234c8169da1da49530"
]
| [
"src/train.py"
]
| [
"import argparse\n\nparser = argparse.ArgumentParser('GraphX-convolution')\nparser.add_argument('config_file', type=str, help='config file to dictate training/testing')\nparser.add_argument('-g', '--gpu', type=int, default=0, help='gpu number')\nargs = parser.parse_args()\n\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)\n\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import WeightedRandomSampler\nimport neuralnet_pytorch.gin_nnt as gin\n\nfrom networks import *\nfrom data_loader import ShapeNet, collate\n\nconfig_file = args.config_file\ngin.external_configurable(CNN18Encoder, 'cnn18_enc')\ngin.external_configurable(PointCloudEncoder, 'pc_enc')\ngin.external_configurable(PointCloudDecoder, 'pc_dec')\ngin.external_configurable(PointCloudResDecoder, 'pc_resdec')\ngin.external_configurable(PointCloudResGraphXUpDecoder, 'pc_upresgraphxdec')\ngin.external_configurable(PointCloudResLowRankGraphXUpDecoder, 'pc_upreslowrankgraphxdec')\n\n\[email protected]('GraphX')\ndef train_valid(data_root, name, img_enc, pc_enc, pc_dec, optimizer, scheduler, adain=True, projection=True,\n decimation=None, color_img=False, n_points=250, bs=4, lr=5e-5, weight_decay=1e-5, gamma=.3,\n milestones=(5, 8), n_epochs=10, print_freq=1000, val_freq=10000, checkpoint_folder=None):\n if decimation is not None:\n pc_dec = partial(pc_dec, decimation=decimation)\n\n net = PointcloudDeformNet((bs,) + (3 if color_img else 1, 224, 224), (bs, n_points, 3), img_enc, pc_enc, pc_dec,\n adain=adain, projection=projection, weight_decay=None)\n print(net)\n solver = T.optim.Adam(net.trainable, 1e-4, weight_decay=0) if optimizer is None \\\n else optimizer(net.trainable, lr, weight_decay=weight_decay)\n scheduler = scheduler(solver, milestones=milestones, gamma=gamma) if scheduler is not None else None\n\n train_data = ShapeNet(path=data_root, grayscale=not color_img, type='train', n_points=n_points)\n sampler = WeightedRandomSampler(train_data.sample_weights, len(train_data), True)\n train_loader = DataLoader(train_data, batch_size=bs, num_workers=1, collate_fn=collate, drop_last=True,\n sampler=sampler)\n\n val_data = ShapeNet(path=data_root, grayscale=not color_img, type='valid', num_vals=10 * len(os.listdir(data_root)),\n n_points=n_points)\n val_loader = DataLoader(val_data, batch_size=bs, shuffle=False, num_workers=1, collate_fn=collate, drop_last=True)\n\n mon.model_name = name\n mon.print_freq = print_freq\n mon.num_iters = len(train_data) // bs\n mon.set_path(checkpoint_folder)\n if checkpoint_folder is None:\n backups = os.listdir('.')\n mon.backup(backups, ignore=('results', '*.pyc', '__pycache__', '.idea'))\n mon.dump_rep('network', net)\n mon.dump_rep('optimizer', solver)\n if scheduler is not None:\n mon.dump_rep('scheduler', scheduler)\n\n def save_checkpoint():\n states = {\n 'states': mon.epoch,\n 'model_state_dict': net.state_dict(),\n 'opt_state_dict': solver.state_dict()\n }\n if scheduler is not None:\n states['scheduler_state_dict'] = scheduler.state_dict()\n\n mon.dump(name='training.pt', obj=states, method='torch', keep=5)\n\n mon.schedule(save_checkpoint, when=mon._end_epoch_)\n print('Training...')\n else:\n states = mon.load('training.pt', type='torch')\n mon.epoch = states['epoch']\n net.load_state_dict(states['model_state_dict'])\n net.optim['optimizer'].load_state_dict(states['opt_state_dict'])\n if net.optim['scheduler']:\n net.optim['scheduler'].load_state_dict(states['scheduler_state_dict'])\n\n print('Resume from epoch %d...' 
% mon.epoch)\n\n mon.run_training(net, solver, train_loader, n_epochs, scheduler=scheduler, eval_loader=val_loader,\n valid_freq=val_freq, reduce='mean', device='cuda')\n print('Training finished!')\n\n\nif __name__ == '__main__':\n gin.parse_config_file(config_file)\n train_valid()\n"
]
| [
[
"torch.utils.data.DataLoader"
]
]
|
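`train_valid` above balances the ShapeNet categories by feeding per-sample weights (`train_data.sample_weights`) into a `WeightedRandomSampler`, which then drives the `DataLoader`. A minimal sketch of that PyTorch pattern on a toy imbalanced dataset (the dataset, sizes, and weighting scheme here are illustrative, not taken from the repo):

import torch
from torch.utils.data import DataLoader, TensorDataset, WeightedRandomSampler

# Toy imbalanced dataset: 90 samples of class 0, 10 samples of class 1.
features = torch.randn(100, 3)
labels = torch.cat([torch.zeros(90), torch.ones(10)]).long()
dataset = TensorDataset(features, labels)

# Weight each sample inversely to its class frequency so that both
# classes are drawn roughly equally often during an epoch.
class_counts = torch.bincount(labels).float()
sample_weights = 1.0 / class_counts[labels]

# replacement=True mirrors the third argument in train_valid's call.
sampler = WeightedRandomSampler(sample_weights, num_samples=len(dataset), replacement=True)
loader = DataLoader(dataset, batch_size=4, sampler=sampler)

x, y = next(iter(loader))  # batches now contain class 1 far more often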
stencila/libdh | [
"41b0dc826e6a6af3390877736185ed90b52459a2"
]
| [
"funcs/word_cloud_plot.py"
]
| [
"import matplotlib.pyplot as plt\r\nimport random\r\n\r\nfrom word_cloud import word_cloud\r\n\r\ndef word_cloud_plot(values):\r\n \"\"\"\r\n Plot a word cloud\r\n\r\n :param values: Is either a string or an array containing strings\r\n :return A matplotlib of the actual n gram\r\n\r\n e.g.\r\n\r\n word_cloud_plot(A1:10)\r\n \"\"\"\r\n\r\n coords = []\r\n text_colors = ['red','green','black','blue','purple'] #set list of colors\r\n def draw(x, y, fs, col, r):\r\n global t #allo\r\n t = plt.text(x, y, key, fontsize=fs,color= col,alpha=alpha_level) #plots text at random position\r\n\r\n #gets information about text box\r\n bb = t.get_window_extent(renderer=r)\r\n width = bb.width\r\n height = bb.height\r\n\r\n #checks to see if current text overlaps other text\r\n for c in coords:\r\n #if yes, remove it and try again\r\n if (((x > c[0] and x < c[1]) or (x + width/500 > c[0] and x + width/500 < c[1])) and ((y > c[2] and y < c[3]) or (y + height/500 > c[2] and y + height/500 < c[3]))):\r\n t.remove()\r\n x = random.uniform(0.05, 0.85)\r\n y = random.uniform(0.05, 0.85)\r\n draw(x, y, fs, col,r)\r\n coords.append([x, x+(width/500), y, y+(height/500)]) #color is randomly selected from list above\r\n dict = word_cloud(values)\r\n max_freq = dict[0][1] #the max_frequency is the value of the first word\r\n alpha_level = 1\r\n\r\n f = plt.figure()\r\n r = f.canvas.get_renderer()\r\n for key, val in dict: #add words to matplotlib at random coordinates with font size relative to max frequency, rotate every other word\r\n x = random.uniform(0.05, 0.85)\r\n y = random.uniform(0.05, 0.85)\r\n fs = (float(val)/max_freq)*40 #scales font size according to relation to maximum frequency\r\n col = text_colors[random.randint(0,len(text_colors)-1)] #selects random color from color list\r\n draw(x,y,fs,col,r) #calls draw function to draw text\r\n alpha_level *= .97 #the transparency level decreases as the word frequency decreases\r\n plt.axis('off') #removes axes so it's just an image\r\n return plt\r\n"
]
| [
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
]
]
|
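The recursive `draw` in `word_cloud_plot` re-samples a position whenever the candidate text box overlaps one already recorded in `coords`. Its chained interval comparisons approximate an axis-aligned rectangle intersection test (they check corner containment); the full test can be stated more compactly, as in this small pure-Python sketch using the same normalized figure coordinates (values are illustrative):

def rects_overlap(a, b):
    """Each rect is (xmin, xmax, ymin, ymax) in figure coordinates.
    Two rectangles intersect iff their spans overlap on both axes."""
    return a[0] < b[1] and b[0] < a[1] and a[2] < b[3] and b[2] < a[3]

placed = [(0.10, 0.30, 0.10, 0.20)]
candidate = (0.25, 0.40, 0.15, 0.25)

# An overlap means the word would be removed and redrawn at a fresh
# random position, exactly as draw() does recursively.
assert rects_overlap(candidate, placed[0])
assert not rects_overlap((0.50, 0.60, 0.50, 0.60), placed[0])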
exoristos21/exostriker | [
"85cee34744bcd6e960dcdffc9140bb1d9107982e"
]
| [
"exostriker/lib/detrend_window.py"
]
| [
"# -*- coding: utf-8 -*-\nimport pyqtgraph as pg\nfrom PyQt5 import QtCore, QtGui, QtWidgets, uic\n\nimport numpy as np\nimport os, sys\nfrom print_info_window import print_info\nfrom worker import Worker\nfrom multiprocessing import cpu_count\nimport gls as gls\nimport dill\nimport RV_mod as rv\nimport pg_hack\n\n\n#qtCreatorFile = \"./lib/UI/tdt.ui\" \n#Ui_DetrendWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)\n#import time\n#start_time = time.time()\n\ntry:\n from tdt import Ui_Detrend as Ui_DetrendWindow\nexcept (ImportError, KeyError,ModuleNotFoundError) as e:\n qtCreatorFile = \"./lib/UI/tdt.ui\" #%lib_path \n Ui_DetrendWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)\n#print(\"--- %s seconds ---\" % (time.time() - start_time))\n\ntry:\n import sklearn\n sklearn_found = True\nexcept (ImportError, KeyError,ModuleNotFoundError) as e:\n sklearn_found = False\n pass\n\ntry:\n import statsmodels\n statsmodels_found = True\nexcept (ImportError, KeyError,ModuleNotFoundError) as e:\n statsmodels_found = False\n pass\n\ntry:\n import pygam\n pygam_found = True\nexcept (ImportError, KeyError,ModuleNotFoundError) as e:\n pygam_found = False\n pass\n\ntry:\n import supersmoother\n supersmoother_found = True\nexcept (ImportError, KeyError,ModuleNotFoundError) as e:\n supersmoother_found = False\n pass\n\ntry:\n from wotan import flatten\n wotan_found = True\nexcept (ImportError, KeyError,ModuleNotFoundError) as e:\n wotan_found = False\n print(\"wotan not found!\")\n pass\n\n\n\n\nclass DetrendWindow(QtWidgets.QWidget, Ui_DetrendWindow):\n def __init__(self,parent):\n super(DetrendWindow, self).__init__()\n\n QtWidgets.QWidget.__init__(self)\n Ui_DetrendWindow.__init__(self)\n # self.setWindowTitle('Transit detrending options')\n self.font = QtGui.QFont()\n self.font.setPointSize(9)\n self.font.setBold(False)\n self.threadpool_DT = QtCore.QThreadPool()\n self.threadpool_DT.setMaxThreadCount(cpu_count()) \n \n self.parent=parent\n # Create the main window\n self.ui = Ui_DetrendWindow()\n self.ui.setupUi(self)\n self.setWindowIcon(QtGui.QIcon('./lib/UI/33_striker.png'))\n\n self.sklearn_found = sklearn_found\n self.statsmodels_found = statsmodels_found\n self.pygam_found = pygam_found\n self.supersmoother_found = supersmoother_found\n self.wotan_found = wotan_found\n \n self.ui.radio_GPs.setEnabled(sklearn_found)\n self.ui.comboBox_GP.setEnabled(sklearn_found)\n self.ui.kernel_size.setEnabled(sklearn_found)\n \n if wotan_found == False:\n self.ui.radio_timeW.setEnabled(wotan_found)\n self.ui.radio_Splines.setEnabled(wotan_found)\n self.ui.radio_Polynomials.setEnabled(wotan_found)\n self.ui.radio_Regressions.setEnabled(wotan_found)\n self.ui.radio_GPs.setEnabled(wotan_found)\n\n\n\n self.t_store = {k: [] for k in range(20)}\n self.flux_store = {k: [] for k in range(20)}\n self.flux_err_store = {k: [] for k in range(20)}\n self.flux_o_c_store = {k: [] for k in range(20)}\n self.flux_err_o_c_store = {k: [] for k in range(20)}\n self.trend_store = {k: [] for k in range(20)}\n self.airmass_store = {k: [] for k in range(20)}\n\n\n self.t = []\n self.old_t = []\n self.flux_o_c = []\n self.airmass = []\n \n self.initialize_plots()\n\n self.init_comboBox_regres()\n self.init_comboBox_sliders()\n self.init_comboBox_poly()\n self.init_comboBox_splines()\n self.init_comboBox_GP()\n\n self.ui.try_button.clicked.connect(self.worker_detrend)\n self.ui.saveProduct.clicked.connect(self.save_data_product)\n self.ui.readme_button.clicked.connect(self.info)\n 
self.ui.print_stat.clicked.connect(self.print_stat_window)\n\n self.info_dialog = print_info(self)\n self.stat_dialog = print_info(self)\n\n\n self.ui.buttonGroup_plot2.buttonClicked.connect(self.replot)\n\n self.ui.buttonGroup_trendOptions.buttonClicked.connect(self.update_labels)\n\n self.ui.click_to_reject.clicked.connect(self.top_plot)\n self.ui.reset_data.clicked.connect(self.reset_data)\n self.ui.add_epoch.clicked.connect(self.add_bjd)\n\n self.ui.button_bin_data.clicked.connect(self.bin_data) \n \n self.ui.apply_dilution.clicked.connect(self.add_dilution)\n\n def replot(self):\n \n if self.ui.GLS_of_data.isChecked():\n self.make_GLS()\n self.plot_GLS()\n elif self.ui.GLS_of_model.isChecked():\n self.make_GLS(model=True)\n self.plot_GLS()\n elif self.ui.GLS_of_detr_data.isChecked():\n self.make_GLS(model=False,o_c=True)\n self.plot_GLS()\n elif self.ui.flatten_data.isChecked():\n self.bottom_plot_lc()\n\n\n def init_data(self):\n\n self.t = self.parent.tra_data[0]\n self.flux = self.parent.tra_data[4]\n self.airmass = self.parent.tra_data[3]\n \n self.flux_err = self.parent.tra_data[2]\n self.data_file_name = self.parent.tra_data[-1]\n self.old_t = dill.copy(self.t)\n return\n\n\n def add_dilution(self):\n\n D_flux = self.flux/(self.ui.Dilution_fact.value())\n self.flux = D_flux - np.median(D_flux) * (1.0 - self.ui.Dilution_fact.value())\n\n D_flux_err = self.flux_err/(self.ui.Dilution_fact.value())\n self.flux_err = D_flux_err\n\n self.ui.radio_remove_median.setChecked(True)\n #self.plot()\n self.worker_detrend()\n\n \n return\n\n\n def add_bjd(self):\n\n self.t = self.t + self.ui.extra_BJD.value()\n self.ui.radio_remove_median.setChecked(True)\n #self.plot()\n self.worker_detrend()\n \n return\n\n def bin_data(self):\n \n self.ui.radio_remove_median.setChecked(True)\n\n\n self.ui.try_button.setEnabled(False)\n #self.ui.label_working.setText(\"Working!!!\")\n self.ui.try_button.setText(\"Working!!!\") \n #self.ui.try_button.setText(\"Working!!!\")\n #print('test 1')\n t_, flux_, flux_err_, ind = rv.bin_data(self.t,self.flux,self.flux_err, np.zeros(len(self.t)), bin_size =self.ui.bin_data.value()) \n #print('test 2')\n self.ui.try_button.setEnabled(True)\n self.ui.try_button.setText(\"Try !\")\n\n self.t = t_ \n self.flux = flux_\n self.flux_err = flux_err_\n \n self.worker_detrend()\n \n return\n\n\n\n def reset_data(self):\n \n self.ui.radio_remove_median.setChecked(True)\n self.t = []\n self.worker_detrend()\n \n return\n\n\n def calculate(self):\n\n \n if self.ui.radio_remove_median.isChecked():\n\n flatten_lc1 = self.flux/np.median(self.flux)\n trend_lc1 = np.ones(len(self.flux))*np.median(self.flux)\n\n elif self.ui.radio_remove_mean.isChecked():\n\n flatten_lc1 = self.flux/np.mean(self.flux)\n trend_lc1 = np.ones(len(self.flux))*np.mean(self.flux)\n\n elif self.ui.radio_timeW.isChecked():\n flatten_lc1, trend_lc1 = flatten(\n self.t, # Array of time values\n self.flux , # Array of flux values\n method=str(self.ui.comboBox_sliders.currentText()),\n window_length=self.ui.sliders_wl.value(), # The length of the filter window in units of ``time``\n# break_tolerance=self.ui.spline_bt.value(), # Split into segments at breaks longer than that\n return_trend=True, # Return trend and flattened light curve\n )\n\n elif self.ui.radio_Splines.isChecked():\n flatten_lc1, trend_lc1 = flatten(\n self.t, # Array of time values\n self.flux , # Array of flux values\n method=str(self.ui.comboBox_splines.currentText()),\n window_length=self.ui.spline_wl.value(), # The length of the filter window 
in units of ``time``\n break_tolerance=self.ui.spline_bt.value(), # Split into segments at breaks longer than that\n return_trend=True, # Return trend and flattened light curve\n )\n\n elif self.ui.radio_Polynomials.isChecked():\n\n flatten_lc1, trend_lc1 = flatten(\n self.t, # Array of time values\n self.flux , # Array of flux values\n method=str(self.ui.comboBox_poly.currentText()),\n window_length=self.ui.poly_wl.value(), # The length of the filter window in units of ``time``\n break_tolerance=self.ui.poly_bt.value(), # Split into segments at breaks longer than that\n return_trend=True, # Return trend and flattened light curve\n )\n\n elif self.ui.radio_Regressions.isChecked():\n\n flatten_lc1, trend_lc1 = flatten(\n self.t, # Array of time values\n self.flux , # Array of flux values\n method=str(self.ui.comboBox_regs.currentText()),\n window_length=self.ui.regres_wl.value(), # The length of the filter window in units of ``time``\n break_tolerance=self.ui.regres_bt.value(), # Split into segments at breaks longer than that\n return_trend=True, # Return trend and flattened light curve\n )\n \n elif self.ui.radio_GPs.isChecked():\n\n flatten_lc1, trend_lc1 = flatten(\n self.t, # Array of time values\n self.flux , # Array of flux values\n method='gp',\n kernel = str(self.ui.comboBox_GP.currentText()),\n kernel_size=self.ui.kernel_size.value(),\n break_tolerance=self.ui.regres_bt.value(), # Split into segments at breaks longer than that\n kernel_period = self.ui.GP_period.value(),\n robust = self.ui.checkBox_GP_robust.isChecked(),\n return_trend=True # Return trend and flattened light curve\n )\n\n else:\n flatten_lc1 = self.flux \n trend_lc1 = np.ones(len(self.flux))*np.median(self.flux)\n\n\n self.flux_o_c = flatten_lc1\n self.trend = trend_lc1\n self.flux_err_o_c = self.flux_err/trend_lc1\n \n\n\n\n def worker_detrend_complete(self):\n\n #self.ui.label_working.setText(\"\")\n self.ui.try_button.setText(\"Try !\")\n self.ui.try_button.setEnabled(True)\n self.ui.flatten_data.setChecked(True)\n self.old_t = dill.copy(self.t)\n\n\n self.plot()\n self.show()\n\n return\n \n def worker_detrend(self):\n \n if len(self.t) == 0:\n self.init_data()\n\n self.ui.try_button.setEnabled(False)\n #self.ui.label_working.setText(\"Working!!!\")\n self.ui.try_button.setText(\"Working!!!\")\n worker_detrend_wk = Worker(self.calculate)# Any other args, kwargs are passed to the run \n worker_detrend_wk.signals.finished.connect(self.worker_detrend_complete)\n\n # worker.signals.result.connect(self.print_output)\n #worker.signals.finished.connect(self.thread_complete)\n # worker.signals.progress.connect(self.progress_fn)\n self.threadpool_DT.start(worker_detrend_wk)\n\n\n\n def plot(self):\n\n self.top_plot()\n self.bottom_plot_lc()\n\n\n def top_plot(self):\n global p_1\n \n if self.ui.click_to_reject.isChecked():\n symbolSize=6\n else:\n symbolSize=2\n\n self.ui.plot.plot(clear=True,)\n\n ######## Top plot ############\n\n self.ui.plot.plot(self.t,self.flux, pen=None,\n symbol='o', symbolPen={'color': '#0066ff', 'width': 1.1},\n symbolSize=symbolSize,enableAutoRange=True,viewRect=True,\n symbolBrush='#0066ff')\n \n err_ = pg.ErrorBarItem(x=self.t, y=self.flux, symbol = 'o',\n top=self.flux_err, \n bottom=self.flux_err,\n beam=0.0, pen='#0066ff')\n\n self.ui.plot.addItem(err_)\n\n model_curve = self.ui.plot.plot(self.t, self.trend , pen={'color': '#000000', 'width': 3}, enableAutoRange=True,viewRect=True ) \n model_curve.setZValue(1)\n\n 
self.ui.plot.plotItem.items[1].sigPointsClicked.connect(self.plotClicked)\n\n\n def bottom_plot_lc(self):\n #global p_2\n \n self.ui.plot_2.plot(clear=True,)\n ######## Bottom plot ############\n self.ui.plot_2.setLogMode(False,False)\n\n self.ui.plot_2.plot(self.t,self.flux_o_c, pen=None,\n symbol='o', symbolPen={'color': '#0066ff', 'width': 1.1},\n symbolSize=2,enableAutoRange=True,viewRect=True,\n symbolBrush='#0066ff')\n \n self.ui.plot_2.setLabel('left', 'Flux', units='', **{'font-size':'9pt'})\n\n\n err_ = pg.ErrorBarItem(x=self.t, y=self.flux_o_c, symbol = 'o',\n top=self.flux_err_o_c, \n bottom=self.flux_err_o_c,\n beam=0.0, pen='#0066ff')\n \n self.ui.plot_2.addItem(err_)\n \n \n \n\n def plotClicked(self,curve,datas):\n\n if self.ui.click_to_reject.isChecked() == False:\n return\n\n rem_x,rem_y = datas[0].pos()\n print(\"Removed x,y: \",rem_x,rem_y)\n\n self.old_t = dill.copy(self.t)\n\n self.t = dill.copy(self.t[self.old_t != rem_x])\n self.flux = dill.copy(self.flux[self.old_t != rem_x])\n self.flux_err = dill.copy(self.flux_err[self.old_t != rem_x])\n self.flux_o_c = dill.copy(self.flux_o_c[self.old_t != rem_x])\n self.flux_err_o_c = dill.copy(self.flux_err_o_c[self.old_t != rem_x])\n self.trend = dill.copy(self.trend[self.old_t != rem_x])\n self.airmass = dill.copy(self.airmass[self.old_t != rem_x])\n\n self.ui.plot.plotItem.items[1].setData(x=self.t,y=self.flux)\n self.ui.plot.plotItem.items[2].setData(x=self.t, y=self.flux, \n top=self.flux_err, \n bottom=self.flux_err)\n\n \n if self.ui.flatten_data.isChecked():\n self.ui.plot_2.plotItem.items[1].setData(x=self.t,y=self.flux_o_c)\n self.ui.plot_2.plotItem.items[2].setData(x=self.t, y=self.flux_o_c, \n top=self.flux_err_o_c, \n bottom=self.flux_err_o_c)\n else:\n self.replot()\n #self.plot()\n\n\n\n def make_GLS(self, model=False, o_c=False):\n\n #omega = 1/ np.logspace(np.log10(self.parent.gls_min_period.value()), np.log10(self.parent.gls_max_period.value()), num=int(self.parent.gls_n_omega.value())) \n #omega = 1/ np.logspace(np.log10(0.9), np.log10((max(self.t)-min(self.t))*2.0), num=int(self.parent.gls_n_omega.value()))\n ind_norm = self.parent.gls_norm_combo.currentIndex()\n\n if model == False and o_c == False:\n data_for_GLS = self.flux\n e_data_for_GLS = self.flux_err\n elif model == False and o_c == True:\n data_for_GLS = self.flux_o_c\n e_data_for_GLS = self.flux_err_o_c\n else:\n data_for_GLS = self.trend\n e_data_for_GLS = self.flux_err\n\n self.trend_per = gls.Gls((self.t, data_for_GLS, e_data_for_GLS), \n # fast=True, verbose=False, norm= \"ZK\",ofac=self.parent.gls_ofac.value(), fbeg=omega[-1], fend=omega[ 0],)\n fast=True, verbose=False, norm=self.parent.norms[ind_norm],ofac=self.parent.gls_ofac.value(), fbeg=1/self.parent.gls_max_period.value(), fend=1/self.parent.gls_min_period.value()) \n\n\n def plot_GLS(self):\n #global p_2\n\n self.ui.plot_2.plot(clear=True,)\n\n power_levels = np.array([self.parent.gls_fap1.value(),self.parent.gls_fap2.value(),self.parent.gls_fap3.value()])\n\n ######################## GLS ##############################\n if self.parent.radioButton_act_GLS_period.isChecked():\n self.ui.plot_2.setLogMode(True,False)\n self.ui.plot_2.plot(1.0/self.trend_per.freq, self.trend_per.power,pen='r',symbol=None ) \n self.ui.plot_2.setLabel('bottom', 'period [d]', units='', **{'font-size':'9pt'}) \n self.ui.plot_2.setLabel('left', 'Power', units='', **{'font-size':'9pt'})\n\n else:\n self.ui.plot_2.setLogMode(False,False) \n self.ui.plot_2.plot(self.trend_per.freq, 
self.trend_per.power,pen='r',symbol=None )\n self.ui.plot_2.setLabel('bottom', 'frequency [1/d]', units='', **{'font-size':'9pt'}) \n\n [self.ui.plot_2.addLine(x=None, y=fap, pen=pg.mkPen('k', width=0.8, style=QtCore.Qt.DotLine)) for ii,fap in enumerate(self.trend_per.powerLevel(np.array(power_levels)))]\n\n\n\n\n def closeEvent(self, event):\n \n if len(self.old_t) != len(self.t):\n choice = QtWidgets.QMessageBox.information(self, 'Warning!',\n \"It seems that you removed data, but you did not refit! This is not allowed. Please press the 'Try!' button and then close\", QtWidgets.QMessageBox.Ok)\n event.ignore()\n\n \n \n else:\n ret = QtWidgets.QMessageBox.question(None, 'Close request', 'Are you sure you want to quit?',\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n QtWidgets.QMessageBox.Yes)\n if ret == QtWidgets.QMessageBox.Yes:\n \n self.t_store[self.parent.tra_data_index] = self.t\n self.flux_store[self.parent.tra_data_index] = self.flux\n self.flux_err_store[self.parent.tra_data_index] = self.flux_err\n self.flux_o_c_store[self.parent.tra_data_index] = self.flux_o_c\n self.flux_err_o_c_store[self.parent.tra_data_index] = self.flux_err_o_c\n self.trend_store[self.parent.tra_data_index] = self.trend\n\n self.airmass_store[self.parent.tra_data_index] = self.airmass\n \n \n self.ui.radio_remove_median.setChecked(True)\n QtWidgets.QMainWindow.closeEvent(self, event)\n else:\n event.ignore()\n\n\n def save_data_product(self):\n\n output_file = QtWidgets.QFileDialog.getSaveFileName(self, 'Save detrended data file', 'detrended_%s'%self.data_file_name, 'All (*.*);;Data (*.tran)', options=QtWidgets.QFileDialog.DontUseNativeDialog)\n \n if str(output_file[0]) != '':\n f = open(output_file[0], 'w')\n f.write(\"# BJD Detrended data Detrended data errors Airmass Original data Original data errors Model applied \\n\")\n for i in range(len(self.t)):\n f.write('{0:{width}.{precision}f} {1:{width}.{precision}f} {2:{width}.{precision}f} {3:{width}.{precision}f} {4:{width}.{precision}f} {5:{width}.{precision}f} {6:{width}.{precision}f}\\n'.format(\n float(self.t[i]), \n float(self.flux_o_c[i]), \n float(self.flux_err_o_c[i]), \n float(self.airmass[i]),\n float(self.flux[i]), \n float(self.flux_err[i]), \n float(self.trend[i]), \n width = 14, precision = 7 ))\n f.close()\n\n def print_stat_window(self):\n\n self.stat_dialog.setFixedSize(550, 600)\n self.stat_dialog.setWindowTitle('Detrending stat. info')\n \n\n text_info = \"\"\" \n\"\"\"\n self.stat_dialog.text.setText(text_info) \n\n ################## text generator #################\n text_info = \"\"\" \n----------------------------------- \n\nN data : %s\n\nfirst epoch : %.3f\nlast epoch : %.3f\ntime span : %.3f\n\nmin. value : %.4f\nmax. 
value : %.4f\nend-to-end : %.4f\nmean : %.4f\nmedian : %.4f\nrms : %.4f\n\nmin error : %.4f\nmax error : %.4f\nmean error : %.4f\nmedian error : %.4f\n\n\"\"\"%(len(self.t), \nself.t[0], \nself.t[-1], \nself.t[-1]-self.t[0], \nnp.min(self.flux_o_c), \nnp.max(self.flux_o_c), \nnp.max(self.flux_o_c)-np.min(self.flux_o_c), \nnp.mean(self.flux_o_c), \nnp.median(self.flux_o_c), \nnp.sqrt(np.mean(np.square(self.flux_o_c))),\nnp.min(self.flux_err_o_c), \nnp.max(self.flux_err_o_c), \nnp.mean(self.flux_err_o_c), \nnp.median(self.flux_err_o_c))\n\n\n self.stat_dialog.text.append(text_info)\n\n self.stat_dialog.text.setReadOnly(True)\n #self.dialog.setWindowIcon (QtGui.QIcon('logo.png'))\n self.stat_dialog.show()\n\n\n\n def info(self):\n \n #self.info_dialog.setGeometry(300, 200, 150, 150)\n self.info_dialog.setFixedSize(550, 600)\n self.info_dialog.setWindowTitle('Detrending options info')\n \n \n text = ''\n self.info_dialog.text.setText(text) \n \n text = \"For more info on the detrending algorithms see <a href='https://github.com/hippke/wotan'>wotan</a>\" \n self.info_dialog.text.append(text)\n\n text = \"\"\"\n<br>\n<br>\nAs explained in \"wotan\", some algorithms request\nadditional dependencies, which are not included in \"wotan\", and thus, not included in the Exo-Striker \ndependencies list. For example:\n<br> \n<br> \"huber\", \"ramsay\", and \"hampel\" depend on \"statsmodels\"\n<br> \"Gaussian processes\", \"hspline\", \"ridge\", and \"lasso\" depend on \"sklearn\"\n<br> \"pspline\" depends on \"pygam\"\n<br> \"supersmoother\" depends on \"supersmoother\"\n<br> \n<br> To install all additional dependencies, try to install these python packages:\n<br> \n<br> * pip install statsmodels \n<br> * pip install sklearn \n<br> * pip install supersmoother\n<br> * pip install pygam\n<br> \n<br> Also, \"wotan\" depends on \"numba\" and \"llvmlite\" so it might be a good idea to update those two if you already have older versions.\n<br> \n<br>\n<br> If you made the use of the detrending options for your paper, please also cite: <a href='https://ui.adsabs.harvard.edu/abs/2019AJ....158..143H/abstract'> Hippke et al. 
(2019)</a>\n\"\"\"\n self.info_dialog.text.append(text)\n\n\n \n self.info_dialog.text.setReadOnly(True)\n #self.dialog.setWindowIcon (QtGui.QIcon('logo.png'))\n self.info_dialog.show()\n\n\n def update_labels(self):\n \n if self.ui.radio_GPs.isChecked():\n self.ui.label_method.setText(\"Kernel\")\n self.ui.label_wl.setText(\"Kernel size\")\n self.ui.label_tolerance.setText(\"Kernel period\")\n else:\n self.ui.label_method.setText(\"Method\")\n self.ui.label_wl.setText(\"Window length\")\n self.ui.label_tolerance.setText(\"break tolerance\")\n\n\n\n def init_comboBox_sliders(self):\n\n sliders = [\"biweight\",\"huber\",\"huber_psi\",\"hampel\",\"andrewsinewave\",\"welsch\",\"ramsay\",\"tau\",\"hodges\",\"median\",\n\"medfilt\",\"mean\",\"trim_mean\",\"winsorize\",\"hampelfilt\"] \n sliders_use = [self.wotan_found, self.statsmodels_found, self.wotan_found, self.statsmodels_found,\n self.wotan_found, self.wotan_found, self.statsmodels_found,self.wotan_found,self.wotan_found,\n self.wotan_found, self.wotan_found, self.wotan_found, self.wotan_found,self.wotan_found,self.wotan_found] \n \n for i in range(len(sliders)):\n if sliders_use[i] == True:\n self.ui.comboBox_sliders.addItem(sliders[i],i) \n\n def init_comboBox_poly(self):\n\n poly = [\"cofiam\",\"cosine\",\"savgol\"]\n for i in range(len(poly)):\n self.ui.comboBox_poly.addItem(poly[i],i) \n\n def init_comboBox_splines(self):\n\n splines = [\"rspline\",\"hspline\",\"pspline\"]\n splines_use = [True,self.sklearn_found,self.pygam_found]\n\n for i in range(len(splines)):\n if splines_use[i] == True:\n self.ui.comboBox_splines.addItem(splines[i],i) \n\n def init_comboBox_regres(self):\n\n regres = [\"lowess\",\"supersmoother\",\"ridge\",\"lasso\"]\n regres_use = [self.wotan_found,self.supersmoother_found,self.sklearn_found,self.sklearn_found]\n \n for i in range(len(regres)):\n if regres_use[i] == True:\n self.ui.comboBox_regs.addItem(regres[i],i) \n\n def init_comboBox_GP(self):\n\n gps = [\"squared_exp\",\"matern\",\"periodic\",\"periodic_auto\"]\n for i in range(len(gps)):\n self.ui.comboBox_GP.addItem(gps[i],i) \n\n\n def initialize_plots(self):\n\n #global p_1,p_2\n \n xaxis = ['BJD [days]','BJD [days]']\n yaxis = ['Flux','Flux']\n xunit = ['' ,'']\n yunit = ['' ,'' ]\n\n #p_1 = self.ui.plot\n #p_2 = self.ui.plot_2\n\n zzz = [self.ui.plot,self.ui.plot_2]\n\n\n for i in range(len(zzz)):\n\n zzz[i].setAxisItems({'bottom': pg_hack.CustomAxisItem('bottom')})\n \n #zzz[i].getAxis(\"bottom\").tickFont = self.font\n zzz[i].getAxis(\"bottom\").setStyle(tickTextOffset = 12, tickFont = self.font)\n #zzz[i].getAxis(\"top\").tickFont = self.font\n zzz[i].getAxis(\"top\").setStyle(tickTextOffset = 12, tickFont = self.font)\n #zzz[i].getAxis(\"left\").tickFont = self.font\n zzz[i].getAxis(\"left\").setStyle(tickTextOffset = 12, tickFont = self.font)\n #zzz[i].getAxis(\"right\").tickFont = self.font\n zzz[i].getAxis(\"right\").setStyle(tickTextOffset = 12, tickFont = self.font)\n zzz[i].getAxis('left').setWidth(50)\n zzz[i].getAxis('right').setWidth(10)\n zzz[i].getAxis('top').setHeight(10)\n zzz[i].getAxis('bottom').setHeight(50)\n\n zzz[i].setLabel('bottom', '%s'%xaxis[i], units='%s'%xunit[i], **{'font-size':'9pt'})\n zzz[i].setLabel('left', '%s'%yaxis[i], units='%s'%yunit[i], **{'font-size':'9pt'}) \n zzz[i].showAxis('top') \n zzz[i].showAxis('right') \n zzz[i].getAxis('bottom').enableAutoSIPrefix(enable=False)\n\n #zzz[i].getViewBox().setAspectLocked(True)\n\n return\n\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n# main = 
DetrendWindow()\n# main.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n\n"
]
| [
[
"numpy.max",
"numpy.square",
"numpy.array",
"numpy.median",
"numpy.min",
"numpy.mean"
]
]
|
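The detrend_window.py entry above routes every detrending branch through wotan's flatten call. A minimal standalone sketch of that pattern, assuming wotan and numpy are installed — the synthetic light curve here is illustrative, not taken from the repo:

    import numpy as np
    from wotan import flatten

    # Synthetic light curve: a slow sinusoidal trend plus Gaussian noise
    # (illustrative data only).
    t = np.linspace(0.0, 30.0, 1000)
    flux = (1.0 + 0.02 * np.sin(2.0 * np.pi * t / 15.0)
            + np.random.normal(0.0, 0.001, t.size))

    # Same call shape the GUI uses in its time-windowed sliders branch;
    # in the app, `method` comes from a combo box and `window_length`
    # from a spinner.
    flatten_lc, trend_lc = flatten(
        t,                   # array of time values
        flux,                # array of flux values
        method='biweight',   # one of the robust sliders the GUI exposes
        window_length=0.5,   # filter window length in units of ``time``
        return_trend=True,   # return both trend and flattened curve
    )

    # The GUI then propagates flux errors through the fitted trend:
    flux_err = np.full(t.size, 0.001)
    flux_err_o_c = flux_err / trend_lc

The window and tolerance values are assumptions for the sketch; the file itself reads them from its Qt widgets at call time.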
Aoxig/CenterNet-Fixed-For-Colab | [
"5edf76c2b569b499d21a5bb7f2608bb9ee0e260a"
]
| [
"src/lib/models/networks/resnet_spp_short_cbam.py"
]
| [
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Dequan Wang and Xingyi Zhou\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport math\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom .DCNv2.dcn_v2 import DCN\nimport torch.utils.model_zoo as model_zoo\nfrom .module import ShortcutConv2d, ChannelAttention, SpatialAttention, SPP, BottleneckCSP, Conv\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\ndef fill_up_weights(up):\n w = up.weight.data\n f = math.ceil(w.size(2) / 2)\n c = (2 * f - 1 - f % 2) / (2. 
* f)\n for i in range(w.size(2)):\n for j in range(w.size(3)):\n w[0, 0, i, j] = \\\n (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))\n for c in range(1, w.size(0)):\n w[c, 0, :, :] = w[0, 0, :, :]\n\n\ndef fill_fc_weights(layers):\n for m in layers.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, std=0.001)\n # torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n # torch.nn.init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n\nclass PoseResNet(nn.Module):\n\n def __init__(self, block, layers, heads, head_conv):\n self.inplanes = 64\n self.heads = heads\n self.deconv_with_bias = False\n\n super(PoseResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n\n self.ca1 = ChannelAttention(self.inplanes)\n self.sa1 = SpatialAttention()\n\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n self.ca2 = ChannelAttention(self.inplanes)\n self.sa2 = SpatialAttention()\n\n # neck\n # self.spp = nn.Sequential(\n # Conv(512, 256, k=1),\n # SPP(),\n # BottleneckCSP(256 * 4, 512, n=1, shortcut=False)\n # )\n\n #for 101\n self.spp = nn.Sequential(\n Conv(2048, 1024, k=1),\n SPP(),\n BottleneckCSP(1024 * 4, 2048, n=1, shortcut=False)\n )\n\n # used for deconv layers\n # self.deconv_layers = self._make_deconv_layer(\n # 3,\n # [256, 128, 64],\n # [4, 4, 4],\n # )\n\n deconv_layers = []\n for i in range (0, 3):\n deconv_layers.append(self._make_single_deconv_layer(\n 3,\n [256, 128, 64],\n [4, 4, 4],\n i\n ))\n self.deconv_layers = nn.Sequential(*deconv_layers)\n\n inplanes = (64, 128, 256, 512)\n planes = (256, 128, 64)\n\n #for 101\n inplanes = (256, 512, 1024, 2048)\n planes = (256, 128, 64)\n shortcut_num = min(len(inplanes) - 1, len(planes))\n shortcut_cfg = (1, 2, 3)\n shortcut_kernel = 3\n padding = (shortcut_kernel - 1) // 2\n\n self.shortcut_layers = self.build_shortcut(inplanes[:-1][::-1][:shortcut_num], planes[:shortcut_num],\n shortcut_cfg,\n kernel_size=shortcut_kernel, padding=padding)\n\n for head in self.heads:\n classes = self.heads[head]\n if head_conv > 0:\n fc = nn.Sequential(\n nn.Conv2d(64, head_conv,\n kernel_size=3, padding=1, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(head_conv, classes,\n kernel_size=1, stride=1,\n padding=0, bias=True))\n if 'hm' in head:\n fc[-1].bias.data.fill_(-2.19)\n else:\n fill_fc_weights(fc)\n else:\n fc = nn.Conv2d(64, classes,\n kernel_size=1, stride=1,\n padding=0, bias=True)\n if 'hm' in head:\n fc.bias.data.fill_(-2.19)\n else:\n fill_fc_weights(fc)\n self.__setattr__(head, fc)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def 
build_shortcut(self, inplanes, planes, shortcut_cfg, kernel_size=3, padding=1):\n assert len(inplanes) == len(planes) == len(shortcut_cfg)\n shortcut_layers = []\n for (inp, outp, layer_num) in zip(\n inplanes, planes, shortcut_cfg):\n assert layer_num > 0\n layer = ShortcutConv2d(\n inp, outp, [kernel_size] * layer_num, [padding] * layer_num)\n shortcut_layers.append(layer)\n return nn.Sequential(*shortcut_layers)\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_single_deconv_layer(self, num_layers, num_filters, num_kernels, i):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n fc = DCN(self.inplanes, planes,\n kernel_size=(3, 3), stride=1,\n padding=1, dilation=1, deformable_groups=1)\n # fc = nn.Conv2d(self.inplanes, planes,\n # kernel_size=3, stride=1,\n # padding=1, dilation=1, bias=False)\n # fill_fc_weights(fc)\n up = nn.ConvTranspose2d(\n in_channels=planes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias)\n fill_up_weights(up)\n\n layers.append(fc)\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n layers.append(up)\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n return nn.Sequential(*layers)\n\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n fc = DCN(self.inplanes, planes,\n kernel_size=(3, 3), stride=1,\n padding=1, dilation=1, deformable_groups=1)\n # fc = nn.Conv2d(self.inplanes, planes,\n # kernel_size=3, stride=1,\n # padding=1, dilation=1, bias=False)\n # fill_fc_weights(fc)\n up = nn.ConvTranspose2d(\n in_channels=planes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias)\n fill_up_weights(up)\n\n layers.append(fc)\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n layers.append(up)\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n feats = x\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x= self.ca1(x) * x\n x = self.sa1(x) * x\n\n x = self.maxpool(x)\n\n y1 = self.layer1(x)\n y2 = self.layer2(y1)\n y3 = self.layer3(y2)\n y4 = self.layer4(y3)\n\n y4 = self.ca2(y4) * y4\n y4 = self.sa2(y4) * y4\n\n y4 = self.spp(y4)\n\n feats = [y1, y2, y3, y4]\n #x = self.deconv_layers(x)\n for i, upsample_layer in enumerate(self.deconv_layers):\n y4 = 
upsample_layer(y4)\n if i < len(self.shortcut_layers):\n shortcut = self.shortcut_layers[i](feats[-i - 2])\n y4 = y4 + shortcut\n ret = {}\n for head in self.heads:\n ret[head] = self.__getattr__(head)(y4)\n return [ret]\n\n\n def init_weights(self, num_layers):\n if 1:\n url = model_urls['resnet{}'.format(num_layers)]\n pretrained_state_dict = model_zoo.load_url(url)\n print('=> loading pretrained model {}'.format(url))\n self.load_state_dict(pretrained_state_dict, strict=False)\n print('=> init deconv weights from normal distribution')\n for name, m in self.deconv_layers.named_modules():\n if isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\nresnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3])}\n\n\ndef get_pose_net(num_layers, heads, head_conv=256):\n block_class, layers = resnet_spec[num_layers]\n\n model = PoseResNet(block_class, layers, heads, head_conv=head_conv)\n model.init_weights(num_layers)\n return model"
]
| [
[
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.utils.model_zoo.load_url",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.init.normal_"
]
]
|
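The PoseResNet entry above initializes every ConvTranspose2d upsampling stage with fill_up_weights, which writes a bilinear-interpolation kernel into the layer. A standalone restatement of that helper, assuming only torch; the layer parameters in the usage line are illustrative, matching the network's 2x deconv stages:

    import math
    import torch.nn as nn

    def fill_up_weights(up: nn.ConvTranspose2d) -> None:
        """Fill a transposed-conv kernel with bilinear upsampling
        weights, the same scheme PoseResNet applies to its deconv
        layers."""
        w = up.weight.data
        f = math.ceil(w.size(2) / 2)             # half kernel size, rounded up
        c = (2 * f - 1 - f % 2) / (2.0 * f)      # center of the bilinear kernel
        for i in range(w.size(2)):
            for j in range(w.size(3)):
                w[0, 0, i, j] = ((1 - math.fabs(i / f - c))
                                 * (1 - math.fabs(j / f - c)))
        for ch in range(1, w.size(0)):           # copy slice 0 to all channels
            w[ch, 0, :, :] = w[0, 0, :, :]

    # Usage matching one 2x upsampling stage (64 channels, 4x4 kernel):
    up = nn.ConvTranspose2d(64, 64, kernel_size=4, stride=2, padding=1,
                            output_padding=0, bias=False)
    fill_up_weights(up)

Note the helper writes only the [:, 0, :, :] slice, exactly as in the source file; that convention assumes each input channel maps onto one output slice, as in the depthwise-style deconvolutions these CenterNet backbones use.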