repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
haoyang-insitro/ABC-Enhancer-Gene-Prediction
|
[
"48038167b1bf746cca0716f6202f6a90e56bb273"
] |
[
"src/hic.py"
] |
[
"# from insitro_core.utils.storage import makedirs\nimport os\nimport time\nfrom os.path import basename, join\n\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as ssp\n\nfrom insitro_core.utils.cloud.bucket_utils import check_exists, download_file\n\n\ndef get_hic_file(chromosome, hic_dir, allow_vc=True, hic_type=\"juicebox\"):\n if hic_type == \"juicebox\":\n hic_file = join(hic_dir, chromosome, chromosome + \".KRobserved.gz\")\n hic_norm = join(hic_dir, chromosome, chromosome + \".KRnorm.gz\")\n is_vc = False\n if allow_vc and not check_exists(hic_file):\n hic_file = join(hic_dir, chromosome, chromosome + \".VCobserved.gz\")\n hic_norm = join(hic_dir, chromosome, chromosome + \".VCnorm.gz\")\n\n if not check_exists(hic_file):\n RuntimeError(\"Could not find KR or VC normalized hic files\")\n else:\n print(\n \"Could not find KR normalized hic file. Using VC normalized hic file\"\n )\n is_vc = True\n\n print(\"Using: \" + hic_file)\n return hic_file, hic_norm, is_vc\n elif hic_type == \"bedpe\":\n hic_file = join(hic_dir, chromosome, chromosome + \".bedpe.gz\")\n return hic_file, None, None\n\n\ndef load_hic(\n tmpdir,\n hic_file,\n hic_norm_file,\n hic_is_vc,\n hic_type,\n hic_resolution,\n tss_hic_contribution,\n window,\n min_window,\n gamma,\n interpolate_nan=True,\n apply_diagonal_bin_correction=True,\n):\n print(\"Loading HiC\")\n if hic_file.startswith(\"s3\"):\n download_file(hic_file, tmpdir, overwrite_ok=True)\n hic_file = join(tmpdir, basename(hic_file))\n if hic_norm_file.startswith(\"s3\"):\n download_file(hic_norm_file, tmpdir, overwrite_ok=True)\n hic_norm_file = join(tmpdir, basename(hic_norm_file))\n if hic_type == \"juicebox\":\n HiC_sparse_mat = hic_to_sparse(hic_file, hic_norm_file, hic_resolution)\n HiC = process_hic(\n hic_mat=HiC_sparse_mat,\n hic_norm_file=hic_norm_file,\n hic_is_vc=hic_is_vc,\n resolution=hic_resolution,\n tss_hic_contribution=tss_hic_contribution,\n window=window,\n min_window=min_window,\n gamma=gamma,\n interpolate_nan=interpolate_nan,\n apply_diagonal_bin_correction=apply_diagonal_bin_correction,\n )\n # HiC = juicebox_to_bedpe(HiC, chromosome, args)\n elif hic_type == \"bedpe\":\n HiC = pd.read_csv(\n hic_file,\n sep=\"\\t\",\n names=[\"chr1\", \"x1\", \"x2\", \"chr2\", \"y1\", \"y2\", \"name\", \"hic_contact\"],\n )\n\n return HiC\n\n\n# def juicebox_to_bedpe(hic, chromosome, resolution):\n# hic['chr'] = chromosome\n# hic['x1'] = hic['bin1'] * resolution\n# hic['x2'] = (hic['bin1'] + 1) * resolution\n# hic['y1'] = hic['bin2'] * resolution\n# hic['y2'] = (hic['bin2'] + 1) * resolution\n\n# return(hic)\n\n\ndef process_hic(\n hic_mat,\n hic_norm_file,\n hic_is_vc,\n resolution,\n tss_hic_contribution,\n window,\n min_window=0,\n hic_is_doubly_stochastic=False,\n apply_diagonal_bin_correction=True,\n interpolate_nan=True,\n gamma=None,\n kr_cutoff=0.25,\n):\n # Make doubly stochastic.\n # Juicer produces a matrix with constant row/column sums. 
But sum is not 1 and is variable across chromosomes\n t = time.time()\n\n if not hic_is_doubly_stochastic and not hic_is_vc:\n # Any row with Nan in it will sum to nan\n # So need to calculate sum excluding nan\n temp = hic_mat\n temp.data = np.nan_to_num(temp.data, copy=False)\n sums = temp.sum(axis=0)\n sums = sums[~np.isnan(sums)]\n assert np.max(sums[sums > 0]) / np.min(sums[sums > 0]) < 1.001\n mean_sum = np.mean(sums[sums > 0])\n\n if abs(mean_sum - 1) < 0.001:\n print(\n \"HiC Matrix has row sums of {}, continuing without making doubly stochastic\".format(\n mean_sum\n )\n )\n else:\n print(\n \"HiC Matrix has row sums of {}, making doubly stochastic...\".format(\n mean_sum\n )\n )\n hic_mat = hic_mat.multiply(1 / mean_sum)\n\n # Adjust diagonal of matrix based on neighboring bins\n # First and last rows need to be treated differently\n if apply_diagonal_bin_correction:\n last_idx = hic_mat.shape[0] - 1\n nonzero_diag = hic_mat.nonzero()[0][\n hic_mat.nonzero()[0] == hic_mat.nonzero()[1]\n ]\n nonzero_diag = list(\n set(nonzero_diag) - set(np.array([last_idx])) - set(np.array([0]))\n )\n\n for ii in nonzero_diag:\n hic_mat[ii, ii] = (\n max(hic_mat[ii, ii - 1], hic_mat[ii, ii + 1])\n * tss_hic_contribution\n / 100\n )\n\n if hic_mat[0, 0] != 0:\n hic_mat[0, 0] = hic_mat[0, 1] * tss_hic_contribution / 100\n\n if hic_mat[last_idx, last_idx] != 0:\n hic_mat[last_idx, last_idx] = (\n hic_mat[last_idx, last_idx - 1] * tss_hic_contribution / 100\n )\n\n # Any entries with low KR norm entries get set to NaN. These will be interpolated below\n hic_mat = apply_kr_threshold(hic_mat, hic_norm_file, kr_cutoff)\n\n # Remove lower triangle\n if not hic_is_vc:\n hic_mat = ssp.triu(hic_mat)\n else:\n hic_mat = process_vc(hic_mat)\n\n # Turn into dataframe\n hic_mat = hic_mat.tocoo(copy=False)\n hic_df = pd.DataFrame(\n {\"bin1\": hic_mat.row, \"bin2\": hic_mat.col, \"hic_contact\": hic_mat.data}\n )\n\n # Prune to window\n hic_df = hic_df.loc[\n np.logical_and(\n abs(hic_df[\"bin1\"] - hic_df[\"bin2\"]) <= window / resolution,\n abs(hic_df[\"bin1\"] - hic_df[\"bin2\"]) >= min_window / resolution,\n )\n ]\n print(\n \"HiC has {} rows after windowing between {} and {}\".format(\n hic_df.shape[0], min_window, window\n )\n )\n\n # Fill NaN\n # NaN in the KR normalized matrix are not zeros. They are entries where the KR algorithm did not converge (or low KR norm)\n # So need to fill these. 
Use powerlaw.\n # Not ideal obviously but the scipy interpolation algos are either very slow or don't work since the nan structure implies that not all nans are interpolated\n if interpolate_nan:\n nan_loc = np.isnan(hic_df[\"hic_contact\"])\n hic_df.loc[nan_loc, \"hic_contact\"] = get_powerlaw_at_distance(\n abs(hic_df.loc[nan_loc, \"bin1\"] - hic_df.loc[nan_loc, \"bin2\"]) * resolution,\n gamma,\n )\n\n print(\"process.hic: Elapsed time: {}\".format(time.time() - t))\n\n return hic_df\n\n\ndef apply_kr_threshold(hic_mat, hic_norm_file, kr_cutoff):\n # Convert all entries in the hic matrix corresponding to low kr norm entries to NaN\n # Note that in scipy sparse matrix multiplication 0*nan = 0\n # So this doesn't convert 0's to nan only nonzero to nan\n\n norms = np.loadtxt(hic_norm_file)\n norms[norms < kr_cutoff] = np.nan\n norms[norms >= kr_cutoff] = 1\n norm_mat = ssp.dia_matrix((1.0 / norms, [0]), (len(norms), len(norms)))\n\n return norm_mat * hic_mat * norm_mat\n\n\ndef hic_to_sparse(filename, norm_file, resolution, hic_is_doubly_stochastic=False):\n t = time.time()\n HiC = pd.read_table(\n filename,\n names=[\"bin1\", \"bin2\", \"hic_contact\"],\n header=None,\n engine=\"c\",\n memory_map=True,\n )\n\n # verify our assumptions\n assert np.all(HiC.bin1 <= HiC.bin2)\n\n # Need load norms here to know the dimensions of the hic matrix\n norms = pd.read_csv(norm_file, header=None)\n hic_size = norms.shape[0]\n\n # convert to sparse matrix in CSR (compressed sparse row) format, chopping\n # down to HiC bin size. note that conversion to scipy sparse matrices\n # accumulates repeated indices, so this will do the right thing.\n row = np.floor(HiC.bin1.values / resolution).astype(int)\n col = np.floor(HiC.bin2.values / resolution).astype(int)\n dat = HiC.hic_contact.values\n\n # JN: Need both triangles in order to compute row/column sums to make double stochastic.\n # If juicebox is upgraded to return DS matrices, then can remove one triangle\n # TO DO: Remove one triangle when juicebox is updated.\n # we want a symmetric matrix. Easiest to do that during creation, but have to be careful of diagonal\n if not hic_is_doubly_stochastic:\n mask = row != col # off-diagonal\n row2 = col[mask] # note the row/col swap\n col2 = row[mask]\n dat2 = dat[mask]\n\n # concat and create\n row = np.hstack((row, row2))\n col = np.hstack((col, col2))\n dat = np.hstack((dat, dat2))\n\n print(\"hic.to.sparse: Elapsed time: {}\".format(time.time() - t))\n\n return ssp.csr_matrix((dat, (row, col)), (hic_size, hic_size))\n\n\ndef get_powerlaw_at_distance(distances, gamma, min_distance=5000, scale=None):\n assert gamma > 0\n\n # The powerlaw is computed for distances > 5kb. We don't know what the contact freq looks like at < 5kb.\n # So just assume that everything at < 5kb is equal to 5kb.\n # TO DO: get more accurate powerlaw at < 5kb\n distances = np.clip(distances, min_distance, np.Inf)\n log_dists = np.log(distances + 1)\n\n # Determine scale parameter\n # A powerlaw distribution has two parameters: the exponent and the minimum domain value\n # In our case, the minimum domain value is always constant (equal to 1 HiC bin) so there should only be 1 parameter\n # The current fitting approach does a linear regression in log-log space which produces both a slope (gamma) and a intercept (scale)\n # Empirically there is a linear relationship between these parameters (which makes sense since we expect only a single parameter distribution)\n # It should be possible to analytically solve for scale using gamma. 
But this doesn't quite work since the hic data does not actually follow a power-law\n # So could pass in the scale parameter explicity here. Or just kludge it as I'm doing now\n # TO DO: Eventually the pseudocount should be replaced with a more appropriate smoothing procedure.\n\n # 4.80 and 11.63 come from a linear regression of scale on gamma across 20 hic cell types at 5kb resolution. Do the params change across resolutions?\n if scale is None:\n scale = -4.80 + 11.63 * gamma\n\n powerlaw_contact = np.exp(scale + -1 * gamma * log_dists)\n\n return powerlaw_contact\n\n\ndef process_vc(hic):\n # For a vc normalized matrix, need to make rows sum to 1.\n # Assume rows correspond to genes and cols to enhancers\n\n row_sums = hic.sum(axis=0)\n row_sums[row_sums == 0] = 1\n norm_mat = ssp.dia_matrix(\n (1.0 / row_sums, [0]), (row_sums.shape[1], row_sums.shape[1])\n )\n\n # left multiply to operate on rows\n hic = norm_mat * hic\n\n return hic\n"
] |
[
[
"numpy.nan_to_num",
"pandas.DataFrame",
"numpy.all",
"numpy.max",
"numpy.mean",
"scipy.sparse.dia_matrix",
"numpy.exp",
"numpy.hstack",
"pandas.read_csv",
"numpy.clip",
"numpy.log",
"numpy.min",
"numpy.isnan",
"scipy.sparse.csr_matrix",
"pandas.read_table",
"scipy.sparse.triu",
"numpy.floor",
"numpy.array",
"numpy.loadtxt"
]
] |
Spacider/UNSW_2021T1
|
[
"bc59ca2dc048ee695217e6da09f32e2a82961bb7"
] |
[
"COMP9318/lab2/test.py"
] |
[
"## import modules here \nimport pandas as pd\nimport numpy as np\nimport helper\n\n\n################### Question 1 ###################\n\ndef buc_rec_optimized(df): # do not change the heading of the function\n res = pd.DataFrame(columns=df.columns.values)\n pre = []\n my_buc_rec_optimized(df, pre, res)\n return res\n\n\ndef my_buc_rec_optimized(df, pre, res): # help recursive function\n dims = df.shape[1]\n if df.shape[0] == 1:\n single_tuple(df, pre, res)\n elif dims == 1:\n pre.append(sum(helper.project_data(df, 0)))\n res.loc[len(res)] = pre\n else:\n vals = set(helper.project_data(df, 0).values)\n pre_copy = pre.copy()\n for val in vals:\n pre = pre_copy.copy()\n sub_data = helper.slice_data_dim0(df, val)\n pre.append(val)\n my_buc_rec_optimized(sub_data, pre, res)\n\n sub_data = helper.remove_first_dim(df)\n pre = pre_copy.copy()\n pre.append(\"ALL\")\n my_buc_rec_optimized(sub_data, pre, res)\n\n\ndef single_tuple(df, pre, res):\n cols = df.shape[1]\n last = []\n if cols > 1:\n last.append([df.iloc[0, 0]])\n last.append([\"ALL\"])\n for i in range(1, cols - 1):\n temp = []\n for j in last:\n jcopy = j.copy()\n jcopy.append(df.iloc[0, i])\n temp.append(jcopy)\n for j in last:\n jcopy = j.copy()\n jcopy.append(\"ALL\")\n temp.append(jcopy)\n last = temp\n for i in last:\n i.append(df.iloc[0, cols - 1])\n else:\n last.append([df.iloc[0, cols - 1]])\n for i in last:\n pre_copy = pre.copy()\n pre_copy.extend(i)\n res.loc[len(res)] = pre_copy\n\nif __name__ == '__main__':\n input_data = helper.read_data('./asset/c_.txt')\n output = buc_rec_optimized(input_data)\n print(output)"
] |
[
[
"pandas.DataFrame"
]
] |
JesseyXujin/Paddle
|
[
"843bdbaae1253d6dc964e6beddce239a88add113"
] |
[
"python/paddle/fluid/contrib/slim/graph/graph_wrapper.py"
] |
[
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\nfrom .... import io\nfrom .... import compiler\nfrom ....framework import Program\nfrom ....framework import program_guard\nfrom ....framework import Parameter\nfrom ....framework import Variable\nfrom ....executor import Executor\nimport copy\nfrom collections import Iterable\nfrom ....io import save_inference_model, load_inference_model, save_persistables\nimport numpy as np\nimport pickle\nimport os\n\n__all__ = ['GraphWrapper', 'VarWrapper', 'OpWrapper']\n\nOPTIMIZER_OPS = [\n 'momentum',\n 'lars_momentum',\n 'adagrad',\n 'adam',\n 'adamax',\n 'dpsgd',\n 'decayed_adagrad',\n 'adadelta',\n 'rmsprop',\n]\n\n\nclass VarWrapper(object):\n def __init__(self, var, graph):\n assert isinstance(var, Variable)\n assert isinstance(graph, GraphWrapper)\n self._var = var\n self._graph = graph\n\n def __eq__(self, v):\n \"\"\"\n Overwrite this function for ...in... syntax in python.\n \"\"\"\n return self._var.name == v._var.name\n\n def name(self):\n \"\"\"\n Get the name of the variable.\n \"\"\"\n return self._var.name\n\n def shape(self):\n \"\"\"\n Get the shape of the varibale.\n \"\"\"\n return self._var.shape\n\n def set_shape(self, shape):\n \"\"\"\n Set the shape of the variable.\n \"\"\"\n self._var.desc.set_shape(shape)\n\n def inputs(self):\n \"\"\"\n Get all the operators that use this variable as output.\n Returns:\n list<OpWrapper>: A list of operators.\n \"\"\"\n ops = []\n for op in self._graph.ops():\n if self in op.all_inputs():\n ops.append(op)\n return ops\n\n def outputs(self):\n \"\"\"\n Get all the operators that use this variable as input.\n Returns:\n list<OpWrapper>: A list of operators.\n \"\"\"\n ops = []\n for op in self._graph.ops():\n if self in op.all_outputs():\n ops.append(op)\n return ops\n\n\nclass OpWrapper(object):\n def __init__(self, op, graph):\n assert isinstance(graph, GraphWrapper)\n self._op = op\n self._graph = graph\n\n def __eq__(self, op):\n \"\"\"\n Overwrite this function for ...in... 
syntax in python.\n \"\"\"\n return self.idx() == op.idx()\n\n def all_inputs(self):\n \"\"\"\n Get all the input variables of this operator.\n \"\"\"\n return [\n self._graph.var(var_name) for var_name in self._op.input_arg_names\n ]\n\n def all_outputs(self):\n \"\"\"\n Get all the output variables of this operator.\n \"\"\"\n return [\n self._graph.var(var_name) for var_name in self._op.output_arg_names\n ]\n\n def idx(self):\n \"\"\"\n Get the id of this operator.\n \"\"\"\n return self._op.idx\n\n def type(self):\n \"\"\"\n Get the type of this operator.\n \"\"\"\n return self._op.type\n\n def is_bwd_op(self):\n \"\"\"\n Whether this operator is backward op.\n \"\"\"\n return self.type().endswith('_grad')\n\n def is_opt_op(self):\n \"\"\"\n Whether this operator is optimizer op.\n \"\"\"\n return self.type() in OPTIMIZER_OPS\n\n def inputs(self, name):\n \"\"\"\n Get all the varibales by the input name.\n \"\"\"\n return [self._graph.var(var_name) for var_name in self._op.input(name)]\n\n def outputs(self, name):\n \"\"\"\n Get all the varibales by the output name.\n \"\"\"\n return [self._graph.var(var_name) for var_name in self._op.output(name)]\n\n def set_attr(self, key, value):\n \"\"\"\n Set the value of attribute by attribute's name.\n\n Args:\n key(str): the attribute name.\n value(bool|int|str|float|list): the value of the attribute.\n \"\"\"\n self._op._set_attr(key, value)\n\n def attr(self, name):\n \"\"\"\n Get the attribute by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n bool|int|str|float|list: The attribute value. The return value\n can be any valid attribute type.\n \"\"\"\n return self._op.attr(name)\n\n\nclass GraphWrapper(object):\n \"\"\"\n It is a wrapper of paddle.fluid.framework.IrGraph with some special functions\n for paddle slim framework.\n \"\"\"\n\n def __init__(self, program=None, in_nodes=[], out_nodes=[]):\n \"\"\"\n Args:\n program(framework.Program): A program with \n in_nodes(dict): A dict to indicate the input nodes of the graph.\n The key is user-defined and human-readable name.\n The value is the name of Variable.\n out_nodes(dict): A dict to indicate the input nodes of the graph.\n The key is user-defined and human-readable name.\n The value is the name of Variable.\n \"\"\"\n super(GraphWrapper, self).__init__()\n self.program = Program() if program is None else program\n self.persistables = {}\n self.teacher_persistables = {}\n for var in self.program.list_vars():\n if var.persistable:\n self.persistables[var.name] = var\n self.compiled_graph = None\n in_nodes = [] if in_nodes is None else in_nodes\n out_nodes = [] if out_nodes is None else out_nodes\n self.in_nodes = OrderedDict(in_nodes)\n self.out_nodes = OrderedDict(out_nodes)\n self._attrs = OrderedDict()\n\n def all_parameters(self):\n \"\"\"\n Get all the parameters in this graph.\n Returns:\n list<VarWrapper>: A list of VarWrapper instances.\n \"\"\"\n params = []\n for block in self.program.blocks:\n for param in block.all_parameters():\n params.append(VarWrapper(param, self))\n return params\n\n def is_parameter(self, var):\n \"\"\"\n Whether the given variable is parameter.\n Args:\n var(VarWrapper): The given varibale.\n \"\"\"\n return isinstance(var._var, Parameter)\n\n def is_persistable(self, var):\n \"\"\"\n Whether the given variable is persistable.\n Args:\n var(VarWrapper): The given varibale.\n \"\"\"\n return var._var.persistable\n\n def compile(self, for_parallel=True, for_test=False, mem_opt=False):\n \"\"\"\n Compile the program in this wrapper to 
framework.CompiledProgram for next running.\n This function must be called if the program is modified.\n Args:\n for_parallel(bool): Whether the program to run in data parallel way. default: True.\n for_test(bool): Whether the compiled program is used for test.\n \"\"\"\n target = self.program\n if for_test:\n loss = None\n else:\n loss = self.out_nodes['loss']\n if for_parallel:\n # disable memory optimize for stable training\n build_strategy = compiler.BuildStrategy()\n build_strategy.enable_inplace = mem_opt\n build_strategy.memory_optimize = mem_opt\n build_strategy.fuse_all_reduce_ops = False\n # build_strategy.async_mode = False\n self.compiled_graph = compiler.CompiledProgram(\n target).with_data_parallel(\n loss_name=loss, build_strategy=build_strategy)\n else:\n self.compiled_graph = compiler.CompiledProgram(target)\n\n def ops(self):\n \"\"\"\n Return all operator nodes included in the graph as a set.\n \"\"\"\n ops = []\n for block in self.program.blocks:\n for op in block.ops:\n ops.append(OpWrapper(op, self))\n return ops\n\n def vars(self):\n \"\"\"\n Get all the variables.\n \"\"\"\n return [VarWrapper(var, self) for var in self.program.list_vars()]\n\n def var(self, name):\n \"\"\"\n Get the variable by variable name.\n \"\"\"\n return VarWrapper(self.program.global_block().var(name), self)\n\n def clone(self, for_test=False):\n \"\"\"\n Clone a new graph from current graph.\n Returns:\n (GraphWrapper): The wrapper of a new graph.\n \"\"\"\n return GraphWrapper(\n self.program.clone(for_test),\n copy.deepcopy(self.in_nodes), copy.deepcopy(self.out_nodes))\n\n def merge(self, graph):\n \"\"\"\n Merge a graph into current graph.\n Args:\n graph(GraphWrapper): The graph to be merged by current graph.\n \"\"\"\n for var in graph.program.list_vars():\n if var.persistable:\n self.teacher_persistables[var.name] = var\n new_var = self.program.global_block()._clone_variable(\n var, force_persistable=False)\n new_var.stop_gradient = var.stop_gradient\n # TODO: parameters should be cloned\n for op in graph.ops():\n op = op._op\n inputs = {}\n outputs = {}\n attrs = {}\n for input_name in op.input_names:\n inputs[input_name] = [\n self.var(in_var_name)._var\n for in_var_name in op.input(input_name)\n ]\n for output_name in op.output_names:\n outputs[output_name] = [\n self.var(out_var_name)._var\n for out_var_name in op.output(output_name)\n ]\n for attr_name in op.attr_names:\n attrs[attr_name] = op.attr(attr_name)\n self.program.global_block().append_op(\n type=op.type, inputs=inputs, outputs=outputs, attrs=attrs)\n\n def program(self):\n \"\"\"\n Get the program in current wrapper.\n \"\"\"\n return self.program\n\n def pre_ops(self, op):\n \"\"\"\n Get all the previous operators of target operator.\n Args:\n op(OpWrapper): Target operator..\n Returns:\n list<OpWrapper>: A list of operators.\n \"\"\"\n ops = []\n for p in self.ops():\n for in_var in op.all_inputs():\n if in_var in p.all_outputs():\n ops.append(p)\n return ops\n\n def next_ops(self, op):\n \"\"\"\n Get all the next operators of target operator.\n Args:\n op(OpWrapper): Target operator..\n Returns:\n list<OpWrapper>: A list of operators.\n \"\"\"\n ops = []\n for p in self.ops():\n for out_var in op.all_outputs():\n if out_var in p.all_inputs():\n ops.append(p)\n return ops\n\n def get_param_by_op(self, op):\n \"\"\"\n Get the parameters used by target operator.\n \"\"\"\n assert isinstance(op, OpWrapper)\n params = []\n for var in op.all_inputs():\n if isinstance(var._var, Parameter):\n params.append(var)\n assert 
len(params) > 0\n return params\n\n def numel_params(self):\n \"\"\"\n Get the number of elements in all parameters.\n \"\"\"\n ret = 0\n for param in self.all_parameters():\n ret += np.product(param.shape())\n return ret\n\n def get_optimize_graph(self, optimizer, place, scope, no_grad_var_names=[]):\n \"\"\"\n Get a new graph for training by appending some backward operators and optimization operators.\n Args:\n optimizer: The optimzier used to generate training graph.\n place: The place to run the graph.\n scope: The scope used to run the graph. Some new variable will be added into this scope.\n no_grad_var_names(list<str>): Names of variables that should be ignored while computing gradients. default: [].\n Returns:\n (GraphWrapper): The wrapper of new graph with backward ops and optimization ops. \n \"\"\"\n graph = self.clone()\n startup_program = Program()\n with program_guard(\n main_program=graph.program, startup_program=startup_program):\n target_name = None\n if 'loss' in graph.out_nodes:\n target_name = graph.out_nodes['loss']\n elif 'cost' in graph.out_nodes:\n target_name = graph.out_nodes['cost']\n else:\n return None\n target = graph.var(target_name)._var\n # The learning rate variable may be created in other program.\n # Update information in optimizer to make\n # learning rate variable being accessible in current program.\n if isinstance(optimizer._learning_rate, Variable):\n optimizer._learning_rate_map[\n graph.program] = optimizer._learning_rate\n optimizer.minimize(target, no_grad_set=no_grad_var_names)\n\n exe = Executor(place)\n exe.run(program=startup_program, scope=scope)\n return graph\n\n def flops(self, only_conv=False):\n \"\"\"\n Get the flops of current graph.\n Args:\n only_conv: Only calculating the conv layers. default: False.\n Returns:\n int: The flops of current graph.\n \"\"\"\n flops = 0\n for op in self.ops():\n if op.type() in ['conv2d', 'depthwise_conv2d']:\n filter_shape = op.inputs(\"Filter\")[0].shape()\n input_shape = op.inputs(\"Input\")[0].shape()\n output_shape = op.outputs(\"Output\")[0].shape()\n c_out, c_in, k_h, k_w = filter_shape\n _, _, h_out, w_out = output_shape\n groups = op.attr(\"groups\")\n kernel_ops = k_h * k_w * (c_in / groups)\n if len(op.inputs(\"Bias\")) > 0:\n with_bias = 1\n else:\n with_bias = 0\n flops += 2 * h_out * w_out * c_out * (kernel_ops + with_bias)\n elif op.type() == 'pool2d' and not only_conv:\n input_shape = op.inputs(\"X\")[0].shape()\n output_shape = op.outputs(\"Out\")[0].shape()\n _, c_out, h_out, w_out = output_shape\n k_size = op.attr(\"ksize\")\n flops += h_out * w_out * c_out * (k_size[0]**2)\n\n elif op.type() == 'mul' and not only_conv:\n x_shape = list(op.inputs(\"X\")[0].shape())\n y_shape = op.inputs(\"Y\")[0].shape()\n if x_shape[0] == -1:\n x_shape[0] = 1\n flops += 2 * x_shape[0] * x_shape[1] * y_shape[1]\n\n elif op.type() in ['relu', 'sigmoid', 'batch_norm'\n ] and not only_conv:\n input_shape = list(op.inputs(\"X\")[0].shape())\n if input_shape[0] == -1:\n input_shape[0] = 1\n flops += np.product(input_shape)\n\n return flops\n\n def save_model(self, path, exe):\n \"\"\"\n Save network and parameters into file which can be load by load_inference_model api.\n Args:\n path(str): The path to save the persistables.\n exe(framework.Executor): The executor used to save the persistables.\n \"\"\"\n out_vars = [\n self.var(var_name)._var for var_name in self.out_nodes.values()\n ]\n in_vars = list(self.in_nodes.values())\n assert (len(in_vars) > 0)\n assert (len(out_vars) > 0)\n 
io.save_inference_model(\n path,\n in_vars,\n out_vars,\n exe.exe,\n model_filename=\"__model__\",\n params_filename=\"__params__\",\n main_program=self.program.clone(),\n export_for_deployment=True)\n\n def save_infer_model(self, path, exe, in_out, program_only=False):\n \"\"\"\n Save network and parameters into file which can be load by load_inference_model api.\n Args:\n path(str): The path to save the persistables.\n exe(framework.Executor): The executor used to save the persistables.\n in_out(tuple|list): in_out[0] is a list of input nodes' names\n and in_out[1] is a list of output nodes' names.\n program_only(bool): Whether to save program only.\n \"\"\"\n out_vars = [self.var(var_name)._var for var_name in in_out[1]]\n in_vars = list(in_out[0])\n assert (len(in_vars) > 0)\n assert (len(out_vars) > 0)\n io.save_inference_model(\n path,\n in_vars,\n out_vars,\n exe.exe,\n model_filename=\"__model__.infer\",\n params_filename=\"__params__\",\n program_only=program_only,\n main_program=self.program.clone(),\n export_for_deployment=True)\n\n def save_persistables(self, path, exe):\n \"\"\"\n Save all the persistable variables into file.\n Args:\n path(str): The path to save the persistables.\n exe(framework.Executor): The executor used to save the persistables.\n \"\"\"\n # update persistables from program\n for var in self.program.list_vars():\n if var.persistable and var.name not in self.persistables:\n self.persistables[var.name] = var\n persistables = []\n for var in self.persistables:\n if 'reader' not in var and 'double_buffer' not in var and var not in self.teacher_persistables:\n persistables.append(self.persistables[var])\n\n io.save_vars(exe.exe, path, vars=persistables)\n\n def load_persistables(self, path, exe):\n \"\"\"\n Load the persistable variables from file.\n Args:\n path(str): The path to load the persistables.\n exe(framework.Executor): The executor used to load the persistables.\n \"\"\"\n\n def if_exist(var):\n return os.path.exists(os.path.join(path, var.name))\n\n persistables = []\n for var in self.persistables:\n if 'reader' not in var and 'double_buffer' not in var:\n persistables.append(self.persistables[var])\n io.load_vars(exe.exe, path, vars=persistables, predicate=if_exist)\n\n def update_param_shape(self, scope):\n \"\"\"\n Update the shape of parameters in the graph according to tensors in scope.\n It is used after loading pruned parameters from file.\n \"\"\"\n for param in self.all_parameters():\n tensor_shape = np.array(scope.find_var(param.name()).get_tensor(\n )).shape\n param.set_shape(tensor_shape)\n\n def infer_shape(self):\n \"\"\"\n Update the groups of convolution layer according to current filters.\n It is used after loading pruned parameters from file.\n \"\"\"\n for op in self.ops():\n if op.type() != 'conditional_block':\n op._op.desc.infer_shape(op._op.block.desc)\n\n def update_groups_of_conv(self):\n for op in self.ops():\n if op.type() == 'depthwise_conv2d' or op.type(\n ) == 'depthwise_conv2d_grad':\n op.set_attr('groups', op.inputs('Filter')[0].shape()[0])\n"
] |
[
[
"numpy.product"
]
] |
davidgraymi/tetris-ai
|
[
"e406ac13ecf2aae018478acc06e815b77f483d09"
] |
[
"src/run.py"
] |
[
"from dqn_agent import DQNAgent\nfrom tetris import Tetris\nfrom datetime import datetime\nfrom statistics import mean, median\nimport random\nfrom tqdm import tqdm\nimport pandas as pd\n \n\n# Run dqn with Tetris\ndef dqn():\n env = Tetris()\n episodes = 500\n max_steps = None\n epsilon_stop_episode = int(episodes*0.75)\n mem_size = 20000\n discount = 0.95\n batch_size = 512\n epochs = 1\n render_every = 50\n log_every = 25\n replay_start_size = 2000\n train_every = 1\n n_neurons = [32, 32]\n render_delay = None\n activations = ['relu', 'relu', 'linear']\n dqn_num = 3\n filepaths = \"tetris-nn_\"+str(dqn_num)+\"-.h5\"\n # filepaths = [\"tetris-nn_\"+str(dqn_num)+\"-\"+str(i)+\".h5\" for i in range(0,10)]\n save = len(filepaths)\n save_every = episodes/save\n log_fp = \"log.txt\"\n csv_fp = \"dqn_\"+str(dqn_num)+\"_training.csv\"\n log = open(log_fp,\"a\")\n log.write(\"\\ntetris-nn=\"+str(n_neurons)+\"-mem=\"+str(mem_size)+\"-bs=\"+str(batch_size)+\"-e=\"+str(epochs)+\"-\"+str(datetime.now().strftime(\"%Y%m%d-%H%M%S\"))+\"\\n\\n\")\n log.close()\n\n agent = DQNAgent(env.get_action_space(),\n n_neurons=n_neurons, activations=activations,\n epsilon_stop_episode=epsilon_stop_episode, mem_size=mem_size,\n discount=discount, replay_start_size=replay_start_size)\n\n scores = []\n\n for episode in tqdm(range(episodes)):\n current_state = env.reset()\n done = False\n steps = 0\n\n if render_every and episode % render_every == 0:\n render = True\n else:\n render = False\n\n # Game\n while not done and (not max_steps or steps < max_steps):\n next_states = env.get_next_states()\n best_state = agent.best_state(next_states.values())\n \n best_action = None\n for action, state in next_states.items():\n if state == best_state:\n best_action = action\n break\n\n reward, done = env.step(best_action[0], best_action[1], render=render,\n render_delay=render_delay)\n \n agent.add_to_memory(current_state, next_states[best_action], reward, done)\n current_state = next_states[best_action]\n steps += 1\n\n scores.append(env.get_game_score())\n\n # Train\n if episode % train_every == 0:\n agent.train(batch_size=batch_size, epochs=epochs)\n\n # Save\n if (episode+1) % save_every == 0:\n agent.save(filepaths)\n # agent.save(filepaths[save-10])\n save += 1\n\n # Logs\n if log_every and episode and (episode+1) % log_every == 0:\n avg_score = mean(scores[-log_every:])\n min_score = min(scores[-log_every:])\n max_score = max(scores[-log_every:])\n\n log = open(log_fp,\"a\")\n logging = \"episode: \"+str(episode+1)+\", avg_score: \"+str(avg_score)+\", min_score: \"+\\\n str(min_score)+\", max_score: \"+str(max_score)+\"\\n\"\n log.write(logging)\n log.close()\n \n log = open(log_fp,\"a\")\n log.write(\"\\n------------------------------------------------------------------------------------------------\"+\"\\n\")\n log.close()\n\n df = pd.DataFrame(scores)\n df.to_csv(csv_fp)\n\n\nif __name__ == \"__main__\":\n dqn()\n"
] |
[
[
"pandas.DataFrame"
]
] |
krystophny/profit
|
[
"c6316c9df7cfaa7b30332fdbbf85ad27175eaf92",
"c6316c9df7cfaa7b30332fdbbf85ad27175eaf92"
] |
[
"draft/pendulum/prod_extended/prod_extended.py",
"draft/Pendulum/cosine_gpy.py"
] |
[
"import numpy as np\nimport GPy\n\nk0 = GPy.kern.RBF(1, active_dims=0)\nk1 = GPy.kern.RBF(1, active_dims=1)\n\nk0_der = GPy.kern.DiffKern(k0, 0)\n\n# Extended class for product kernel,\n# can be merged with Prod class when finished\n\nclass ProdExtended(GPy.kern.Prod): \n def dK_dX(self, X, X2, dimX):\n # Product rule to kprod = k0*k1\n # d(kprod/dx0) = dk0/dx0*k1\n # d(kprod/dx1) = k0*dk1/dx1\n other = self.parts[:] \n All = other[:]\n n = len(All)\n nX = X.shape[0]\n nX2 = X2.shape[0]\n diffpart = other.pop(dimX)\n K_all = np.zeros((n,nX,nX2))\n for k in range(n):\n if (k!=dimX):\n K_all[k,:,:] = All[k].K(X,X2)\n else:\n K_all[k,:,:] = diffpart.dK_dX(X,X2,dimX)\n return np.prod(K_all,0)\n \n def dK_dX2(self,X,X2,dimX2):\n return -self.dK_dX(X,X2,dimX2)\n\n def dK2_dXdX2(self, X, X2, dimX, dimX2):\n # Product rule to kprod = k0*k1\n # d²(kprod/dx0²) = d²k0/dx0²*k1\n # d²(kprod/dx1²) = k0*d²k1/dx1²\n # d²(kprod/dx0dx1) = dk0/dx0*dk1/dx1\n # d²(kprod/dx1dx0) = dk0/dx0*dk1/dx1\n other = self.parts[:] \n All = other[:]\n n = len(All)\n nX = X.shape[0]\n nX2 = X2.shape[0]\n if (dimX==dimX2):\n diffpart = other.pop(dimX)\n K_all = np.zeros((n,nX,nX2))\n for k in range(n):\n if (k!=dimX):\n K_all[k,:,:] = All[k].K(X,X2)\n else:\n K_all[k,:,:] = diffpart.dK2_dXdX2(X,X2,dimX,dimX)\n else:\n diffpart1 = other.pop(dimX)\n diffpart2 = All[dimX2]\n K_all = np.zeros((n,nX,nX2))\n for k in range(n):\n if (k==dimX):\n K_all[k,:,:] = diffpart1.dK_dX(X,X2,dimX)\n elif (k==dimX2):\n K_all[k,:,:] = diffpart2.dK_dX2(X,X2,dimX2)\n else:\n K_all[k,:,:] = All[k].K(X,X2)\n return np.prod(K_all,0)\n\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 22 00:21:20 2020\n\n@author: manal\n\"\"\"\n\nimport numpy as np\n# from GPy import *\nimport GPy\nfrom GPy.kern.src.stationary import Stationary\nimport matplotlib.pyplot as plt\n\nclass Cosin(Stationary):\n \"\"\"\n Cosine kernel:\n .. math::\n k(r) = \\sigma^2 \\cos ( r )\n\n r(x, x') = \\\\sqrt{ \\\\sum_{q=1}^Q \\\\frac{(x_q - x'_q)^2}{\\ell_q^2} }\n \"\"\"\n def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Cosin'):\n super(Cosin, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)\n\n def K_of_r(self, r):\n return self.variance * np.cos(r)\n\n def dK_dr(self, r):\n return -self.variance * np.sin(r)\n\n def dK_dX(self, X, X2, dimX):\n r = self._scaled_dist(X, X2)\n id = np.where(r<=1e-6)\n dK_dr = self.dK_dr(r)\n dist = X[:,None,dimX]-X2[None,:,dimX]\n lengthscale2inv = (np.ones((X.shape[1]))/(self.lengthscale**2))[dimX]\n k = 2. * lengthscale2inv * dist * r**(-1) * dK_dr\n for i in range(id[0].shape[0]):\n k[i,i]= -dist[i,i]\n return k\n # return r,2. * lengthscale2inv * dist * r1 * dK_dr\n\n def dK_dX2(self,X,X2,dimX2):\n return -1.*self.dK_dX(X,X2, dimX2)\n\n def dK2_dXdX2(self, X, X2, dimX, dimX2):\n if X2 is None:\n X2 = X\n r = self._scaled_dist(X, X2)\n dist = X[:,None,:]-X2[None,:,:]\n lengthscale2inv = np.ones((X.shape[1]))/(self.lengthscale**2)\n K = self.K_of_r(r)\n dK_dr = self.dK_dr(r)\n dist = X[:,None,:]-X2[None,:,:]\n lengthscale2inv = np.ones((X.shape[1]))/(self.lengthscale**2)\n l1 = lengthscale2inv[dimX]\n l2 = lengthscale2inv[dimX2]\n d1 = dist[:,:,dimX]\n d2 = dist[:,:,dimX]\n id = np.where(r<=1e-6)\n k = (dimX==dimX2)*(-2.)*l1*d1*r**(-1)*dK_dr + l1*d1*r**(-1) * (2.*l1*d1*r**(-1)*dK_dr + 4*l2*d2*K)\n for i in range(id[0].shape[0]):\n k[i,i]= -dist[i,i,dimX2]*dist[i,i,dimX]\n return k\n\n#%%\nk0 = Cosin(1, active_dims=0)\nk01 = Cosin(1)\nk1 = Cosin(1, active_dims=1)\nk2 = Cosin(3)\n\nx0train = np.linspace(-5,5,100).reshape(-1,1)\nx1train = np.linspace(-5,5,100).reshape(-1,1)\nxtrain = np.hstack((x0train, x1train))\n\n# print('Training points:')\n# print(xtrain)\n# print()\n\n\n\nk0_K = k0.K(x0train,x0train)\ndk0 = k0.dK_dX(x0train, x0train, 0)\ndk0_2 = k0.dK2_dXdX2(x0train, x0train, 0, 0)\n\n# print('K0 = ')\n# print(k0_K,'\\n')\n# print('dK0.dX: \\n', dk0,'\\n')\n# print('dK2.dXdX2: \\n',dk0_2)\n# # print()\n# # k0.dK_dX(x0train, x0train, 0).plot()\n# plt.figure()\n# plt.plot(k0_K[0,:])\n# plt.show\n# plt.figure()\n# plt.plot(dk0[0,:])\n# plt.show\n# plt.figure()\n# plt.plot(dk0_2[0,:])\n# plt.show\n# # plt.plot(r,k0.dK_dX(x0train,x0train,0))\n# plt.figure()\n# plt.imshow(k)\n# plt.figure()\n# plt.imshow(k[1:5,1:5])\n# plt.figure()\n# plt.imshow(r)\n\n# k01_K = k01.K(x0train,x0train)\n# dk01 = k01.dK_dX(x0train, x0train, 0)\n# dk01_2 = k01.dK2_dXdX2(x0train, x0train, 0, 0)\n# print('k: \\n', k01_K-k0_K)\n# print('dk: \\n', dk01-dk0)\n# print('dk2: \\n', dk01_2-dk0_2)\n\n# k1_K = k1.K(xtrain,xtrain)\n# dk1 = k1.dK_dX(xtrain, xtrain, 1)\n# dk1_2 = k1.dK2_dXdX2(xtrain, xtrain, 1, 1)\n# print('K1 = ')\n# print(k1_K,'\\n') # Need 2D vectors, here, as active_dims=1\n# print('dk1.dX: \\n',dk1,'\\n')\n# print('dk12.dXdX2: \\n',dk1_2)\n# plt.figure()\n# plt.plot(k1_K[0,:])\n# plt.show\n# plt.figure()\n# plt.plot(dk1[0,:])\n# plt.show\n# plt.figure()\n# plt.plot(dk1_2[0,:])\n# plt.show\n# print()\n# # plt.plot(k1.dK_dX)\n# print('k0_der: \\n', k0_der.K(xtrain,xtrain),'\\n')\n\n# xtrain2 = np.hstack((x0train, x1train, x1train))\n# 
k2_K = k2.K(xtrain2,xtrain2)\n# dk2 = k2.dK_dX(xtrain2, xtrain2, 2)\n# dk2_2 = k2.dK2_dXdX2(xtrain2, xtrain2, 2, 2)\n# print('K2 = ')\n# print(k2_K,'\\n') # Need 2D vectors, here, as active_dims=1\n# print('dk2.dX: \\n',dk2,'\\n')\n# print('dk22.dXdX2: \\n',dk2_2)\n# plt.figure()\n# plt.plot(k2_K[0,:])\n# plt.show\n# plt.figure()\n# plt.plot(dk2[0,:])\n# plt.show\n# plt.figure()\n# plt.plot(dk2_2[0,:])\n# plt.show\n\n\n#%%\n\nimport pytest\n# from katharina import kernels\nimport func\n\ndef test_kern_Cosine():\n x0train = np.linspace(-5,5,100).reshape(-1,1)\n # x1train = np.linspace(-5,5,100).reshape(-1,1)\n # xtrain = np.hstack((x0train, x1train))\n\n k0 = Cosin(1, active_dims=0)\n k0_K = k0.K(x0train,x0train)\n dk0 = k0.dK_dX(x0train, x0train, 0)\n dk0_2 = k0.dK2_dXdX2(x0train, x0train, 0, 0)\n\n l = np.array([1, 1])\n\n K = np.zeros((len(x0train),len(x0train)))\n for i in range(K.shape[0]):\n for j in range(K.shape[1]):\n K[i,j] = func.f_kern(x0train[i], x0train[i], x0train[j], x0train[j], l)\n\n dK = np.zeros((len(x0train),len(x0train)))\n for i in range(dK.shape[0]):\n for j in range(dK.shape[1]):\n dK[i,j] = func.dkdx(x0train[i], x0train[i], x0train[j], x0train[j], l)\n\n dK2 = np.zeros((len(x0train),len(x0train)))\n for i in range(dK2.shape[0]):\n for j in range(dK2.shape[1]):\n dK2[i,j] = func.d2kdxdx0(x0train[i], x0train[i], x0train[j], x0train[j], l)\n\n # print(k)\n # print(\",,,,\")\n # print(k1a)\n\n plt.figure()\n plt.imshow(K)\n plt.figure()\n plt.imshow(k0_K)\n plt.figure()\n plt.plot(K[0,:])\n\n plt.figure()\n plt.imshow(dK)\n plt.figure()\n plt.imshow(dk0)\n plt.figure()\n plt.plot(dK[0,:])\n\n plt.figure()\n plt.imshow(dK2)\n plt.figure()\n plt.imshow(dk0_2)\n plt.figure()\n plt.plot(dK2[0,:])\n\n print(k0_K)\n print(K)\n print()\n print(dk0)\n print(dK)\n print()\n print(dk0_2)\n print(dK2)\n print()\n # print(np.isclose(k0_K, K, rtol=1e-6))\n # assert np.isclose(dk0, dK, rtol=1e-6)\n # assert np.isclose(dk0_2, dK2, rtol=1e-6)\n\n# %%\ntest_kern_Cosine()\n"
] |
[
[
"numpy.zeros",
"numpy.prod"
],
[
"numpy.hstack",
"matplotlib.pyplot.imshow",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.ones",
"numpy.array",
"numpy.where",
"matplotlib.pyplot.figure"
]
] |
yasuyuky/optuna
|
[
"f9d7b65f4a5918ec717cfc70fa005975b6a23190"
] |
[
"examples/pytorch_lightning_simple.py"
] |
[
"\"\"\"\nOptuna example that optimizes multi-layer perceptrons using PyTorch Lightning.\n\nIn this example, we optimize the validation accuracy of hand-written digit recognition using\nPyTorch Lightning, and MNIST. We optimize the neural network architecture. As it is too time\nconsuming to use the whole MNIST dataset, we here use a small subset of it.\n\nWe have the following two ways to execute this example:\n\n(1) Execute this code directly. Pruning can be turned on and off with the `--pruning` argument.\n $ python pytorch_lightning_simple.py [--pruning]\n\n\n(2) Execute through CLI. Pruning is enabled automatically.\n $ STUDY_NAME=`optuna create-study --direction maximize --storage sqlite:///example.db`\n $ optuna study optimize pytorch_lightning_simple.py objective --n-trials=100 --study \\\n $STUDY_NAME --storage sqlite:///example.db\n\"\"\"\n\nimport argparse\nimport os\nimport shutil\n\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import Adam\nimport torch.utils.data\nfrom torchvision import datasets\nfrom torchvision import transforms\n\nimport optuna\nfrom optuna.integration import PyTorchLightningPruningCallback\n\nPERCENT_TEST_EXAMPLES = 0.1\nBATCHSIZE = 128\nCLASSES = 10\nEPOCHS = 10\nDIR = os.getcwd()\nMODEL_DIR = os.path.join(DIR, 'result')\n\n\nclass DictLogger(pl.logging.LightningLoggerBase):\n \"\"\"PyTorch Lightning `dict` logger.\"\"\"\n\n def __init__(self, version):\n super(DictLogger, self).__init__()\n self.metrics = []\n self._version = version\n\n def log_metrics(self, metric, step_num=None):\n self.metrics.append(metric)\n\n @property\n def version(self):\n return self._version\n\n\nclass Net(nn.Module):\n def __init__(self, trial):\n super(Net, self).__init__()\n self.layers = []\n self.dropouts = []\n\n # We optimize the number of layers, hidden untis in each layer and drouputs.\n n_layers = trial.suggest_int('n_layers', 1, 3)\n dropout = trial.suggest_uniform('dropout', 0.2, 0.5)\n input_dim = 28 * 28\n for i in range(n_layers):\n output_dim = int(trial.suggest_loguniform('n_units_l{}'.format(i), 4, 128))\n self.layers.append(nn.Linear(input_dim, output_dim))\n self.dropouts.append(nn.Dropout(dropout))\n input_dim = output_dim\n\n self.layers.append(nn.Linear(input_dim, CLASSES))\n\n # Assigning the layers as class variables (PyTorch requirement).\n for idx, layer in enumerate(self.layers):\n setattr(self, 'fc{}'.format(idx), layer)\n\n # Assigning the dropouts as class variables (PyTorch requirement).\n for idx, dropout in enumerate(self.dropouts):\n setattr(self, 'drop{}'.format(idx), dropout)\n\n def forward(self, data):\n data = data.view(-1, 28 * 28)\n for layer, dropout in zip(self.layers, self.dropouts):\n data = F.relu(layer(data))\n data = dropout(data)\n return F.log_softmax(self.layers[-1](data), dim=1)\n\n\nclass LightningNet(pl.LightningModule):\n\n def __init__(self, trial):\n super(LightningNet, self).__init__()\n\n # Be careful not to overwrite `pl.LightningModule` attributes such as `self.model`.\n self._model = Net(trial)\n\n def forward(self, data):\n return self._model(data)\n\n def training_step(self, batch, batch_nb):\n data, target = batch\n output = self.forward(data)\n loss = F.nll_loss(output, target)\n return {'loss': loss}\n\n def validation_step(self, batch, batch_nb):\n data, target = batch\n output = self.forward(data)\n pred = output.argmax(dim=1, keepdim=True)\n correct = pred.eq(target.view_as(pred)).sum().item()\n accuracy = correct / data.size(0)\n 
return {'validation_accuracy': accuracy}\n\n def validation_end(self, outputs):\n accuracy = sum(x['validation_accuracy'] for x in outputs) / len(outputs)\n # Pass the accuracy to the `DictLogger` via the `'log'` key.\n return {'log': {'accuracy': accuracy}}\n\n def configure_optimizers(self):\n return Adam(self._model.parameters())\n\n @pl.data_loader\n def train_dataloader(self):\n return torch.utils.data.DataLoader(\n datasets.MNIST(DIR, train=True, download=True, transform=transforms.ToTensor()),\n batch_size=BATCHSIZE, shuffle=True)\n\n @pl.data_loader\n def val_dataloader(self):\n return torch.utils.data.DataLoader(\n datasets.MNIST(DIR, train=False, download=True, transform=transforms.ToTensor()),\n batch_size=BATCHSIZE, shuffle=False)\n\n\ndef objective(trial):\n # PyTorch Lightning will try to restore model parameters from previous trials if checkpoint\n # filenames match. Therefore, the filenames for each trial must be made unique.\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n os.path.join(MODEL_DIR, 'trial_{}'.format(trial.number)), save_best_only=False)\n\n # The default logger in PyTorch Lightning writes to event files to be consumed by\n # TensorBoard. We create a simple logger instead that holds the log in memory so that the\n # final accuracy can be obtained after optimization. When using the default logger, the\n # final accuracy could be stored in an attribute of the `Trainer` instead.\n logger = DictLogger(trial.number)\n\n trainer = pl.Trainer(\n logger=logger,\n val_percent_check=PERCENT_TEST_EXAMPLES,\n checkpoint_callback=checkpoint_callback,\n max_nb_epochs=EPOCHS,\n gpus=0 if torch.cuda.is_available() else None,\n early_stop_callback=PyTorchLightningPruningCallback(trial, monitor='accuracy')\n )\n\n model = LightningNet(trial)\n trainer.fit(model)\n\n return logger.metrics[-1]['accuracy']\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PyTorch Lightning example.')\n parser.add_argument('--pruning', '-p', action='store_true',\n help='Activate the pruning feature. `MedianPruner` stops unpromising '\n 'trials at the early stages of training.')\n args = parser.parse_args()\n\n pruner = optuna.pruners.MedianPruner() if args.pruning else optuna.pruners.NopPruner()\n\n study = optuna.create_study(direction='maximize', pruner=pruner)\n study.optimize(objective, n_trials=100, timeout=600)\n\n print('Number of finished trials: {}'.format(len(study.trials)))\n\n print('Best trial:')\n trial = study.best_trial\n\n print(' Value: {}'.format(trial.value))\n\n print(' Params: ')\n for key, value in trial.params.items():\n print(' {}: {}'.format(key, value))\n\n shutil.rmtree(MODEL_DIR)\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cuda.is_available",
"torch.nn.functional.nll_loss"
]
] |
RobbeDePrins/bosonic
|
[
"036e537421255793a165742e9af678bc0f31f9b7"
] |
[
"bosonic/phi_dispatch.py"
] |
[
"\n\nimport sys\nimport numpy as np\n\nfrom .aa_phi import aa_phi as aa_phi_cpu\nfrom .aa_phi import binom\n\n# Try to import cuda library & test\ngpu_avail = True\ntry:\n from .gpu_phi import GpuPhiDispatcher\n from .util import haar_rand\n\n aa_phi_gpu = GpuPhiDispatcher()\n U = haar_rand(4)\n phiU_GPU = aa_phi_gpu(U, 2)\n phiU_CPU = aa_phi_cpu(U, 2)\n assert np.mean(np.abs(phiU_CPU - phiU_GPU)) < 1e-12\nexcept:\n gpu_avail = False\n\n\nclass PhiDispatcher(object):\n estimatorCutoff = 1732\n\n def __init__(self):\n self.gpu = aa_phi_gpu\n self.cpu = aa_phi_cpu\n self.lastUsed = None\n\n def estimator(self, n, m):\n return binom(n+m-1, n) * n**2\n\n def __call__(self, U, n):\n m = U.shape[0]\n est = self.estimator(n, m)\n if est < self.estimatorCutoff:\n self.lastUsed = 'cpu'\n return self.cpu(U, n)\n else:\n self.lastUsed = 'gpu'\n return self.gpu(U, n)\n\n\nif gpu_avail:\n aa_phi = PhiDispatcher()\nelse:\n print(\"Warning: bosonic could not set up GPU; falling back to CPU\",\n file=sys.stderr)\n aa_phi = aa_phi_cpu\n"
] |
[
[
"numpy.abs"
]
] |
Mortal/smb-frames
|
[
"5e6377a724e58c84ead9c95c1c5abf9e7c8eca2b"
] |
[
"smb-frames.py"
] |
[
"#!/usr/bin/env python3\nimport json\nimport argparse\nimport datetime\nimport numpy as np\n\nfrom smb_timer import timestamp, crop_string, find_levels_streaming\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--plot', '-p', action='store_true')\n parser.add_argument('--from', '-f', dest='from_', type=timestamp)\n parser.add_argument('--to', '-t', type=timestamp)\n parser.add_argument('--input-filename', '-i', required=True)\n parser.add_argument('--crop', '-c', required=True, type=crop_string)\n parser.add_argument('--delay', '-d', default=0, type=float)\n parser.add_argument('--mode')\n args = parser.parse_args()\n\n if args.plot:\n import matplotlib.pyplot as plt\n\n def level_name(i):\n world, part = divmod(i, 4)\n worlds = '123456789ABCD'\n return '%s-%s' % (worlds[world], part+1)\n\n if args.mode == 'll_any_d-4':\n level_names = '''1-1 1-2 4-1 4-2 4-3 4-4 5-1 5-2 8-1 8-2 8-3 8-4 A-1\n A-2 A-3 B-4 D-1 D-2 D-3'''.split() + ['King Koopa']\n\n def level_name(i):\n return level_names[i]\n\n framerate, levels = find_levels_streaming(\n args.input_filename, args.crop, args.from_)\n split_at = [args.from_ * framerate]\n for i, (f1, f2, extra) in enumerate(levels):\n split_at.append(f2 + args.delay * framerate)\n f = f2 - f1\n print(\"%s %.2f (from %s to %s)\" %\n (level_name(i),\n f/framerate,\n datetime.timedelta(seconds=f1 / framerate),\n datetime.timedelta(seconds=f2 / framerate)))\n\n if args.plot:\n d12, change2, timer_peaks = extra\n time = (f1 + np.arange(len(d12))) / framerate\n plt.plot(time, d12)\n plt.plot(time[:len(change2)], change2, 'k')\n plt.plot(time[timer_peaks], d12[timer_peaks], 'o')\n plt.show()\n split_at[-1] = args.to * framerate\n\n s0 = split_at[0]\n\n # \"Urn format\", to be imported by\n # https://github.com/LiveSplit/LiveSplit/blob/master/LiveSplit/LiveSplit.Core/Model/RunFactories/UrnRunFactory.cs\n obj = {\n 'title': '',\n 'attempt_count': 1,\n 'start_delay': '0:0:00',\n 'splits': [\n {\n 'title': level_name(i),\n 'time': str(datetime.timedelta(seconds=(s2 - s0)/framerate)),\n 'best_time':\n str(datetime.timedelta(seconds=(s2 - s0)/framerate)),\n 'best_segment':\n str(datetime.timedelta(seconds=(s2 - s1)/framerate)),\n }\n for i, (s1, s2) in enumerate(zip(split_at[:-1],\n split_at[1:]))\n ]\n }\n\n filename = 'splits.json'\n if args.mode:\n filename = 'splits_%s.json' % args.mode\n with open(filename, 'w') as fp:\n json.dump(obj, fp, indent=4)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
]
] |
sukritws40/TSP-GPwith2-OPT
|
[
"cdc7b7b7c8088da7f4e8b940bc35454295d8551c"
] |
[
"TSP/tsp_with_improvement.py"
] |
[
"import numpy as np\r\nimport pandas as pd\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom time import time\r\nfrom random import shuffle, randrange, randint\r\n#number of generation\r\nn = 10\r\n\r\ndef main():\r\n \r\n #Data preparation\r\n df = pd.read_csv('test_200.csv', header = 0)\r\n df = df.drop('city', 1)\r\n df_drop = df.drop_duplicates(subset = 'i', keep = 'first')\r\n #df -> array\r\n dataset = df_drop.as_matrix(columns = None)\r\n \r\n start = time()\r\n #Swap algorithm\r\n path, length = swap(dataset)\r\n print(path)\r\n \r\n tottime = time() - start\r\n print(\"time = \",tottime)\r\n print(\"total_distance\", length)\r\n\r\n list_plot = []\r\n for x in range(0, len(path)):\r\n list_plot.append(dataset[path[x]])\r\n \r\n list_plot.append(list_plot[0])\r\n plt.gca().invert_yaxis()\r\n plt.scatter(*zip(*dataset))\r\n plt.plot(*zip(*list_plot))\r\n \r\n plt.show()\r\n\r\n################################################################################\r\n#Swap algorithm\r\ndef swap(dataset):\r\n best_order = []\r\n best_length = float('inf')\r\n \r\n# order = list(range(dataset.shape[0]))\r\n# shuffle(order)\r\n \r\n for i in list(range(n)):\r\n order = list(range(dataset.shape[0]))\r\n#Create random solution \r\n shuffle(order)\r\n length = calc_length(dataset, order)\r\n print(\"generation: \", 1+i, \" [f(s): \", best_length,\"]\")\r\n\r\n changed = True\r\n while changed:\r\n\r\n changed = False\r\n\r\n for a in range(0, dataset.shape[0]):\r\n\r\n\r\n for b in range(a+1, dataset.shape[0]):\r\n\r\n new_order = order[:a] + order[a:b][::-1] + order[b:]\r\n new_length = calc_length(dataset, new_order)\r\n\r\n if new_length < length:\r\n length = new_length\r\n order = new_order\r\n changed = True\r\n \r\n \r\n if length < best_length:\r\n best_length = length\r\n best_order = order\r\n \r\n return best_order, best_length\r\n################################################################################\r\n#Calculate length\r\ndef calc_length(dataset, path):\r\n length = 0\r\n for i in list(range(0, len(path))):\r\n length += distance(dataset[path[i-1]], dataset[path[i]])\r\n# length += tsp_cost(path[i-1], path[i], dataset)\r\n return length\r\n\r\n#Distance square\r\ndef distance(c1, c2):\r\n t1 = c2[0] - c1[0]\r\n t2 = c2[1] - c1[1]\r\n\r\n return math.sqrt(t1**2 + t2**2)\r\n##################################################################################\r\ndef tsp_cost(i, j, dataset):\r\n pi = 3.14159265358979323846264\r\n \r\n lat_i = (pi*dataset[i][0])/180\r\n lat_j = (pi*dataset[j][0])/180\r\n long_i = (pi*dataset[i][1])/180\r\n long_j = (pi*dataset[j][1])/180\r\n \r\n q1 = math.cos(lat_j)*math.sin(long_i-long_j)\r\n q3 = math.sin((long_i-long_j)/2.0)\r\n q4 = math.cos((long_i-long_j)/2.0)\r\n q2 = (math.sin(lat_i+lat_j)*q3*q3) - (math.sin(lat_i-lat_j)*q4*q4)\r\n q5 = (math.cos(lat_i-lat_j)*q4*q4) - (math.cos(lat_i+lat_j)*q3*q3)\r\n \r\n return int(6378388.0*math.atan2(math.sqrt((q1*q1)+(q2*q2)),q5)+1.0)\r\n##################################################################################\r\nmain()\r\n\r\n"
] |
[
[
"matplotlib.pyplot.gca",
"pandas.read_csv",
"matplotlib.pyplot.show"
]
] |
oblanchet/mlpack
|
[
"e02ab3be544694294d2f73bd12a98d0d162ef3af"
] |
[
"src/mlpack/bindings/python/mlpack/matrix_utils.py"
] |
[
"#!/usr/bin/env python\n\"\"\"\nmatrix_utils.py: utilities for matrix conversion\n\nThis file defines the to_matrix() function, which can be used to convert Pandas\ndataframes or other types of array-like objects to numpy ndarrays for use in\nmlpack bindings.\n\nmlpack is free software; you may redistribute it and/or modify it under the\nterms of the 3-clause BSD license. You should have received a copy of the\n3-clause BSD license along with mlpack. If not, see\nhttp://www.opensource.org/licenses/BSD-3-Clause for more information.\n\"\"\"\nimport numpy as np\nimport pandas as pd\n# The CategoricalDtype class has moved multiple times, so this insanity is\n# necessary to import the right version.\nif int(pd.__version__.split('.')[0]) > 0 or \\\n int(pd.__version__.split('.')[1]) >= 20:\n from pandas.api.types import CategoricalDtype\nelif int(pd.__version__.split('.')[1]) >= 18:\n from pandas.types.dtypes import CategoricalDtype\nelif int(pd.__version__.split('.')[1]) == 17:\n from pandas.core.dtypes import CategoricalDtype\nelif int(pd.__version__.split('.')[1]) >= 15:\n from pandas.core.common import CategoricalDtype\n\n# We need a unicode type, but on python3 we don't have it.\ntry:\n UNICODE_EXISTS = bool(type(unicode))\nexcept NameError:\n unicode = str\n\n# We also need a buffer type.\ntry:\n BUFFER_EXISTS = bool(type(buffer))\nexcept:\n buffer = memoryview\n\ndef to_matrix(x, dtype=np.double, copy=False):\n \"\"\"\n Given some array-like X, return a numpy ndarray of the same type.\n \"\"\"\n # Make sure it's array-like at all.\n if not hasattr(x, '__len__') and \\\n not hasattr(x, 'shape') and \\\n not hasattr(x, '__array__'):\n raise TypeError(\"given argument is not array-like\")\n\n if (isinstance(x, np.ndarray) and x.dtype == dtype and x.flags.c_contiguous):\n if copy: # Copy the matrix if required.\n return x.copy(\"C\"), True\n else:\n return x, False\n elif (isinstance(x, np.ndarray) and x.dtype == dtype and x.flags.f_contiguous):\n # A copy is always necessary here.\n return x.copy(\"C\"), True\n else:\n if isinstance(x, pd.core.series.Series) or isinstance(x, pd.DataFrame):\n # We can only avoid a copy if the dtype is the same and the copy flag is\n # false. I'm actually not sure if this is possible, since in everything I\n # have found, Pandas stores with F_CONTIGUOUS not C_CONTIGUOUS.\n y = x.values\n if copy == False and y.dtype == dtype and y.flags.c_contiguous:\n return np.ndarray(y.shape, buffer=x.values, dtype=dtype, order='C'),\\\n False\n else:\n # We have to make a copy or change the dtype, so just do this directly.\n return np.array(y, dtype=dtype, order='C', copy=True), True\n else:\n return np.array(x, copy=True, dtype=dtype, order='C'), True\n\n\ndef to_matrix_with_info(x, dtype, copy=False):\n \"\"\"\n Given some array-like X (which should be either a numpy ndarray or a pandas\n DataFrame), convert it into a numpy matrix of the given dtype.\n \"\"\"\n # Make sure it's array-like at all.\n if not hasattr(x, '__len__') and \\\n not hasattr(x, 'shape') and \\\n not hasattr(x, '__array__'):\n raise TypeError(\"given argument is not array-like\")\n\n if isinstance(x, np.ndarray):\n # It is already an ndarray, so the vector of info is all 0s (all numeric).\n if len(x.shape) < 2:\n d = np.zeros(1, dtype=np.bool)\n else:\n d = np.zeros([x.shape[1]], dtype=np.bool)\n\n # Copy the matrix if needed.\n if copy:\n return (x.copy(order=\"C\"), True, d)\n else:\n return (x, False, d)\n\n if isinstance(x, pd.DataFrame) or isinstance(x, pd.Series):\n # It's a pandas dataframe. 
So we need to see if any of the dtypes are\n # categorical or object, and if so, we need to convert them. First see if\n # we can take a shortcut without copying.\n dtype_array = x.dtypes.values if len(x.dtypes) > 0 else [x.dtypes]\n if not any(isinstance(t, CategoricalDtype)\n for t in dtype_array) and \\\n not np.dtype(object) in dtype_array and \\\n not np.dtype(str) in dtype_array and \\\n not np.dtype(unicode) in dtype_array:\n # We can just return the matrix as-is; it's all numeric.\n t = to_matrix(x, dtype=dtype, copy=copy)\n if len(x.shape) < 2:\n d = np.zeros(1, dtype=np.bool)\n else:\n d = np.zeros([x.shape[1]], dtype=np.bool)\n return (t[0], t[1], d)\n\n if np.dtype(str) in dtype_array or np.dtype(unicode) in dtype_array:\n raise TypeError('cannot convert matrices with string types')\n\n if np.dtype(buffer) in dtype_array:\n raise TypeError(\"'buffer' dtype not supported\")\n\n # If we get to here, then we are going to need to do some type conversion,\n # so go ahead and copy the dataframe and we'll work with y to make\n # modifications.\n y = x\n d = np.zeros([x.shape[1]], dtype=np.bool)\n\n # Convert any 'object', 'str', or 'unicode' types to categorical.\n convertColumns = x.select_dtypes(['object'])\n if not convertColumns.empty:\n y[convertColumns] = y[convertColumns].astype('category')\n\n catColumns = x.select_dtypes(['category']).columns\n if len(catColumns) > 0:\n # Do actual conversion to numeric types. This converts to an int type.\n y = x # Copy it... not great...\n\n # Note that this will map NaNs (missing values or unknown categories) to\n # -1, so we will have to convert those back to NaN.\n y[catColumns] = y[catColumns].apply(\n lambda c: c.cat.codes).astype('double')\n y[catColumns].replace(to_replace=[-1], value=float('NaN'))\n\n # Construct dataset information: 1s represent categorical data, 0s\n # represent otherwise.\n catColumnIndices = [y.columns.get_loc(i) for i in catColumns]\n d[catColumnIndices] = 1\n\n # We'll have to force the second part of the tuple (whether or not to take\n # ownership) to true.\n t = to_matrix(y.apply(pd.to_numeric), dtype=dtype)\n return (t[0], True, d)\n\n if isinstance(x, list):\n # Get the number of dimensions.\n dims = 0\n if isinstance(x[0], list):\n dims = len(x[0])\n else:\n dims = len(x)\n\n d = np.zeros([dims])\n out = np.array(x, dtype=dtype, copy=copy) # Try to avoid copy...\n\n # Since we don't have a great way to check if these are using the same\n # memory location, we will probe manually (ugh).\n oldval = x[0]\n x[0] *= 2\n alias = False\n if out[0] == x[0]:\n alias = True\n x[0] = oldval\n\n return (out, not alias, d)\n\n # If we got here, the type is not known.\n raise TypeError(\"given matrix is not a numpy ndarray or pandas DataFrame or \"\\\n \"Python array; not supported at this time\");\n"
] |
[
[
"numpy.ndarray",
"numpy.dtype",
"pandas.__version__.split",
"numpy.array",
"numpy.zeros"
]
] |
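The to_matrix logic in the record above hinges on NumPy's contiguity flags: a C-contiguous array that already has the target dtype can pass through without copying, while Fortran-ordered data (the layout pandas typically uses internally) forces a copy. A minimal sketch of that check, assuming recent numpy/pandas; needs_copy is an illustrative helper, not part of the file:

import numpy as np
import pandas as pd

def needs_copy(x, dtype=np.double):
    # Mirrors the branch structure of to_matrix(): zero-copy is only
    # possible for a C-contiguous ndarray with the target dtype already.
    if isinstance(x, np.ndarray):
        return not (x.dtype == dtype and x.flags.c_contiguous)
    if isinstance(x, (pd.Series, pd.DataFrame)):
        y = x.to_numpy()
        return not (y.dtype == dtype and y.flags.c_contiguous)
    return True  # lists and other array-likes are always copied

a = np.zeros((3, 2), dtype=np.double)   # C-contiguous double
b = np.asfortranarray(a)                # same data, Fortran order
df = pd.DataFrame(a)                    # pandas blocks are usually F-ordered
print(needs_copy(a), needs_copy(b), needs_copy(df))  # False True True (typically)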
jborchma/xgboost
|
[
"7a99f8f27f7dd7fadd7e7729952eaa2e4424f6c9"
] |
[
"tests/python-gpu/test_gpu_basic_models.py"
] |
[
"import sys\nimport os\nimport unittest\nimport numpy as np\nimport xgboost as xgb\nsys.path.append(\"tests/python\")\n# Don't import the test class, otherwise they will run twice.\nimport test_basic_models as test_bm # noqa\nrng = np.random.RandomState(1994)\n\n\nclass TestGPUBasicModels(unittest.TestCase):\n cputest = test_bm.TestModels()\n\n def test_eta_decay_gpu_hist(self):\n self.cputest.run_eta_decay('gpu_hist')\n\n def test_deterministic_gpu_hist(self):\n kRows = 1000\n kCols = 64\n kClasses = 4\n # Create large values to force rounding.\n X = np.random.randn(kRows, kCols) * 1e4\n y = np.random.randint(0, kClasses, size=kRows)\n\n cls = xgb.XGBClassifier(tree_method='gpu_hist',\n deterministic_histogram=True,\n single_precision_histogram=True)\n cls.fit(X, y)\n cls.get_booster().save_model('test_deterministic_gpu_hist-0.json')\n\n cls = xgb.XGBClassifier(tree_method='gpu_hist',\n deterministic_histogram=True,\n single_precision_histogram=True)\n cls.fit(X, y)\n cls.get_booster().save_model('test_deterministic_gpu_hist-1.json')\n\n with open('test_deterministic_gpu_hist-0.json', 'r') as fd:\n model_0 = fd.read()\n with open('test_deterministic_gpu_hist-1.json', 'r') as fd:\n model_1 = fd.read()\n\n assert hash(model_0) == hash(model_1)\n\n os.remove('test_deterministic_gpu_hist-0.json')\n os.remove('test_deterministic_gpu_hist-1.json')\n"
] |
[
[
"numpy.random.RandomState",
"numpy.random.randn",
"numpy.random.randint"
]
] |
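The test in the record above checks determinism by training the same configuration twice and comparing the serialized boosters. A CPU-only sketch of the same pattern, assuming an installed xgboost; it drops the GPU-specific deterministic_histogram/single_precision_histogram flags and uses tree_method='hist' instead, so it is an analogue rather than the original test:

import numpy as np
import xgboost as xgb

rng = np.random.RandomState(1994)
X = rng.randn(200, 10) * 1e4          # large values to force rounding, as above
y = rng.randint(0, 4, size=200)

def train_and_dump():
    cls = xgb.XGBClassifier(tree_method="hist", n_estimators=10)
    cls.fit(X, y)
    return bytes(cls.get_booster().save_raw())

# Two identical runs should serialize to identical models.
print(train_and_dump() == train_and_dump())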
danipozo/practicas-mnii
|
[
"f4afe725316c694a4cd06e2ce3c0019f4f68652f",
"f4afe725316c694a4cd06e2ce3c0019f4f68652f"
] |
[
"practica3/12c.py",
"practica3/12.py"
] |
[
"# PROGRAMA 8\n# -*- coding: utf-8 -*-\n\nfrom math import fabs, exp\nfrom scipy.interpolate import lagrange\nfrom scipy.integrate import quad\nfrom decimal import *\n\n\na = 0.0\nb = 1.0\nn = 10\nk = 4\n\n# Función f de dos variables\ndef f(t,y):\n\treturn (2-2*t*y)/(1+pow(t,2))\n\n# Solución exacta\nsol_exacta = False\ndef y(t):\n\treturn (2*t+1)/(pow(t,2)+1)\ny_0 = 1.0\n\n# Lista con las aproximaciones u_{0},..,u_{k-1}\n# (en caso de no tener la solución exacta)\ninicial = []\n\nh = (b - a) / n\nt = [a + j * h for j in range(n + 1)]\nu = [0 for i in range(n + 1)] # Lista \"vacía\" con n+1 posiciones\n\ndef integrate_interpolation_polynomial(j):\n\tx = []\n\ty = []\n\tfor i in range(k):\n\t\tx.append(t[j-i])\n\t\ty.append(f(x[i], u[j-i]))\n\n\tpoly = lagrange(x,y)\n\treturn quad(poly, t[j], t[j+1])[0]\n\ndef adams_bashforth(j):\n\tif j < k:\n\t\treturn u[j]\n\n\tu_j = adams_bashforth(j-1) + integrate_interpolation_polynomial(j-1)\n\tu[j] = u_j\n\treturn u_j\n\ndef RungeKutta(f,a,b,n ,y_0):\n h=float(b-a)/n\n inicial.append(y_0)\n\n valor=y_0\n for i in range (0, n):\n tj =a+(i+1)*h\n Ki = []\n Ki.append(f(tj,inicial[i]))\n Ki.append(f(tj+h/2,inicial[i]+(h/2)*Ki[0]))\n Ki.append(f(tj+h/2,inicial[i]+(h/2)*Ki[1]))\n Ki.append(f(tj+h,inicial[i]+h*Ki[2]))\n x = inicial[i] + (h/6)*(Ki[0]+2*Ki[1]+2*Ki[2]+Ki[3])\n inicial.append(x)\n\n\n\"\"\"\nMain\n\"\"\"\nRungeKutta(f,0,1,4,y_0)\n\n\nif sol_exacta:\n\tfor i in range(k):\n\t\tu[i] = y(t[i])\nelse:\n\tfor i in range(k):\n\t\tu[i] = inicial[i]\n\ni = 0\nadams_bashforth(n)\nprint(\"Se han obtenido las 4 primeras iteraciones por el método de Runge Kutta\")\nprint(\"Iteracion \\tAproximacion \\t Valor Real \\t Error\")\nfor item in u:\n error = abs(y(t[i]) - item)\n print(str(a+i*h)+ \" \"+str(item)+\" \"+ str(y(t[i])) + \" \" + str(error))\n # print(str(i)+ \"\\t |\" +str(item) +\"\\t |\" +str(y(t[i])+ \"\\t| \" + str(a))\n i +=1\n",
"# PROGRAMA 8\n# -*- coding: utf-8 -*-\n\nfrom math import fabs, exp\nfrom scipy.interpolate import lagrange\nfrom scipy.integrate import quad\nfrom decimal import *\n\n\na = 0.0\nb = 1.0\nn = 10\nk = 4\n\n# Función f de dos variables\ndef f(t,y):\n\treturn (2-2*t*y)/(1+pow(t,2))\n\n# Solución exacta\nsol_exacta = False\ndef y(t):\n\treturn (2*t+1)/(pow(t,2)+1)\ny_0 = 1.0\n\n# Lista con las aproximaciones u_{0},..,u_{k-1}\n# (en caso de no tener la solución exacta)\ninicial = []\n\nh = (b - a) / n\nt = [a + j * h for j in range(n + 1)]\nu = [0 for i in range(n + 1)] # Lista \"vacía\" con n+1 posiciones\n\ndef integrate_interpolation_polynomial(j):\n\tx = []\n\ty = []\n\tfor i in range(k):\n\t\tx.append(t[j-i])\n\t\ty.append(f(x[i], u[j-i]))\n\n\tpoly = lagrange(x,y)\n\treturn quad(poly, t[j], t[j+1])[0]\n\ndef adams_bashforth(j):\n\tif j < k:\n\t\treturn u[j]\n\n\tu_j = adams_bashforth(j-1) + integrate_interpolation_polynomial(j-1)\n\tu[j] = u_j\n\treturn u_j\n\ndef euler(f,a,b,n ,y_0):\n h=float(b-a)/n\n inicial.append(y_0)\n for i in range (0, n-1):\n tj =a+(i+1)*h\n x = inicial[i] + h*f(tj,inicial[i])\n inicial.append(x)\n\n\n\n\n\"\"\"\nMain\n\"\"\"\neuler(f,0,1,4,y_0)\n\n\nif sol_exacta:\n\tfor i in range(k):\n\t\tu[i] = y(t[i])\nelse:\n\tfor i in range(k):\n\t\tu[i] = inicial[i]\n\ni = 0\nadams_bashforth(n)\nprint(\"Se han obtenido las 4 primeras iteraciones por el método de euler\")\nprint(\"Iteracion \\tAproximacion \\t Valor Real \\t Error\")\nfor item in u:\n error = abs(y(t[i]) - item)\n print(str(a+i*h)+ \" \"+str(item)+\" \"+ str(y(t[i])) + \" \" + str(error))\n # print(str(i)+ \"\\t |\" +str(item) +\"\\t |\" +str(y(t[i])+ \"\\t| \" + str(a))\n i +=1\n"
] |
[
[
"scipy.integrate.quad",
"scipy.interpolate.lagrange"
],
[
"scipy.integrate.quad",
"scipy.interpolate.lagrange"
]
] |
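Both scripts in the record above build the Adams-Bashforth step by integrating the degree-3 Lagrange interpolant of the last k = 4 slope values over [t_j, t_{j+1}]. As a sanity check, performing that integration on the cardinal basis polynomials recovers the textbook AB4 weights h/24 * (55, -59, 37, -9); the step size here is illustrative:

from scipy.integrate import quad
from scipy.interpolate import lagrange

h = 0.1
t = [i * h for i in range(5)]              # nodes t_0 .. t_4
for node in (3, 2, 1, 0):                  # f_j, f_{j-1}, f_{j-2}, f_{j-3}
    vals = [1.0 if i == node else 0.0 for i in range(4)]
    cardinal = lagrange(t[:4], vals)       # cardinal polynomial for that node
    w = quad(cardinal, t[3], t[4])[0]      # integrate over one step
    print(round(24 * w / h))               # prints 55, -59, 37, -9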
deepset-ai/Haystack
|
[
"4a63707f1a177123c13929eb316d3ecaa7fd6c5f"
] |
[
"test/document_stores/test_weaviate.py"
] |
[
"import uuid\n\nimport numpy as np\nimport pytest\n\nfrom haystack.schema import Document\nfrom ..conftest import get_document_store\n\n\nembedding_dim = 768\n\n\ndef get_uuid():\n return str(uuid.uuid4())\n\n\nDOCUMENTS = [\n {\"content\": \"text1\", \"id\": \"not a correct uuid\", \"key\": \"a\"},\n {\"content\": \"text2\", \"id\": get_uuid(), \"key\": \"b\", \"embedding\": np.random.rand(embedding_dim).astype(np.float32)},\n {\"content\": \"text3\", \"id\": get_uuid(), \"key\": \"b\", \"embedding\": np.random.rand(embedding_dim).astype(np.float32)},\n {\"content\": \"text4\", \"id\": get_uuid(), \"key\": \"b\", \"embedding\": np.random.rand(embedding_dim).astype(np.float32)},\n {\"content\": \"text5\", \"id\": get_uuid(), \"key\": \"b\", \"embedding\": np.random.rand(embedding_dim).astype(np.float32)},\n]\n\nDOCUMENTS_XS = [\n # current \"dict\" format for a document\n {\n \"content\": \"My name is Carla and I live in Berlin\",\n \"id\": get_uuid(),\n \"meta\": {\"metafield\": \"test1\", \"name\": \"filename1\"},\n \"embedding\": np.random.rand(embedding_dim).astype(np.float32),\n },\n # meta_field at the top level for backward compatibility\n {\n \"content\": \"My name is Paul and I live in New York\",\n \"id\": get_uuid(),\n \"metafield\": \"test2\",\n \"name\": \"filename2\",\n \"embedding\": np.random.rand(embedding_dim).astype(np.float32),\n },\n # Document object for a doc\n Document(\n content=\"My name is Christelle and I live in Paris\",\n id=get_uuid(),\n meta={\"metafield\": \"test3\", \"name\": \"filename3\"},\n embedding=np.random.rand(embedding_dim).astype(np.float32),\n ),\n]\n\n\[email protected](params=[\"weaviate\"])\ndef document_store_with_docs(request, tmp_path):\n document_store = get_document_store(request.param, tmp_path=tmp_path)\n document_store.write_documents(DOCUMENTS_XS)\n yield document_store\n document_store.delete_index(document_store.index)\n\n\[email protected](params=[\"weaviate\"])\ndef document_store(request, tmp_path):\n document_store = get_document_store(request.param, tmp_path=tmp_path)\n yield document_store\n document_store.delete_index(document_store.index)\n\n\[email protected]\[email protected](\"document_store\", [\"weaviate\"], indirect=True)\[email protected](\"batch_size\", [2])\ndef test_weaviate_write_docs(document_store, batch_size):\n # Write in small batches\n for i in range(0, len(DOCUMENTS), batch_size):\n document_store.write_documents(DOCUMENTS[i : i + batch_size])\n\n documents_indexed = document_store.get_all_documents()\n assert len(documents_indexed) == len(DOCUMENTS)\n\n documents_indexed = document_store.get_all_documents(batch_size=batch_size)\n assert len(documents_indexed) == len(DOCUMENTS)\n\n\[email protected]\[email protected](\"document_store_with_docs\", [\"weaviate\"], indirect=True)\ndef test_query_by_embedding(document_store_with_docs):\n docs = document_store_with_docs.query_by_embedding(np.random.rand(embedding_dim).astype(np.float32))\n assert len(docs) == 3\n\n docs = document_store_with_docs.query_by_embedding(np.random.rand(embedding_dim).astype(np.float32), top_k=1)\n assert len(docs) == 1\n\n docs = document_store_with_docs.query_by_embedding(\n np.random.rand(embedding_dim).astype(np.float32), filters={\"name\": [\"filename2\"]}\n )\n assert len(docs) == 1\n\n\[email protected]\[email protected](\"document_store_with_docs\", [\"weaviate\"], indirect=True)\ndef test_query(document_store_with_docs):\n query_text = \"My name is Carla and I live in Berlin\"\n with pytest.raises(Exception):\n docs = 
document_store_with_docs.query(query_text)\n\n docs = document_store_with_docs.query(filters={\"name\": [\"filename2\"]})\n assert len(docs) == 1\n\n docs = document_store_with_docs.query(filters={\"content\": [query_text.lower()]})\n assert len(docs) == 1\n\n docs = document_store_with_docs.query(filters={\"content\": [\"live\"]})\n assert len(docs) == 3\n\n\[email protected]\ndef test_get_all_documents_unaffected_by_QUERY_MAXIMUM_RESULTS(document_store_with_docs, monkeypatch):\n \"\"\"\n Ensure `get_all_documents` works no matter the value of QUERY_MAXIMUM_RESULTS\n see https://github.com/deepset-ai/haystack/issues/2517\n \"\"\"\n monkeypatch.setattr(document_store_with_docs, \"get_document_count\", lambda **kwargs: 13_000)\n docs = document_store_with_docs.get_all_documents()\n assert len(docs) == 3\n"
] |
[
[
"numpy.random.rand"
]
] |
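The fixtures in the record above rely on pytest's indirect parametrization: the string "weaviate" is delivered to the fixture as request.param rather than to the test function itself. A self-contained sketch of the mechanism, with illustrative names:

import pytest

@pytest.fixture
def store(request):
    # With indirect=True, the parametrize value arrives here.
    return {"backend": request.param, "docs": []}

@pytest.mark.parametrize("store", ["weaviate"], indirect=True)
def test_store_starts_empty(store):
    assert store["backend"] == "weaviate"
    assert store["docs"] == []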
bertsky/ocrd_typegroups_classifier
|
[
"245be86af76826bb3e7d1c5b2cfe638b6395d30c"
] |
[
"ocrd_typegroups_classifier/network/densenet.py"
] |
[
"import re\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\n__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']\n\nmodel_urls = {\n 'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',\n 'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',\n 'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',\n 'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',\n}\n\n\nclass _DenseLayer(nn.Sequential):\n def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):\n super(_DenseLayer, self).__init__()\n self.add_module('norm1', nn.BatchNorm2d(num_input_features)),\n self.add_module('relu1', nn.ReLU(inplace=True)),\n self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *\n growth_rate, kernel_size=1, stride=1,\n bias=False)),\n self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),\n self.add_module('relu2', nn.ReLU(inplace=True)),\n self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,\n kernel_size=3, stride=1, padding=1,\n bias=False)),\n self.drop_rate = drop_rate\n\n def forward(self, x):\n new_features = super(_DenseLayer, self).forward(x)\n if self.drop_rate > 0:\n new_features = F.dropout(new_features, p=self.drop_rate,\n training=self.training)\n return torch.cat([x, new_features], 1)\n\n\nclass _DenseBlock(nn.Sequential):\n def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):\n super(_DenseBlock, self).__init__()\n for i in range(num_layers):\n layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate,\n bn_size, drop_rate)\n self.add_module('denselayer%d' % (i + 1), layer)\n\n\nclass _Transition(nn.Sequential):\n def __init__(self, num_input_features, num_output_features):\n super(_Transition, self).__init__()\n self.add_module('norm', nn.BatchNorm2d(num_input_features))\n self.add_module('relu', nn.ReLU(inplace=True))\n self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,\n kernel_size=1, stride=1, bias=False))\n self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))\n\n\nclass DenseNet(nn.Module):\n r\"\"\"Densenet-BC model class, based on\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n growth_rate (int) - how many filters to add each layer (`k` in paper)\n block_config (list of 4 ints) - how many layers in each pooling block\n num_init_features (int) - the number of filters to learn in the first convolution layer\n bn_size (int) - multiplicative factor for number of bottle neck layers\n (i.e. 
bn_size * k features in the bottleneck layer)\n drop_rate (float) - dropout rate after each dense layer\n num_classes (int) - number of classification classes\n \"\"\"\n\n def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),\n num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):\n\n super(DenseNet, self).__init__()\n\n # First convolution\n self.features = nn.Sequential(OrderedDict([\n ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('norm0', nn.BatchNorm2d(num_init_features)),\n ('relu0', nn.ReLU(inplace=True)),\n ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),\n ]))\n\n # Each denseblock\n num_features = num_init_features\n for i, num_layers in enumerate(block_config):\n block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,\n bn_size=bn_size, growth_rate=growth_rate,\n drop_rate=drop_rate)\n self.features.add_module('denseblock%d' % (i + 1), block)\n num_features = num_features + num_layers * growth_rate\n if i != len(block_config) - 1:\n trans = _Transition(num_input_features=num_features,\n num_output_features=num_features // 2)\n self.features.add_module('transition%d' % (i + 1), trans)\n num_features = num_features // 2\n\n # Final batch norm\n self.features.add_module('norm5', nn.BatchNorm2d(num_features))\n\n # Linear layer\n self.classifier = nn.Linear(num_features, num_classes)\n\n # Official init from torch repo.\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n features = self.features(x)\n out = F.relu(features, inplace=True)\n out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)\n out = self.classifier(out)\n return out\n \n def feature_extractor(self):\n return self.features\n \n def get_classifier(self):\n return self.classifier\n\n\ndef _load_state_dict(model, model_url, progress):\n # '.'s are no longer allowed in module names, but previous _DenseLayer\n # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.\n # They are also in the checkpoints in model_urls. 
This pattern is used\n # to find such keys.\n pattern = re.compile(\n r'^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')\n\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n for key in list(state_dict.keys()):\n res = pattern.match(key)\n if res:\n new_key = res.group(1) + res.group(2)\n state_dict[new_key] = state_dict[key]\n del state_dict[key]\n model.load_state_dict(state_dict)\n\n\ndef _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress,\n **kwargs):\n model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)\n if pretrained:\n _load_state_dict(model, model_urls[arch], progress)\n return model\n\n\ndef densenet121(pretrained=False, progress=True, **kwargs):\n r\"\"\"Densenet-121 model from\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress,\n **kwargs)\n\n\ndef densenet161(pretrained=False, progress=True, **kwargs):\n r\"\"\"Densenet-161 model from\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress,\n **kwargs)\n\n\ndef densenet169(pretrained=False, progress=True, **kwargs):\n r\"\"\"Densenet-169 model from\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress,\n **kwargs)\n\n\ndef densenet201(pretrained=False, progress=True, **kwargs):\n r\"\"\"Densenet-201 model from\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress,\n **kwargs)\n"
] |
[
[
"torch.nn.functional.dropout",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.functional.relu",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
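Two notes on the file in the record above. First, _load_state_dict calls load_state_dict_from_url without importing it (upstream torchvision imports it from torch.hub), so loading pretrained weights would raise a NameError as written. Second, the key-renaming regex can be checked in isolation; the sample keys below are illustrative:

import re

pattern = re.compile(
    r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')

state_dict = {
    'features.denseblock1.denselayer1.norm.1.weight': 0,   # legacy dotted key
    'features.denseblock1.denselayer1.conv.2.weight': 1,   # legacy dotted key
    'classifier.weight': 2,                                # already valid
}
for key in list(state_dict):
    res = pattern.match(key)
    if res:
        # 'norm.1.weight' becomes 'norm1.weight', matching current module names.
        state_dict[res.group(1) + res.group(2)] = state_dict.pop(key)
print(sorted(state_dict))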
mreso/pytorch
|
[
"4f32bdd80217a51d9bb8d6c4d55b97159bc1f02d"
] |
[
"torch/testing/_internal/common_fx2trt.py"
] |
[
"import unittest\nfrom typing import Callable, List, Tuple\n\nimport torch\nimport torch.fx\nimport fx2trt_oss.tracer.acc_tracer.acc_tracer as acc_tracer\nfrom fx2trt_oss.fx import (\n TRTInterpreter,\n InputTensorSpec,\n TRTModule,\n)\nfrom fx2trt_oss.fx.passes.pass_utils import chain_passes\nfrom fx2trt_oss.fx.utils import LowerPrecision\nfrom torch.testing._internal.common_utils import TestCase\nfrom torch.fx.experimental.normalize import NormalizeArgs\nfrom torch.fx.passes import shape_prop\n\n\ndef fetch_attr(mod, target):\n \"\"\"\n Fetch an attribute from the ``Module`` hierarchy of ``mod.module``.\n\n Args:\n target (str): The fully-qualfiied name of the attribute to fetch\n\n Return:\n Any: The value of the attribute.\n \"\"\"\n target_atoms = target.split(\".\")\n attr_itr = mod\n for i, atom in enumerate(target_atoms):\n if not hasattr(attr_itr, atom):\n raise RuntimeError(\n f\"Node referenced nonexistent target {'.'.join(target_atoms[:i])}\"\n )\n attr_itr = getattr(attr_itr, atom)\n return attr_itr\n\n\[email protected](not torch.cuda.is_available(), \"Skip because CUDA is not available\")\nclass TRTTestCase(TestCase):\n def setUp(self):\n super().setUp()\n torch.manual_seed(3)\n\n def run_test(self, mod, inputs, expected_ops, unexpected_ops, interpreter, rtol, atol):\n with torch.no_grad():\n cuda_inputs = []\n for i in inputs:\n cuda_inputs.append(i.cuda())\n\n mod.eval()\n if len(expected_ops):\n self.assert_has_op(mod, expected_ops)\n if unexpected_ops:\n self.assert_unexpected_op(mod, unexpected_ops)\n\n interpreter_result = interpreter.run(lower_precision=LowerPrecision.FP16)\n trt_mod = TRTModule(\n interpreter_result.engine,\n interpreter_result.input_names,\n interpreter_result.output_names,\n )\n\n ref_outputs = mod(*inputs)\n outputs = trt_mod(*cuda_inputs)\n\n if isinstance(outputs, torch.Tensor):\n ref_outputs = [ref_outputs]\n outputs = [outputs]\n\n for out, ref in zip(outputs, ref_outputs):\n torch.testing.assert_allclose(out.cpu(), ref, rtol=rtol, atol=atol)\n\n def run_test_custom_compare_results(\n self,\n mod,\n inputs,\n expected_ops,\n interpreter,\n comparators: List[Tuple[Callable, List]],\n fp16_mode=False,\n ):\n \"\"\"\n Runs the test and compares the result using the provided comparators.\n The size of comparators must be equal to the number of outputs from 'mod'.\n\n mod - a model to run.\n inputs - a list of the model inputs.\n expected ops - a list of ops that should be verified.\n interpreter - used for converting the model to TRT.\n comparators - a list of (func, args) pairs corresponding to each of\n the module outputs. 
usage: func(x, y, *args)\n\n \"\"\"\n with torch.no_grad():\n cuda_inputs = []\n for i in inputs:\n cuda_inputs.append(i.cuda())\n\n mod.eval()\n if len(expected_ops):\n self.assert_has_op(mod, expected_ops)\n\n interpreter_result = interpreter.run(lower_precision=LowerPrecision.FP16)\n trt_mod = TRTModule(\n interpreter_result.engine,\n interpreter_result.input_names,\n interpreter_result.output_names,\n )\n res_trt = trt_mod(*cuda_inputs).cpu()\n res_cpu = mod(*inputs)\n assert len(res_trt) == len(res_cpu)\n assert len(res_cpu) == len(comparators)\n for output_trt, output_cpu, comparator in zip(\n res_trt, res_cpu, comparators\n ):\n comp_func = comparator[0]\n args = comparator[1]\n self.assertTrue(comp_func(output_trt, output_cpu, *args))\n\n def run_test_with_error(self, mod, inputs, interpreter, expect_error):\n with self.assertRaises(expect_error):\n with torch.no_grad():\n cuda_inputs = []\n for i in inputs:\n cuda_inputs.append(i.cuda())\n\n mod.eval()\n interpreter.run(lower_precision=LowerPrecision.FP32)\n\n def assert_has_op(self, mod, ops):\n ops_in_mod = set()\n\n for node in mod.graph.nodes:\n if node.op == \"call_module\":\n ops_in_mod.add(type(fetch_attr(mod, node.target)))\n elif node.op in {\"call_function\", \"call_method\"}:\n ops_in_mod.add(node.target)\n\n self.assertTrue(\n ops_in_mod >= ops, f\"expected ops {ops}, actuall ops {ops_in_mod}\"\n )\n\n def assert_unexpected_op(self, mod, ops):\n for node in mod.graph.nodes:\n if (node.op == \"call_module\"):\n if type(fetch_attr(mod, node.target)) in ops:\n return False\n elif node.op in {\"call_function\", \"call_method\"}:\n if node.target in ops:\n return False\n return True\n\n\nclass VanillaTestCase(TRTTestCase):\n def run_test(self, mod, inputs, expected_ops, rtol=1e-05, atol=1e-06):\n mod = torch.fx.symbolic_trace(mod)\n shape_prop.ShapeProp(mod).propagate(*inputs)\n mod = NormalizeArgs(mod).transform()\n interp = TRTInterpreter(mod, InputTensorSpec.from_tensors(inputs))\n super().run_test(mod, inputs, expected_ops, None, interp, rtol, atol)\n\n def run_test_custom_compare_results(\n self,\n mod,\n inputs,\n expected_ops,\n interpreter,\n comparators: List[Tuple[Callable, List]],\n fp16_mode=False,\n ):\n # interpreter is ignored, we do not need this for Vanilla tests\n # Note this is different from internal version, we need to fix the test case\n # after we refactor the internal callsites to use this file\n mod = torch.fx.symbolic_trace(mod)\n shape_prop.ShapeProp(mod).propagate(*inputs)\n mod = NormalizeArgs(mod).transform()\n interp = TRTInterpreter(mod, InputTensorSpec.from_tensors(inputs))\n super().run_test_custom_compare_results(\n mod, inputs, expected_ops, interp, comparators, fp16_mode=fp16_mode\n )\n\n\nclass AccTestCase(TRTTestCase):\n def run_test(\n self,\n mod,\n inputs,\n expected_ops,\n unexpected_ops=None,\n apply_passes=None,\n test_explicit_batch_dim=True,\n test_implicit_batch_dim=True,\n rtol=1e-03,\n atol=1e-03,\n ):\n mod.eval()\n mod = acc_tracer.trace(mod, inputs)\n\n if apply_passes is not None:\n pass_tracer = chain_passes(*apply_passes)\n mod = pass_tracer(mod, inputs)\n\n if test_implicit_batch_dim:\n interp = TRTInterpreter(mod, InputTensorSpec.from_tensors(inputs))\n super().run_test(mod, inputs, expected_ops, unexpected_ops, interp, rtol, atol)\n\n if test_explicit_batch_dim:\n interp = TRTInterpreter(\n mod, InputTensorSpec.from_tensors(inputs), explicit_batch_dimension=True\n )\n super().run_test(mod, inputs, expected_ops, unexpected_ops, interp, rtol, atol)\n\n def 
run_test_with_assert_error(\n self,\n mod,\n inputs,\n expect_error,\n test_explicit_batch_dim=True,\n test_implicit_batch_dim=True,\n ):\n mod.eval()\n mod = acc_tracer.trace(mod, inputs)\n\n if test_implicit_batch_dim:\n interp = TRTInterpreter(mod, InputTensorSpec.from_tensors(inputs))\n super().run_test_with_error(mod, inputs, interp, expect_error)\n\n if test_explicit_batch_dim:\n interp = TRTInterpreter(\n mod, InputTensorSpec.from_tensors(inputs), explicit_batch_dimension=True\n )\n super().run_test_with_error(mod, inputs, interp, expect_error)\n\n def run_test_with_dynamic_shape(\n self,\n mod,\n input_specs,\n expected_ops,\n unexpected_ops=None,\n rtol=1e-03,\n atol=1e-03,\n ):\n mod.eval()\n inputs = InputTensorSpec.create_inputs_from_specs(input_specs)\n mod = acc_tracer.trace(mod, inputs)\n interp = TRTInterpreter(mod, input_specs, explicit_batch_dimension=True)\n super().run_test(mod, inputs, expected_ops, unexpected_ops, interp, rtol, atol)\n"
] |
[
[
"torch.fx.passes.shape_prop.ShapeProp",
"torch.manual_seed",
"torch.fx.experimental.normalize.NormalizeArgs",
"torch.fx.symbolic_trace",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
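The fetch_attr helper in the record above resolves a dotted, fully-qualified name against a module hierarchy one attribute at a time (note the docstring's "fully-qualfiied" typo). A stripped-down sketch without the error reporting, using an illustrative model and assuming torch is installed:

import torch.nn as nn

def fetch_attr(mod, target):
    attr = mod
    for atom in target.split("."):
        attr = getattr(attr, atom)   # walk one level of the hierarchy
    return attr

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
print(fetch_attr(model, "2"))               # the second Linear layer
print(fetch_attr(model, "2.weight").shape)  # torch.Size([2, 8])

Separately, assert_unexpected_op in that file returns True/False instead of asserting, so its result is silently discarded at the call site in run_test.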
dongchirua/Feat2Vec
|
[
"32f2345065f767be2ba61ec62ba6c89540eb83d6"
] |
[
"feat2vec/sampler.py"
] |
[
"from keras.utils import Sequence\nimport numpy as np\nimport math\nimport scipy\n\nclass FMData(Sequence):\n\n def __init__(self, inputs, output, batch_size, implicit_samples=0,splits=None, feature_extraction=None, sample_probabilities={}, mask=None, shuffle=True, nce=None):\n #validate inputs:\n check_length = -1\n for feature in inputs:\n if check_length == -1:\n check_length = feature.shape[0]\n continue\n if check_length != feature.shape[0]:\n raise RuntimeError(\"Input features do not have same length: {}, {}\".format(check_length, feature.shape[0]))\n if check_length != output.shape[0]:\n raise RuntimeError(\"Input ({}) and output ({}) have different lengths\".format(check_length, output.shape[0]))\n\n\n self.splits = None\n\n # Find the explicit indixes\n if splits is not None: # We are provided with groups of indices, need to set self.splits and self.explicit_ix\n self.splits = []\n self.explicit_ix = []\n \n mask_set = frozenset(mask if mask is not None else [])\n for split in splits:\n if mask is None:\n masked_split = split # All data is good\n else:\n masked_split = [s for s in split if s in mask_set] # Fold information\n self.explicit_ix += masked_split\n self.splits.append( masked_split )\n\n self.explicit_ix = np.array(self.explicit_ix)\n\n else: #No groups, need to set self.explicit_ix\n if mask is None:\n self.explicit_ix = np.arange(len(output)) # All data is good\n else:\n self.explicit_ix = mask # Use data from the fold\n \n\n\n # Instance variables:\n\n self.batch_size = batch_size\n self.inputs = inputs\n self.output = output\n\n self.sample_probabilities = sample_probabilities\n self.implicit_samples = implicit_samples\n\n self.implicit_ix = None\n self.length = len(self.explicit_ix)\n self.shuffle = shuffle\n self.feature_extraction = feature_extraction\n self.nce = nce\n \n self.shuffle_indexes()\n\n\n\n def __len__(self):\n return math.ceil(float(self.length) / self.batch_size)\n\n \n def shuffle_indexes(self): \n \n if self.splits is None:\n np.random.shuffle(self.explicit_ix)\n self.implicit_ix = np.random.randint(0, self.length, self.length * self.implicit_samples ) \n else:\n #np.random.shuffle(self.splits) \n shuffled_splits = []\n for s in self.splits:\n np.random.shuffle(s)\n shuffled_splits.append(s)\n \n self.explicit_ix = [item for sublist in shuffled_splits for item in sublist] # flatten\n #self.implicit_ix = np.tile(self.explicit_ix, self.implicit_samples ) \n self.implicit_ix = np.repeat(self.explicit_ix, self.implicit_samples ) \n \n\n def on_epoch_end(self):\n if self.shuffle:\n self.shuffle_indexes()\n\n \n def __getitem__(self, idx):\n if (idx >= len(self)) or (idx < 0):\n raise IndexError(\"No such mini-batch. 
Index error\")\n\n \n start_ex = idx * self.batch_size\n end_ex = (idx + 1) * self.batch_size\n start_im = idx * self.batch_size * self.implicit_samples\n end_im = (idx+1) * self.batch_size * self.implicit_samples\n \n \n # Get x from the mini-batch:\n mini_batch_x = []\n \n old_size = -1\n for i,l in enumerate(self.inputs):\n\n # Get data from the mini-batch:\n if (end_ex-start_ex) == self.batch_size:\n explicit = l[self.explicit_ix[start_ex:end_ex]]\n else:\n explicit = l[self.explicit_ix[start_ex:]] # sparse arrays can't overflow in index\n\n if isinstance(explicit, scipy.sparse.csr.csr_matrix):\n explicit = explicit.toarray() # densify if it's sparse \n\n\n noise = self.sample_probabilities.get(i, None)\n if noise is None:\n implicit = l[self.implicit_ix[start_im:end_im]] \n if isinstance(implicit, scipy.sparse.csr.csr_matrix):\n implicit = implicit.toarray() # densify if it's sparse \n \n else:\n implicit = np.random.choice(noise.keys(),\n explicit.shape[0]*self.implicit_samples,\n p=noise.values())\n \n feature = np.concatenate( (explicit, implicit) )\n \n \n\n assert (old_size == -1) or (len(feature) == old_size), \"Sizes don't match {} / {}\".format(len(feature), old_size)\n old_size = feature.shape[0]\n\n mini_batch_x.append(feature)\n \n # Noise contrastive estimation?\n if self.nce is not None:\n q = np.array([self.sample_probabilities[self.nce][feat] for feat in mini_batch_x[self.nce] ])\n mini_batch_x.append(q)\n \n # Extract features from the minibatch\n if self.feature_extraction is not None:\n mini_batch_x = self.feature_extraction.extract_features(mini_batch_x)\n \n # Get y from the mini-batch:\n mini_batch_y = np.concatenate( (self.output[self.explicit_ix[start_ex:end_ex]], \n np.zeros( explicit.shape[0] * self.implicit_samples )) )\n \n assert len(feature) == len(mini_batch_y), \"Sizes don't match {} / {}\".format(len(feature), mini_batch_y) \n\n return mini_batch_x, mini_batch_y\n"
] |
[
[
"numpy.random.shuffle",
"numpy.concatenate",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
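FMData.__getitem__ in the record above pairs each window of batch_size explicit rows with batch_size * implicit_samples negatively sampled rows, tracked by two parallel index arrays. A small numeric sketch of that bookkeeping with illustrative sizes:

import numpy as np

batch_size, implicit_samples, n = 4, 2, 12
explicit_ix = np.arange(n)                                   # positive examples
implicit_ix = np.random.randint(0, n, n * implicit_samples)  # noise samples

idx = 1  # second mini-batch
start_ex, end_ex = idx * batch_size, (idx + 1) * batch_size
start_im = idx * batch_size * implicit_samples
end_im = (idx + 1) * batch_size * implicit_samples
print(explicit_ix[start_ex:end_ex])    # 4 explicit indices
print(implicit_ix[start_im:end_im])    # 8 implicit indices, labeled 0 downstream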
vikash06131721/autoMltext
|
[
"96f41161947d78c663e7f6b4ff452fc5bf2462e8"
] |
[
"textClassifier/all_features.py"
] |
[
"from sklearn.base import BaseEstimator, TransformerMixin\nimport pandas as pd\nimport joblib\nimport numpy as np\nimport ast\nimport nltk\nimport re\nimport argparse\nfrom textClassifier.statistical_features_pipeline import stats_models\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.metrics import mean_squared_error, make_scorer, f1_score\nimport pickle\nfrom sklearn.pipeline import Pipeline\n# from sklearn.externals import joblib\nfrom math import sqrt\nfrom textClassifier.clean_data import Cleaner\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.svm import LinearSVC\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.linear_model import LogisticRegression\nimport warnings\nwarnings.filterwarnings(\"ignore\") \n\nclass FeatureMultiplierCount(BaseEstimator, TransformerMixin):\n def __init__(self, word_count=True,char_count=True,\n word_density=True,total_length=True,\n capitals=True,caps_vs_length=True,num_exclamation_marks=True,num_question_marks=True,\n num_punctuation=True,num_symbols=True,num_unique_words=True,words_vs_unique=True,\n word_unique_percent=True):\n self.word_count = word_count\n self.total_length = total_length\n self.char_count =char_count\n self.word_density = word_density\n self.capitals = capitals\n self.caps_vs_length = caps_vs_length\n self.num_exclamation_marks=num_exclamation_marks\n self.num_question_marks=num_question_marks\n self.num_punctuation=num_punctuation\n self.num_symbols=num_symbols\n self.num_unique_words = num_unique_words\n self.words_vs_unique = words_vs_unique\n self.word_unique_percent = word_unique_percent\n\n def transform(self, X,y=None):\n X = pd.DataFrame(X)\n X['word_count'] = X['sentences'].apply(lambda x : len(x.split()))\n X['char_count'] = X['sentences'].apply(lambda x : len(x.replace(\" \",\"\")))\n X['word_density'] = X['word_count'] / (X['char_count'] + 1)\n\n X['total_length'] = X['sentences'].apply(len)\n X['capitals'] = X['sentences'].apply(lambda comment: sum(1 for c in comment if c.isupper()))\n \n X['num_exclamation_marks'] =X['sentences'].apply(lambda x: x.count('!'))\n X['num_question_marks'] = X['sentences'].apply(lambda x: x.count('?'))\n X['num_punctuation'] = X['sentences'].apply(lambda x: sum(x.count(w) for w in '.,;:'))\n X['num_symbols'] = X['sentences'].apply(lambda x: sum(x.count(w) for w in '*&$%'))\n X['num_unique_words'] = X['sentences'].apply(lambda x: len(set(w for w in x.split())))\n \n X.fillna(0,inplace=True)\n return X[['word_count','char_count','word_density','total_length',\\\n 'capitals','num_exclamation_marks','num_question_marks','num_punctuation',\\\n 'num_symbols',\n 'num_unique_words']]\n\n def fit(self, *_):\n return self\n\n#modelling pipeline \ndef all_features_models(scoring_method,cv):\n \"\"\"\n \n returns three pipelines Random Forest, Adaboost, Gradientboost,SVC\n The scoring function is based on f1_score\n \"\"\"\n\n\n # Random Forest Pipeline\n rf_pipeline = Pipeline([\n ('u1', FeatureUnion([\n ('tfdif_features', Pipeline([\n ('cleaner',Cleaner()),\n ('tfidf', 
TfidfVectorizer(max_features=10000,ngram_range=(1,3))),\n ])),\n ('numerical_features',Pipeline([('numerical_feats',FeatureMultiplierCount()),\n ('scaler',StandardScaler()),\n ])),\n\n ])),\n ('clf', RandomForestClassifier()),])\n # Adaboost Pipeline\n AdaBoost_pipeline = Pipeline([\n ('u1', FeatureUnion([\n ('tfdif_features', Pipeline([\n ('cleaner',Cleaner()),\n ('tfidf', TfidfVectorizer(max_features=10000,ngram_range=(1,3))),\n ])),\n ('numerical_features',Pipeline([('numerical_feats',FeatureMultiplierCount()),\n ('scaler',StandardScaler()),\n ])),\n\n ])),\n ('clf', AdaBoostClassifier()),])\n # Gradient Boost Pipeline\n GRD_pipeline = Pipeline([\n ('u1', FeatureUnion([\n ('tfdif_features', Pipeline([\n ('cleaner',Cleaner()),\n ('tfidf', TfidfVectorizer(max_features=10000,ngram_range=(1,3))),\n ])),\n ('numerical_features',Pipeline([('numerical_feats',FeatureMultiplierCount()),\n ('scaler',StandardScaler()),\n ])),\n\n ])),\n ('clf', GradientBoostingClassifier()),])\n \n svm_pipeline =Pipeline([\n ('u1', FeatureUnion([\n ('tfdif_features', Pipeline([\n ('cleaner',Cleaner()),\n ('tfidf', TfidfVectorizer(max_features=10000,ngram_range=(1,3))),\n ])),\n ('numerical_features',Pipeline([('numerical_feats',FeatureMultiplierCount()),\n ('scaler',StandardScaler()),\n ])),\n\n ])),\n ('clf', LinearSVC(max_iter=10000)),])\n\n logistic_pipeline = Pipeline([\n ('u1', FeatureUnion([\n ('tfdif_features', Pipeline([\n ('cleaner',Cleaner()),\n ('tfidf', TfidfVectorizer(max_features=10000,ngram_range=(1,3))),\n ])),\n ('numerical_features',Pipeline([('numerical_feats',FeatureMultiplierCount()),\n ('scaler',StandardScaler()),\n ])),\n\n ])),\n ('clf', LogisticRegression()),])\n\n dt_pipeline = Pipeline([\n ('u1', FeatureUnion([\n ('tfdif_features', Pipeline([\n ('cleaner',Cleaner()),\n ('tfidf', TfidfVectorizer(max_features=10000,ngram_range=(1,3))),\n ])),\n ('numerical_features',Pipeline([('numerical_feats',FeatureMultiplierCount()),\n ('scaler',StandardScaler()),\n ])),\n\n ])),\n ('clf', DecisionTreeClassifier()),])\n\n #grid search params for randomforest, adaboost, gradientboost\n # import pdb;pdb.set_trace()\n grid_params_rf = [{'clf__n_estimators': [10, 50, 100], 'clf__max_depth': [2, 3, 5]}]\n grid_params_adaboost = [{'clf__n_estimators': [10, 50, 100,500], 'clf__learning_rate': [0.5, 0.8, 1.0]}]\n grid_params_grd = [{'clf__n_estimators': [10, 50, 100,500], 'clf__learning_rate': [0.5, 0.8, 1.0],\n 'clf__max_depth': [2, 3, 5]}]\n \n grid_params_svc = [{'clf__C': [1.0,3.0,5.0,10.0],'clf__max_iter':[100,1000,10000]}]\n\n grid_params_logistic =[{'clf__C': [1.0,3.0,5.0,10.0],'clf__max_iter':[100,1000,10000]}]\n\n grid_params_dt =[{'clf__max_depth': [2,3,5],'clf__max_features':[\"auto\",\"sqrt\",\"log2\"]}]\n\n\n #gridsearchcv pipeline for randomforest\n gs_rf = GridSearchCV(estimator=rf_pipeline,\n param_grid=grid_params_rf,\n scoring=scoring_method,\n cv=cv)\n #gridsearchcv pipeline for adaboost\n gs_adaboost = GridSearchCV(estimator=AdaBoost_pipeline,\n param_grid=grid_params_adaboost,\n scoring=scoring_method,\n cv=cv)\n\n #gridsearchcv pipeline for gradientboost\n gs_grd = GridSearchCV(estimator=GRD_pipeline,\n param_grid=grid_params_grd,\n scoring=scoring_method,\n cv=cv)\n gs_svc = GridSearchCV(estimator=svm_pipeline,\n param_grid=grid_params_svc,\n scoring=scoring_method,\n cv=cv)\n\n gs_logistic = GridSearchCV(estimator=logistic_pipeline,\n param_grid=grid_params_logistic,\n scoring=scoring_method,\n cv=cv)\n gs_decision = GridSearchCV(estimator=dt_pipeline,\n param_grid=grid_params_dt,\n 
scoring=scoring_method,\n cv=cv)\n\n grids = [gs_svc, gs_adaboost, gs_rf,gs_grd,gs_logistic,gs_decision]\n # grids = [gs_rf]\n return grids\n"
] |
[
[
"sklearn.model_selection.GridSearchCV",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"pandas.DataFrame",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.svm.LinearSVC",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.preprocessing.StandardScaler",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
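Each pipeline in the record above has the same shape: a FeatureUnion that concatenates TF-IDF n-gram features with hand-crafted numeric features, a classifier step named 'clf', and a GridSearchCV whose parameter keys address that step via the 'clf__' prefix. A condensed, self-contained sketch of the pattern (the custom Cleaner and the StandardScaler stage are omitted for brevity; the data is illustrative):

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import FunctionTransformer

# Two toy numeric features per document: length and '!' count.
length_feats = FunctionTransformer(
    lambda X: np.array([[len(s), s.count("!")] for s in X]))

pipe = Pipeline([
    ("u1", FeatureUnion([
        ("tfidf", TfidfVectorizer(ngram_range=(1, 2))),
        ("numeric", length_feats),
    ])),
    ("clf", LogisticRegression(max_iter=1000)),
])
grid = GridSearchCV(pipe, [{"clf__C": [1.0, 3.0]}], scoring="f1", cv=2)

X = ["great product!", "terrible!!", "okay item", "love it!", "bad", "fine"]
y = [1, 0, 1, 1, 0, 1]
grid.fit(X, y)
print(grid.best_params_)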
techthiyanes/adapter-transformers
|
[
"04aeaf63c4c54856d416925258393d9e06866b46",
"04aeaf63c4c54856d416925258393d9e06866b46"
] |
[
"tests_adapters/test_adapter_composition.py",
"src/transformers/adapters/prefix_tuning.py"
] |
[
"import copy\nimport random\nimport unittest\n\nimport torch\n\nfrom tests.test_modeling_common import ids_tensor\nfrom transformers import (\n MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,\n AutoAdapterModel,\n AutoTokenizer,\n BertConfig,\n BertForSequenceClassification,\n T5AdapterModel,\n Trainer,\n TrainingArguments,\n)\nfrom transformers.adapters.composition import BatchSplit, Fuse, Parallel, Split, Stack, parse_composition\nfrom transformers.testing_utils import require_torch, torch_device\n\n\ndef filter_parameters(model, filter_string):\n return {k: v for (k, v) in model.named_parameters() if filter_string in k}\n\n\nclass AdapterCompositionParsingTest(unittest.TestCase):\n def test_parse_lists(self):\n self.assertEqual(Stack(\"a\"), parse_composition(\"a\"))\n self.assertEqual(Stack(\"a\", \"b\", \"c\"), parse_composition([\"a\", \"b\", \"c\"]))\n self.assertEqual(Stack(\"a\", Fuse(\"b\", \"c\")), parse_composition([\"a\", [\"b\", \"c\"]]))\n\n def test_to_deep(self):\n self.assertRaises(ValueError, lambda: parse_composition(Stack(\"a\", Fuse(\"b\", Stack(Fuse(\"c\", \"d\"), \"e\")))))\n\n def test_invalid_nesting_fusion(self):\n self.assertRaises(ValueError, lambda: parse_composition(Fuse(Fuse(\"a\", \"b\"), \"c\")))\n self.assertRaises(ValueError, lambda: parse_composition(Fuse(Split(\"a\", \"b\", 128), \"c\")))\n\n def test_invalid_nesting_split(self):\n self.assertRaises(ValueError, lambda: parse_composition(Split(\"a\", Fuse(\"b\", \"c\"), 128)))\n\n\n@require_torch\nclass AdapterCompositionTest(unittest.TestCase):\n def setUp(self):\n self.model = BertForSequenceClassification(BertConfig())\n self.model.add_adapter(\"a\")\n self.model.add_adapter(\"b\")\n self.model.add_adapter(\"c\")\n self.model.add_adapter(\"d\")\n self.model.to(torch_device)\n self.model.train()\n\n def training_pass(self):\n inputs = {}\n inputs[\"input_ids\"] = ids_tensor((1, 128), 1000).to(torch_device)\n inputs[\"labels\"] = torch.ones(1, dtype=torch.long).to(torch_device)\n loss = self.model(**inputs).loss\n loss.backward()\n\n def batched_training_pass(self):\n inputs = {\n \"input_ids\": ids_tensor((4, 128), 1000).to(torch_device),\n \"labels\": torch.ones(4, dtype=torch.long).to(torch_device),\n }\n loss = self.model(**inputs).loss\n loss.backward()\n\n def test_simple_split(self):\n # pass over split setup\n self.model.set_active_adapters(Split(\"a\", \"b\", 64))\n\n self.training_pass()\n\n def test_stacked_split(self):\n # split into two stacks\n self.model.set_active_adapters(Split(Stack(\"a\", \"b\"), Stack(\"c\", \"d\"), split_index=64))\n\n self.training_pass()\n\n def test_stacked_fusion(self):\n self.model.add_adapter_fusion(Fuse(\"b\", \"d\"))\n self.model.to(torch_device)\n\n # fuse two stacks\n self.model.set_active_adapters(Fuse(Stack(\"a\", \"b\"), Stack(\"c\", \"d\")))\n\n self.training_pass()\n\n def test_mixed_stack(self):\n self.model.add_adapter_fusion(Fuse(\"a\", \"b\"))\n self.model.to(torch_device)\n\n self.model.set_active_adapters(Stack(\"a\", Split(\"c\", \"d\", split_index=64), Fuse(\"a\", \"b\")))\n\n self.training_pass()\n\n def test_nested_split(self):\n # split into two stacks\n self.model.set_active_adapters(Split(Split(\"a\", \"b\", split_index=32), \"c\", split_index=64))\n\n self.training_pass()\n\n def test_parallel(self):\n self.model.set_active_adapters(Parallel(\"a\", \"b\", \"c\", \"d\"))\n\n inputs = {}\n inputs[\"input_ids\"] = ids_tensor((1, 128), 1000)\n logits = self.model(**inputs).logits\n self.assertEqual(logits.shape, (4, 2))\n\n def 
test_nested_parallel(self):\n self.model.set_active_adapters(Stack(\"a\", Parallel(Stack(\"b\", \"c\"), \"d\")))\n\n inputs = {}\n inputs[\"input_ids\"] = ids_tensor((1, 128), 1000)\n logits = self.model(**inputs).logits\n self.assertEqual(logits.shape, (2, 2))\n\n def test_batch_split(self):\n self.model.set_active_adapters(BatchSplit(\"a\", \"b\", \"c\", batch_sizes=[1, 1, 2]))\n self.batched_training_pass()\n\n def test_batch_split_int(self):\n self.model.set_active_adapters(BatchSplit(\"a\", \"b\", batch_sizes=2))\n self.batched_training_pass()\n\n def test_nested_batch_split(self):\n self.model.set_active_adapters(Stack(\"a\", BatchSplit(\"b\", \"c\", batch_sizes=[2, 2])))\n self.batched_training_pass()\n\n def test_batch_split_invalid(self):\n self.model.set_active_adapters(BatchSplit(\"a\", \"b\", batch_sizes=[3, 4]))\n with self.assertRaises(IndexError):\n self.batched_training_pass()\n\n def test_batch_split_equivalent(self):\n self.model.set_active_adapters(\"a\")\n self.model.eval()\n input_ids = ids_tensor((2, 128), 1000)\n output_a = self.model(input_ids[:1])\n\n self.model.set_active_adapters(\"b\")\n output_b = self.model(input_ids[1:2])\n\n self.model.set_active_adapters(BatchSplit(\"a\", \"b\", batch_sizes=[1, 1]))\n output = self.model(input_ids)\n\n self.assertTrue(torch.allclose(output_a[0], output[0][0], atol=1e-6))\n self.assertTrue(torch.allclose(output_b[0], output[0][1], atol=1e-6))\n\n\n@require_torch\nclass ParallelAdapterInferenceTestMixin:\n def test_parallel_inference_with_heads(self):\n model = AutoAdapterModel.from_config(self.config())\n\n model.add_adapter(\"a\")\n model.add_adapter(\"b\")\n self.add_head(model, \"a\", num_labels=2)\n self.add_head(model, \"b\", num_labels=3)\n model.eval()\n model.to(torch_device)\n\n inputs = self.get_input_samples((2, 128), config=model.config)\n inputs[\"attention_mask\"] = torch.randint(0, 2, size=(2, 128), device=torch_device)\n\n # for reference, pass through single adapters\n model.active_adapters = \"a\"\n model.active_head = \"a\"\n outputs_a = model(**inputs)\n model.active_adapters = \"b\"\n model.active_head = \"b\"\n outputs_b = model(**inputs)\n\n model.active_adapters = Parallel(\"a\", \"b\")\n # active_adapters should set parallel heads too\n self.assertEqual(model.active_head, [\"a\", \"b\"])\n outputs = model(**inputs)\n\n self.assertEqual(len(outputs), 2)\n if self.config_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING:\n self.assertEqual(outputs[0][0].shape, (2, 2))\n self.assertEqual(outputs[1][0].shape, (2, 3))\n self.assertTrue(torch.allclose(outputs[0][0], outputs_a[0], atol=1e-5))\n self.assertTrue(torch.allclose(outputs[1][0], outputs_b[0], atol=1e-5))\n\n def test_parallel_inference_with_wrong_number_of_heads(self):\n model = AutoAdapterModel.from_config(self.config())\n model.eval()\n\n model.add_adapter(\"a\")\n model.add_adapter(\"b\")\n self.add_head(model, \"a\", num_labels=2)\n model.to(torch_device)\n\n inputs = self.get_input_samples((2, 128), config=model.config)\n\n model.active_adapters = Parallel(\"a\", \"b\")\n model.active_head = [\"a\"]\n with self.assertRaises(ValueError):\n model(**inputs)\n\n model.active_head = \"a\"\n with self.assertRaises(ValueError):\n model(**inputs)\n\n def test_batch_split_with_heads(self):\n model = AutoAdapterModel.from_config(self.config())\n model.add_adapter(\"a\")\n model.add_adapter(\"b\")\n self.add_head(model, \"a\", num_labels=2)\n self.add_head(model, \"b\", num_labels=3)\n model.eval()\n model.to(torch_device)\n\n inputs = {\"input_ids\": 
self.get_input_samples((2, 128), config=model.config)[\"input_ids\"]}\n if isinstance(model, T5AdapterModel):\n inputs[\"decoder_input_ids\"] = inputs[\"input_ids\"]\n\n # for reference, pass through single adapters\n model.active_adapters = \"a\"\n model.active_head = \"a\"\n outputs_a = model(**{k: v[:1] for k, v in inputs.items()})\n model.active_adapters = \"b\"\n model.active_head = \"b\"\n outputs_b = model(**{k: v[1:] for k, v in inputs.items()})\n\n model.set_active_adapters(BatchSplit(\"a\", \"b\", batch_sizes=[1, 1]))\n output = model(**inputs)\n\n self.assertEqual(2, len(output))\n self.assertTrue(\n torch.allclose(\n output[0][\"logits\"],\n outputs_a[\"logits\"],\n atol=1e-05,\n )\n )\n self.assertTrue(\n torch.allclose(\n output[1][\"logits\"],\n outputs_b[\"logits\"],\n atol=1e-05,\n )\n )\n\n\nclass ParallelTrainingMixin:\n def create_twin_adapters(self, model, name):\n # create adapter\n adapter1, adapter2 = name + \"_1\", name + \"_2\"\n model.add_adapter(adapter1)\n self.add_head(model, adapter1)\n # create a twin initialized with the same random weights\n model.add_adapter(adapter2)\n self.add_head(model, adapter2)\n\n state_dict = model.state_dict()\n for k, v in state_dict.items():\n if adapter1 in k:\n state_dict[k.replace(adapter1, adapter2)] = v\n model.load_state_dict(state_dict)\n\n return adapter1, adapter2\n\n def train_model(self, model, dataset):\n # trains model in eval mode for 2 epochs\n random.seed(42)\n torch.manual_seed(42)\n # Depending on the used optimizer the adapters are not exactly the same\n model.to(torch_device)\n optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n for epoch in range(2):\n for data_input in dataset:\n for key, value in data_input.items():\n data_input[key] = value.to(torch_device)\n\n optimizer.zero_grad()\n output = model(**data_input)\n loss = output[\"loss\"]\n loss.backward()\n optimizer.step()\n return model\n\n def test_parallel_training(self):\n tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name, use_fast=False)\n if tokenizer.pad_token is None:\n tokenizer.pad_token = tokenizer.eos_token\n model = AutoAdapterModel.from_config(self.config())\n\n model.add_adapter(\"mrpc1\")\n model.add_adapter(\"mrpc2\")\n self.add_head(model, \"mrpc1\", num_labels=2)\n self.add_head(model, \"mrpc2\", num_labels=3)\n model.active_adapters = Parallel(\"mrpc1\", \"mrpc2\")\n model.train_adapter(Parallel(\"mrpc1\", \"mrpc2\"))\n # model.eval()\n\n # all weights of the adapter should be activated\n for k, v in filter_parameters(model, \"adapters.mrpc1.\").items():\n self.assertTrue(v.requires_grad, k)\n # all weights of the adapter not used for training should be frozen\n for k, v in filter_parameters(model, \"adapters.mrpc2.\").items():\n self.assertTrue(v.requires_grad, k)\n # weights of the model should be frozen (check on some examples)\n for k, v in filter_parameters(model, \"encoder.layer.0.attention\").items():\n self.assertFalse(v.requires_grad, k)\n\n state_dict_pre = copy.deepcopy(model.state_dict())\n\n train_dataset = self.dataset(tokenizer)\n training_args = TrainingArguments(\n output_dir=\"./examples\", do_train=True, learning_rate=0.1, max_steps=15, no_cuda=True\n )\n\n # evaluate\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n )\n trainer.train()\n\n for ((k1, v1), (k2, v2)) in zip(state_dict_pre.items(), model.state_dict().items()):\n if \"mrpc\" in k1:\n self.assertFalse(torch.equal(v1, v2), k1)\n else:\n self.assertTrue(torch.equal(v1, v2))\n\n def 
test_parallel_training_equivalent_to_single_adapters(self):\n model = AutoAdapterModel.from_config(self.config())\n model.eval()\n\n a1, a2 = self.create_twin_adapters(model, \"a\")\n b1, b2 = self.create_twin_adapters(model, \"b\")\n\n dataset = []\n for i in range(3):\n input_data = self.get_input_samples((3, 128), config=model.config)\n if isinstance(model, T5AdapterModel):\n input_data[\"labels\"] = torch.randint(0, 2, (3, 128))\n else:\n input_data[\"labels\"] = torch.randint(0, 2, (3, 1))\n dataset.append(input_data)\n\n for adapter in [a1, b1]:\n model.active_head = adapter\n model.set_active_adapters(adapter)\n model.train_adapter(adapter)\n model.eval()\n\n model = self.train_model(model, dataset)\n\n model.set_active_adapters(Parallel(a2, b2))\n model.train_adapter((Parallel(a2, b2)))\n model.eval()\n\n model = self.train_model(model, dataset)\n\n state_dict = model.state_dict()\n for k, v in state_dict.items():\n if a1 in k:\n self.assertTrue(\n torch.allclose(v, state_dict[k.replace(a1, a2)], atol=1e-5),\n torch.max(torch.sub(v, state_dict[k.replace(a1, a2)])),\n )\n if b1 in k:\n self.assertTrue(torch.allclose(v, state_dict[k.replace(b1, b2)], atol=1e-5))\n\n def test_parallel_training_single_forward_pass(self):\n model = AutoAdapterModel.from_config(self.config())\n model.eval()\n\n a1, a2 = self.create_twin_adapters(model, \"a\")\n b1, b2 = self.create_twin_adapters(model, \"b\")\n\n state_dict = model.state_dict()\n for k, v in state_dict.items():\n if a1 in k:\n self.assertTrue(torch.equal(v, state_dict[k.replace(a1, a2)]))\n if b1 in k:\n self.assertTrue(torch.equal(v, state_dict[k.replace(b1, b2)]))\n\n input_data = self.get_input_samples((3, 128), config=model.config)\n if isinstance(model, T5AdapterModel):\n input_data[\"labels\"] = torch.randint(0, 2, (3, 128), device=torch_device)\n else:\n input_data[\"labels\"] = torch.randint(0, 2, (3, 1), device=torch_device)\n\n outputs = []\n for adapter in [a1, b1]:\n model.active_head = adapter\n model.set_active_adapters(adapter)\n model.train_adapter(adapter)\n model.eval()\n model.to(torch_device)\n outputs.append(model(**input_data))\n\n model.set_active_adapters(Parallel(a2, b2))\n model.train_adapter((Parallel(a2, b2)))\n model.eval()\n model.to(torch_device)\n\n parallel_outputs = model(**input_data)\n\n for out1, out2 in zip(outputs, parallel_outputs.head_outputs):\n self.assertTrue(torch.allclose(out1[\"loss\"], out2[\"loss\"]))\n self.assertTrue(torch.allclose(out1[\"logits\"], out2[\"logits\"], atol=1e-5))\n",
"from typing import List, Union\n\nimport torch\nfrom torch import nn\n\nfrom .composition import AdapterCompositionBlock\nfrom .configuration import PrefixTuningConfig\nfrom .context import AdapterSetup, ForwardContext\nfrom .layer import AdapterLayerBase\nfrom .modeling import Activation_Function_Class\n\n\nclass PrefixTuning(nn.Module):\n def __init__(\n self,\n n_layers: int,\n n_heads: int,\n input_size: int,\n config: PrefixTuningConfig,\n ):\n super().__init__()\n self.n_layers = n_layers\n self.n_heads = n_heads\n self.input_size = input_size\n self.n_embd_per_head = self.input_size // self.n_heads\n self.config = config\n\n self.input_tokens = torch.arange(self.config.prefix_length).long()\n self.wte = nn.Embedding(self.config.prefix_length, self.input_size)\n self.control_trans = nn.Sequential(\n nn.Linear(self.input_size, self.config.bottleneck_size),\n Activation_Function_Class(self.config.non_linearity.lower()),\n nn.Linear(self.config.bottleneck_size, self.n_layers * 2 * self.input_size),\n )\n self.dropout = nn.Dropout(self.config.dropout)\n\n def eject(self):\n device = next(self.parameters()).device\n input_tokens = self.input_tokens.unsqueeze(0).expand(1, -1).to(device)\n embs = self.wte(input_tokens)\n key_values = self.control_trans(embs) # batch_size x prefix_length x n_layers*2*input_size\n key_values = key_values.view(\n self.config.prefix_length * self.n_layers * 2 * self.input_size\n ) # *2 for key and value\n\n return key_values\n\n def forward(self, batch_size):\n device = next(self.parameters()).device\n input_tokens = self.input_tokens.unsqueeze(0).expand(batch_size, -1).to(device)\n embs = self.wte(input_tokens)\n key_values = self.control_trans(embs) # batch_size x prefix_length x n_layers*2*input_size\n key_values = key_values.view(\n batch_size, self.config.prefix_length, self.n_layers * 2, self.n_heads, self.n_embd_per_head\n ) # *2 for key and value\n key_values = self.dropout(key_values)\n # n_layers * (2 x batch_size x n_heads x prefix_length x n_embd_per_head)\n key_values = key_values.permute(2, 0, 3, 1, 4).split(2)\n\n return key_values\n\n\nclass FlatPrefixTuning(nn.Module):\n def __init__(\n self,\n n_layers: int,\n n_heads: int,\n input_size: int,\n config: PrefixTuningConfig,\n ):\n super().__init__()\n self.n_layers = n_layers\n self.n_heads = n_heads\n self.input_size = input_size\n self.n_embd_per_head = self.input_size // self.n_heads\n self.config = config\n\n self.control_trans = nn.Parameter(torch.randn(self.config.prefix_length * self.n_layers * 2 * self.input_size))\n\n self.dropout = nn.Dropout(self.config.dropout)\n\n def forward(self, batch_size):\n device = next(self.parameters()).device\n key_values = (\n self.control_trans.unsqueeze(0)\n .expand(batch_size, -1)\n .view(batch_size, self.config.prefix_length, self.n_layers * 2, self.n_heads, self.n_embd_per_head)\n .to(device)\n ) # *2 for key and value\n key_values = self.dropout(key_values)\n # n_layers * (2 x batch_size x n_heads x prefix_length x n_embd_per_head)\n key_values = key_values.permute(2, 0, 3, 1, 4).split(2)\n\n return key_values\n\n\nclass PrefixTuningGroup(nn.ModuleDict):\n def __init__(self, module_configs, prefix_tuning_config):\n super().__init__()\n if prefix_tuning_config[\"flat\"]:\n prefix_tuning_class = FlatPrefixTuning\n else:\n prefix_tuning_class = PrefixTuning\n for k, kwargs in module_configs.items():\n self[k] = prefix_tuning_class(**kwargs, config=prefix_tuning_config)\n\n def eject(self):\n \"\"\"Converts all PrefixTuning modules into 
FlatPrefixTuning modules.\"\"\"\n for k, v in self.items():\n if isinstance(v, PrefixTuning):\n config = v.config.replace(flat=True)\n self[k] = FlatPrefixTuning(v.n_layers, v.n_heads, v.input_size, config)\n weights = v.eject()\n self[k].control_trans = nn.Parameter(weights)\n\n def forward(self, batch_size):\n return {k: v(batch_size) for k, v in self.items()}\n\n\nclass PrefixTuningPool(nn.Module):\n \"\"\"\n The model layer that holds all Prefix Tuning prefixes. While each Transformers layer has its own prefix, this layer\n is shared across all Transformers layers.\n\n How it works:\n\n 1. A `PrefixTuningShim` module that sets this module as pool module is added to each layer.\n 2. On adding a prefix, each shim module where a prefix should be added increments a counter in `prefix_counts`.\n 3. Finally, the base model class confirms adding a new prefix by calling `confirm_prefix()`.\n 4. This module adds a prefix layer that produces outputs corresponding to the indicated number of layers.\n\n Notes:\n\n - The forward call to this layer is executed in the ForwardContext of each model pass.\n - All other methods of this class (except for `confirm_prefix()`) should be called exclusively by\n `PrefixTuningShim`.\n\n Args:\n config (:class:`~transformers.PretrainedConfig`): The model config.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.prefix_counts = {}\n self.prefix_tunings = nn.ModuleDict()\n\n def indicate_prefix(self, prefix_name: str, location_key: str):\n if prefix_name not in self.prefix_counts:\n self.prefix_counts[prefix_name] = {location_key: 1}\n elif location_key not in self.prefix_counts[prefix_name]:\n self.prefix_counts[prefix_name][location_key] = 1\n else:\n self.prefix_counts[prefix_name][location_key] += 1\n\n return self.prefix_counts[prefix_name][location_key] - 1\n\n def confirm_prefix(self, prefix_name: str):\n \"\"\"Create Prefix Tuning module based on shim layer infications.\"\"\"\n prefix_tuning_config = self.config.adapters.match(prefix_name, PrefixTuningConfig)\n if prefix_tuning_config is None:\n return\n\n if prefix_name not in self.prefix_counts:\n raise ValueError(f\"Prefix {prefix_name} not found in PrefixTuningPool\")\n\n module_configs = {}\n for location_key, count in self.prefix_counts[prefix_name].items():\n module_configs[location_key] = {\n \"n_layers\": count,\n \"n_heads\": self.config.num_attention_heads,\n \"input_size\": self.config.hidden_size,\n }\n prefix_tuning = PrefixTuningGroup(module_configs, prefix_tuning_config)\n prefix_tuning.train(self.training) # make sure training mode is consistent\n self.prefix_tunings[prefix_name] = prefix_tuning\n del self.prefix_counts[prefix_name]\n\n def delete_prefix(self, prefix_name: str):\n if prefix_name in self.prefix_tunings:\n del self.prefix_tunings[prefix_name]\n\n def enable_prefix(self, prefix_name: str):\n if prefix_name in self.prefix_tunings:\n for param in self.prefix_tunings[prefix_name].parameters():\n param.requires_grad = True\n\n def get_prefix(self, prefix_name: str):\n if prefix_name in self.prefix_tunings:\n return self.prefix_tunings[prefix_name]\n else:\n return None\n\n def forward(self, *args, **kwargs):\n context = AdapterSetup.get_context()\n if context is not None:\n adapter_setup = context.adapter_setup\n else:\n adapter_setup = self.config.adapters.active_setup\n\n prefix_states = {}\n if adapter_setup is not None:\n # Infer batch size\n input_tensor_names = [\"input_ids\", \"decoder_input_ids\", \"attention_mask\", 
\"inputs_embeds\"]\n batch_size = None\n for name in input_tensor_names:\n if kwargs.get(name, None) is not None:\n batch_size = kwargs[name].size(0)\n break\n if batch_size is None:\n if len(args) > 0:\n batch_size = args[0].size(0)\n else:\n raise ValueError(\"Could not infer batch size for prefix tuning from inputs.\")\n\n # Pass to sub-layers\n for name in adapter_setup.flatten():\n if name in self.prefix_tunings:\n prefix_states[name] = self.prefix_tunings[name](batch_size)\n\n return prefix_states\n\n\nclass PrefixTuningShim(AdapterLayerBase, nn.Module):\n \"\"\"\n Representation of a Prefix Tuning layer within one Transformer layer. This class implements `AdapterLayerBase` for\n compatibility with adapters. It uses `PrefixTuningPool` in the background and `set_pool()` must be called after\n initialization.\n\n Args:\n location_key (str): The id describing the location of this layer in the model.\n Currently, can be \"encoder_prefix\", \"cross_prefix\" or None.\n config (:class:`~transformers.PretrainedConfig`): The model config.\n \"\"\"\n\n def __init__(self, location_key: str, config):\n super().__init__()\n self.config = config\n self.location_key = location_key\n self.prefixes = {}\n\n def set_pool(self, pool: PrefixTuningPool):\n self.__setattr__(\"pool\", pool)\n\n def add_adapter(self, adapter_name: str, layer_idx: int):\n self.layer_idx = layer_idx\n # only match location keys for which we have config keys\n if self.location_key.startswith(\"cross\") or self.location_key.startswith(\"encoder\"):\n used_location_key = self.location_key\n else:\n used_location_key = None\n prefix_tuning_config = self.config.adapters.match(\n adapter_name,\n config_type=PrefixTuningConfig,\n layer_idx=self.layer_idx,\n location_key=used_location_key,\n )\n if prefix_tuning_config is not None:\n prefix_id = self.pool.indicate_prefix(adapter_name, self.location_key)\n self.prefixes[adapter_name] = prefix_id\n\n def delete_adapter(self, adapter_name: str):\n self.pool.delete_prefix(adapter_name)\n if adapter_name in self.prefixes:\n del self.prefixes[adapter_name]\n\n def add_fusion_layer(self, adapter_names: Union[List, str]):\n pass # not applicable to prefix tuning\n\n def delete_fusion_layer(self, adapter_names: Union[List, str]):\n pass # not applicable to prefix tuning\n\n def enable_adapters(self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_fusion: bool):\n if unfreeze_adapters:\n for prefix_tuning_name in adapter_setup.flatten():\n self.pool.enable_prefix(prefix_tuning_name)\n\n def get_adapter(self, adapter_name):\n # Make sure to only return params once\n if adapter_name in self.prefixes and self.prefixes[adapter_name] == 0:\n return self.pool.get_prefix(adapter_name)\n\n return None\n\n def forward(self, key_states, value_states, attention_mask=None, invert_mask=True):\n adapter_setup = self.get_active_setup(self.prefixes)\n if adapter_setup is not None:\n if len(adapter_setup) == 1:\n # we already made sure we only have 1 item\n prefix_tuning_name = adapter_setup.first()\n if prefix_tuning_name in self.prefixes:\n prefix_id = self.prefixes[prefix_tuning_name]\n batch_size = key_states.size(0)\n\n # Retrieve pre-computed prefix states from context\n context = ForwardContext.get_context()\n prefix_keys, prefix_values = context.prefix_states[prefix_tuning_name][self.location_key][\n prefix_id\n ]\n\n key_states = torch.cat([prefix_keys, key_states], dim=2)\n value_states = torch.cat([prefix_values, value_states], dim=2)\n if attention_mask is not None:\n if 
attention_mask.dim() == 2:\n prefix_mask = torch.ones(batch_size, prefix_keys.size(2)).to(attention_mask.device)\n else:\n prefix_mask = torch.ones(batch_size, 1, attention_mask.size(2), prefix_keys.size(2)).to(\n attention_mask.device\n )\n if invert_mask:\n prefix_mask = 1.0 - prefix_mask\n attention_mask = torch.cat([prefix_mask, attention_mask], dim=-1)\n else:\n raise ValueError(f\"Invalid adapter setup. Cannot use {adapter_setup} with prefix tuning.\")\n\n return key_states, value_states, attention_mask\n"
] |
[
[
"torch.randint",
"torch.ones",
"torch.manual_seed",
"torch.equal",
"torch.allclose"
],
[
"torch.nn.Dropout",
"torch.nn.Parameter",
"torch.cat",
"torch.randn",
"torch.nn.ModuleDict",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.arange"
]
] |
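PrefixTuning.forward above turns one flat MLP output into per-layer key/value prefixes using only a view, a permute, and a split. A minimal sketch of that reshape logic with made-up sizes, substituting a plain Tanh for Activation_Function_Class so it runs without the adapters package:

import torch
from torch import nn

n_layers, n_heads, input_size, prefix_len, bottleneck = 2, 4, 16, 5, 32
n_embd_per_head = input_size // n_heads

wte = nn.Embedding(prefix_len, input_size)
control_trans = nn.Sequential(
    nn.Linear(input_size, bottleneck),
    nn.Tanh(),  # stand-in for Activation_Function_Class(non_linearity)
    nn.Linear(bottleneck, n_layers * 2 * input_size),
)

batch_size = 3
tokens = torch.arange(prefix_len).unsqueeze(0).expand(batch_size, -1)
kv = control_trans(wte(tokens))          # batch_size x prefix_len x n_layers*2*input_size
kv = kv.view(batch_size, prefix_len, n_layers * 2, n_heads, n_embd_per_head)
kv = kv.permute(2, 0, 3, 1, 4).split(2)  # n_layers tuples of (2, B, H, P, d)

assert len(kv) == n_layers
assert kv[0].shape == (2, batch_size, n_heads, prefix_len, n_embd_per_head)

Each of the n_layers entries bundles the key and value prefix for one Transformer layer, which is why the 2 survives as the leading dimension after the split.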
Schoyen/FYS4460
|
[
"0c6ba1deefbfd5e9d1657910243afc2297c695a3"
] |
[
"project-3/percolation.py"
] |
[
"import warnings\nimport numpy as np\nimport scipy.ndimage\nimport scipy.optimize\nimport skimage\n\n\ndef compute_percolation_probability(L, p, num_samples):\n num_percolating = 0\n\n for i in range(num_samples):\n system = np.random.rand(L, L) < p\n\n labels, num_features = scipy.ndimage.measurements.label(system)\n props = skimage.measure.regionprops(labels)\n\n for prop in props:\n if (\n prop.bbox[2] - prop.bbox[0] == L\n or prop.bbox[3] - prop.bbox[1] == L\n ):\n num_percolating += 1\n break\n\n return num_percolating / num_samples\n\n\ndef compute_percolation_threshold(\n x,\n L,\n num_samples,\n p_bounds=(0, 1),\n tol=1e-5,\n max_iterations=100,\n verbose=False,\n):\n lower, upper = p_bounds\n lower_pi, upper_pi = (0, 1)\n\n for i in range(max_iterations):\n if upper - lower < tol:\n break\n\n mid = (upper + lower) / 2\n mid_pi = compute_percolation_probability(L, mid, num_samples)\n\n if mid_pi > x:\n upper = mid\n upper_pi = mid_pi\n else:\n lower = mid\n lower_pi = mid_pi\n\n if verbose and i == max_iterations - 1:\n warnings.warn(\"Minimization did not converge\")\n\n return mid\n"
] |
[
[
"numpy.random.rand"
]
] |
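A single spanning-cluster test of the kind compute_percolation_probability repeats num_samples times; the percolation criterion (bounding box spanning the lattice in either direction) is the one used above, with illustrative L and p:

import numpy as np
import scipy.ndimage
import skimage.measure

L, p = 32, 0.6
system = np.random.rand(L, L) < p
labels, num_features = scipy.ndimage.label(system)
props = skimage.measure.regionprops(labels)
# bbox is (min_row, min_col, max_row, max_col), so these differences are
# the cluster's row and column extents
percolates = any(
    prop.bbox[2] - prop.bbox[0] == L or prop.bbox[3] - prop.bbox[1] == L
    for prop in props
)
print(f"p={p}: spanning cluster present: {percolates}")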
Injector-Spenral/libswervedrive
|
[
"4981d7103aaded08464b8c78b65b452f2ac41b0d"
] |
[
"icrestimator.py"
] |
[
"import numpy as np\nimport math\n\n\nclass ICREstimator:\n\n # constants used in the lmda estimation algo\n eta_lmda: float = 1e-3 # TODO: figure out what values this should be\n eta_delta: float = 1e-3 # TODO: figure out what values this should be\n min_delta_size: float = 1e-3 # TODO: figure out what value this should be\n max_iter = 3 # TODO: figure out what value should be\n\n def __init__(self, epsilon_init: np.ndarray, modules_alpha: np.ndarray, modules_l: np.ndarray,\n modules_b: np.ndarray):\n \"\"\"\n Initialize the ICREstimator object. The order in the following arrays\n must be preserved throughout all arguments passed to this object.\n :param epsilon_init: the starting position estimate for the robot\n position. Form (x, y, theta)^T.\n :param modules_alpha: array containing the angle to each of the modules,\n measured counter clockwise from the x-axis.\n :param modules_l: distance to the axis of rotation of each module from\n the origin of the chassis frame\n :param modules_b: distance from the axis of rotation of each module to\n it's contact with the ground.\n \"\"\"\n\n self.epsilon = epsilon_init\n\n self.alpha = modules_alpha\n self.l = modules_l\n self.b = modules_b\n self.n_modules = len(self.alpha)\n self.epsilon_init = epsilon_init\n\n self.a = np.zeros(shape=(3, self.n_modules))\n self.a_orth = np.zeros(shape=(3, self.n_modules))\n self.s = np.zeros(shape=(3, self.n_modules))\n self.l_v = np.zeros(shape=(3, self.n_modules))\n for i in range(self.n_modules):\n self.a[:,i] = np.array([math.cos(self.alpha[i]),\n math.sin(self.alpha[i]),\n 0])\n self.a_orth[:,i] = np.array([-math.sin(self.alpha[i]),\n math.cos(self.alpha[i]),\n 0])\n self.s[:,i] = np.array([self.l[i]*math.cos(self.alpha[i]),\n self.l[i]*math.sin(self.alpha[i]),\n 1])\n self.l_v[:,i] = np.array([0, 0, self.l[i]])\n\n def compute_odometry(self, lmda_e: np.ndarray, mu_e: float, delta_t: float):\n \"\"\"\n Update our estimate of epsilon (twist position) based on the new ICR\n estimate.\n :param lmda_e: the estimate of the ICR in h-space.\n :param mu_e: estimate of the position of the robot about the ICR.\n :param delta_t: time since the odometry was last updated.\n \"\"\"\n\n def estimate_mu(self, phi_dot: np.ndarray, lmda_e):\n \"\"\"\n Find the rotational position of the robot about the ICR.\n :param phi_dot: array of angular velocities of the wheels.\n :param lmda_e: the estimate of the ICR in h-space.\n :return: the estimate of mu (float).\n \"\"\"\n # this requires solving equation (22) from the control paper, i think\n # we may need to look into whether this is valid for a system with no\n # wheel coupling\n return 0.\n\n def estimate_lmda(self, q: np.ndarray):\n \"\"\"\n Find the ICR given the steering angles.\n :param q: list of angles beta between representing the steer angle\n (measured relative to the orientation orthogonal to the line to the\n chassis frame origin.)\n :return: our estimate of ICR as the array (u, v, w)^T.\n \"\"\"\n starting_points = self.select_starting_points(q)\n found = False\n closest_lmda = None\n closest_dist = None\n for lmda_start in starting_points:\n lmda = lmda_start\n if closest_lmda is None:\n closest_lmda = lmda_start\n closest_dist = np.linalg.norm(q - self.S(lmda_start))\n if np.linalg.norm(q - self.S(lmda)) < self.eta_delta:\n found = True\n else:\n last_singularity = None\n for i in range(self.max_iter):\n (S_u, S_v) = self.compute_derivatives(lmda)\n if last_singularity is not None:\n # if we had a singularity last time, set the derivatives\n # for the 
corresponding wheel to 0\n S_u[last_singularity] = 0\n S_v[last_singularity] = 0\n (delta_u, delta_v) = self.solve(S_u, S_v, q, lmda)\n lmda_t, worse = self.update_parameters(lmda, delta_u, delta_v, q)\n singularity, singularity_number = self.handle_singularities(lmda_t)\n S_lmda = self.S(lmda_t)\n if last_singularity is not None and singularity:\n # the test point is still on the steering axis, suggesting\n # it is on a singularity. Set beta_k to the input steering\n # value\n S_lmda[last_singularity] = q[last_singularity]\n last_singularity = singularity_number\n if np.linalg.norm(q - S_lmda) > np.linalg.norm(q - self.S(lmda_start)):\n # appears the algorithm has diverged as we are not\n # improving\n found = False\n break\n else:\n found = np.linalg.norm(lmda - lmda_t) > self.eta_lmda\n distance = np.linalg.norm(q - S_lmda)\n if distance < closest_dist:\n closest_lmda = lmda_t\n closest_dist = distance\n lmda = lmda_t\n if found:\n return lmda\n return closest_lmda\n\n def select_starting_points(self, q: np.ndarray):\n \"\"\"\n Find the starting points for the Newton-Raphson algorithm. This\n implementation places them at the intersection of the propulsion axis\n and orders them according to their distance to the input point.\n :param q: list of angles beta between representing the steer angle\n (measured relative to the orientation orthogonal to the line to the\n chassis frame origin.)\n :return: List of the top three starting points ordered according to\n their distance to the input length.\n \"\"\"\n starting_points = []\n\n def get_p(i):\n s = column(self.s, i).reshape(-1)\n d = s + np.array([math.cos(q[i] + self.alpha[i]),\n math.sin(q[i] + self.alpha[i]), 0])\n return np.cross(s, d)\n\n for i in range(self.n_modules):\n p_1 = get_p(i)\n for j in range(self.n_modules):\n if not i > j:\n continue\n p_2 = get_p(j)\n c = np.cross(p_1, p_2)\n if c[2] < 0:\n c = -c\n dist = np.linalg.norm(q-self.S(c))\n starting_points.append([c, dist])\n starting_points.sort(key=lambda point: point[1])\n sp_arr = [p[0].reshape(3, 1) for p in starting_points]\n return sp_arr\n\n def compute_derivatives(self, lmda: np.ndarray):\n \"\"\"\n Compute the derivateves of the constraining surface at the current\n estimate of the point.\n :param lmda: position of the ICR estimate\n :return: np.ndarray with (S_u, S_v). 
S_u and S_v are the vectors\n containing the derivatives of each steering angle in q with respect\n u and v, respectively.\n \"\"\"\n S_u = np.zeros(shape=(self.n_modules,))\n S_v = np.zeros(shape=(self.n_modules,))\n lmda = lmda.reshape(3) # computations require lambda as a row vector\n for i in range(self.n_modules):\n # equations 16 and 17 in the paper\n a = column(self.a, i).reshape(3)\n a_orth = column(self.a_orth, i).reshape(3)\n l = column(self.l_v, i).reshape(3)\n delta = lmda.dot(a-l)\n omega = lmda.dot(a_orth)\n # equation 18 excluding ∂lmda/∂u\n gamma_top = omega*(a-l) + delta*a_orth\n gamma_bottom = (lmda.dot(delta*(a-l) - omega*a_orth))\n if gamma_bottom == 0:\n S_u[i] = 0\n S_v[i] = 0\n continue\n # equation 19\n du = np.array([1, 0, -lmda[0]/lmda[2]]).reshape(1, 3)\n dv = np.array([0, 1, -lmda[1]/lmda[2]]).reshape(1, 3)\n beta_u = du.dot(gamma_top) / gamma_bottom\n beta_v = dv.dot(gamma_top) / gamma_bottom\n S_u[i] = beta_u\n S_v[i] = beta_v\n return (S_u, S_v)\n\n def solve(self, S_u: np.ndarray, S_v: np.ndarray, q: np.ndarray,\n lmda: np.ndarray):\n \"\"\"\n Solve the system of linear equations to find the free parameters\n delta_u and delta_v.\n :param S_u: derivative of constraining surface wrt u (vector).\n :param S_v: derivative of constraining surface wrt v (vector).\n :param q: list of angles beta representing the steer angle\n (measured relative to the orientation orthogonal to the line to the\n chassis frame origin.)\n :param lmda: position of the ICR estimate.\n :return: the free parameters in the form (delta_u, delta_v).\n \"\"\"\n a_u = S_u.dot(S_u)\n a_c = S_u.dot(S_v)\n a_v = S_v.dot(S_v)\n A = np.array([[a_u, a_c], [a_c, a_v]])\n p_zero = self.S(lmda)\n diff = (q - p_zero).reshape((1, -1))\n b = np.array([diff.dot(S_u.T), diff.dot(S_v.T)])\n x = np.linalg.solve(A, b)\n return x[0,0], x[1,0]\n\n def update_parameters(self, lmda: np.ndarray, delta_u: float, delta_v: float,\n q: np.ndarray):\n \"\"\"\n Move our estimate of the ICR based on the free parameters delta_u and\n delta_v. If invalid parameters are produced rescale them so they lie\n within the sphere. 
If the algorithm has diverged backtrack if possible\n :param lmda: current position of the ICR estimate.\n :param delta_u: free parameter defining how much to move the ICR\n estimate in the direction S_u.\n :param delta_v: free parameter defining how much to move the ICR\n estimate in the direction S_v.\n :param q: list of angles beta representing the steer angle\n (measured relative to the orientation orthogonal to the line to the\n chassis frame origin.)\n :return: the new ICR estimate, a flag indicating divergence of the\n algorithm for this starting point.\n \"\"\"\n lmda_t = lmda\n worse = False\n # while the algorithm produces a worse than or equal to good estimate\n # for q on the surface as lmda from the previous iteration\n while np.linalg.norm(q - self.S(lmda)) <= np.linalg.norm(q - self.S(lmda_t)):\n # set a minimum step size to avoid infinite recursion\n if np.linalg.norm([delta_u, delta_v]) < self.min_delta_size:\n worse = True\n break\n u = lmda[0, 0]\n v = lmda[1, 0]\n u_i = u + delta_u\n v_i = u + delta_v\n # if adding delta_u and delta_v has produced out of bounds values,\n # recursively multiply to ensure they remain within bounds\n while np.linalg.norm([u_i, v_i]) > 1:\n factor = np.linalg.norm([u, v])\n u_i *= factor\n v_i *= factor\n w = math.sqrt(1-np.linalg.norm([u_i, v_i])) # equation 4\n lmda_t = np.array([u_i, v_i, w]).reshape(-1, 1)\n # backtrack by reducing the step size\n delta_u *= 0.5\n delta_v *= 0.5\n if lmda_t[2,0] < 0:\n lmda_t = -lmda_t\n return lmda_t, worse\n\n def handle_singularities(self, lmda: np.ndarray):\n \"\"\"\n Handle the structural singularities that may have been produced when\n the parameters were updated (when the ICR lies on a steering axis).\n :param lmda: the ICR estimate after the parameters were updated.\n :return: if the ICR is on a structural singularity, and the wheel\n number which the singularity is on if there is one\n \"\"\"\n wheel_number = None\n for i in range(self.n_modules):\n # equations 16 and 17 in the paper\n s = column(self.s, i)\n if np.allclose(lmda, s/np.linalg.norm(s)):\n wheel_number = i\n break\n return wheel_number is not None, wheel_number\n\n def S(self, lmda: np.ndarray):\n \"\"\"\n Compute the point in the joint space (space of all beta steering angle\n values) associated with a particular ICR.\n :param lmda: the ICR to compute the point for.\n :return: row vector expressing the point.\n \"\"\"\n S = np.zeros(shape=(self.n_modules,))\n lmda = lmda.T # computations require lambda as a row vector\n for i in range(self.n_modules):\n # equations 16 and 17 in the paper\n a = column(self.a, i)\n a_orth = column(self.a_orth, i)\n l = column(self.l_v, i)\n delta = lmda.dot(a-l)\n omega = lmda.dot(a_orth)\n norm = np.linalg.norm([delta, omega])\n sin_beta = np.sign(delta) * omega / norm\n cos_beta = np.sign(delta) * delta / norm\n S[i] = math.atan2(sin_beta, cos_beta)\n return S\n\n\ndef column(mat, row_i):\n \"\"\"\n Grab a column from a vector as a numpy column vector.\n :param row_i: row index\n :return: the column vector (shape (n, 1))\n \"\"\"\n return mat[:,row_i:row_i+1]\n"
] |
[
[
"numpy.linalg.solve",
"numpy.linalg.norm",
"numpy.sign",
"numpy.cross",
"numpy.array",
"numpy.zeros"
]
] |
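For orientation, the h-space the estimator searches is the upper unit hemisphere: an ICR estimate lmda = (u, v, w)^T has unit norm with w >= 0, so u and v determine the third coordinate via w = sqrt(1 - u^2 - v^2) (the "equation 4" the comments cite). A tiny standalone check of that intended convention, not a fix to the class:

import math
import numpy as np

u, v = 0.3, -0.2
w = math.sqrt(1.0 - u**2 - v**2)            # third coordinate on the unit sphere
lmda = np.array([u, v, w]).reshape(-1, 1)   # column vector, as the class uses
assert np.isclose(np.linalg.norm(lmda), 1.0)
assert lmda[2, 0] >= 0                      # update_parameters flips lmda_t when w < 0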
i9nn/demucs
|
[
"f17a7af8cd99d0b275d399f55b3ca5bf8dea2e4f"
] |
[
"demucs/wav.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Loading wav based datasets, including MusdbHQ.\"\"\"\n\nfrom collections import OrderedDict\nimport hashlib\nimport math\nimport json\nimport os\nfrom pathlib import Path\nimport tqdm\n\nimport musdb\nimport julius\nimport torch as th\nfrom torch import distributed\nimport torchaudio as ta\nfrom torch.nn import functional as F\n\nfrom .audio import convert_audio_channels\nfrom . import distrib\n\nMIXTURE = \"mixture\"\nEXT = \".wav\"\n\n\ndef _track_metadata(track, sources, normalize=True, ext=EXT):\n track_length = None\n track_samplerate = None\n mean = 0\n std = 1\n for source in sources + [MIXTURE]:\n file = track / f\"{source}{ext}\"\n try:\n info = ta.info(str(file))\n except RuntimeError:\n print(file)\n raise\n length = info.num_frames\n if track_length is None:\n track_length = length\n track_samplerate = info.sample_rate\n elif track_length != length:\n raise ValueError(\n f\"Invalid length for file {file}: \"\n f\"expecting {track_length} but got {length}.\")\n elif info.sample_rate != track_samplerate:\n raise ValueError(\n f\"Invalid sample rate for file {file}: \"\n f\"expecting {track_samplerate} but got {info.sample_rate}.\")\n if source == MIXTURE and normalize:\n try:\n wav, _ = ta.load(str(file))\n except RuntimeError:\n print(file)\n raise\n wav = wav.mean(0)\n mean = wav.mean().item()\n std = wav.std().item()\n\n return {\"length\": length, \"mean\": mean, \"std\": std, \"samplerate\": track_samplerate}\n\n\ndef build_metadata(path, sources, normalize=True, ext=EXT):\n \"\"\"\n Build the metadata for `Wavset`.\n\n Args:\n path (str or Path): path to dataset.\n sources (list[str]): list of sources to look for.\n normalize (bool): if True, loads full track and store normalization\n values based on the mixture file.\n ext (str): extension of audio files (default is .wav).\n \"\"\"\n\n meta = {}\n path = Path(path)\n pendings = []\n from concurrent.futures import ThreadPoolExecutor\n with ThreadPoolExecutor(8) as pool:\n for root, folders, files in os.walk(path, followlinks=True):\n root = Path(root)\n if root.name.startswith('.') or folders or root == path:\n continue\n name = str(root.relative_to(path))\n pendings.append((name, pool.submit(_track_metadata, root, sources, normalize, ext)))\n # meta[name] = _track_metadata(root, sources, normalize, ext)\n for name, pending in tqdm.tqdm(pendings, ncols=120):\n meta[name] = pending.result()\n return meta\n\n\nclass Wavset:\n def __init__(\n self,\n root, metadata, sources,\n segment=None, shift=None, normalize=True,\n samplerate=44100, channels=2, ext=EXT):\n \"\"\"\n Waveset (or mp3 set for that matter). Can be used to train\n with arbitrary sources. Each track should be one folder inside of `path`.\n The folder should contain files named `{source}.{ext}`.\n\n Args:\n root (Path or str): root folder for the dataset.\n metadata (dict): output from `build_metadata`.\n sources (list[str]): list of source names.\n segment (None or float): segment length in seconds. If `None`, returns entire tracks.\n shift (None or float): stride in seconds bewteen samples.\n normalize (bool): normalizes input audio, **based on the metadata content**,\n i.e. the entire track is normalized, not individual extracts.\n samplerate (int): target sample rate. 
if the file sample rate\n is different, it will be resampled on the fly.\n channels (int): target nb of channels. if different, will be\n changed onthe fly.\n ext (str): extension for audio files (default is .wav).\n\n samplerate and channels are converted on the fly.\n \"\"\"\n self.root = Path(root)\n self.metadata = OrderedDict(metadata)\n self.segment = segment\n self.shift = shift or segment\n self.normalize = normalize\n self.sources = sources\n self.channels = channels\n self.samplerate = samplerate\n self.ext = ext\n self.num_examples = []\n for name, meta in self.metadata.items():\n track_duration = meta['length'] / meta['samplerate']\n if segment is None or track_duration < segment:\n examples = 1\n else:\n examples = int(math.ceil((track_duration - self.segment) / self.shift) + 1)\n self.num_examples.append(examples)\n\n def __len__(self):\n return sum(self.num_examples)\n\n def get_file(self, name, source):\n return self.root / name / f\"{source}{self.ext}\"\n\n def __getitem__(self, index):\n for name, examples in zip(self.metadata, self.num_examples):\n if index >= examples:\n index -= examples\n continue\n meta = self.metadata[name]\n num_frames = -1\n offset = 0\n if self.segment is not None:\n offset = int(meta['samplerate'] * self.shift * index)\n num_frames = int(math.ceil(meta['samplerate'] * self.segment))\n wavs = []\n for source in self.sources:\n file = self.get_file(name, source)\n wav, _ = ta.load(str(file), frame_offset=offset, num_frames=num_frames)\n wav = convert_audio_channels(wav, self.channels)\n wavs.append(wav)\n\n example = th.stack(wavs)\n example = julius.resample_frac(example, meta['samplerate'], self.samplerate)\n if self.normalize:\n example = (example - meta['mean']) / meta['std']\n if self.segment:\n length = int(self.segment * self.samplerate)\n example = example[..., :length]\n example = F.pad(example, (0, length - example.shape[-1]))\n return example\n\n\ndef get_wav_datasets(args):\n \"\"\"Extract the wav datasets from the XP arguments.\"\"\"\n sig = hashlib.sha1(str(args.wav).encode()).hexdigest()[:8]\n metadata_file = Path(args.metadata) / ('wav_' + sig + \".json\")\n train_path = Path(args.wav) / \"train\"\n valid_path = Path(args.wav) / \"valid\"\n if not metadata_file.is_file() and distrib.rank == 0:\n metadata_file.parent.mkdir(exist_ok=True, parents=True)\n train = build_metadata(train_path, args.sources)\n valid = build_metadata(valid_path, args.sources)\n json.dump([train, valid], open(metadata_file, \"w\"))\n if distrib.world_size > 1:\n distributed.barrier()\n train, valid = json.load(open(metadata_file))\n if args.full_cv:\n kw_cv = {}\n else:\n kw_cv = {'segment': args.segment, 'shift': args.shift}\n train_set = Wavset(train_path, train, args.sources,\n segment=args.segment, shift=args.shift,\n samplerate=args.samplerate, channels=args.channels,\n normalize=args.normalize)\n valid_set = Wavset(valid_path, valid, [MIXTURE] + list(args.sources),\n samplerate=args.samplerate, channels=args.channels,\n normalize=args.normalize, **kw_cv)\n return train_set, valid_set\n\n\ndef _get_musdb_valid():\n # Return musdb valid set.\n import yaml\n setup_path = Path(musdb.__path__[0]) / 'configs' / 'mus.yaml'\n setup = yaml.safe_load(open(setup_path, 'r'))\n return setup['validation_tracks']\n\n\ndef get_musdb_wav_datasets(args):\n \"\"\"Extract the musdb dataset from the XP arguments.\"\"\"\n sig = hashlib.sha1(str(args.musdb).encode()).hexdigest()[:8]\n metadata_file = Path(args.metadata) / ('musdb_' + sig + \".json\")\n root = 
Path(args.musdb) / \"train\"\n if not metadata_file.is_file() and distrib.rank == 0:\n metadata_file.parent.mkdir(exist_ok=True, parents=True)\n metadata = build_metadata(root, args.sources)\n json.dump(metadata, open(metadata_file, \"w\"))\n if distrib.world_size > 1:\n distributed.barrier()\n metadata = json.load(open(metadata_file))\n\n valid_tracks = _get_musdb_valid()\n if args.train_valid:\n metadata_train = metadata\n else:\n metadata_train = {name: meta for name, meta in metadata.items() if name not in valid_tracks}\n metadata_valid = {name: meta for name, meta in metadata.items() if name in valid_tracks}\n if args.full_cv:\n kw_cv = {}\n else:\n kw_cv = {'segment': args.segment, 'shift': args.shift}\n train_set = Wavset(root, metadata_train, args.sources,\n segment=args.segment, shift=args.shift,\n samplerate=args.samplerate, channels=args.channels,\n normalize=args.normalize)\n valid_set = Wavset(root, metadata_valid, [MIXTURE] + list(args.sources),\n samplerate=args.samplerate, channels=args.channels,\n normalize=args.normalize, **kw_cv)\n return train_set, valid_set\n"
] |
[
[
"torch.stack",
"torch.nn.functional.pad",
"torch.distributed.barrier"
]
] |
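The per-track example count in Wavset.__init__ is a small ceiling expression; with hypothetical numbers it enumerates every shift-strided window that starts inside the track:

import math

track_duration = 10.0   # seconds (made-up)
segment, shift = 4.0, 2.0
examples = int(math.ceil((track_duration - segment) / shift) + 1)
print(examples)  # 4 windows, at offsets 0, 2, 4 and 6 seconds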
thomasbrockmeier-ams/Open3D-ML
|
[
"1e362bbf133537668923905a12a15c540d9b689d"
] |
[
"ml3d/torch/models/sparseconvnet.py"
] |
[
"import numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom .base_model import BaseModel\nfrom ...utils import MODEL\nfrom ..modules.losses import filter_valid_label\nfrom ...datasets.augment import SemsegAugmentation\nfrom open3d.ml.torch.layers import SparseConv, SparseConvTranspose\nfrom open3d.ml.torch.ops import voxelize, reduce_subarrays_sum\n\n\nclass SparseConvUnet(BaseModel):\n \"\"\"Semantic Segmentation model.\n\n Uses UNet architecture replacing convolutions with Sparse Convolutions.\n\n Attributes:\n name: Name of model.\n Default to \"SparseConvUnet\".\n device: Which device to use (cpu or cuda).\n voxel_size: Voxel length for subsampling.\n multiplier: min length of feature length in each layer.\n conv_block_reps: repetition of Unet Blocks.\n residual_blocks: Whether to use Residual Blocks.\n in_channels: Number of features(default 3 for color).\n num_classes: Number of classes.\n \"\"\"\n\n def __init__(\n self,\n name=\"SparseConvUnet\",\n device=\"cuda\",\n multiplier=16, # Proportional to number of neurons in each layer.\n voxel_size=0.05,\n conv_block_reps=1, # Conv block repetitions.\n residual_blocks=False,\n in_channels=3,\n num_classes=20,\n grid_size=4096,\n batcher='ConcatBatcher',\n augment=None,\n **kwargs):\n super(SparseConvUnet, self).__init__(name=name,\n device=device,\n multiplier=multiplier,\n voxel_size=voxel_size,\n conv_block_reps=conv_block_reps,\n residual_blocks=residual_blocks,\n in_channels=in_channels,\n num_classes=num_classes,\n grid_size=grid_size,\n batcher=batcher,\n augment=augment,\n **kwargs)\n cfg = self.cfg\n self.device = device\n self.augmenter = SemsegAugmentation(cfg.augment)\n self.multiplier = cfg.multiplier\n self.input_layer = InputLayer()\n self.sub_sparse_conv = SubmanifoldSparseConv(in_channels=in_channels,\n filters=multiplier,\n kernel_size=[3, 3, 3])\n self.unet = UNet(conv_block_reps, [\n multiplier, 2 * multiplier, 3 * multiplier, 4 * multiplier,\n 5 * multiplier, 6 * multiplier, 7 * multiplier\n ], residual_blocks)\n self.batch_norm = BatchNormBlock(multiplier)\n self.relu = ReLUBlock()\n self.linear = LinearBlock(multiplier, num_classes)\n self.output_layer = OutputLayer()\n\n def forward(self, inputs):\n pos_list = []\n feat_list = []\n index_map_list = []\n\n for i in range(len(inputs.batch_lengths)):\n pos = inputs.point[i]\n feat = inputs.feat[i]\n feat, pos, index_map = self.input_layer(feat, pos)\n pos_list.append(pos)\n feat_list.append(feat)\n index_map_list.append(index_map)\n\n feat_list = self.sub_sparse_conv(feat_list, pos_list, voxel_size=1.0)\n feat_list = self.unet(pos_list, feat_list)\n feat_list = self.batch_norm(feat_list)\n feat_list = self.relu(feat_list)\n feat_list = self.linear(feat_list)\n output = self.output_layer(feat_list, index_map_list)\n\n return output\n\n def preprocess(self, data, attr):\n points = np.array(data['point'], dtype=np.float32)\n\n if 'label' not in data or data['label'] is None:\n labels = np.zeros((points.shape[0],), dtype=np.int32)\n else:\n labels = np.array(data['label'], dtype=np.int32).reshape((-1,))\n\n if 'feat' not in data or data['feat'] is None:\n raise Exception(\n \"SparseConvnet doesn't work without feature values.\")\n\n feat = np.array(data['feat'], dtype=np.float32)\n\n # Scale to voxel size.\n points *= 1. 
/ self.cfg.voxel_size # Scale = 1/voxel_size\n\n if attr['split'] in ['training', 'train']:\n points, feat, labels = self.augmenter.augment(\n points, feat, labels, self.cfg.get('augment', None))\n\n m = points.min(0)\n M = points.max(0)\n\n # Randomly place pointcloud in 4096 size grid.\n grid_size = self.cfg.grid_size\n offset = -m + np.clip(\n grid_size - M + m - 0.001, 0, None) * np.random.rand(3) + np.clip(\n grid_size - M + m + 0.001, None, 0) * np.random.rand(3)\n\n points += offset\n idxs = (points.min(1) >= 0) * (points.max(1) < 4096)\n\n points = points[idxs]\n feat = feat[idxs]\n labels = labels[idxs]\n\n points = (points.astype(np.int32) + 0.5).astype(\n np.float32) # Move points to voxel center.\n\n data = {}\n data['point'] = points\n data['feat'] = feat\n data['label'] = labels\n\n return data\n\n def transform(self, data, attr):\n data['point'] = torch.from_numpy(data['point'])\n data['feat'] = torch.from_numpy(data['feat'])\n data['label'] = torch.from_numpy(data['label'])\n\n return data\n\n def update_probs(self, inputs, results, test_probs, test_labels):\n result = results.reshape(-1, self.cfg.num_classes)\n probs = torch.nn.functional.softmax(result, dim=-1).cpu().data.numpy()\n labels = np.argmax(probs, 1)\n\n self.trans_point_sampler(patchwise=False)\n\n return probs, labels\n\n def inference_begin(self, data):\n data = self.preprocess(data, {'split': 'test'})\n data['batch_lengths'] = [data['point'].shape[0]]\n data = self.transform(data, {})\n\n self.inference_input = data\n\n def inference_preprocess(self):\n return self.inference_input\n\n def inference_end(self, inputs, results):\n results = torch.reshape(results, (-1, self.cfg.num_classes))\n\n m_softmax = torch.nn.Softmax(dim=-1)\n results = m_softmax(results)\n results = results.cpu().data.numpy()\n\n probs = np.reshape(results, [-1, self.cfg.num_classes])\n\n pred_l = np.argmax(probs, 1)\n\n return {'predict_labels': pred_l, 'predict_scores': probs}\n\n def get_loss(self, Loss, results, inputs, device):\n \"\"\"Calculate the loss on output of the model.\n\n Attributes:\n Loss: Object of type `SemSegLoss`.\n results: Output of the model.\n inputs: Input of the model.\n device: device(cpu or cuda).\n \n Returns:\n Returns loss, labels and scores.\n \"\"\"\n cfg = self.cfg\n labels = torch.cat(inputs['data'].label, 0)\n\n scores, labels = filter_valid_label(results, labels, cfg.num_classes,\n cfg.ignored_label_inds, device)\n\n loss = Loss.weighted_CrossEntropyLoss(scores, labels)\n\n return loss, labels, scores\n\n def get_optimizer(self, cfg_pipeline):\n optimizer = torch.optim.Adam(self.parameters(), lr=cfg_pipeline.adam_lr)\n scheduler = torch.optim.lr_scheduler.ExponentialLR(\n optimizer, cfg_pipeline.scheduler_gamma)\n\n return optimizer, scheduler\n\n\nMODEL._register_module(SparseConvUnet, 'torch')\n\n\nclass BatchNormBlock(nn.Module):\n\n def __init__(self, m, eps=1e-4, momentum=0.01):\n super(BatchNormBlock, self).__init__()\n self.bn = nn.BatchNorm1d(m, eps=eps, momentum=momentum)\n\n def forward(self, feat_list):\n lengths = [feat.shape[0] for feat in feat_list]\n out = self.bn(torch.cat(feat_list, 0))\n out_list = []\n start = 0\n for l in lengths:\n out_list.append(out[start:start + l])\n start += l\n\n return out_list\n\n def __name__(self):\n return \"BatchNormBlock\"\n\n\nclass ReLUBlock(nn.Module):\n\n def __init__(self):\n super(ReLUBlock, self).__init__()\n self.relu = nn.ReLU()\n\n def forward(self, feat_list):\n lengths = [feat.shape[0] for feat in feat_list]\n out = 
self.relu(torch.cat(feat_list, 0))\n out_list = []\n start = 0\n for l in lengths:\n out_list.append(out[start:start + l])\n start += l\n\n return out_list\n\n def __name__(self):\n return \"ReLUBlock\"\n\n\nclass LinearBlock(nn.Module):\n\n def __init__(self, a, b):\n super(LinearBlock, self).__init__()\n self.linear = nn.Linear(a, b)\n\n def forward(self, feat_list):\n out_list = []\n for feat in feat_list:\n out_list.append(self.linear(feat))\n\n return out_list\n\n def __name__(self):\n return \"LinearBlock\"\n\n\nclass InputLayer(nn.Module):\n\n def __init__(self, voxel_size=1.0):\n super(InputLayer, self).__init__()\n self.voxel_size = torch.Tensor([voxel_size, voxel_size, voxel_size])\n\n def forward(self, features, in_positions):\n v = voxelize(in_positions, self.voxel_size, torch.Tensor([0, 0, 0]),\n torch.Tensor([40960, 40960, 40960]))\n\n # Contiguous repeating positions.\n in_positions = in_positions[v.voxel_point_indices]\n features = features[v.voxel_point_indices]\n\n # Find reverse mapping.\n reverse_map_voxelize = np.zeros((in_positions.shape[0],))\n reverse_map_voxelize[v.voxel_point_indices.cpu().numpy()] = np.arange(\n in_positions.shape[0])\n reverse_map_voxelize = reverse_map_voxelize.astype(np.int32)\n\n # Unique positions.\n in_positions = in_positions[v.voxel_point_row_splits[:-1]]\n\n # Mean of features.\n count = v.voxel_point_row_splits[1:] - v.voxel_point_row_splits[:-1]\n reverse_map_sort = np.repeat(np.arange(count.shape[0]),\n count.cpu().numpy()).astype(np.int32)\n\n features_avg = in_positions.clone()\n features_avg[:, 0] = reduce_subarrays_sum(features[:, 0],\n v.voxel_point_row_splits)\n features_avg[:, 1] = reduce_subarrays_sum(features[:, 1],\n v.voxel_point_row_splits)\n features_avg[:, 2] = reduce_subarrays_sum(features[:, 2],\n v.voxel_point_row_splits)\n\n features_avg = features_avg / count.unsqueeze(1)\n\n return features_avg, in_positions, reverse_map_sort[\n reverse_map_voxelize]\n\n\nclass OutputLayer(nn.Module):\n\n def __init__(self, voxel_size=1.0):\n super(OutputLayer, self).__init__()\n\n def forward(self, features_list, index_map_list):\n out = []\n for feat, index_map in zip(features_list, index_map_list):\n out.append(feat[index_map])\n return torch.cat(out, 0)\n\n\nclass SubmanifoldSparseConv(nn.Module):\n\n def __init__(self,\n in_channels,\n filters,\n kernel_size,\n use_bias=False,\n offset=None,\n normalize=False):\n super(SubmanifoldSparseConv, self).__init__()\n\n if offset is None:\n if kernel_size[0] % 2:\n offset = 0.\n else:\n offset = 0.5\n\n offset = torch.full((3,), offset, dtype=torch.float32)\n self.net = SparseConv(in_channels=in_channels,\n filters=filters,\n kernel_size=kernel_size,\n use_bias=use_bias,\n offset=offset,\n normalize=normalize)\n\n def forward(self,\n features_list,\n in_positions_list,\n out_positions_list=None,\n voxel_size=1.0):\n if out_positions_list is None:\n out_positions_list = in_positions_list\n\n out_feat = []\n for feat, in_pos, out_pos in zip(features_list, in_positions_list,\n out_positions_list):\n out_feat.append(self.net(feat, in_pos, out_pos, voxel_size))\n\n return out_feat\n\n def __name__(self):\n return \"SubmanifoldSparseConv\"\n\n\ndef calculate_grid(in_positions):\n filter = torch.Tensor([[-1, -1, -1], [-1, -1, 0], [-1, 0, -1], [-1, 0, 0],\n [0, -1, -1], [0, -1, 0], [0, 0, -1],\n [0, 0, 0]]).to(in_positions.device)\n\n out_pos = in_positions.long().repeat(1, filter.shape[0]).reshape(-1, 3)\n filter = filter.repeat(in_positions.shape[0], 1)\n\n out_pos = out_pos + filter\n 
out_pos = out_pos[out_pos.min(1).values >= 0]\n out_pos = out_pos[(~((out_pos.long() % 2).bool()).any(1))]\n out_pos = torch.unique(out_pos, dim=0)\n\n return out_pos + 0.5\n\n\nclass Convolution(nn.Module):\n\n def __init__(self,\n in_channels,\n filters,\n kernel_size,\n use_bias=False,\n offset=None,\n normalize=False):\n super(Convolution, self).__init__()\n\n if offset is None:\n if kernel_size[0] % 2:\n offset = 0.\n else:\n offset = -0.5\n\n offset = torch.full((3,), offset, dtype=torch.float32)\n self.net = SparseConv(in_channels=in_channels,\n filters=filters,\n kernel_size=kernel_size,\n use_bias=use_bias,\n offset=offset,\n normalize=normalize)\n\n def forward(self, features_list, in_positions_list, voxel_size=1.0):\n out_positions_list = []\n for in_positions in in_positions_list:\n out_positions_list.append(calculate_grid(in_positions))\n\n out_feat = []\n for feat, in_pos, out_pos in zip(features_list, in_positions_list,\n out_positions_list):\n out_feat.append(self.net(feat, in_pos, out_pos, voxel_size))\n\n out_positions_list = [out / 2 for out in out_positions_list]\n\n return out_feat, out_positions_list\n\n def __name__(self):\n return \"Convolution\"\n\n\nclass DeConvolution(nn.Module):\n\n def __init__(self,\n in_channels,\n filters,\n kernel_size,\n use_bias=False,\n offset=None,\n normalize=False):\n super(DeConvolution, self).__init__()\n\n if offset is None:\n if kernel_size[0] % 2:\n offset = 0.\n else:\n offset = -0.5\n\n offset = torch.full((3,), offset, dtype=torch.float32)\n self.net = SparseConvTranspose(in_channels=in_channels,\n filters=filters,\n kernel_size=kernel_size,\n use_bias=use_bias,\n offset=offset,\n normalize=normalize)\n\n def forward(self,\n features_list,\n in_positions_list,\n out_positions_list,\n voxel_size=1.0):\n out_feat = []\n for feat, in_pos, out_pos in zip(features_list, in_positions_list,\n out_positions_list):\n out_feat.append(self.net(feat, in_pos, out_pos, voxel_size))\n\n return out_feat\n\n def __name__(self):\n return \"DeConvolution\"\n\n\nclass ConcatFeat(nn.Module):\n\n def __init__(self):\n super(ConcatFeat, self).__init__()\n\n def __name__(self):\n return \"ConcatFeat\"\n\n def forward(self, feat):\n return feat\n\n\nclass JoinFeat(nn.Module):\n\n def __init__(self):\n super(JoinFeat, self).__init__()\n\n def __name__(self):\n return \"JoinFeat\"\n\n def forward(self, feat_cat, feat):\n out = []\n for a, b in zip(feat_cat, feat):\n out.append(torch.cat([a, b], -1))\n\n return out\n\n\nclass NetworkInNetwork(nn.Module):\n\n def __init__(self, nIn, nOut, bias=False):\n super(NetworkInNetwork, self).__init__()\n if nIn == nOut:\n self.linear = nn.Identity()\n else:\n self.linear = nn.Linear(nIn, nOut, bias=bias)\n\n def forward(self, inputs):\n out = []\n for inp in inputs:\n out.append(self.linear(inp))\n\n return out\n\n\nclass ResidualBlock(nn.Module):\n\n def __init__(self, nIn, nOut):\n super(ResidualBlock, self).__init__()\n\n self.lin = NetworkInNetwork(nIn, nOut)\n\n self.batch_norm1 = BatchNormBlock(nIn)\n self.relu1 = ReLUBlock()\n self.sub_sparse_conv1 = SubmanifoldSparseConv(in_channels=nIn,\n filters=nOut,\n kernel_size=[3, 3, 3])\n\n self.batch_norm2 = BatchNormBlock(nOut)\n self.relu2 = ReLUBlock()\n self.sub_sparse_conv2 = SubmanifoldSparseConv(in_channels=nOut,\n filters=nOut,\n kernel_size=[3, 3, 3])\n\n def forward(self, feat_list, pos_list):\n out1 = self.lin(feat_list)\n feat_list = self.batch_norm1(feat_list)\n feat_list = self.relu1(feat_list)\n feat_list = self.sub_sparse_conv1(feat_list, 
pos_list)\n feat_list = self.batch_norm2(feat_list)\n feat_list = self.relu2(feat_list)\n out2 = self.sub_sparse_conv2(feat_list, pos_list)\n\n return [a + b for a, b in zip(out1, out2)]\n\n def __name__(self):\n return \"ResidualBlock\"\n\n\nclass UNet(nn.Module):\n\n def __init__(self,\n conv_block_reps,\n nPlanes,\n residual_blocks=False,\n downsample=[2, 2],\n leakiness=0):\n super(UNet, self).__init__()\n self.net = nn.ModuleList(\n self.get_UNet(nPlanes, residual_blocks, conv_block_reps))\n self.residual_blocks = residual_blocks\n\n @staticmethod\n def block(layers, a, b, residual_blocks):\n if residual_blocks:\n layers.append(ResidualBlock(a, b))\n\n else:\n layers.append(BatchNormBlock(a))\n layers.append(ReLUBlock())\n layers.append(\n SubmanifoldSparseConv(in_channels=a,\n filters=b,\n kernel_size=[3, 3, 3]))\n\n @staticmethod\n def get_UNet(nPlanes, residual_blocks, conv_block_reps):\n layers = []\n for i in range(conv_block_reps):\n UNet.block(layers, nPlanes[0], nPlanes[0], residual_blocks)\n\n if len(nPlanes) > 1:\n layers.append(ConcatFeat())\n layers.append(BatchNormBlock(nPlanes[0]))\n layers.append(ReLUBlock())\n layers.append(\n Convolution(in_channels=nPlanes[0],\n filters=nPlanes[1],\n kernel_size=[2, 2, 2]))\n layers = layers + UNet.get_UNet(nPlanes[1:], residual_blocks,\n conv_block_reps)\n layers.append(BatchNormBlock(nPlanes[1]))\n layers.append(ReLUBlock())\n layers.append(\n DeConvolution(in_channels=nPlanes[1],\n filters=nPlanes[0],\n kernel_size=[2, 2, 2]))\n\n layers.append(JoinFeat())\n\n for i in range(conv_block_reps):\n UNet.block(layers, nPlanes[0] * (2 if i == 0 else 1),\n nPlanes[0], residual_blocks)\n\n return layers\n\n def forward(self, pos_list, feat_list):\n conv_pos = []\n concat_feat = []\n for module in self.net:\n if isinstance(module, BatchNormBlock):\n feat_list = module(feat_list)\n elif isinstance(module, ReLUBlock):\n feat_list = module(feat_list)\n\n elif isinstance(module, ResidualBlock):\n feat_list = module(feat_list, pos_list)\n\n elif isinstance(module, SubmanifoldSparseConv):\n feat_list = module(feat_list, pos_list)\n\n elif isinstance(module, Convolution):\n conv_pos.append([pos.clone() for pos in pos_list])\n feat_list, pos_list = module(feat_list, pos_list)\n\n elif isinstance(module, DeConvolution):\n feat_list = module(feat_list, [2 * pos for pos in pos_list],\n conv_pos[-1])\n pos_list = conv_pos.pop()\n\n elif isinstance(module, ConcatFeat):\n concat_feat.append([feat.clone() for feat in module(feat_list)])\n\n elif isinstance(module, JoinFeat):\n feat_list = module(concat_feat.pop(), feat_list)\n\n else:\n raise Exception(\"Unknown module {}\".format(module))\n\n return feat_list\n\n\ndef load_unet_wts(net, path):\n wts = list(torch.load(path).values())\n state_dict = net.state_dict()\n i = 0\n for key in state_dict:\n if 'offset' in key or 'tracked' in key:\n continue\n if len(wts[i].shape) == 4:\n shp = wts[i].shape\n state_dict[key] = np.transpose(\n wts[i].reshape(int(shp[0]**(1 / 3)), int(shp[0]**(1 / 3)),\n int(shp[0]**(1 / 3)), shp[-2], shp[-1]),\n (2, 1, 0, 3, 4))\n else:\n state_dict[key] = wts[i]\n i += 1\n\n net.load_state_dict(state_dict)\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.functional.softmax",
"torch.cat",
"torch.load",
"torch.unique",
"numpy.clip",
"numpy.reshape",
"numpy.arange",
"torch.reshape",
"torch.from_numpy",
"numpy.argmax",
"numpy.zeros",
"torch.nn.BatchNorm1d",
"torch.full",
"torch.nn.Linear",
"numpy.random.rand",
"numpy.array",
"torch.Tensor",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.nn.Identity",
"torch.nn.ReLU"
]
] |
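InputLayer averages point features per voxel and keeps an index map so OutputLayer can scatter per-voxel outputs back onto the original points. A NumPy-only sketch of that average-then-scatter pattern (the real layer uses Open3D's voxelize and reduce_subarrays_sum ops; 2-D points here for brevity):

import numpy as np

points = np.array([[0.2, 0.1], [0.3, 0.4], [1.7, 0.2]])
feats = np.array([[1.0], [3.0], [5.0]])

voxel_ids = np.floor(points).astype(int)
uniq, index_map, counts = np.unique(
    voxel_ids, axis=0, return_inverse=True, return_counts=True)

# per-voxel feature sums, then means (the reduce_subarrays_sum step)
sums = np.zeros((len(uniq), feats.shape[1]))
np.add.at(sums, index_map, feats)
voxel_feats = sums / counts[:, None]

# scatter voxel features back to points, as OutputLayer does with index_map
print(voxel_feats[index_map])  # [[2.], [2.], [5.]]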
lverwimp/state_gradients
|
[
"1dfa42df068d009d2dddbab7484c87bd7177ec1f"
] |
[
"aux_scripts/relative_memory.py"
] |
[
"#! /usr/bin/env python\n# calculate relative memory =\n# norm(avg gradient matrix * normalized difference vector) / largest singular value for avg gradient matrix\n\nimport sys, glob, os\nimport numpy as np\n\ndiff_vector = sys.argv[1]\ngrad_dir = sys.argv[2]\nnorm_emb_f = sys.argv[3]\nif len(sys.argv) > 4:\n name_avg = sys.argv[4]\n list_grads = glob.glob('{0}/delay_*_avg_{1}_norm.npy'.format(grad_dir, name_avg))\nelse:\n list_grads = glob.glob('{0}/delay_*_avg_norm.npy'.format(grad_dir))\n\n# load difference vector: difference between average embedding\n# for 1 property and average embedding for another property\ndiff = np.load(diff_vector)\nnorm_emb = np.load(norm_emb_f)\nnorm_pca_diff = np.matmul(norm_emb, diff)\n# normalize difference vector\nnorm_diff = norm_pca_diff / np.linalg.norm(norm_pca_diff)\n\nfor f in list_grads:\n delay = os.path.basename(f).replace('delay_','').replace('_avg.npy','')\n\n # calculate numerator: norm(avg gradient matrix * normalized difference vector)\n grad_matrix = np.load(f)\n mult = np.dot(grad_matrix, norm_diff)\n norm = np.linalg.norm(mult)\n\n # calculate svd\n u, s, vh = np.linalg.svd(grad_matrix)\n\n # divide by the largest singular value to get relative memory\n rel_mem = norm / s[0]\n\n print('delay: {0}\\trelative memory: {1}'.format(delay, rel_mem))\n"
] |
[
[
"numpy.dot",
"numpy.linalg.svd",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.load"
]
] |
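The quantity the script prints is norm(G @ d_hat) / s_max(G), which lies in [0, 1] because s_max is the largest singular value. A toy computation with a made-up gradient matrix:

import numpy as np

G = np.array([[2.0, 0.0],
              [0.0, 0.5]])
d_hat = np.array([1.0, 1.0]) / np.sqrt(2.0)   # normalized difference vector

num = np.linalg.norm(G @ d_hat)
s = np.linalg.svd(G, compute_uv=False)        # singular values, descending
print(num / s[0])  # ~0.73; reaches 1.0 when d_hat is the top right-singular vector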
ysk24ok/espnet
|
[
"cf91a45e2a2c0b0c1ebf807d9dc107247fb73e3f"
] |
[
"espnet/lm/pytorch_backend/lm.py"
] |
[
"#!/usr/bin/env python3\n# Copyright 2017 Johns Hopkins University (Shinji Watanabe)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n# This code is ported from the following implementation written in Torch.\n# https://github.com/chainer/chainer/blob/master/examples/ptb/train_ptb_custom_loop.py\n\n\"\"\"LM training in pytorch.\"\"\"\n\nimport copy\nimport json\nimport logging\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import data_parallel\n\nfrom chainer import Chain\nfrom chainer.dataset import convert\nfrom chainer import reporter\nfrom chainer import training\nfrom chainer.training import extensions\n\nfrom espnet.lm.lm_utils import count_tokens\nfrom espnet.lm.lm_utils import load_dataset\nfrom espnet.lm.lm_utils import MakeSymlinkToBestModel\nfrom espnet.lm.lm_utils import ParallelSentenceIterator\nfrom espnet.lm.lm_utils import read_tokens\nfrom espnet.nets.lm_interface import dynamic_import_lm\nfrom espnet.nets.lm_interface import LMInterface\nfrom espnet.optimizer.factory import dynamic_import_optimizer\nfrom espnet.scheduler.pytorch import PyTorchScheduler\nfrom espnet.scheduler.scheduler import dynamic_import_scheduler\n\nfrom espnet.asr.asr_utils import snapshot_object\nfrom espnet.asr.asr_utils import torch_load\nfrom espnet.asr.asr_utils import torch_resume\nfrom espnet.asr.asr_utils import torch_snapshot\n\nfrom espnet.utils.training.tensorboard_logger import TensorboardLogger\nfrom tensorboardX import SummaryWriter\n\nfrom espnet.utils.deterministic_utils import set_deterministic_pytorch\nfrom espnet.utils.training.evaluator import BaseEvaluator\nfrom espnet.utils.training.iterators import ShufflingEnabler\nfrom espnet.utils.training.train_utils import check_early_stop\nfrom espnet.utils.training.train_utils import set_early_stop\n\n\ndef compute_perplexity(result):\n \"\"\"Compute and add the perplexity to the LogReport.\n\n :param dict result: The current observations\n \"\"\"\n # Routine to rewrite the result dictionary of LogReport to add perplexity values\n result[\"perplexity\"] = np.exp(result[\"main/nll\"] / result[\"main/count\"])\n if \"validation/main/nll\" in result:\n result[\"val_perplexity\"] = np.exp(\n result[\"validation/main/nll\"] / result[\"validation/main/count\"]\n )\n\n\nclass Reporter(Chain):\n \"\"\"Dummy module to use chainer's trainer.\"\"\"\n\n def report(self, loss):\n \"\"\"Report nothing.\"\"\"\n pass\n\n\ndef concat_examples(batch, device=None, padding=None):\n \"\"\"Concat examples in minibatch.\n\n :param np.ndarray batch: The batch to concatenate\n :param int device: The device to send to\n :param Tuple[int,int] padding: The padding to use\n :return: (inputs, targets)\n :rtype (torch.Tensor, torch.Tensor)\n \"\"\"\n x, t = convert.concat_examples(batch, padding=padding)\n x = torch.from_numpy(x)\n t = torch.from_numpy(t)\n if device is not None and device >= 0:\n x = x.cuda(device)\n t = t.cuda(device)\n return x, t\n\n\nclass BPTTUpdater(training.StandardUpdater):\n \"\"\"An updater for a pytorch LM.\"\"\"\n\n def __init__(\n self,\n train_iter,\n model,\n optimizer,\n schedulers,\n device,\n gradclip=None,\n use_apex=False,\n accum_grad=1,\n ):\n \"\"\"Initialize class.\n\n Args:\n train_iter (chainer.dataset.Iterator): The train iterator\n model (LMInterface) : The model to update\n optimizer (torch.optim.Optimizer): The optimizer for training\n schedulers (espnet.scheduler.scheduler.SchedulerInterface):\n The schedulers of `optimizer`\n device (int): The device id\n gradclip (float): 
The gradient clipping value to use\n use_apex (bool): The flag to use Apex in backprop.\n accum_grad (int): The number of gradient accumulation.\n\n \"\"\"\n super(BPTTUpdater, self).__init__(train_iter, optimizer)\n self.model = model\n self.device = device\n self.gradclip = gradclip\n self.use_apex = use_apex\n self.scheduler = PyTorchScheduler(schedulers, optimizer)\n self.accum_grad = accum_grad\n\n # The core part of the update routine can be customized by overriding.\n def update_core(self):\n \"\"\"Update the model.\"\"\"\n # When we pass one iterator and optimizer to StandardUpdater.__init__,\n # they are automatically named 'main'.\n train_iter = self.get_iterator(\"main\")\n optimizer = self.get_optimizer(\"main\")\n # Progress the dataset iterator for sentences at each iteration.\n self.model.zero_grad() # Clear the parameter gradients\n accum = {\"loss\": 0.0, \"nll\": 0.0, \"count\": 0}\n for _ in range(self.accum_grad):\n batch = train_iter.__next__()\n # Concatenate the token IDs to matrices and send them to the device\n # self.converter does this job\n # (it is chainer.dataset.concat_examples by default)\n x, t = concat_examples(batch, device=self.device[0], padding=(0, -100))\n if self.device[0] == -1:\n loss, nll, count = self.model(x, t)\n else:\n # apex does not support torch.nn.DataParallel\n loss, nll, count = data_parallel(self.model, (x, t), self.device)\n\n # backward\n loss = loss.mean() / self.accum_grad\n if self.use_apex:\n from apex import amp\n\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward() # Backprop\n # accumulate stats\n accum[\"loss\"] += float(loss)\n accum[\"nll\"] += float(nll.sum())\n accum[\"count\"] += int(count.sum())\n\n for k, v in accum.items():\n reporter.report({k: v}, optimizer.target)\n if self.gradclip is not None:\n nn.utils.clip_grad_norm_(self.model.parameters(), self.gradclip)\n optimizer.step() # Update the parameters\n self.scheduler.step(n_iter=self.iteration)\n\n\nclass LMEvaluator(BaseEvaluator):\n \"\"\"A custom evaluator for a pytorch LM.\"\"\"\n\n def __init__(self, val_iter, eval_model, reporter, device):\n \"\"\"Initialize class.\n\n :param chainer.dataset.Iterator val_iter : The validation iterator\n :param LMInterface eval_model : The model to evaluate\n :param chainer.Reporter reporter : The observations reporter\n :param int device : The device id to use\n\n \"\"\"\n super(LMEvaluator, self).__init__(val_iter, reporter, device=-1)\n self.model = eval_model\n self.device = device\n\n def evaluate(self):\n \"\"\"Evaluate the model.\"\"\"\n val_iter = self.get_iterator(\"main\")\n loss = 0\n nll = 0\n count = 0\n self.model.eval()\n with torch.no_grad():\n for batch in copy.copy(val_iter):\n x, t = concat_examples(batch, device=self.device[0], padding=(0, -100))\n if self.device[0] == -1:\n l, n, c = self.model(x, t)\n else:\n # apex does not support torch.nn.DataParallel\n l, n, c = data_parallel(self.model, (x, t), self.device)\n loss += float(l.sum())\n nll += float(n.sum())\n count += int(c.sum())\n self.model.train()\n # report validation loss\n observation = {}\n with reporter.report_scope(observation):\n reporter.report({\"loss\": loss}, self.model.reporter)\n reporter.report({\"nll\": nll}, self.model.reporter)\n reporter.report({\"count\": count}, self.model.reporter)\n return observation\n\n\ndef train(args):\n \"\"\"Train with the given args.\n\n :param Namespace args: The program arguments\n :param type model_class: LMInterface class for training\n 
\"\"\"\n model_class = dynamic_import_lm(args.model_module, args.backend)\n assert issubclass(model_class, LMInterface), \"model should implement LMInterface\"\n # display torch version\n logging.info(\"torch version = \" + torch.__version__)\n\n set_deterministic_pytorch(args)\n\n # check cuda and cudnn availability\n if not torch.cuda.is_available():\n logging.warning(\"cuda is not available\")\n\n # get special label ids\n unk = args.char_list_dict[\"<unk>\"]\n eos = args.char_list_dict[\"<eos>\"]\n # read tokens as a sequence of sentences\n val, n_val_tokens, n_val_oovs = load_dataset(\n args.valid_label, args.char_list_dict, args.dump_hdf5_path\n )\n train, n_train_tokens, n_train_oovs = load_dataset(\n args.train_label, args.char_list_dict, args.dump_hdf5_path\n )\n logging.info(\"#vocab = \" + str(args.n_vocab))\n logging.info(\"#sentences in the training data = \" + str(len(train)))\n logging.info(\"#tokens in the training data = \" + str(n_train_tokens))\n logging.info(\n \"oov rate in the training data = %.2f %%\"\n % (n_train_oovs / n_train_tokens * 100)\n )\n logging.info(\"#sentences in the validation data = \" + str(len(val)))\n logging.info(\"#tokens in the validation data = \" + str(n_val_tokens))\n logging.info(\n \"oov rate in the validation data = %.2f %%\" % (n_val_oovs / n_val_tokens * 100)\n )\n\n use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0\n # Create the dataset iterators\n batch_size = args.batchsize * max(args.ngpu, 1)\n if batch_size * args.accum_grad > args.batchsize:\n logging.info(\n f\"batch size is automatically increased \"\n f\"({args.batchsize} -> {batch_size * args.accum_grad})\"\n )\n train_iter = ParallelSentenceIterator(\n train,\n batch_size,\n max_length=args.maxlen,\n sos=eos,\n eos=eos,\n shuffle=not use_sortagrad,\n )\n val_iter = ParallelSentenceIterator(\n val, batch_size, max_length=args.maxlen, sos=eos, eos=eos, repeat=False\n )\n epoch_iters = int(len(train_iter.batch_indices) / args.accum_grad)\n logging.info(\"#iterations per epoch = %d\" % epoch_iters)\n logging.info(\"#total iterations = \" + str(args.epoch * epoch_iters))\n # Prepare an RNNLM model\n if args.train_dtype in (\"float16\", \"float32\", \"float64\"):\n dtype = getattr(torch, args.train_dtype)\n else:\n dtype = torch.float32\n model = model_class(args.n_vocab, args).to(dtype=dtype)\n if args.ngpu > 0:\n model.to(\"cuda\")\n gpu_id = list(range(args.ngpu))\n else:\n gpu_id = [-1]\n\n # Save model conf to json\n model_conf = args.outdir + \"/model.json\"\n with open(model_conf, \"wb\") as f:\n logging.info(\"writing a model config file to \" + model_conf)\n f.write(\n json.dumps(vars(args), indent=4, ensure_ascii=False, sort_keys=True).encode(\n \"utf_8\"\n )\n )\n\n logging.warning(\n \"num. model params: {:,} (num. 
trained: {:,} ({:.1f}%))\".format(\n sum(p.numel() for p in model.parameters()),\n sum(p.numel() for p in model.parameters() if p.requires_grad),\n sum(p.numel() for p in model.parameters() if p.requires_grad)\n * 100.0\n / sum(p.numel() for p in model.parameters()),\n )\n )\n\n # Set up an optimizer\n opt_class = dynamic_import_optimizer(args.opt, args.backend)\n optimizer = opt_class.from_args(model.parameters(), args)\n if args.schedulers is None:\n schedulers = []\n else:\n schedulers = [dynamic_import_scheduler(v)(k, args) for k, v in args.schedulers]\n\n # setup apex.amp\n if args.train_dtype in (\"O0\", \"O1\", \"O2\", \"O3\"):\n try:\n from apex import amp\n except ImportError as e:\n logging.error(\n f\"You need to install apex for --train-dtype {args.train_dtype}. \"\n \"See https://github.com/NVIDIA/apex#linux\"\n )\n raise e\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.train_dtype)\n use_apex = True\n else:\n use_apex = False\n\n # FIXME: TOO DIRTY HACK\n reporter = Reporter()\n setattr(model, \"reporter\", reporter)\n setattr(optimizer, \"target\", reporter)\n setattr(optimizer, \"serialize\", lambda s: reporter.serialize(s))\n\n updater = BPTTUpdater(\n train_iter,\n model,\n optimizer,\n schedulers,\n gpu_id,\n gradclip=args.gradclip,\n use_apex=use_apex,\n accum_grad=args.accum_grad,\n )\n trainer = training.Trainer(updater, (args.epoch, \"epoch\"), out=args.outdir)\n trainer.extend(LMEvaluator(val_iter, model, reporter, device=gpu_id))\n trainer.extend(\n extensions.LogReport(\n postprocess=compute_perplexity,\n trigger=(args.report_interval_iters, \"iteration\"),\n )\n )\n trainer.extend(\n extensions.PrintReport(\n [\n \"epoch\",\n \"iteration\",\n \"main/loss\",\n \"perplexity\",\n \"val_perplexity\",\n \"elapsed_time\",\n ]\n ),\n trigger=(args.report_interval_iters, \"iteration\"),\n )\n trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))\n # Save best models\n trainer.extend(torch_snapshot(filename=\"snapshot.ep.{.updater.epoch}\"))\n trainer.extend(snapshot_object(model, \"rnnlm.model.{.updater.epoch}\"))\n # T.Hori: MinValueTrigger should be used, but it fails when resuming\n trainer.extend(MakeSymlinkToBestModel(\"validation/main/loss\", \"rnnlm.model\"))\n\n if use_sortagrad:\n trainer.extend(\n ShufflingEnabler([train_iter]),\n trigger=(args.sortagrad if args.sortagrad != -1 else args.epoch, \"epoch\"),\n )\n if args.resume:\n logging.info(\"resumed from %s\" % args.resume)\n torch_resume(args.resume, trainer)\n\n set_early_stop(trainer, args, is_lm=True)\n if args.tensorboard_dir is not None and args.tensorboard_dir != \"\":\n writer = SummaryWriter(args.tensorboard_dir)\n trainer.extend(\n TensorboardLogger(writer), trigger=(args.report_interval_iters, \"iteration\")\n )\n\n trainer.run()\n check_early_stop(trainer, args.epoch)\n\n # compute perplexity for test set\n if args.test_label:\n logging.info(\"test the best model\")\n torch_load(args.outdir + \"/rnnlm.model.best\", model)\n test = read_tokens(args.test_label, args.char_list_dict)\n n_test_tokens, n_test_oovs = count_tokens(test, unk)\n logging.info(\"#sentences in the test data = \" + str(len(test)))\n logging.info(\"#tokens in the test data = \" + str(n_test_tokens))\n logging.info(\n \"oov rate in the test data = %.2f %%\" % (n_test_oovs / n_test_tokens * 100)\n )\n test_iter = ParallelSentenceIterator(\n test, batch_size, max_length=args.maxlen, sos=eos, eos=eos, repeat=False\n )\n evaluator = LMEvaluator(test_iter, model, reporter, 
device=gpu_id)\n result = evaluator()\n compute_perplexity(result)\n logging.info(f\"test perplexity: {result['perplexity']}\")\n"
] |
[
[
"torch.nn.parallel.data_parallel",
"torch.from_numpy",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.exp"
]
] |
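The record above tags `numpy.exp` as its only non-torch API; it is what turns accumulated negative log-likelihood into the perplexities logged via `compute_perplexity`. A minimal sketch of that conversion, with the dictionary keys assumed (modelled on the `main/loss` entries in the `PrintReport` list):

```python
import numpy as np

def compute_perplexity_sketch(result):
    # Key names are assumptions, modelled on the 'main/loss' entries
    # listed in the PrintReport extension above.
    result["perplexity"] = np.exp(result["main/loss"] / result["main/count"])
    return result

stats = compute_perplexity_sketch({"main/loss": 460.5, "main/count": 100})
print(stats["perplexity"])  # ~100.0 == exp(mean NLL per token)
```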
MuhammetALAPAN/Kaggle.com-Sql-BigQuery-Studies
|
[
"b17568dd9cd074629a8789ca3c2fc351030f76a7"
] |
[
"venv/Lib/site-packages/bigquery/core/Table.py"
] |
[
"import pandas as pd\n\nfrom bigquery.core.Column import detect_type, find_sample_value\n\n\ndef get_table_info(_dbstream, table_and_schema_name):\n split = table_and_schema_name.split(\".\")\n if len(split) == 2:\n table_name = split[1]\n schema_name = split[0]\n else:\n raise Exception(\"Invalid table or schema name\")\n query = \"SELECT column_name, data_type, is_nullable FROM %s.INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='%s'\" % (\n schema_name, table_name)\n return _dbstream.execute_query(query)\n\n\ndef format_create_table(_dbstream, data):\n columns_name = data[\"columns_name\"]\n rows = data[\"rows\"]\n params = {}\n df = pd.DataFrame(rows, columns=columns_name)\n df = df.where((pd.notnull(df)), None)\n for i in range(len(columns_name)):\n name = columns_name[i]\n example_max, example_min = find_sample_value(df, name, i)\n col = dict()\n col[\"example\"] = example_max\n type_max = detect_type(_dbstream, name=name, example=example_max, types=data.get(\"types\"))\n if type_max == \"TIMESTAMP\":\n type_min = detect_type(_dbstream, name=name, example=example_min, types=data.get(\"types\"))\n if type_min == type_max:\n col[\"type\"] = type_max\n else:\n col[\"type\"] = type_min\n else:\n col[\"type\"] = type_max\n params[name] = col\n\n query = \"\"\"\"\"\"\n query = query + \"CREATE TABLE %(table_name)s (\"\n col = list(params.keys())\n for i in range(len(col)):\n k = col[i]\n string_example = \" --example:\" + str(params[k][\"example\"])[:10].replace(\"\\n\", \"\").replace(\"%\", \"\") + ''\n if i == len(col) - 1:\n query = query + \"\\n \" + k + ' ' + params[k][\"type\"] + string_example\n else:\n query = query + \"\\n \" + k + ' ' + params[k][\"type\"] + ',' + string_example\n query = query + \"\\n )\"\n print(query)\n return query\n\n\ndef create_table(_dbstream, data, other_table_to_update):\n query = format_create_table(_dbstream, data)\n try:\n _dbstream.execute_query(query % {\"table_name\": data[\"table_name\"]})\n if other_table_to_update:\n _dbstream.execute_query(query % {\"table_name\": other_table_to_update})\n except Exception as e:\n if \" was not found \" in str(e).lower() and \" dataset \" in str(e).lower():\n schema_name = data['table_name'].split(\".\")[0]\n _dbstream.create_schema(schema_name)\n else:\n raise e\n\n\ndef create_columns(_dbstream, data, other_table_to_update):\n table_name = data[\"table_name\"]\n rows = data[\"rows\"]\n columns_name = data[\"columns_name\"]\n infos = get_table_info(_dbstream, table_name)\n all_column_in_table = [e['column_name'] for e in infos]\n df = pd.DataFrame(rows, columns=columns_name)\n df = df.where((pd.notnull(df)), None)\n query_table = \"alter table %s \\n\" % table_name\n if other_table_to_update:\n query_other_table = \"alter table %s \\n\" % other_table_to_update\n queries_table = []\n queries_other_table = []\n for column_name in columns_name:\n if column_name not in all_column_in_table:\n example_max, example_min = find_sample_value(df, column_name, columns_name.index(column_name))\n type_max = detect_type(_dbstream, name=column_name, example=example_max, types=data.get(\"types\"))\n if type_max == \"TIMESTAMP\":\n type_min = detect_type(_dbstream, name=column_name, example=example_min, types=data.get(\"types\"))\n if type_min == type_max:\n type_ = type_max\n else:\n type_ = \"STRING\"\n else:\n type_ = type_max\n query = \"\"\"\n add COLUMN %s %s\n \"\"\" % (column_name, type_)\n queries_table.append(query)\n if other_table_to_update:\n query = \"\"\"add COLUMN %s %s\"\"\" % (column_name, type_)\n 
queries_other_table.append(query)\n if queries_table:\n query = query_table + ',\\n '.join(queries_table) + \";\"\n _dbstream.execute_query(query)\n if queries_other_table:\n query = query_other_table + ',\\n '.join(queries_other_table) + \";\"\n _dbstream.execute_query(query)\n return 0\n"
] |
[
[
"pandas.notnull",
"pandas.DataFrame"
]
] |
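`format_create_table` deliberately leaves `%(table_name)s` unresolved so that `create_table` can render the same DDL for both the main table and `other_table_to_update`. A minimal sketch of that templating pattern (the schema, table, and column names here are hypothetical):

```python
columns = {"id": "INT64", "created_at": "TIMESTAMP"}  # hypothetical schema

query = "CREATE TABLE %(table_name)s ("
names = list(columns)
for i, name in enumerate(names):
    sep = "" if i == len(names) - 1 else ","
    query += "\n  " + name + " " + columns[name] + sep
query += "\n)"

# One template, several targets, as in create_table() above:
print(query % {"table_name": "my_schema.events"})
print(query % {"table_name": "my_schema.events_staging"})
```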
ksboy/fairseq
|
[
"bba33ad64e10efd7d3d95b5a0b6ad125216542cf"
] |
[
"fairseq/optim/adafactor.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nimport torch\nimport torch.optim\n\nfrom . import FairseqOptimizer, register_optimizer\n\n\n@register_optimizer('adafactor')\nclass FairseqAdafactor(FairseqOptimizer):\n def __init__(self, args, params):\n super().__init__(args, params)\n self._optimizer = Adafactor(params, **self.optimizer_config)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add optimizer-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar=\"E\",\n help='epsilons for Adafactor optimizer')\n parser.add_argument('--clip-threshold', type=float, default=1.0, metavar=\"C\",\n help='threshold for clipping update root mean square')\n parser.add_argument('--decay-rate', type=float, default=-0.8, metavar=\"D\",\n help='decay rate of the second moment estimator')\n parser.add_argument('--beta1', type=float, default=None, metavar=\"B\",\n help='beta for first moment estimator. Optional')\n parser.add_argument('--scale-parameter', action='store_true',\n help='scale learning rate by root mean square of parameter.')\n parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',\n help='weight decay')\n parser.add_argument('--warmup-init', action='store_true',\n help='use relative step for warm-up learning rate schedule')\n parser.add_argument('--relative-step', action='store_true',\n help='set learning rate to inverse square root of timestep.'\n 'If false, external learning rate applied')\n # fmt: on\n\n @property\n def optimizer_config(self):\n \"\"\"\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. 
This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n Note : Convergence issues empirically observed with fp16 on.\n Might require search for appropriate configuration.\n \"\"\"\n return {\n 'lr': self.args.lr[0],\n 'eps': eval(self.args.adafactor_eps),\n 'clip_threshold': self.args.clip_threshold,\n 'beta1': self.args.beta1,\n 'decay_rate': self.args.decay_rate,\n 'scale_parameter': self.args.scale_parameter,\n 'weight_decay': self.args.weight_decay,\n 'relative_step': self.args.relative_step,\n 'warmup_init': self.args.warmup_init,\n }\n\n\nclass Adafactor(torch.optim.Optimizer):\n \"\"\"Implements Adafactor algorithm.\n\n This implementation is based on:\n `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`\n (see https://arxiv.org/abs/1804.04235)\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): external learning rate (default: None)\n eps (tuple[float, float]): regularization constans for square gradient\n and parameter scale respectively (default: (1e-30, 1e-3))\n clip_threshold (float): threshold of root mean square of\n final gradient update (default: 1.0)\n decay_rate (float): coefficient used to compute running averages of square\n gradient (default: -0.8)\n beta1 (float): coefficient used for computing running averages of gradient\n (default: None)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n scale_parameter (bool): if true, learning rate is scaled by root mean square of\n parameter (default: True)\n relative_step (bool): if true, time-dependent learning rate is computed\n instead of external learning rate (default: True)\n warmup_init (bool): time-dependent learning rate computation depends on\n whether warm-up initialization is being used (default: False)\n \"\"\"\n\n def __init__(self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0,\n decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True,\n relative_step=True, warmup_init=False):\n defaults = dict(lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate,\n beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,\n relative_step=relative_step, warmup_init=warmup_init)\n super(Adafactor, self).__init__(params, defaults)\n\n @property\n def supports_memory_efficient_fp16(self):\n return True\n\n def _get_lr(self, param_group, param_state):\n rel_step_sz = param_group['lr']\n if param_group['relative_step']:\n min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2\n rel_step_sz = min(min_step, 1.0/math.sqrt(param_state['step']))\n param_scale = 1.0\n if param_group['scale_parameter']:\n param_scale = max(param_group['eps'][1], param_state['RMS'])\n return param_scale * rel_step_sz\n\n def _get_options(self, param_group, param_shape):\n factored = len(param_shape) >= 2\n use_first_moment = param_group['beta1'] is not None\n return factored, use_first_moment\n\n def _rms(self, tensor):\n return tensor.norm(2) / (tensor.numel() ** 0.5)\n\n def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col, output):\n r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1)).rsqrt_().unsqueeze(-1)\n c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()\n torch.mul(r_factor, c_factor, out=output)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns 
the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('Adafactor does not support sparse gradients.')\n\n state = self.state[p]\n grad_shape = grad.shape\n\n factored, use_first_moment = self._get_options(group, grad_shape)\n # State Initialization\n if len(state) == 0:\n state['step'] = 0\n\n if use_first_moment:\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(grad)\n if factored:\n state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).type_as(grad)\n state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).type_as(grad)\n else:\n state['exp_avg_sq'] = torch.zeros_like(grad)\n\n state['RMS'] = 0\n else:\n if use_first_moment:\n state['exp_avg'] = state['exp_avg'].type_as(grad)\n if factored:\n state['exp_avg_sq_row'] = state['exp_avg_sq_row'].type_as(grad)\n state['exp_avg_sq_col'] = state['exp_avg_sq_col'].type_as(grad)\n else:\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(grad)\n\n p_data_fp32 = p.data.float()\n\n state['step'] += 1\n state['RMS'] = self._rms(p_data_fp32)\n group['lr'] = self._get_lr(group, state)\n\n beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])\n update = (grad**2) + group['eps'][0]\n if factored:\n exp_avg_sq_row = state['exp_avg_sq_row']\n exp_avg_sq_col = state['exp_avg_sq_col']\n\n exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1))\n exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2))\n\n # Approximation of exponential moving average of square of gradient\n self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col, update)\n update.mul_(grad)\n else:\n exp_avg_sq = state['exp_avg_sq']\n\n exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update)\n torch.rsqrt(exp_avg_sq, out=update).mul_(grad)\n\n update.div_(max(1.0, self._rms(update) / group['clip_threshold']))\n update.mul_(group['lr'])\n\n if use_first_moment:\n exp_avg = state['exp_avg']\n exp_avg.mul_(group['beta1']).add_(1 - group['beta1'], update)\n update = exp_avg\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n p_data_fp32.add_(-update)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n"
] |
[
[
"torch.rsqrt",
"torch.mul",
"torch.zeros_like",
"torch.zeros"
]
] |
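The core trick in `Adafactor.step` is storing only row and column means of the squared gradient and reconstructing the full second-moment matrix on the fly in `_approx_sq_grad`. A NumPy sketch of that rank-1 reconstruction, leaving out the exponential moving average for brevity:

```python
import numpy as np

rng = np.random.default_rng(0)
g = rng.normal(size=(4, 3))        # a 2-D gradient (the factored case)
v = g ** 2 + 1e-30                 # squared gradient plus eps[0]

row = v.mean(axis=-1)              # exp_avg_sq_row analogue, shape (4,)
col = v.mean(axis=-2)              # exp_avg_sq_col analogue, shape (3,)

# _approx_sq_grad(): r_factor * c_factor == 1 / sqrt(outer(row, col) / row.mean())
r_factor = 1.0 / np.sqrt(row / row.mean())
c_factor = 1.0 / np.sqrt(col)
update = r_factor[:, None] * c_factor[None, :] * g

print(update.shape)                # (4, 3): O(n + m) state, full-size update
```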
sailfish009/FLAML
|
[
"492990655d09a144d99e91de4dedda761a70302e"
] |
[
"test/test_automl.py"
] |
[
"import unittest\n\nimport numpy as np\nimport scipy.sparse\nfrom sklearn.datasets import load_boston, load_iris\n\nfrom flaml import AutoML, get_output_from_log\n\n\ndef custom_metric(X_test, y_test, estimator, labels, X_train, y_train):\n from sklearn.metrics import log_loss\n y_pred = estimator.predict_proba(X_test)\n test_loss = log_loss(y_test, y_pred, labels=labels)\n y_pred = estimator.predict_proba(X_train)\n train_loss = log_loss(y_train, y_pred, labels=labels)\n alpha = 0.5\n return test_loss * (1 + alpha) - alpha * train_loss, [test_loss, train_loss]\n\n\nclass TestAutoML(unittest.TestCase):\n\n def test_dataframe(self):\n self.test_classification(True)\n\n def test_custom_metric(self):\n\n automl_experiment = AutoML()\n automl_settings = {\n \"time_budget\": 10,\n 'eval_method': 'holdout',\n \"metric\": custom_metric,\n \"task\": 'classification',\n \"log_file_name\": \"test/iris_custom.log\",\n \"log_training_metric\": True,\n 'log_type': 'all',\n \"model_history\": True\n }\n X_train, y_train = load_iris(return_X_y=True)\n automl_experiment.fit(X_train=X_train, y_train=y_train,\n **automl_settings)\n print(automl_experiment.classes_)\n print(automl_experiment.predict_proba(X_train))\n print(automl_experiment.model)\n print(automl_experiment.config_history)\n print(automl_experiment.model_history)\n print(automl_experiment.best_iteration)\n print(automl_experiment.best_estimator)\n automl_experiment = AutoML()\n estimator = automl_experiment.get_estimator_from_log(\n automl_settings[\"log_file_name\"], record_id=0,\n objective='multi')\n print(estimator)\n time_history, best_valid_loss_history, valid_loss_history, \\\n config_history, train_loss_history = get_output_from_log(\n filename=automl_settings['log_file_name'], time_budget=6)\n print(train_loss_history)\n\n def test_classification(self, as_frame=False):\n\n automl_experiment = AutoML()\n automl_settings = {\n \"time_budget\": 4,\n \"metric\": 'accuracy',\n \"task\": 'classification',\n \"log_file_name\": \"test/iris.log\",\n \"log_training_metric\": True,\n \"model_history\": True\n }\n X_train, y_train = load_iris(return_X_y=True, as_frame=as_frame)\n automl_experiment.fit(X_train=X_train, y_train=y_train,\n **automl_settings)\n print(automl_experiment.classes_)\n print(automl_experiment.predict_proba(X_train)[:5])\n print(automl_experiment.model)\n print(automl_experiment.config_history)\n print(automl_experiment.model_history)\n print(automl_experiment.best_iteration)\n print(automl_experiment.best_estimator)\n del automl_settings[\"metric\"]\n del automl_settings[\"model_history\"]\n del automl_settings[\"log_training_metric\"]\n automl_experiment = AutoML()\n duration = automl_experiment.retrain_from_log(\n log_file_name=automl_settings[\"log_file_name\"],\n X_train=X_train, y_train=y_train,\n train_full=True, record_id=0)\n print(duration)\n print(automl_experiment.model)\n print(automl_experiment.predict_proba(X_train)[:5])\n\n def test_regression(self):\n\n automl_experiment = AutoML()\n automl_settings = {\n \"time_budget\": 2,\n \"metric\": 'mse',\n \"task\": 'regression',\n \"log_file_name\": \"test/boston.log\",\n \"log_training_metric\": True,\n \"model_history\": True\n }\n X_train, y_train = load_boston(return_X_y=True)\n n = len(y_train)\n automl_experiment.fit(X_train=X_train[:n >> 1], y_train=y_train[:n >> 1],\n X_val=X_train[n >> 1:], y_val=y_train[n >> 1:],\n **automl_settings)\n assert automl_experiment.y_val.shape[0] == n - (n >> 1)\n assert automl_experiment.eval_method == 'holdout'\n 
print(automl_experiment.predict(X_train))\n print(automl_experiment.model)\n print(automl_experiment.config_history)\n print(automl_experiment.model_history)\n print(automl_experiment.best_iteration)\n print(automl_experiment.best_estimator)\n print(get_output_from_log(automl_settings[\"log_file_name\"], 1))\n\n def test_sparse_matrix_classification(self):\n\n automl_experiment = AutoML()\n automl_settings = {\n \"time_budget\": 2,\n \"metric\": 'auto',\n \"task\": 'classification',\n \"log_file_name\": \"test/sparse_classification.log\",\n \"split_type\": \"uniform\",\n \"model_history\": True\n }\n X_train = scipy.sparse.random(1554, 21, dtype=int)\n y_train = np.random.randint(3, size=1554)\n automl_experiment.fit(X_train=X_train, y_train=y_train,\n **automl_settings)\n print(automl_experiment.classes_)\n print(automl_experiment.predict_proba(X_train))\n print(automl_experiment.model)\n print(automl_experiment.config_history)\n print(automl_experiment.model_history)\n print(automl_experiment.best_iteration)\n print(automl_experiment.best_estimator)\n\n def test_sparse_matrix_regression(self):\n\n automl_experiment = AutoML()\n automl_settings = {\n \"time_budget\": 2,\n \"metric\": 'mae',\n \"task\": 'regression',\n \"log_file_name\": \"test/sparse_regression.log\",\n \"model_history\": True\n }\n X_train = scipy.sparse.random(300, 900, density=0.0001)\n y_train = np.random.uniform(size=300)\n X_val = scipy.sparse.random(100, 900, density=0.0001)\n y_val = np.random.uniform(size=100)\n automl_experiment.fit(X_train=X_train, y_train=y_train,\n X_val=X_val, y_val=y_val,\n **automl_settings)\n assert automl_experiment.X_val.shape == X_val.shape\n print(automl_experiment.predict(X_train))\n print(automl_experiment.model)\n print(automl_experiment.config_history)\n print(automl_experiment.model_history)\n print(automl_experiment.best_iteration)\n print(automl_experiment.best_estimator)\n print(automl_experiment.best_config)\n print(automl_experiment.best_loss)\n print(automl_experiment.best_config_train_time)\n\n def test_sparse_matrix_xgboost(self):\n\n automl_experiment = AutoML()\n automl_settings = {\n \"time_budget\": 2,\n \"metric\": 'ap',\n \"task\": 'classification',\n \"log_file_name\": \"test/sparse_classification.log\",\n \"estimator_list\": [\"xgboost\"],\n \"log_type\": \"all\",\n }\n X_train = scipy.sparse.eye(900000)\n y_train = np.random.randint(2, size=900000)\n automl_experiment.fit(X_train=X_train, y_train=y_train,\n **automl_settings)\n print(automl_experiment.predict(X_train))\n print(automl_experiment.model)\n print(automl_experiment.config_history)\n print(automl_experiment.model_history)\n print(automl_experiment.best_iteration)\n print(automl_experiment.best_estimator)\n\n def test_sparse_matrix_lr(self):\n\n automl_experiment = AutoML()\n automl_settings = {\n \"time_budget\": 2,\n \"metric\": 'f1',\n \"task\": 'classification',\n \"log_file_name\": \"test/sparse_classification.log\",\n \"estimator_list\": [\"lrl1\", \"lrl2\"],\n \"log_type\": \"all\",\n }\n X_train = scipy.sparse.random(3000, 900, density=0.1)\n y_train = np.random.randint(2, size=3000)\n automl_experiment.fit(X_train=X_train, y_train=y_train,\n **automl_settings)\n print(automl_experiment.predict(X_train))\n print(automl_experiment.model)\n print(automl_experiment.config_history)\n print(automl_experiment.model_history)\n print(automl_experiment.best_iteration)\n print(automl_experiment.best_estimator)\n\n def test_sparse_matrix_regression_cv(self):\n\n automl_experiment = AutoML()\n 
automl_settings = {\n \"time_budget\": 2,\n 'eval_method': 'cv',\n \"task\": 'regression',\n \"log_file_name\": \"test/sparse_regression.log\",\n \"model_history\": True\n }\n X_train = scipy.sparse.random(100, 100)\n y_train = np.random.uniform(size=100)\n automl_experiment.fit(X_train=X_train, y_train=y_train,\n **automl_settings)\n print(automl_experiment.predict(X_train))\n print(automl_experiment.model)\n print(automl_experiment.config_history)\n print(automl_experiment.model_history)\n print(automl_experiment.best_iteration)\n print(automl_experiment.best_estimator)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"sklearn.datasets.load_iris",
"sklearn.metrics.log_loss",
"sklearn.datasets.load_boston",
"numpy.random.uniform",
"numpy.random.randint"
]
] |
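`test_custom_metric` relies on FLAML accepting a callable for `"metric"` that returns a value to minimise plus a list of metrics to log. A sketch of another metric with the same contract (the signature is copied from `custom_metric` above; `labels` is simply unused for regression):

```python
from sklearn.metrics import mean_squared_error

def my_metric(X_test, y_test, estimator, labels, X_train, y_train):
    # Same contract as custom_metric above:
    # return (objective_to_minimize, [metrics_to_log]).
    test_loss = mean_squared_error(y_test, estimator.predict(X_test))
    train_loss = mean_squared_error(y_train, estimator.predict(X_train))
    return test_loss, [test_loss, train_loss]

# Used the same way as in the test: automl_settings = {"metric": my_metric, ...}
```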
yathomasi/Fantasy-Premier-League
|
[
"892eb610fb081f56af7fa93cd72446e9a17db7e5"
] |
[
"parsers.py"
] |
[
"import csv\nimport os\n\n# from utility import uprint\nimport pandas as pd\n\n\ndef extract_stat_names(dict_of_stats):\n \"\"\"Extracts all the names of the statistics\n\n Args:\n dict_of_stats (dict): Dictionary containing key-alue pair of stats\n \"\"\"\n stat_names = []\n for key, val in dict_of_stats.items():\n stat_names += [key]\n return stat_names\n\n\ndef parse_top_players(data, base_filename):\n rows = []\n for event in data[\"events\"]:\n gw = event[\"id\"]\n player_id = event[\"top_element\"]\n points = event[\"top_element_info\"][\"points\"]\n row = {}\n row[\"gw\"] = gw\n row[\"player_id\"] = player_id\n row[\"points\"] = points\n rows += [row]\n f = open(os.path.join(base_filename, \"best_players.csv\"), \"w+\", newline=\"\")\n w = csv.DictWriter(f, [\"gw\", \"player_id\", \"points\"])\n w.writeheader()\n for row in rows:\n w.writerow(row)\n\n\ndef parse_players(list_of_players, base_filename):\n stat_names = extract_stat_names(list_of_players[0])\n filename = base_filename + \"players_raw.csv\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, \"w+\", encoding=\"utf8\", newline=\"\")\n w = csv.DictWriter(f, sorted(stat_names))\n w.writeheader()\n for player in list_of_players:\n w.writerow(\n {k: str(v).encode(\"utf-8\").decode(\"utf-8\") for k, v in player.items()}\n )\n\n\ndef parse_player_history(list_of_histories, base_filename, player_name, Id):\n if len(list_of_histories) > 0:\n stat_names = extract_stat_names(list_of_histories[0])\n filename = base_filename + player_name + \"_\" + str(Id) + \"/history.csv\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, \"w+\", encoding=\"utf8\", newline=\"\")\n w = csv.DictWriter(f, sorted(stat_names))\n w.writeheader()\n for history in list_of_histories:\n w.writerow(history)\n\n\ndef parse_player_gw_history(list_of_gw, base_filename, player_name, Id):\n if len(list_of_gw) > 0:\n stat_names = extract_stat_names(list_of_gw[0])\n filename = base_filename + player_name + \"_\" + str(Id) + \"/gw.csv\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, \"w+\", encoding=\"utf8\", newline=\"\")\n w = csv.DictWriter(f, sorted(stat_names))\n w.writeheader()\n for gw in list_of_gw:\n w.writerow(gw)\n\n\ndef parse_gw_entry_history(data, outfile_base):\n for gw in data:\n picks = gw[\"picks\"]\n event = gw[\"entry_history\"][\"event\"]\n filename = \"picks_\" + str(event) + \".csv\"\n picks_df = pd.DataFrame.from_records(picks)\n picks_df.to_csv(os.path.join(outfile_base, filename), index=False)\n\n\ndef parse_entry_history(data, outfile_base):\n chips_df = pd.DataFrame.from_records(data[\"chips\"])\n chips_df.to_csv(os.path.join(outfile_base, \"chips.csv\"))\n season_df = pd.DataFrame.from_records(data[\"past\"])\n season_df.to_csv(os.path.join(outfile_base, \"history.csv\"))\n # profile_data = data[\"entry\"].pop('kit', data[\"entry\"])\n # profile_df = pd.DataFrame.from_records(profile_data)\n # profile_df.to_csv(os.path.join(outfile_base, 'profile.csv'))\n gw_history_df = pd.DataFrame.from_records(data[\"current\"])\n gw_history_df.to_csv(os.path.join(outfile_base, \"gws.csv\"), index=False)\n\n\ndef parse_entry_leagues(data, outfile_base):\n classic_leagues_df = pd.DataFrame.from_records(data[\"leagues\"][\"classic\"])\n classic_leagues_df.to_csv(os.path.join(outfile_base, \"classic_leagues.csv\"))\n try:\n cup_leagues_df = pd.DataFrame.from_records(data[\"leagues\"][\"cup\"][\"matches\"])\n cup_leagues_df.to_csv(os.path.join(outfile_base, 
\"cup_leagues.csv\"))\n except KeyError:\n print(\"No cups yet\")\n h2h_leagues_df = pd.DataFrame.from_records(data[\"leagues\"][\"h2h\"])\n h2h_leagues_df.to_csv(os.path.join(outfile_base, \"h2h_leagues.csv\"))\n\n\ndef parse_transfer_history(data, outfile_base):\n wildcards_df = pd.DataFrame.from_records(data)\n wildcards_df.to_csv(os.path.join(outfile_base, \"transfers.csv\"), index=False)\n\n\ndef parse_fixtures(data, outfile_base):\n fixtures_df = pd.DataFrame.from_records(data)\n fixtures_df.to_csv(os.path.join(outfile_base, \"fixtures.csv\"), index=False)\n\n\ndef parse_team_data(data, outfile_base):\n teams_df = pd.DataFrame.from_records(data)\n teams_df.to_csv(os.path.join(outfile_base, \"teams.csv\"), index=False)\n"
] |
[
[
"pandas.DataFrame.from_records"
]
] |
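Every `parse_*` helper above reduces to the same two-step pattern: build a frame with `pd.DataFrame.from_records`, then write it with `to_csv`. A generic sketch of that pattern (`dump_records` and the sample row are illustrative, not part of the repo):

```python
import os

import pandas as pd

def dump_records(records, outfile_base, filename, index=False):
    # The shared shape of parse_fixtures / parse_team_data / etc. above.
    df = pd.DataFrame.from_records(records)
    df.to_csv(os.path.join(outfile_base, filename), index=index)

dump_records([{"gw": 1, "player_id": 233, "points": 14}], ".", "best_players.csv")
```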
tijsmaas/SWaveNet
|
[
"4f34fc7b2c1b5139387bfec8d32266270e881b38"
] |
[
"models/Regressor.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Regressor(nn.Module):\n def __init__(self, Loss, hid, m):\n super(Regressor, self).__init__()\n self.loss_function = Loss;\n self.m = m\n if (self.loss_function == 'uni-Gaussian-novar'):\n self.mean = nn.Linear(hid, self.m);\n self.var = torch.rand(1) * -1;\n self.var = nn.Parameter(self.var.cuda());\n \n \n if (self.loss_function == 'Gaussian'):\n \n self.mean = nn.Linear(hid, self.m);\n self.var = nn.Linear(hid, self.m);\n \n if (self.loss_function[:12] == 'mul-Gaussian'):\n \n self.K = int(self.loss_function.split('@')[-1]);\n self.mean = nn.Linear(hid, self.m * self.K);\n self.var = nn.Linear(hid, self.m * self.K);\n self.pi = nn.Linear(hid, self.m * self.K);\n \n if (self.loss_function[:7] == 'softmax'):\n \n self.K = int(self.loss_function.split('@')[-1]);\n self.softmax = nn.Linear(hid, self.m * self.K)\n \n def forward(self, X):\n \n batch_size, m = X.size(0), self.m;\n \n if (self.loss_function == 'uni-Gaussian-novar'):\n mean = self.mean(X);\n var = self.var.view(1,1).expand(batch_size, m).contiguous();\n return [mean, var];\n \n if (self.loss_function == 'Gaussian'):\n \n return [self.mean(X), self.var(X)];\n \n if (self.loss_function[:12] == 'mul-Gaussian'):\n \n seqlen, batch_size, _ = X.size();\n mean = self.mean(X).view(seqlen, batch_size, m, self.K); \n var = self.var(X).view(seqlen, batch_size, m, self.K);\n pi = self.pi(X).view(seqlen * batch_size * m, self.K);\n pi = F.softmax(pi);\n pi = pi.view(seqlen, batch_size, m, self.K);\n return [mean, var, pi];\n \n \n if (self.loss_function[:7] == 'softmax'):\n seqlen, batch_size, _ = X.size();\n predict = self.softmax(X).view(seqlen * batch_size * m, self.K);\n predict = F.log_softmax(predict);\n predict = predict.view(seqlen, batch_size, m, self.K)\n return [predict];\n \n print('no loss found');\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.functional.softmax",
"torch.rand",
"torch.nn.functional.log_softmax"
]
] |
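For the `mul-Gaussian@K` head, `Regressor.forward` flattens to `(seqlen * batch * m, K)` before the softmax so that mixture weights normalise over the K components. The same effect with an explicit `dim` argument (the bare `F.softmax(pi)` in the cell relies on the deprecated implicit-dim form); shapes here are arbitrary:

```python
import torch
import torch.nn.functional as F

seqlen, batch, m, K, hid = 5, 2, 3, 4, 16
x = torch.randn(seqlen, batch, hid)
pi_layer = torch.nn.Linear(hid, m * K)

# Reshape as in Regressor.forward, then softmax over the K mixture components:
pi = pi_layer(x).view(seqlen, batch, m, K)
pi = F.softmax(pi, dim=-1)
print(pi.sum(dim=-1))  # ~1.0 everywhere: valid mixture weights per output dim
```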
katetolstaya/gym-flock
|
[
"b09bdfbbe4a96fe052958d1f9e1e9dd314f58419"
] |
[
"gym_flock/envs/old/mapping.py"
] |
[
"import gym\nfrom gym import spaces, error, utils\nfrom gym.utils import seeding\nimport numpy as np\nimport configparser\nfrom os import path\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import gca\n\nfont = {'family': 'sans-serif',\n 'weight': 'bold',\n 'size': 14}\n\n\nclass MappingEnv(gym.Env):\n\n def __init__(self):\n\n # config_file = path.join(path.dirname(__file__), \"params_flock.cfg\")\n # config = configparser.ConfigParser()\n # config.read(config_file)\n # config = config['flock']\n\n self.nearest_agents = 7\n self.nearest_targets = 7\n\n self.mean_pooling = True # normalize the adjacency matrix by the number of neighbors or not\n self.centralized = True\n\n # number states per agent\n self.nx_system = 4\n # number of actions per agent\n self.nu = 2\n\n # default problem parameters\n self.n_agents = 100 # int(config['network_size'])\n # self.comm_radius = 0.9 # float(config['comm_radius'])\n self.dt = 0.1 # #float(config['system_dt'])\n self.v_max = 5.0 # float(config['max_vel_init'])\n\n self.v_bias = self.v_max\n\n # intitialize state matrices\n self.x = None\n self.u = None\n self.mean_vel = None\n self.init_vel = None\n self.greedy_action = None\n\n self.diff = None\n self.r2 = None\n self.adj_mat = None\n self.adj_mat_mean = None\n\n self.diff_targets = None\n self.r2_targets = None\n\n self.target_observed = None\n self.state_network = None\n self.state_values = None\n self.reward = None\n\n self.max_accel = 1\n\n # self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),\n # dtype=np.float32)\n #\n # self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, ),\n # dtype=np.float32)\n\n # target initialization\n self.px_max = 100\n self.py_max = 100\n x = np.linspace(-1.0 * self.px_max, self.px_max, self.n_agents)\n y = np.linspace(-1.0 * self.py_max, self.py_max, self.n_agents)\n\n tx, ty = np.meshgrid(x, y)\n tx = tx.reshape((-1, 1))\n ty = ty.reshape((-1, 1))\n self.obs_rad = 2.0\n self.obs_rad2 = self.obs_rad * self.obs_rad\n\n self.target_x = np.stack((tx, ty), axis=1).reshape((-1, 2))\n\n self.target_unobserved = np.ones((self.n_agents * self.n_agents, 2), dtype=np.bool)\n\n # rendering initialization\n self.fig = None\n self.ax = None\n self.line1 = None\n self.line2 = None\n self.action_scalar = 10.0\n\n self.seed()\n\n def reset(self):\n x = np.zeros((self.n_agents, self.nx_system))\n self.target_unobserved = np.ones((self.n_agents * self.n_agents, 2), dtype=np.bool)\n\n x[:, 0] = np.random.uniform(low=-self.px_max, high=self.px_max, size=(self.n_agents,))\n x[:, 1] = np.random.uniform(low=-self.py_max, high=self.py_max, size=(self.n_agents,))\n\n #bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))\n x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) #+ bias[0]\n x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) #+ bias[1]\n\n # keep good initialization\n self.mean_vel = np.mean(x[:, 2:4], axis=0)\n self.init_vel = x[:, 2:4]\n self.x = x\n # self.a_net = self.get_connectivity(self.x)\n self.compute_helpers()\n return self.state_values, self.state_network\n\n def params_from_cfg(self, args):\n # TODO\n pass\n # # self.comm_radius = args.getfloat('comm_radius')\n # # self.comm_radius2 = self.comm_radius * self.comm_radius\n # # self.vr = 1 / self.comm_radius2 + np.log(self.comm_radius2)\n # #\n # # self.n_agents = args.getint('n_agents')\n # # self.r_max = self.r_max * np.sqrt(self.n_agents)\n 
#\n # # self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),\n # # dtype=np.float32)\n # #\n # # self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, self.n_features),\n # # dtype=np.float32)\n #\n # self.v_max = args.getfloat('v_max')\n # self.v_bias = self.v_max\n # self.dt = args.getfloat('dt')\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, u):\n\n # u = np.reshape(u, (-1, 2))\n assert u.shape == (self.n_agents, self.nu)\n u = np.clip(u, a_min=-self.max_accel, a_max=self.max_accel)\n self.u = u * self.action_scalar\n\n old_x = np.copy(self.x)\n\n # x position\n self.x[:, 0] = self.x[:, 0] + self.x[:, 2] * self.dt + self.u[:, 0] * self.dt * self.dt * 0.5\n # y position\n self.x[:, 1] = self.x[:, 1] + self.x[:, 3] * self.dt + self.u[:, 1] * self.dt * self.dt * 0.5\n # x velocity\n self.x[:, 2] = self.x[:, 2] + self.u[:, 0] * self.dt\n # y velocity\n self.x[:, 3] = self.x[:, 3] + self.u[:, 1] * self.dt\n\n # clip velocities\n self.x[:, 2:4] = np.clip(self.x[:, 2:4], -1.0*self.v_max, self.v_max)\n\n dist_traveled = np.sum(np.linalg.norm(self.x[:, 0:2] - old_x[:, 0:2], axis=1))\n\n self.compute_helpers()\n done = (0 == np.sum(self.target_unobserved))\n\n return (self.state_values, self.state_network), 10.0 * self.reward - dist_traveled, done, {}\n\n def compute_helpers(self):\n\n # TODO - check this, and initialize stuff in the init(), and try to make more efficient\n\n # Neighbors computations\n self.diff = self.x.reshape((self.n_agents, 1, self.nx_system)) - self.x.reshape(\n (1, self.n_agents, self.nx_system))\n self.r2 = np.multiply(self.diff[:, :, 0], self.diff[:, :, 0]) + np.multiply(self.diff[:, :, 1],\n self.diff[:, :, 1])\n np.fill_diagonal(self.r2, np.Inf)\n\n nearest = np.argsort(self.r2, axis=1)\n obs_neigh = np.zeros((self.n_agents, self.nearest_agents * 4))\n self.adj_mat = np.zeros((self.n_agents, self.n_agents))\n for i in range(self.nearest_agents):\n ind2, ind3 = np.meshgrid(nearest[:, i], range(4), indexing='ij')\n ind1, _ = np.meshgrid(range(self.n_agents), range(4), indexing='ij')\n obs_neigh[:, i * self.nx_system:(i + 1) * self.nx_system] = np.reshape(\n self.diff[ind1.flatten(), ind2.flatten(), ind3.flatten()], (-1, 4))\n self.adj_mat[:, nearest[:, i]] = 1.0\n\n # Normalize the adjacency matrix by the number of neighbors - results in mean pooling, instead of sum pooling\n n_neighbors = np.reshape(np.sum(self.adj_mat, axis=1), (self.n_agents, 1)) # correct - checked this\n n_neighbors[n_neighbors == 0] = 1\n self.adj_mat_mean = self.adj_mat / n_neighbors\n\n # Targets computations\n self.diff_targets = self.x[:, 0:2].reshape((self.n_agents, 1, 2)) - self.target_x[\n self.target_unobserved].reshape(\n (1, -1, 2))\n self.r2_targets = np.multiply(self.diff_targets[:, :, 0], self.diff_targets[:, :, 0]) + np.multiply(\n self.diff_targets[:, :, 1],\n self.diff_targets[:, :, 1])\n\n nearest_targets = np.argsort(self.r2_targets, axis=1)\n obs_target = np.zeros((self.n_agents, self.nearest_targets * 2))\n\n for i in range(min(self.nearest_targets, np.shape(nearest_targets)[1])):\n\n ind2, ind3 = np.meshgrid(nearest_targets[:, i], range(2), indexing='ij')\n ind1, _ = np.meshgrid(range(self.n_agents), range(2), indexing='ij')\n obs_target[:, i * 2:(i + 1) * 2] = np.reshape(\n self.diff_targets[ind1.flatten(), ind2.flatten(), ind3.flatten()], (-1, 2))\n\n self.target_observed = np.any(self.r2_targets < self.obs_rad2, axis=0).reshape((-1, 
1))\n self.target_unobserved[self.target_unobserved] = np.tile(np.logical_not(self.target_observed), (1, 2)).flatten()\n\n self.reward = np.sum(self.target_observed.astype(np.int))\n self.state_values = np.hstack((obs_neigh, obs_target))\n\n self.greedy_action = -1.0 * obs_target[:, 0:2]\n\n if self.mean_pooling:\n self.state_network = self.adj_mat_mean\n else:\n self.state_network = self.adj_mat\n\n def controller(self):\n \"\"\"\n The controller for flocking from Turner 2003.\n Returns: the optimal action\n \"\"\"\n\n # TODO\n # return np.zeros((self.n_agents, 2))\n return self.greedy_action / 10.0\n\n def render(self, mode='human'):\n \"\"\"\n Render the environment with agents as points in 2D space\n \"\"\"\n if self.fig is None:\n plt.ion()\n fig = plt.figure()\n self.ax = fig.add_subplot(111)\n line1, = self.ax.plot(self.x[:, 0], self.x[:, 1], 'bo')\n locs = self.target_x[self.target_unobserved].reshape((-1, 2))\n line2, = self.ax.plot(locs[:, 0], locs[:, 1], 'rx')\n plt.ylim(-1.0 * self.py_max, 1.0 * self.py_max)\n plt.xlim(-1.0 * self.px_max, 1.0 * self.px_max)\n a = gca()\n a.set_xticklabels(a.get_xticks(), font)\n a.set_yticklabels(a.get_yticks(), font)\n plt.title('GNN Controller')\n self.fig = fig\n self.line1 = line1\n self.line2 = line2\n\n # TODO render unobserved targets\n else:\n self.line1.set_xdata(self.x[:, 0])\n self.line1.set_ydata(self.x[:, 1])\n locs = self.target_x[self.target_unobserved].reshape((-1,2))\n self.line2.set_xdata(locs[:, 0])\n self.line2.set_ydata(locs[:, 1])\n\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n\n def close(self):\n pass\n"
] |
[
[
"numpy.linspace",
"numpy.mean",
"numpy.fill_diagonal",
"numpy.any",
"numpy.hstack",
"matplotlib.pyplot.gca",
"numpy.clip",
"numpy.stack",
"numpy.copy",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.logical_not",
"numpy.multiply",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"numpy.argsort",
"numpy.meshgrid",
"matplotlib.pyplot.ion",
"numpy.sum",
"numpy.linalg.norm",
"numpy.ones",
"matplotlib.pyplot.xlim",
"numpy.shape",
"numpy.random.uniform"
]
] |
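`compute_helpers` derives the state network from pairwise distances: pick the `nearest_agents` neighbours per agent, mark them in an adjacency matrix, then divide each row by its neighbour count when `mean_pooling` is set. A small NumPy sketch of that construction (sizes are arbitrary, and the adjacency is filled per row here, which is the stated intent of the loop above):

```python
import numpy as np

rng = np.random.default_rng(1)
n, k = 6, 2                                   # agents, nearest_agents analogue
x = rng.uniform(-100, 100, size=(n, 2))       # positions

diff = x[:, None, :] - x[None, :, :]
r2 = (diff ** 2).sum(axis=-1)
np.fill_diagonal(r2, np.inf)                  # no self-neighbours

nearest = np.argsort(r2, axis=1)[:, :k]
adj = np.zeros((n, n))
adj[np.repeat(np.arange(n), k), nearest.ravel()] = 1.0

# Mean pooling: normalise rows by neighbour count, as in compute_helpers().
n_neighbors = np.maximum(adj.sum(axis=1, keepdims=True), 1)
adj_mean = adj / n_neighbors
print(adj_mean.sum(axis=1))                   # each row sums to 1.0
```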
kspurlock/CS430-Demos
|
[
"81a1ee923aa3e9ea27d67a412aef1f49a7204932",
"81a1ee923aa3e9ea27d67a412aef1f49a7204932"
] |
[
"code/dataset_creation.py",
"code/image_collector.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 6 16:37:04 2021\n\n@author: kylei\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_blobs\n\nif __name__ == \"__main__\":\n n_classes = 2\n data, labels = make_blobs(\n n_samples=1000,\n centers=n_classes,\n random_state=10,\n center_box=(-10.0, 10.0), # Changes center of each sample cluster\n cluster_std=2.5, # Changes sample spread\n )\n\n toy_dataset = pd.DataFrame(data=data)\n toy_dataset[2] = labels\n\n # Plotting\n fig, ax = plt.subplots()\n\n np.random.seed(192) # Set seed for colours\n\n # Randomly generate a colour for each class\n colours = []\n for i in range(n_classes):\n colours.append(\n [np.random.uniform(0, 1), np.random.uniform(0, 1), np.random.uniform(0, 1)]\n )\n\n for label in range(n_classes):\n ax.scatter(\n x=data[labels == label, 0],\n y=data[labels == label, 1],\n color=colours[label],\n s=40,\n label=\"Class {c}\".format(c=label),\n )\n\n ax.set(xlabel=\"X\", ylabel=\"Y\", title=\"Toy Example\")\n\n ax.legend(loc=\"upper right\")\n plt.show()",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 1 20:09:09 2021\n\n@author: kylei\n\"\"\"\n#%%\nfrom google_images_search import GoogleImagesSearch\nfrom io import BytesIO\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nglobal CX_ID, API_KEY, LABEL\n\n# Read more here on how to get your own keys:\n# https://pypi.org/project/Google-Images-Search/\n\nCX_ID = \"\" # Custom search engine key\nAPI_KEY = \"\" # Google Cloud API key\nLABEL = 0\n\n\ndef image_search(query, query_num):\n # Holds the image as a byte stream\n my_bytes_io = BytesIO()\n\n gis = GoogleImagesSearch(API_KEY, CX_ID)\n\n search_params = {\n \"q\": query,\n \"num\": query_num,\n \"fileType\": \"jpg|png\",\n }\n\n img_array = []\n gis.search(search_params)\n\n it = 0\n for image in gis.results():\n # Here we tell the BytesIO object to go back to address 0\n my_bytes_io.seek(0)\n\n # Take raw image data\n raw_image_data = image.get_raw_data()\n\n # This function writes the raw image data to the object\n image.copy_to(my_bytes_io, raw_image_data)\n\n # Or without the raw data which will be automatically taken\n # Inside the copy_to() method\n image.copy_to(my_bytes_io)\n\n # We go back to address 0 again so PIL can read it from start to finish\n my_bytes_io.seek(0)\n\n # Create a temporary image object\n try:\n temp_img = Image.open(my_bytes_io)\n\n # Downloads the original image for view later\n image.download(r\"./images\")\n\n temp_img = temp_img.resize(size=(250, 250)) # Change size to 250px by 250px\n\n # temp_img = temp_img.convert(mode=\"L\") # Monochrome (1 channel)\n # temp_img = temp_img.convert(mode=\"CMYK\") # Cyan, Magenta, Yellow, Black (4 channels)\n temp_img = temp_img.convert(mode=\"RGB\") # RGB (3 channels)\n\n # Convert the image into a matrix representation (features)\n numpydata = np.asarray(temp_img)\n\n # Plot the image\n plt.imshow(numpydata)\n\n # Append to data array\n img_array.append(numpydata)\n\n img_array_np = np.array(img_array)\n img_array_np = img_array_np / 255.0\n it += 1\n print(it)\n\n except Exception as e:\n print(e)\n print(\"Error on converting image\")\n\n labels = np.full((query_num, 1), LABEL)\n\n return img_array_np, labels\n\n\n#%%\nif __name__ == \"__main__\":\n img_array_np0, labels0 = image_search(\"cat\", 10)\n LABEL += 1\n img_array_np1, labels1 = image_search(\"stop sign\", 10)\n LABEL += 1\n img_array_np2, labels2 = image_search(\"car\", 10)\n LABEL += 1\n img_array_np3, labels3 = image_search(\"man\", 10)\n\n # Stack labels and image matrices row-wise\n labels = np.vstack((labels0, labels1, labels2, labels3))\n data = np.vstack((img_array_np0, img_array_np1, img_array_np2, img_array_np3))\n\n # Save data as pickeled numpy arrays\n np.save(r\"./images/data\", data)\n np.save(r\"./images/labels\", labels)\n\n# %%\n"
] |
[
[
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"sklearn.datasets.make_blobs"
],
[
"matplotlib.pyplot.imshow",
"numpy.asarray",
"numpy.save",
"numpy.full",
"numpy.array",
"numpy.vstack"
]
] |
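`image_search` funnels every hit through the same pipeline: raw bytes into a `BytesIO`, a `PIL` resize and mode conversion, then a normalised NumPy matrix. A self-contained sketch of that pipeline using a synthetic in-memory image, so no API keys are needed:

```python
from io import BytesIO

import numpy as np
from PIL import Image

# Synthetic stand-in for the raw bytes GoogleImagesSearch would return.
buf = BytesIO()
Image.new("RGB", (640, 480), color=(200, 30, 30)).save(buf, format="PNG")
buf.seek(0)

img = Image.open(buf).resize((250, 250)).convert("RGB")
arr = np.asarray(img) / 255.0          # features in [0, 1], as in image_search()
print(arr.shape)                       # (250, 250, 3)
```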
open-biotech/bio-rtd
|
[
"c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0"
] |
[
"bio_rtd/peak_shapes.py"
] |
[
"\"\"\"\nPeak shapes based on mean residence time (`rt_mean`).\n\nNotes\n-----\nFunctions are evaluated for given time vectors. Peaks are considered\nclipped if they do not fully fit on the time vector.\n\nFor un-clipped peak, the integral over the peak over time == 1.\n\nFor un-clipped peak, first momentum == `rt_mean`.\n\n\"\"\"\n\n__version__ = '0.7.1'\n__author__ = 'Jure Sencar'\n\nimport typing as _typing\nimport numpy as _np\n\nfrom scipy import special as _special\n\nfrom bio_rtd.logger import RtdLogger as _RtdLogger\n\n\ndef gaussian(t: _np.ndarray, rt_mean: float, sigma: float,\n logger: _typing.Union[_RtdLogger, None] = None) -> _np.ndarray:\n \"\"\"Gaussian distribution.\n\n p = exp(- ((t - rt_mean) / sigma) ** 2 / 2) / (sigma * sqrt(2 * pi))\n\n Parameters\n ----------\n t\n Time vector.\n rt_mean\n Mean residence time (== first momentum of un-clipped peak)\n sigma\n Standard deviation.\n logger\n Logger for logging suspicious parameters or peak shapes.\n\n Returns\n -------\n p: ndarray\n Evaluated pdf for specified time vector (`t`).\n\n \"\"\"\n if logger: # warnings for unsuitable peak parameters\n if sigma < 4 * (t[1] - t[0]):\n logger.w(f\"Gaussian peak: sigma < 4 * dt\")\n if rt_mean + 3 * sigma > t[-1]:\n logger.w(f\"Gaussian peak: rt_mean + 3 * sigma > t[-1]\")\n if rt_mean - 3 * sigma < t[0]:\n logger.w(f\"Gaussian peak: rt_mean - 3 * sigma < t[0]\")\n\n p = - (t - rt_mean) ** 2 / 2 / sigma ** 2\n p[p > 700] = 700\n p[p < -700] = -700\n p = _np.exp(p) / sigma / _np.sqrt(2 * _np.pi)\n\n if logger:\n _report_suspicious_peak_shape(t, p, rt_mean, logger)\n return p\n\n\n# noinspection DuplicatedCode\ndef emg(t, rt_mean, sigma, skew,\n logger: _typing.Union[_RtdLogger, None] = None):\n \"\"\"Exponentially modified Gaussian distribution.\n\n Parameters\n ----------\n t\n Time vector.\n rt_mean\n Mean residence time (== first momentum of un-clipped peak).\n sigma\n Standard deviation of Gaussian part.\n skew\n The rate of exponential part. 
Recommended: 1/40 < `skew` < 10.\n logger\n Logger for logging suspicious parameters or peak shapes.\n\n Returns\n -------\n p: ndarray\n Evaluated pdf for specified time vector (`t`).\n\n \"\"\"\n # first momentum of emg = rt_mean = t0 + 1 / skew\n t0 = rt_mean - 1 / skew\n # exp argument\n p = skew / 2 * (2 * (t0 - t) + skew * sigma ** 2)\n\n if logger: # warnings for unsuitable peak parameters\n if sigma + 1 / skew < 4 * (t[1] - t[0]):\n logger.w(f\"EMG peak: sigma + 1 / skew < 4 * dt\")\n if rt_mean + 2 * sigma + 1 / skew > t[-1]:\n logger.w(f\"EMG peak: rt_mean + 2 * sigma + 2 / skew > t[-1]\")\n if t0 < 3 * sigma:\n logger.w(f\"EMG peak: t0 < 3 * sigma; t0 = rt_mean - 1 / skew\")\n if skew < 1 / 40:\n logger.w(f\"EMG peak: skew < 1/40\")\n if skew > 10:\n logger.w(f\"EMG peak: skew > 10\")\n if _np.any(p > 200):\n logger.w(f\"EMG peak: exp argument (p) > 200\")\n if _np.any(p < -200): # not that relevant as it results in 0\n logger.i(f\"EMG peak: exp argument (p) < -200\")\n\n p[p > 700] = 700\n p[p < -700] = -700\n # curve\n p = _np.exp(p) * _special.erfc(\n (t0 - t + skew * sigma ** 2) / (2 ** 0.5 * sigma)\n ) * skew / 2\n\n if logger:\n _report_suspicious_peak_shape(t, p, rt_mean, logger)\n return p\n\n\n# noinspection DuplicatedCode\ndef skewed_normal(t, rt_mean, sigma, skew,\n logger: _typing.Union[_RtdLogger, None] = None):\n \"\"\"Skewed normal distribution.\n\n For `skew` == 0, the distribution becomes Gaussian distribution.\n\n Parameters\n ----------\n t\n Time vector.\n rt_mean\n Mean residence time (== first momentum of un-clipped peak).\n sigma\n Standard deviation of Gaussian part.\n skew\n Skewness of the peak. Recommended: -20 < `skew` < 20.\n logger\n Logger for logging suspicious parameters or peak shapes.\n\n Returns\n -------\n p: ndarray\n Evaluated pdf for specified time vector (`t`).\n\n \"\"\"\n if logger: # warnings for unsuitable peak parameters\n if sigma < 4 * (t[1] - t[0]):\n logger.w(f\"Skewed normal peak: sigma < 4 * dt\")\n if rt_mean + 3 * sigma > t[-1]:\n logger.w(f\"Skewed normal peak: rt_mean + 3 * sigma > t[-1]\")\n if rt_mean < 3 * sigma:\n logger.w(f\"Skewed normal peak: rt_mean < 3 * sigma\")\n if skew < -20:\n logger.w(f\"Skewed normal peak: skew < -20\")\n if skew > 20:\n logger.w(f\"Skewed normal peak: skew > 20\")\n\n # rt_mean = t0 + sigma * np.sqrt(2 / np.pi) * skew / (1 + skew**2)\n t0 = rt_mean - sigma * skew * _np.sqrt(2 / _np.pi / (1 + skew ** 2))\n # skew\n x = (t - t0) / sigma\n p = gaussian(t, t0, sigma) * (1 + _special.erf(skew * x / _np.sqrt(2)))\n\n if logger:\n _report_suspicious_peak_shape(t, p, rt_mean, logger)\n return p\n\n\ndef tanks_in_series(t: _np.ndarray, rt_mean: float, n_tanks: int,\n logger: _typing.Union[_RtdLogger, None] = None,\n allow_open_end=False,\n ) -> _np.ndarray:\n \"\"\"N tanks in series distribution.\n\n `rt_mean` is for entire unit operation (all tanks together).\n\n For `n_tanks` == 1, the distribution results in exponential decay.\n\n Parameters\n ----------\n t\n Time vector.\n rt_mean\n Mean residence time (== first momentum of un-clipped peak).\n n_tanks\n Number of tanks. 
Recommended: 1 <= `n_tanks` < 50\n logger\n Logger for logging suspicious parameters or peak shapes.\n\n Returns\n -------\n p: ndarray\n Evaluated pdf for specified time vector (`t`).\n\n \"\"\"\n\n if logger: # warnings for unsuitable peak parameters\n if rt_mean > t[-1] / 4 and not allow_open_end:\n logger.w(f\"Tanks in series peak: rt_mean > t[-1] / 4\")\n if t[0] > 0:\n logger.e(f\"Tanks in series peak: Initial time point > 0\")\n if n_tanks < 1:\n logger.e(f\"Tanks in series peak: n_tanks {n_tanks} (< 1)\")\n if n_tanks > 50:\n logger.w(f\"Tanks in series peak: n_tanks {n_tanks} (> 50)\")\n\n if n_tanks == 1:\n p = _np.exp(_np.clip(-t / rt_mean, -100, 0)) / rt_mean\n else:\n p = t ** (n_tanks - 1) / _np.math.factorial(n_tanks - 1) \\\n / (rt_mean / n_tanks) ** n_tanks \\\n * _np.exp(_np.clip(-t / rt_mean * n_tanks, -100, 0))\n\n if logger:\n _report_suspicious_peak_shape(t, p, rt_mean, logger,\n ignore_min_start=True,\n open_end=allow_open_end)\n return p\n\n\ndef _report_suspicious_peak_shape(\n t: _np.ndarray, p: _np.ndarray, rt_mean: float,\n logger: _typing.Union[_RtdLogger, None],\n ignore_min_start=False,\n open_end=False):\n dt = t[1] - t[0]\n # check values at edges\n if not ignore_min_start:\n rel_start = p[0] / p.max()\n if rel_start > 0.05:\n logger.e(f\"Peak shape: relative value at start: {rel_start}\")\n elif rel_start > 0.001:\n logger.w(f\"Peak shape: relative value at start: {rel_start}\")\n if not open_end:\n rel_end = p[-1] / p.max()\n if rel_end > 0.05:\n logger.e(f\"Peak shape: relative value at end: {rel_end}\")\n elif rel_end > 0.001:\n logger.w(f\"Peak shape: relative value at end: {rel_end}\")\n # check rt_mean\n p_rt_mean = _np.sum(t * p) * dt\n rel_diff = abs(rt_mean - p_rt_mean) / rt_mean\n if rel_diff > 0.1:\n logger.e(f\"Peak shape: relative difference in rt_mean: {rel_diff}\")\n elif rel_diff > 0.01:\n logger.w(f\"Peak shape: relative difference in rt_mean: {rel_diff}\")\n # check normalization\n s = p.sum() * dt\n if abs(s - 1) > 0.1:\n logger.e(f\"Peak shape: integral: {s}\")\n elif abs(s - 1) > 0.01:\n logger.w(f\"Peak shape: integral: {s}\")\n"
] |
[
[
"numpy.sqrt",
"numpy.clip",
"numpy.math.factorial",
"scipy.special.erfc",
"numpy.any",
"numpy.exp",
"numpy.sum"
]
] |
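`_report_suspicious_peak_shape` enforces the two invariants stated in the module docstring: an un-clipped peak integrates to 1 and has first moment `rt_mean`. A quick numerical check of both for the `gaussian` peak (grid and parameters chosen so the clipping warnings would not fire):

```python
import numpy as np

def gaussian(t, rt_mean, sigma):
    # Same formula as peak_shapes.gaussian, without the logger plumbing.
    return np.exp(-((t - rt_mean) / sigma) ** 2 / 2) / (sigma * np.sqrt(2 * np.pi))

dt = 0.01
t = np.arange(0.0, 100.0, dt)
p = gaussian(t, rt_mean=50.0, sigma=3.0)

print(p.sum() * dt)         # ~1.0   (integral of an un-clipped peak)
print(np.sum(t * p) * dt)   # ~50.0  (first moment == rt_mean)
```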
mwhitehill/Tacotron-2
|
[
"62c7f32b6620ff427d396be56ff60d5fc0bc0bb2"
] |
[
"code/tacotron/models/attention.py"
] |
[
"\"\"\"Attention file for location based attention (compatible with tensorflow attention wrapper)\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.contrib.seq2seq.python.ops.attention_wrapper import BahdanauAttention\nfrom tensorflow.python.layers import core as layers_core\nfrom tensorflow.python.ops import array_ops, math_ops, nn_ops, variable_scope\n\n\n#From https://github.com/tensorflow/tensorflow/blob/r1.7/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py\ndef _compute_attention(attention_mechanism, cell_output, attention_state,\n\t\t\t\t\t attention_layer, prev_max_attentions):\n\t\"\"\"Computes the attention and alignments for a given attention_mechanism.\"\"\"\n\talignments, next_attention_state, max_attentions = attention_mechanism(\n\t\tcell_output, state=attention_state, prev_max_attentions=prev_max_attentions)\n\n\t# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]\n\texpanded_alignments = array_ops.expand_dims(alignments, 1)\n\t# Context is the inner product of alignments and values along the\n\t# memory time dimension.\n\t# alignments shape is\n\t# [batch_size, 1, memory_time]\n\t# attention_mechanism.values shape is\n\t# [batch_size, memory_time, memory_size]\n\t# the batched matmul is over memory_time, so the output shape is\n\t# [batch_size, 1, memory_size].\n\t# we then squeeze out the singleton dim.\n\tcontext = math_ops.matmul(expanded_alignments, attention_mechanism.values)\n\tcontext = array_ops.squeeze(context, [1])\n\n\tif attention_layer is not None:\n\t\tattention = attention_layer(array_ops.concat([cell_output, context], 1))\n\telse:\n\t\tattention = context\n\n\treturn attention, alignments, next_attention_state, max_attentions\n\ndef _location_sensitive_score(W_query, W_fil, W_keys):\n\t\"\"\"Impelements Bahdanau-style (cumulative) scoring function.\n\tThis attention is described in:\n\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t gio, “Attention-based models for speech recognition,” in Ad-\n\t vances in Neural Information Processing Systems, 2015, pp.\n\t 577–585.\n\n\t#############################################################################\n\t\t\t hybrid attention (content-based + location-based)\n\t\t\t\t\t\t\t f = F * α_{i-1}\n\t energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a))\n\t#############################################################################\n\n\tArgs:\n\t\tW_query: Tensor, shape '[batch_size, 1, attention_dim]' to compare to location features.\n\t\tW_location: processed previous alignments into location features, shape '[batch_size, max_time, attention_dim]'\n\t\tW_keys: Tensor, shape '[batch_size, max_time, attention_dim]', typically the encoder outputs.\n\tReturns:\n\t\tA '[batch_size, max_time]' attention score (energy)\n\t\"\"\"\n\t# Get the number of hidden units from the trailing dimension of keys\n\tdtype = W_query.dtype\n\tnum_units = W_keys.shape[-1].value or array_ops.shape(W_keys)[-1]\n\n\tv_a = tf.get_variable(\n\t\t'attention_variable_projection', shape=[num_units], dtype=dtype,\n\t\tinitializer=tf.contrib.layers.xavier_initializer())\n\tb_a = tf.get_variable(\n\t\t'attention_bias', shape=[num_units], dtype=dtype,\n\t\tinitializer=tf.zeros_initializer())\n\n\treturn tf.reduce_sum(v_a * tf.tanh(W_keys + W_query + W_fil + b_a), [2])\n\ndef _smoothing_normalization(e):\n\t\"\"\"Applies a smoothing normalization function instead of softmax\n\tIntroduced in:\n\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. 
Ben-\n\t gio, “Attention-based models for speech recognition,” in Ad-\n\t vances in Neural Information Processing Systems, 2015, pp.\n\t 577–585.\n\n\t############################################################################\n\t\t\t\t\t\tSmoothing normalization function\n\t\t\t\ta_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))\n\t############################################################################\n\n\tArgs:\n\t\te: matrix [batch_size, max_time(memory_time)]: expected to be energy (score)\n\t\t\tvalues of an attention mechanism\n\tReturns:\n\t\tmatrix [batch_size, max_time]: [0, 1] normalized alignments with possible\n\t\t\tattendance to multiple memory time steps.\n\t\"\"\"\n\treturn tf.nn.sigmoid(e) / tf.reduce_sum(tf.nn.sigmoid(e), axis=-1, keepdims=True)\n\n\nclass LocationSensitiveAttention(BahdanauAttention):\n\t\"\"\"Impelements Bahdanau-style (cumulative) scoring function.\n\tUsually referred to as \"hybrid\" attention (content-based + location-based)\n\tExtends the additive attention described in:\n\t\"D. Bahdanau, K. Cho, and Y. Bengio, “Neural machine transla-\n tion by jointly learning to align and translate,” in Proceedings\n of ICLR, 2015.\"\n\tto use previous alignments as additional location features.\n\n\tThis attention is described in:\n\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n gio, “Attention-based models for speech recognition,” in Ad-\n vances in Neural Information Processing Systems, 2015, pp.\n 577–585.\n\t\"\"\"\n\n\tdef __init__(self,\n\t\t\t\t num_units,\n\t\t\t\t memory,\n\t\t\t\t hparams,\n\t\t\t\t is_training,\n\t\t\t\t mask_encoder=True,\n\t\t\t\t memory_sequence_length=None,\n\t\t\t\t smoothing=False,\n\t\t\t\t cumulate_weights=True,\n\t\t\t\t name='LocationSensitiveAttention',\n\t\t\t\t use_scope=False):\n\t\t\"\"\"Construct the Attention mechanism.\n\t\tArgs:\n\t\t\tnum_units: The depth of the query mechanism.\n\t\t\tmemory: The memory to query; usually the output of an RNN encoder. This\n\t\t\t\ttensor should be shaped `[batch_size, max_time, ...]`.\n\t\t\tmask_encoder (optional): Boolean, whether to mask encoder paddings.\n\t\t\tmemory_sequence_length (optional): Sequence lengths for the batch entries\n\t\t\t\tin memory. If provided, the memory tensor rows are masked with zeros\n\t\t\t\tfor values past the respective sequence lengths. Only relevant if mask_encoder = True.\n\t\t\tsmoothing (optional): Boolean. Determines which normalization function to use.\n\t\t\t\tDefault normalization function (probablity_fn) is softmax. If smoothing is\n\t\t\t\tenabled, we replace softmax with:\n\t\t\t\t\t\ta_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))\n\t\t\t\tIntroduced in:\n\t\t\t\t\tJ. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben-\n\t\t\t\t gio, “Attention-based models for speech recognition,” in Ad-\n\t\t\t\t vances in Neural Information Processing Systems, 2015, pp.\n\t\t\t\t 577–585.\n\t\t\t\tThis is mainly used if the model wants to attend to multiple input parts\n\t\t\t\tat the same decoding step. We probably won't be using it since multiple sound\n\t\t\t\tframes may depend on the same character/phone, probably not the way around.\n\t\t\t\tNote:\n\t\t\t\t\tWe still keep it implemented in case we want to test it. 
They used it in the\n\t\t\t\t\tpaper in the context of speech recognition, where one phoneme may depend on\n\t\t\t\t\tmultiple subsequent sound frames.\n\t\t\tname: Name to use when creating ops.\n\t\t\"\"\"\n\t\t#Create normalization function\n\t\t#Setting it to None defaults in using softmax\n\t\tnormalization_function = _smoothing_normalization if (smoothing == True) else None\n\t\tmemory_length = memory_sequence_length if (mask_encoder==True) else None\n\t\twith tf.variable_scope(\"attention\"):\n\t\t\tsuper(LocationSensitiveAttention, self).__init__(\n\t\t\tnum_units=num_units,\n\t\t\tmemory=memory,\n\t\t\tmemory_sequence_length=memory_length,\n\t\t\tprobability_fn=normalization_function,\n\t\t\tname=name)\n\n\t\tself.location_convolution = tf.layers.Conv1D(filters=hparams.attention_filters,\n\t\t\tkernel_size=hparams.attention_kernel, padding='same', use_bias=True,\n\t\t\tbias_initializer=tf.zeros_initializer(), name='location_features_convolution')\n\t\tself.location_layer = tf.layers.Dense(units=num_units, use_bias=False,\n\t\t\tdtype=tf.float32, name='location_features_layer')\n\t\tself._cumulate = cumulate_weights\n\t\tself.synthesis_constraint = hparams.synthesis_constraint and not is_training\n\t\tself.attention_win_size = tf.convert_to_tensor(hparams.attention_win_size, dtype=tf.int32)\n\t\tself.constraint_type = hparams.synthesis_constraint_type\n\n\tdef __call__(self, query, state, prev_max_attentions):\n\t\t\"\"\"Score the query based on the keys and values.\n\t\tArgs:\n\t\t\tquery: Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, query_depth]`.\n\t\t\tstate (previous alignments): Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, alignments_size]`\n\t\t\t\t(`alignments_size` is memory's `max_time`).\n\t\tReturns:\n\t\t\talignments: Tensor of dtype matching `self.values` and shape\n\t\t\t\t`[batch_size, alignments_size]` (`alignments_size` is memory's\n\t\t\t\t`max_time`).\n\t\t\"\"\"\n\t\tprevious_alignments = state\n\t\twith variable_scope.variable_scope(None, \"Location_Sensitive_Attention\", [query]):\n\n\t\t\t# processed_query shape [batch_size, query_depth] -> [batch_size, attention_dim]\n\t\t\tprocessed_query = self.query_layer(query) if self.query_layer else query\n\t\t\t# -> [batch_size, 1, attention_dim]\n\t\t\tprocessed_query = tf.expand_dims(processed_query, 1)\n\n\t\t\t# processed_location_features shape [batch_size, max_time, attention dimension]\n\t\t\t# [batch_size, max_time] -> [batch_size, max_time, 1]\n\t\t\texpanded_alignments = tf.expand_dims(previous_alignments, axis=2)\n\t\t\t# location features [batch_size, max_time, filters]\n\t\t\tf = self.location_convolution(expanded_alignments)\n\t\t\t# Projected location features [batch_size, max_time, attention_dim]\n\t\t\tprocessed_location_features = self.location_layer(f)\n\n\t\t\t# energy shape [batch_size, max_time]\n\t\t\tenergy = _location_sensitive_score(processed_query, processed_location_features, self.keys)\n\n\t\tif self.synthesis_constraint:\n\t\t\tTx = tf.shape(energy)[-1]\n\t\t\t# prev_max_attentions = tf.squeeze(prev_max_attentions, [-1])\n\t\t\tif self.constraint_type == 'monotonic':\n\t\t\t\tkey_masks = tf.sequence_mask(prev_max_attentions, Tx)\n\t\t\t\treverse_masks = tf.sequence_mask(Tx - self.attention_win_size - prev_max_attentions, Tx)[:, ::-1]\n\t\t\telse:\n\t\t\t\tassert self.constraint_type == 'window'\n\t\t\t\tkey_masks = tf.sequence_mask(prev_max_attentions - (self.attention_win_size // 2 + (self.attention_win_size % 2 != 0)), 
Tx)\n\t\t\t\treverse_masks = tf.sequence_mask(Tx - (self.attention_win_size // 2) - prev_max_attentions, Tx)[:, ::-1]\n\t\t\t\n\t\t\tmasks = tf.logical_or(key_masks, reverse_masks)\n\t\t\tpaddings = tf.ones_like(energy) * (-2 ** 32 + 1) # (N, Ty/r, Tx)\n\t\t\tenergy = tf.where(tf.equal(masks, False), energy, paddings)\n\n\t\t# alignments shape = energy shape = [batch_size, max_time]\n\t\talignments = self._probability_fn(energy, previous_alignments)\n\t\tmax_attentions = tf.argmax(alignments, -1, output_type=tf.int32) # (N, Ty/r)\n\n\t\t# Cumulate alignments\n\t\tif self._cumulate:\n\t\t\tnext_state = alignments + previous_alignments\n\t\telse:\n\t\t\tnext_state = alignments\n\n\t\treturn alignments, next_state, max_attentions\n\n\nclass SimpleBahdanauAttention():\n\tdef __init__(self, units, values):\n\t\tsuper(SimpleBahdanauAttention, self).__init__()\n\n\t\tself.units = units\n\t\tself.values = values\n\t\twith tf.variable_scope(\"attention_emt\"):\n\t\t\tself.W1 = tf.layers.Dense(units,name='W1')\n\t\t\tself.W2 = tf.layers.Dense(units,name='W2')\n\t\t\tself.V = tf.layers.Dense(1,name='V')\n\n\tdef __call__(self, query):\n\t\t# hidden shape == (batch_size, hidden size)\n\t\t# hidden_with_time_axis shape == (batch_size, 1, hidden size)\n\t\t# we are doing this to perform addition to calculate the score\n\t\thidden_with_time_axis = tf.expand_dims(query, 1)\n\n\t\t# score shape == (batch_size, max_length, 1)\n\t\t# we get 1 at the last axis because we are applying score to self.V\n\t\t# the shape of the tensor before applying self.V is (batch_size, max_length, units)\n\t\tscore = self.V(tf.nn.tanh(self.W1(self.values) + self.W2(hidden_with_time_axis)))\n\n\t\t# attention_weights shape == (batch_size, max_length, 1)\n\t\tattention_weights = tf.nn.softmax(score, dim=1)\n\n\t\t# context_vector shape after sum == (batch_size, hidden_size)\n\t\tcontext_vector = attention_weights * self.values\n\t\tcontext_vector = tf.reduce_sum(context_vector, axis=1)\n\t\tattention_weights = tf.squeeze(attention_weights,squeeze_dims=-1)\n\n\t\treturn context_vector, attention_weights\n"
] |
[
[
"tensorflow.convert_to_tensor",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.reduce_sum",
"tensorflow.equal",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.tanh",
"tensorflow.logical_or",
"tensorflow.squeeze",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.argmax",
"tensorflow.nn.sigmoid",
"tensorflow.shape",
"tensorflow.zeros_initializer",
"tensorflow.layers.Dense",
"tensorflow.sequence_mask",
"tensorflow.nn.softmax",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.variable_scope",
"tensorflow.python.ops.array_ops.expand_dims"
]
] |
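The _smoothing_normalization function above trades softmax's sharply peaked alignments for sigmoid-normalized weights that can stay high at several memory steps at once. A minimal numpy sketch of the two normalizations side by side (numpy stands in for TensorFlow here; the energy values are illustrative):

    import numpy as np

    def smoothing_normalization(e):
        # a_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))
        s = 1.0 / (1.0 + np.exp(-e))
        return s / s.sum(axis=-1, keepdims=True)

    energy = np.array([[2.0, 1.0, -3.0]])  # [batch_size=1, max_time=3]
    print(smoothing_normalization(energy))                              # ~[0.53, 0.44, 0.03]
    print(np.exp(energy) / np.exp(energy).sum(axis=-1, keepdims=True))  # softmax: ~[0.73, 0.27, 0.005]

The sigmoid variant keeps meaningful mass on the second step, which is why it suits attending to multiple input parts in one decoding step.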
edunasci/FlexAEAD
|
[
"a9a3225f6edb4a42a1733a4a69e42e1f8ac42c99"
] |
[
"flexaeadv11/ref/python/test.py"
] |
[
"#!/usr/bin/env python3\n\nfrom FlexAEADv11SBox import FlexAEADv11SBox\nfrom FlexAEADv11 import FlexAEADv11\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n#\"\"\"\ndef dirShuffleLayer( a1 ):\n half = int(len(a1)/2)\n state = np.zeros(np.shape(a1),dtype=int)\n for i in range(half):\n state[(2*i)+0] = a1[(half*0)+i]\n state[(2*i)+1] = a1[(half*1)+i]\n return state\n \ndef invShuffleLayer( a1 ):\n half = int(len(a1)/2)\n state = np.zeros(np.shape(a1),dtype=int)\n for i in range(half):\n state[(half*0)+i] = a1[(2*i)+0]\n state[(half*1)+i] = a1[(2*i)+1]\n return a1\n#\"\"\"\n\"\"\"\ndef dirShuffleLayer( a1 ):\n quarter = int(len(a1)/4)\n state = np.zeros(np.shape(a1),dtype=int)\n for i in range(quarter):\n state[(4*i)+0] = a1[(quarter*0)+i]\n state[(4*i)+1] = a1[(quarter*1)+i]\n state[(4*i)+2] = a1[(quarter*2)+i]\n state[(4*i)+3] = a1[(quarter*3)+i]\n return state\n \ndef invShuffleLayer( a1 ):\n quarter = int(len(a1)/4)\n state = np.zeros(np.shape(a1),dtype=int)\n for i in range(quarter):\n state[(quarter*0)+i] = a1[(4*i)+0]\n state[(quarter*1)+i] = a1[(4*i)+1]\n state[(quarter*2)+i] = a1[(4*i)+2]\n state[(quarter*3)+i] = a1[(4*i)+3]\n return a1\n\"\"\"\ndef dirMixQuartersLayer( a1 ):\n quarter = int(len(a1)/4)\n state = np.zeros(np.shape(a1),dtype=int)\n for i in range(quarter):\n state[(0*quarter)+i] = a1[(1*quarter)+i]+a1[(2*quarter)+i]+a1[(3*quarter)+i]\n state[(1*quarter)+i] = a1[(0*quarter)+i]+a1[(2*quarter)+i]+a1[(3*quarter)+i]\n state[(2*quarter)+i] = a1[(0*quarter)+i]+a1[(1*quarter)+i]+a1[(3*quarter)+i]\n state[(3*quarter)+i] = a1[(0*quarter)+i]+a1[(1*quarter)+i]+a1[(2*quarter)+i]\n return state\n\nif __name__ == \"__main__\":\n import sys\n import random\n import time\n # track execution time\n from datetime import datetime\n startTime=datetime.now()\n #\n np.set_printoptions(threshold=1024)\n #plt.ion()\n for i in [8, 16, 32]:\n print( '' )\n print( '' )\n print( '#### {} bits block ####'.format(8*i) )\n print( '#### validate functions ####' )\n block=b''\n for n in range(i):\n block += bytes([random.randint(0,256)])\n #block=bytes([*range(i)])\n a1 = np.identity(i,dtype=int)\n print( ' block -> ' + block.hex())\n print( ' a1 -> ')\n print(a1)\n block = FlexAEADv11.dirShuffleLayer(block)\n print( ' dirShuffleLayer -> ' + block.hex())\n block = FlexAEADv11.invShuffleLayer(block)\n print( ' invShuffleLayer -> ' + block.hex())\n block = FlexAEADv11.dirMixQuartersLayer(block)\n print( 'dirMixQuartersLayer -> ' + block.hex())\n block = FlexAEADv11.dirMixQuartersLayer(block)\n print( 'invMixQuartersLayer -> ' + block.hex())\n print( '#### permutation function effect ####' )\n print( ' block -> ' + block.hex())\n for n in range(int(math.log(i,2))+1):\n #for n in range(i):\n block = FlexAEADv11.dirShuffleLayer(block)\n a1 = dirShuffleLayer(a1)\n print( ' dirShuffleLayer -> ' + block.hex())\n block = FlexAEADv11.dirMixQuartersLayer(block)\n a1 = dirMixQuartersLayer(a1)\n print( 'dirMixQuartersLayer -> ' + block.hex())\n block = FlexAEADv11.dirSBoxLayer(block)\n print( ' dirSBoxLayer -> ' + block.hex())\n print( ' a1 -> ')\n print(a1)\n print( ' np.max(a1) -> {}'.format(np.max(a1)))\n print( ' np.min(a1) -> {}'.format(np.min(a1)))\n print( ' dif -> {}'.format(np.max(a1)-np.min(a1)))\n print( ' n+1 -> {}'.format(n+1))\n #imgplot = plt.imshow(a1)\n #plt.show()\n #time.sleep(2)\n #print( '#### result ####' )\n #print( ' block -> ' + block.hex())\n #print( ' a1 -> ')\n #print(a1)\n imgplot = plt.imshow(a1)\n plt.show()\n #time.sleep(2)\n\n \n # track execution time\n 
finishTime=datetime.now()\n print( '\\nStart: {}, Finish:{}, Running Time: {}'\n ''.format(startTime.replace(microsecond=0),\n finishTime.replace(microsecond=0),\n finishTime-startTime))\n ################### END #################\n \n"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.min",
"numpy.set_printoptions",
"numpy.max",
"numpy.identity",
"numpy.shape",
"matplotlib.pyplot.show"
]
] |
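Since the shuffle layers in the test above are meant to be mutual inverses (the return-value slip fixed above broke that), a quick round-trip property check makes the contract explicit. A minimal sketch using the same index arithmetic on a plain list; the function names here are local to the sketch, not the FlexAEADv11 API:

    def dir_shuffle(a):
        half = len(a) // 2
        out = [None] * len(a)
        for i in range(half):
            out[2 * i] = a[i]             # first half -> even positions
            out[2 * i + 1] = a[half + i]  # second half -> odd positions
        return out

    def inv_shuffle(a):
        half = len(a) // 2
        out = [None] * len(a)
        for i in range(half):
            out[i] = a[2 * i]
            out[half + i] = a[2 * i + 1]
        return out

    block = list(range(8))
    assert inv_shuffle(dir_shuffle(block)) == block  # the round trip is the identity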
Sheriff-A/OpenCV
|
[
"3f1864d921b39a87f802a6b2d1ffef95fc45d813"
] |
[
"shape-detection.py"
] |
[
"import cv2 as cv\nimport numpy as np\n\nframeWidth = 640\nframeHeight = 480\ncap = cv.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(3, frameHeight)\n\n\ndef empty(a):\n pass\n\n\ncv.namedWindow('Parameters')\ncv.resizeWindow('Parameters', frameWidth, frameHeight // 2)\ncv.createTrackbar('Threshold1', 'Parameters', 20, 255, empty)\ncv.createTrackbar('Threshold2', 'Parameters', 20, 255, empty)\ncv.createTrackbar('Area', 'Parameters', 2000, 30000, empty)\n\n\ndef get_contours(img, img_contour):\n contours, hierarchy = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n\n for cnt in contours:\n area = cv.contourArea(cnt)\n area_min = cv.getTrackbarPos('Area', 'Parameters')\n if area > area_min:\n cv.drawContours(img_contour, cnt, -1, (255, 0, 255), 7)\n peri = cv.arcLength(cnt, True)\n approx = cv.approxPolyDP(cnt, 0.02 * peri, True)\n # print(len(approx))\n x, y, w, h = cv.boundingRect(approx)\n cv.rectangle(img_contour, (x, y), (x + w, y + h), (0, 255, 0), 5)\n cv.putText(img_contour, 'Points: ' + str(len(approx)), (x + w + 20, y + 20),\n cv.FONT_HERSHEY_COMPLEX, .7, (0, 255, 0), 2)\n cv.putText(img_contour, 'Area: ' + str(int(area)), (x + w + 20, y + 45),\n cv.FONT_HERSHEY_COMPLEX, .7, (0, 255, 0), 2)\n\n\nwhile True:\n _, frame = cap.read()\n # frame = cv.flip(frame, -1) # Vertical Flip\n contour = frame.copy()\n\n blur = cv.GaussianBlur(frame, (7, 7), 1)\n gray = cv.cvtColor(blur, cv.COLOR_BGR2GRAY)\n\n threshold1 = cv.getTrackbarPos('Threshold1', 'Parameters')\n threshold2 = cv.getTrackbarPos('Threshold2', 'Parameters')\n canny = cv.Canny(gray, threshold1, threshold2)\n kernel = np.ones((5, 5))\n dil = cv.dilate(canny, kernel, iterations=1)\n get_contours(dil, contour)\n\n frame = np.hstack([frame, cv.cvtColor(dil, cv.COLOR_GRAY2BGR), contour])\n cv.imshow('Frame', frame)\n if cv.waitKey(1) == ord('q'):\n break\n\ncap.release()\ncv.destroyAllWindows()\n"
] |
[
[
"numpy.ones"
]
] |
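The width/height fix in the script above comes down to VideoCapture property IDs: 3 is CAP_PROP_FRAME_WIDTH and 4 is CAP_PROP_FRAME_HEIGHT, so setting property 3 twice silently leaves the height at its default. Using the named constants avoids the slip entirely; a minimal sketch:

    import cv2 as cv

    cap = cv.VideoCapture(0)
    cap.set(cv.CAP_PROP_FRAME_WIDTH, 640)   # equivalent to cap.set(3, 640)
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, 480)  # equivalent to cap.set(4, 480)
    print(cap.get(cv.CAP_PROP_FRAME_WIDTH), cap.get(cv.CAP_PROP_FRAME_HEIGHT))
    cap.release()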
LosWochos76/haushalt_wetter_extractor
|
[
"1132c38ed3fd132262b7263368db7a7eebe6086d"
] |
[
"budget.py"
] |
[
"import pdfplumber\nimport xlsxwriter\nimport sys\nimport os\nimport pandas as pd\nfrom page import Page\n\nclass Budget:\n\tdef __init__(self, pdf_filename, year):\n\t\tself.pdf_filename = pdf_filename\n\t\tself.source_year = year\n\t\tself.cache_dir = \"page_cache_\" + str(year)\n\t\tself.pages = []\n\t\tself.dataframe = pd.DataFrame(columns=['Quelle','Seite','Produktbereich','Produktgruppe','Produkt','Rechtsbindung','Typ','Position','Ansatz','Wert'])\n\t\n\tdef extract_text_from_pdf(self):\n\t\tif not os.path.exists(self.cache_dir):\n\t\t\tprint(\"Extrahiere Text aus dem PDF-Dokument...\")\n\t\t\tos.mkdir(self.cache_dir)\n\n\t\t\twith pdfplumber.open(self.pdf_filename) as pdf:\n\t\t\t\tpages_count = len(pdf.pages);\n\t\t\t\tfor page_number in range(pages_count):\n\t\t\t\t\tpage_text = str(pdf.pages[page_number].extract_text())\n\t\t\t\t\tf = open(os.path.join(self.cache_dir, \"{0:0=3d}\".format(page_number + 1) + \".txt\"), \"w\")\n\t\t\t\t\tf.write(page_text)\n\t\t\t\t\tf.close()\n\t\telse:\n\t\t\tprint(\"Das Verzeichnis '\" + self.cache_dir + \"' existiert bereits! Löschen, um Extraktion erneut durchzuführen!\")\n\t\n\tdef read_pages_from_text_files(self):\n\t\tprint(\"Verarbeite Textdateien aus '\" + self.cache_dir + \"'...\")\n\n\t\tfiles = [f for f in os.listdir(self.cache_dir) if os.path.isfile(os.path.join(self.cache_dir, f))]\n\t\tfor file_name in sorted(files):\n\t\t\tpage_number = int(file_name[0:3])\n\t\t\twith open(os.path.join(self.cache_dir, file_name), \"r\") as myfile:\n\t\t\t\tpage_lines = myfile.readlines()\n\t\t\t\tself._add_page(page_number, page_lines)\n\t\n\tdef write_data_to_excel(self):\n\t\tprint(\"Schreibe Excel-Sheet mit Daten...\")\n\n\t\tfilename = \"Data_\" + str(self.source_year) + \".xlsx\"\n\t\twriter = pd.ExcelWriter(filename, engine='xlsxwriter')\n\t\tself.dataframe.to_excel(writer, index=False)\n\t\twriter.save()\n\t\n\tdef write_product_list_to_excel(self):\n\t\tprint(\"Schreibe Excel-Sheet für Produkte...\")\n\t\tworkbook = xlsxwriter.Workbook(\"Produkte_\" + str(self.source_year) + \".xlsx\")\n\t\tworksheet = workbook.add_worksheet()\n\t\tprodukte = self.get_products()\n\n\t\tfor i in range(0, len(produkte)):\n\t\t\trow = produkte[i]\n\t\t\tfor j in range(0, len(row)):\n\t\t\t\tcolumn = row[j]\n\t\t\t\tworksheet.write(i, j, column)\n\n\t\tworkbook.close()\n\n\tdef _add_page(self, page_number, page_lines):\n\t\tp = Page(self.source_year, page_number, page_lines)\n\n\t\tif p.is_page_relevant():\n\t\t\tself.pages.append(p)\n\t\t\trows = p.extract_data()\n\t\t\tself.dataframe = self.dataframe.append(rows, ignore_index=True)\n\t\t\n\tdef get_products(self):\n\t\tproducts = []\n\t\t\n\t\tfor i in range(0, len(self.pages)):\n\t\t\tpage = self.pages[i]\n\t\t\tif page.meta[\"typ\"] == \"TF\":\n\t\t\t\tproducts.append([page.meta[\"obergruppe\"], page.meta[\"mittelgruppe\"], page.meta[\"untergruppe\"], page.meta[\"name\"], page.meta[\"rechtsbindung\"]])\n\t\t\n\t\treturn products"
] |
[
[
"pandas.DataFrame",
"pandas.ExcelWriter"
]
] |
acdick/endangered_species_classification
|
[
"cb022785a3dde20082c9c4ef18aa6d3e75f4ee1b"
] |
[
"Source/classifiers.py"
] |
[
"from sklearn.model_selection import GridSearchCV\n\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.svm import SVC\n\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import confusion_matrix\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style(\"darkgrid\")\n\nimport pandas as pd\n\ndef grid_search_dummy_classifier(parameters):\n \n classifier = {'Classifier': 'Dummy',\n 'Grid Search': GridSearchCV(\n DummyClassifier(),\n parameters,\n cv=5,\n scoring=['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'],\n refit='f1_macro',\n return_train_score=False,\n verbose=1)}\n \n return classifier\n\ndef grid_search_logistic_regression(parameters):\n \n classifier = {'Classifier': 'Logistic Regression',\n 'Grid Search': GridSearchCV(\n LogisticRegression(),\n parameters,\n cv=5,\n scoring=['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'],\n refit='f1_macro',\n return_train_score=False,\n verbose=10,\n n_jobs=-1)}\n \n return classifier\n\ndef grid_search_multinomial_nb(parameters):\n \n classifier = {'Classifier': 'Multinomial Naive Bayes',\n 'Grid Search': GridSearchCV(\n MultinomialNB(),\n parameters,\n cv=5,\n scoring=['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'],\n refit='f1_macro',\n return_train_score=False,\n verbose=10,\n n_jobs=-1)}\n \n return classifier\n\ndef grid_search_k_neighbors_classifier(parameters):\n \n classifier = {'Classifier': 'K Nearest Neighbors',\n 'Grid Search': GridSearchCV(\n KNeighborsClassifier(),\n parameters,\n cv=5,\n scoring=['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'],\n refit='f1_macro',\n return_train_score=False,\n verbose=10,\n n_jobs=-1)}\n \n return classifier\n\ndef grid_search_decision_tree_classifier(parameters):\n \n classifier = {'Classifier': 'Decision Tree',\n 'Grid Search': GridSearchCV(\n DecisionTreeClassifier(),\n parameters,\n cv=5,\n scoring=['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'],\n refit='f1_macro',\n return_train_score=False,\n verbose=10,\n n_jobs=-1)}\n \n return classifier\n\ndef grid_search_random_forest_classifier(parameters):\n \n classifier = {'Classifier': 'Random Forest',\n 'Grid Search': GridSearchCV(\n RandomForestClassifier(),\n parameters,\n cv=5,\n scoring=['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'],\n refit='f1_macro',\n return_train_score=False,\n verbose=10,\n n_jobs=-1)}\n \n return classifier\n\ndef grid_search_ada_boost_classifier(parameters):\n \n classifier = {'Classifier': 'Ada Boost',\n 'Grid Search': GridSearchCV(\n AdaBoostClassifier(),\n parameters,\n cv=5,\n scoring=['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'],\n refit='f1_macro',\n return_train_score=False,\n verbose=10,\n n_jobs=-1)}\n \n return classifier\n\ndef grid_search_support_vector_classifier(parameters):\n \n classifier = {'Classifier': 'Support Vector Machines',\n 'Grid Search': GridSearchCV(\n SVC(),\n parameters,\n cv=5,\n scoring=['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'],\n refit='f1_macro',\n return_train_score=False,\n verbose=10,\n n_jobs=-1)}\n \n 
return classifier\n\ndef fit_predict_measure(balance, X_train, X_test, y_train, y_test, y_labels, classifiers):\n \n all_models = pd.DataFrame()\n \n for classifier in classifiers:\n \n # fit training set\n print('Running jobs: ' + classifier['Classifier'])\n classifier['Grid Search'].fit(X_train, y_train)\n \n # cross-validated training metrics\n train = pd.DataFrame(classifier['Grid Search'].cv_results_)\n \n train = train[['mean_fit_time',\n 'mean_score_time',\n 'params',\n 'mean_test_accuracy',\n 'mean_test_precision_macro',\n 'mean_test_recall_macro',\n 'mean_test_f1_macro']]\n \n train = train.rename(index=str, columns={'mean_fit_time': 'Fit Time',\n 'mean_score_time': 'Score Time',\n 'params': 'Parameters',\n 'mean_test_accuracy': 'Accuracy',\n 'mean_test_precision_macro': 'Precision',\n 'mean_test_recall_macro': 'Recall',\n 'mean_test_f1_macro': 'F1 Score'})\n \n train['Balance'] = balance\n train['Classifier'] = classifier['Classifier']\n train['Split'] = 'Train'\n train['Total Time'] = train['Fit Time'] + train['Score Time']\n all_models = all_models.append(train, ignore_index=True)\n \n # hold-out test performance for best estimators\n y_hat_test = classifier['Grid Search'].predict(X_test)\n \n all_models = all_models.append(\n {'Parameters': classifier['Grid Search'].best_params_,\n 'Fit Time': train.loc[train['Parameters'] == classifier['Grid Search'].best_params_, 'Fit Time'][0],\n 'Score Time': train.loc[train['Parameters'] == classifier['Grid Search'].best_params_, 'Score Time'][0],\n 'Total Time': train.loc[train['Parameters'] == classifier['Grid Search'].best_params_, 'Total Time'][0],\n 'Accuracy': accuracy_score( y_test, y_hat_test),\n 'Precision': precision_score(y_test, y_hat_test, average='macro'),\n 'Recall': recall_score( y_test, y_hat_test, average='macro'),\n 'F1 Score': f1_score( y_test, y_hat_test, average='macro'),\n 'Balance': balance,\n 'Classifier': classifier['Classifier'],\n 'Split': 'Test',\n 'Confusion Matrix': confusion_matrix(y_test, y_hat_test, labels=y_labels)}, ignore_index=True)\n \n all_models = all_models[['Balance', 'Classifier', 'Parameters', 'Split',\n 'Accuracy', 'Precision', 'Recall', 'F1 Score',\n 'Fit Time', 'Score Time', 'Total Time',\n 'Confusion Matrix']]\n \n return all_models, classifiers\n\ndef plot_confusion_matrices(confusion_matrices, y_labels):\n fig, axes = plt.subplots(nrows=confusion_matrices.shape[0],\n ncols=confusion_matrices.shape[1],\n figsize=(14,36))\n \n for i in range(confusion_matrices.shape[0]):\n for j in range(confusion_matrices.shape[1]):\n cm = confusion_matrices.iloc[i][j]\n cm = cm.astype('float') / np.sum(cm)\n cm = pd.DataFrame(cm, columns=y_labels, index=y_labels)\n \n sns.heatmap(\n cm,\n cmap='Blues',\n cbar=False,\n annot=True,\n fmt='.0%',\n linewidths=.5,\n square=True,\n ax=axes[i,j])\n \n title = confusion_matrices.columns[j] + ': ' + confusion_matrices.index[i]\n axes[i,j].set_title(label=title)\n axes[i,j].set(xlabel='Predicted Label', ylabel='True Label')\n axes[i,j].set_yticklabels(labels=y_labels, fontdict={'verticalalignment': 'center'})\n \n return fig, axes"
] |
[
[
"sklearn.dummy.DummyClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.metrics.precision_score",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.metrics.confusion_matrix",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.svm.SVC",
"sklearn.metrics.f1_score",
"sklearn.metrics.recall_score",
"numpy.sum",
"sklearn.metrics.accuracy_score"
]
] |
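Every factory in classifiers.py above wraps the same GridSearchCV recipe: multi-metric scoring with refit='f1_macro', so best_estimator_ is selected by macro F1 while the other metrics are still recorded in cv_results_. A self-contained sketch of that recipe on synthetic data (the dataset and parameter grid here are illustrative, not from the project):

    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import GridSearchCV

    X, y = make_classification(n_samples=200, n_classes=3, n_informative=4, random_state=0)
    search = GridSearchCV(
        LogisticRegression(max_iter=1000),
        {'C': [0.1, 1.0, 10.0]},
        cv=5,
        scoring=['accuracy', 'f1_macro'],
        refit='f1_macro',  # best_estimator_ is chosen by macro F1
        return_train_score=False)
    search.fit(X, y)
    print(search.best_params_, search.cv_results_['mean_test_f1_macro'])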
jaehobang/Eva
|
[
"e7f649990b8bca3bc29b3832c0ecf32efb402647"
] |
[
"eva_storage/baselines/indexing/external/ssd/open_images_downloader.py"
] |
[
"import time\nimport boto3\nfrom botocore import UNSIGNED\nfrom botocore.config import Config\nimport botocore\nimport logging\nfrom multiprocessing import Pool, Manager\nimport pandas as pd\nimport os\nimport argparse\nimport sys\nimport functools\nfrom urllib import request\n\n\ns3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))\n\n\ndef download(bucket, root, retry, counter, lock, path):\n i = 0\n src = path\n dest = f\"{root}/{path}\"\n while i < retry:\n try:\n if not os.path.exists(dest):\n s3.download_file(bucket, src, dest)\n else:\n logging.info(f\"{dest} already exists.\")\n with lock:\n counter.value += 1\n if counter.value % 100 == 0:\n logging.warning(f\"Downloaded {counter.value} images.\")\n return\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n logging.warning(f\"The file s3://{bucket}/{src} does not exist.\")\n return\n i += 1\n logging.warning(f\"Sleep {i} and try again.\")\n time.sleep(i)\n logging.warning(f\"Failed to download the file s3://{bucket}/{src}. Exception: {e}\")\n\n\ndef batch_download(bucket, file_paths, root, num_workers=10, retry=10):\n with Pool(num_workers) as p:\n m = Manager()\n counter = m.Value('i', 0)\n lock = m.Lock()\n download_ = functools.partial(download, bucket, root, retry, counter, lock)\n p.map(download_, file_paths)\n\n\ndef http_download(url, path):\n with request.urlopen(url) as f:\n with open(path, \"wb\") as fout:\n buf = f.read(1024)\n while buf:\n fout.write(buf)\n buf = f.read(1024)\n\n\ndef log_counts(values):\n for k, count in values.value_counts().iteritems():\n logging.warning(f\"{k}: {count}/{len(values)} = {count/len(values):.2f}.\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Dowload open image dataset by class.')\n\n parser.add_argument(\"--root\", type=str,\n help='The root directory that you want to store the open image data.')\n parser.add_argument(\"include_depiction\", action=\"store_true\",\n help=\"Do you want to include drawings or depictions?\")\n parser.add_argument(\"--class_names\", type=str,\n help=\"the classes you want to download.\")\n parser.add_argument(\"--num_workers\", type=int, default=10,\n help=\"the classes you want to download.\")\n parser.add_argument(\"--retry\", type=int, default=10,\n help=\"retry times when downloading.\")\n parser.add_argument(\"--filter_file\", type=str, default=\"\",\n help=\"This file specifies the image ids you want to exclude.\")\n parser.add_argument('--remove_overlapped', action='store_true',\n help=\"Remove single boxes covered by group boxes.\")\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, level=logging.WARNING,\n format='%(asctime)s - %(name)s - %(message)s')\n\n args = parse_args()\n bucket = \"open-images-dataset\"\n names = [e.strip() for e in args.class_names.split(\",\")]\n class_names = []\n group_filters = []\n percentages = []\n for name in names:\n t = name.split(\":\")\n class_names.append(t[0].strip())\n if len(t) >= 2 and t[1].strip():\n group_filters.append(t[1].strip())\n else:\n group_filters.append(\"\")\n if len(t) >= 3 and t[2].strip():\n percentages.append(float(t[2].strip()))\n else:\n percentages.append(1.0)\n\n if not os.path.exists(args.root):\n os.makedirs(args.root)\n\n excluded_images = set()\n if args.filter_file:\n for line in open(args.filter_file):\n img_id = line.strip()\n if not img_id:\n continue\n excluded_images.add(img_id)\n\n class_description_file = os.path.join(args.root, 
\"class-descriptions-boxable.csv\")\n if not os.path.exists(class_description_file):\n url = \"https://temporal_filters.googleapis.com/openimages/2018_04/class-descriptions-boxable.csv\"\n logging.warning(f\"Download {url}.\")\n http_download(url, class_description_file)\n\n class_descriptions = pd.read_csv(class_description_file,\n names=[\"id\", \"ClassName\"])\n class_descriptions = class_descriptions[class_descriptions['ClassName'].isin(class_names)]\n\n image_files = []\n for dataset_type in [\"train\", \"validation\", \"test\"]:\n image_dir = os.path.join(args.root, dataset_type)\n os.makedirs(image_dir, exist_ok=True)\n\n annotation_file = f\"{args.root}/{dataset_type}-annotations-bbox.csv\"\n if not os.path.exists(annotation_file):\n url = f\"https://temporal_filters.googleapis.com/openimages/2018_04/{dataset_type}/{dataset_type}-annotations-bbox.csv\"\n logging.warning(f\"Download {url}.\")\n http_download(url, annotation_file)\n logging.warning(f\"Read annotation file {annotation_file}\")\n annotations = pd.read_csv(annotation_file)\n annotations = pd.merge(annotations, class_descriptions,\n left_on=\"LabelName\", right_on=\"id\",\n how=\"inner\")\n if not args.include_depiction:\n annotations = annotations.loc[annotations['IsDepiction'] != 1, :]\n\n filtered = []\n for class_name, group_filter, percentage in zip(class_names, group_filters, percentages):\n sub = annotations.loc[annotations['ClassName'] == class_name, :]\n excluded_images |= set(sub['ImageID'].sample(frac=1 - percentage))\n\n if group_filter == '~group':\n excluded_images |= set(sub.loc[sub['IsGroupOf'] == 1, 'ImageID'])\n elif group_filter == 'group':\n excluded_images |= set(sub.loc[sub['IsGroupOf'] == 0, 'ImageID'])\n filtered.append(sub)\n\n annotations = pd.concat(filtered)\n annotations = annotations.loc[~annotations['ImageID'].isin(excluded_images), :]\n\n\n if args.remove_overlapped:\n images_with_group = annotations.loc[annotations['IsGroupOf'] == 1, 'ImageID']\n annotations = annotations.loc[~(annotations['ImageID'].isin(set(images_with_group)) & (annotations['IsGroupOf'] == 0)), :]\n annotations = annotations.sample(frac=1.0)\n\n logging.warning(f\"{dataset_type} bounding boxes size: {annotations.shape[0]}\")\n logging.warning(\"Approximate Image Stats: \")\n log_counts(annotations.drop_duplicates([\"ImageID\", \"ClassName\"])[\"ClassName\"])\n logging.warning(\"Label distribution: \")\n log_counts(annotations['ClassName'])\n\n logging.warning(f\"Shuffle dataset.\")\n\n\n sub_annotation_file = f\"{args.root}/sub-{dataset_type}-annotations-bbox.csv\"\n logging.warning(f\"Save {dataset_type} data to {sub_annotation_file}.\")\n annotations.to_csv(sub_annotation_file, index=False)\n image_files.extend(f\"{dataset_type}/{id}.jpg\" for id in set(annotations['ImageID']))\n logging.warning(f\"Start downloading {len(image_files)} images.\")\n batch_download(bucket, image_files, args.root, args.num_workers, args.retry)\n logging.warning(\"Task Done.\")\n"
] |
[
[
"pandas.merge",
"pandas.read_csv",
"pandas.concat"
]
] |
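The downloader above shares one progress counter across Pool workers by pairing a Manager value with a Manager lock and binding both into the worker via functools.partial. A minimal sketch of that pattern with a dummy task (all names here are local to the sketch):

    import functools
    from multiprocessing import Pool, Manager

    def work(counter, lock, item):
        # ... real per-item work would go here ...
        with lock:
            counter.value += 1
            if counter.value % 5 == 0:
                print(f"processed {counter.value} items")

    if __name__ == '__main__':
        m = Manager()
        counter, lock = m.Value('i', 0), m.Lock()
        with Pool(4) as p:
            p.map(functools.partial(work, counter, lock), range(20))

Manager proxies are picklable, which is what lets the partially applied arguments travel to the worker processes; a bare multiprocessing.Value would fail Pool.map's pickling.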
keiserlab/consensus-learning-paper
|
[
"2d204362569489b9ab4c861b6cb6c5b819659ada"
] |
[
"data_prep.py"
] |
[
"\"\"\"\nScript to generate various CSVs and get things ready for the deep learning pipeline \n\"\"\"\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport copy\nfrom PIL import Image\nimport os\nfrom sklearn.metrics import roc_curve, auc, precision_recall_curve, roc_auc_score\nimport shutil \nfrom itertools import combinations\nimport socket\nimport random\nimport pickle\nimport matplotlib.patheffects as path_effects\nimport shutil\n\n##=================================================================\n##DATA PROCESSING FUNCTIONS\n##=================================================================\n\ndef constructRandomCSVFolds():\n \"\"\"\n Create random CSVs that each have classes distributed according to the average of each expert annotator\n \"\"\"\n # USERS = ['Borys','Dugger', 'Flanagan', 'Kofler', 'McAleese']\n USERS = ['NP1', 'NP2', 'NP3', 'NP4', 'NP5']\n for i in range(0,5):\n for phase in [\"train\", \"val\"]:\n for fold in [0,1,2,3]:\n cored, diffuse, caa = 0,0,0\n for user in USERS:\n if phase == \"train\":\n read_df = pd.read_csv(\"csvs/phase1/cross_validation/train_duplicate_{}_fold_{}.csv\".format(user, fold))\n if phase == \"val\":\n read_df = pd.read_csv(\"csvs/phase1/cross_validation/val_{}_fold_{}.csv\".format(user, fold))\n cored += sum([1 for element in list(read_df[\"cored\"]) if element > .99])\n diffuse += sum([1 for element in list(read_df[\"diffuse\"]) if element > .99])\n caa += sum([1 for element in list(read_df[\"CAA\"]) if element > .99])\n cored = int(cored / float(len(USERS))) #average # of cored per user\n diffuse = int(diffuse / float(len(USERS)))\n caa = int(caa / float(len(USERS)))\n cored_entries = np.concatenate((np.zeros(len(read_df) - cored), np.ones(cored)))\n diffuse_entries = np.concatenate((np.zeros(len(read_df) - diffuse), np.ones(diffuse)))\n CAA_entries = np.concatenate((np.zeros(len(read_df) - caa), np.ones(caa)))\n np.random.shuffle(cored_entries)\n np.random.shuffle(diffuse_entries)\n np.random.shuffle(CAA_entries)\n assert len(cored_entries) == len(diffuse_entries) == len(CAA_entries) == len(read_df)\n images = list(read_df[\"imagename\"])\n names = [\"random\" for i in range(0, len(read_df))]\n random_df = pd.DataFrame(list(zip(names,images, cored_entries, diffuse_entries, CAA_entries)), columns =['username','imagename', 'cored', 'diffuse', 'CAA'])\n assert len(random_df) == len(read_df)\n ##write both train and val random csvs\n if phase == \"train\":\n random_df.to_csv(\"csvs/phase1/cross_validation/train_duplicate_random{}_fold_{}.csv\".format(i, fold))\n if phase == \"val\":\n random_df.to_csv(\"csvs/phase1/cross_validation/val_random{}_fold_{}.csv\".format(i, fold))\n\ndef constructRandomTestSet():\n \"\"\"\n Requires expert test sets, will write a random test set abiding by the same avg class distributions for each expert test set\n \"\"\"\n # USERS = [\"Kofler\", \"Flanagan\", \"McAleese\", \"Dugger\", \"Borys\"]\n USERS = ['NP1', 'NP2', 'NP3', 'NP4', 'NP5']\n summations = [0,0,0] #number of positive annotations in test set over all users, one for each class \n for user in USERS:\n df = pd.read_csv(\"csvs/phase1/test_set/{}_test_set.csv\".format(user))\n for index, row in df.iterrows():\n cored, diffuse, CAA = row['cored'], row['diffuse'], row['CAA']\n if cored > .99:\n summations[0] += 1\n if diffuse > .99:\n summations[1] += 1\n if CAA > .99:\n summations[2] += 1\n avg_counts = [(summations[i] / float(len(USERS))) for i in range(0, 3)]\n cored = [1] * int(avg_counts[0]) + [0] * (len(df) - 
int(avg_counts[0]))\n random.shuffle(cored)\n diffuse = [1] * int(avg_counts[1]) + [0] * (len(df) - int(avg_counts[1]))\n random.shuffle(diffuse)\n CAA = [1] * int(avg_counts[2]) + [0] * (len(df) - int(avg_counts[2]))\n random.shuffle(CAA)\n df['username'] = \"random_test\"\n df['cored'] = cored\n df['diffuse'] = diffuse\n df['CAA'] = CAA\n df.to_csv(\"csvs/phase1/test_set/random_test_set.csv\")\n\ndef extractConsensusLabels():\n \"\"\"\n from the combined phase 1 labels csv,\n extracts 5 different CSV labels, one for each consensus scheme of agreed by at least 1, by at least 2, ...., by at least 5\n \"\"\"\n ##first construct maps for each labeler with key:user, key: image name, value:(cored, diffuse, CAA) label\n # users = [\"Dugger\", \"Borys\", \"Flanagan\", \"Kofler\", \"McAleese\"]\n USERS = [\"NP{}\".format(i) for i in range(1,6)]\n mapp = {user : {} for user in USERS} \n for user in USERS:\n df = pd.read_csv(\"csvs/phase1/binary_labels/phase1_labels_{}.csv\".format(user))\n for index, row in df.iterrows():\n image_name = row[\"imagename\"]\n user = row[\"username\"]\n cored = row[\"cored\"]\n diffuse = row[\"diffuse\"]\n CAA = row[\"CAA\"]\n negative = row[\"negative\"]\n flag = row[\"flag\"]\n notsure = row[\"notsure\"]\n mapp[user][image_name] = (cored, diffuse, CAA, negative, flag, notsure)\n ##make sure the set of images for each labeler are identical\n sets = [set(mapp[user].keys()) for user in USERS]\n for set1 in sets:\n for set2 in sets:\n assert set1 == set2\n images = sets[0]\n ##now construct the new csv with the consensus-of-n strategy with all the mapps in place to query\n for n in range(1, 6):\n new_entries = []\n for image in images:\n new_labels = [] ##running list of tuples, one tuple per user \n for user in USERS: \n new_labels.append(mapp[user][image])\n sum0, sum1, sum2, sum3, sum4, sum5 = 0, 0, 0, 0, 0, 0\n for i in range(0, len(new_labels)):\n sum0 += new_labels[i][0]\n sum1 += new_labels[i][1]\n sum2 += new_labels[i][2]\n sum3 += new_labels[i][3]\n sum4 += new_labels[i][4]\n sum5 += new_labels[i][5]\n new_entry0, new_entry1, new_entry2, new_entry3, new_entry4, new_entry5, = 0, 0, 0, 0, 0, 0\n if sum0 >= n:\n new_entry0 = 1\n if sum1 >= n:\n new_entry1 = 1\n if sum2 >= n:\n new_entry2 = 1\n if sum3 >= n:\n new_entry3 = 1\n if sum4 >= n:\n new_entry4 = 1\n if sum5 >= n:\n new_entry5 = 1\n new_entry = (image, \"consensus_of_{}\".format(n), new_entry0, new_entry1, new_entry2, new_entry3, new_entry4, new_entry5)\n new_entries.append(new_entry)\n df = pd.DataFrame(new_entries, columns =['imagename','username','cored', 'diffuse', 'CAA', 'negative', 'flag', 'notsure'])\n df = df.sort_values(by=\"imagename\")\n df = df[['imagename', 'username', 'cored', 'diffuse', 'CAA', 'negative', 'flag', 'notsure']]\n df.to_csv(\"csvs/phase1/binary_labels/phase1_labels_consensus_of_\" + str(n) + \".csv\")\n\ndef constructFloatLabels():\n \"\"\"\n Converts each binary label csv into floating point(portion of labeled bounding boxes that lay in the input image)\n Requires image_details.csv generated from blob_detect.py\n Also creates a conglomerate floating point csv with all expert floating point labels combined into one csv: phaseILabels_floating_point.csv\n \"\"\"\n USERS = [\"NP{}\".format(i) for i in range(1,6)] + [\"UG{}\".format(i) for i in [1,2]]\n CONSENSUS = [\"consensus_of_{}\".format(n) for n in [1,2,3,4,5]]\n ALL = USERS + CONSENSUS \n for user in ALL: \n label_csv = \"csvs/phase1/binary_labels/phase1_labels_{}.csv\".format(user) \n #load labels csv\n labels = 
pd.read_csv(label_csv)\n labels['tilename'] = labels['imagename'].str.split('/').str[-1]\n labels['sourcetile'] = labels['tilename'].str.split('_').str[:-1].str.join('_')\n\n ##csv contains img coords and bbox coords\n image_details = pd.read_csv(\"csvs/phase1/image_details_phase1.csv\") #details ziqi generated \n # image_details = pd.read_csv(\"csvs/phase1/image_details.csv\") #details I generated \n image_details['sourcetile'] = image_details['imagename'].str.split('_').str[:-1].str.join('_')\n\n ##set the imagename as the index\n image_details = image_details.set_index('imagename')\n ##get coordinates of image and bboxes, and append to labels df as new columns\n img_coords = []\n blob_coords = []\n for index, row in labels.iterrows():\n image = image_details.loc[row['tilename']]\n img_box = image['image coordinates (xywh)']\n img_box = img_box[1:-1].split(' ')\n img_box = [int(x) for x in img_box if x]\n img_coords.append(img_box)\n blob_box = image['blob coordinates (xywh)']\n blob_box = blob_box[1:-1].split(' ')\n blob_box = [int(x) for x in blob_box if x]\n blob_coords.append(blob_box)\n grouped_tiles = labels.groupby(['sourcetile'])\n tiles = list(grouped_tiles.groups)\n labels['img_coords'] = img_coords\n labels['blob_coords'] = blob_coords\n ##calculate overlaps and assign floating labels and construct new final dataframe \n dfs = []\n for tile in tiles:\n images = labels[labels['sourcetile'] == tile]\n overlap = get_overlap(images['img_coords'].tolist(), images['blob_coords'].tolist())\n labs = np.array(images[['cored','diffuse','CAA','negative','flag','notsure']])\n new_label = np.matmul(overlap, labs)\n df = pd.DataFrame()\n df['username'] = images.username\n if \"consensus\" not in user:\n df['timestamp'] = images.timestamp\n df['sourcetile']=images.sourcetile\n df['imagename'] = [name for name in images.imagename]\n df['cored'] = new_label[:,0]\n df['diffuse'] = new_label[:,1]\n df['CAA'] = new_label[:,2]\n df['negative'] = new_label[:,3]\n df['flag'] = new_label[:,4]\n df['notsure'] = new_label[:,5]\n dfs.append(df)\n new_labels = pd.concat(dfs)\n ##sort by tilename, and write to file\n new_labels['source'] = new_labels['imagename'].str.split('/').str[0]\n new_labels = new_labels.sort_values(by=['imagename'])\n new_labels.to_csv(\"csvs/phase1/floating_point_labels/{}_floating_point.csv\".format(user))\n \n ##combine all individual floating point labels to one csv\n for i in range(0, len(USERS)):\n if i == 0:\n df = pd.read_csv(\"csvs/phase1/floating_point_labels/\" + USERS[i] + \"_floating_point.csv\")\n else:\n df_user = pd.read_csv(\"csvs/phase1/floating_point_labels/\" + USERS[i] + \"_floating_point.csv\")\n df = df.append(df_user)\n df = df.sort_values(by=['imagename'])\n df.to_csv(\"csvs/phase1/floating_point_labels/phaseILabels_floating_point.csv\")\n\n##=================================================================\n##HELPER FUNCTIONS\n##=================================================================\n\ndef get_overlap(image_coords=[np.zeros(4)], blob_coords=[np.ones(4)]):\n \"\"\"\n IMAGE_COORDS - coordinates of images (xywh)\n (array of arrays)\n BLOB_COORDS - tight bounding box of blob (xywh)\n (array of arrays)\n returns: percent of blob contained within image_coords (dan: of shape n x n for n input arrays)\n \"\"\"\n # Ensure these are np.arrays\n image_coords = np.array(image_coords)\n blob_coords = np.array(blob_coords)\n blob_areas = blob_coords[:,2] * blob_coords[:,3]\n img_x_max = image_coords[:,0] + image_coords[:,2]\n img_x_min = image_coords[:,0]\n 
img_y_max = image_coords[:,1] + image_coords[:,3]\n img_y_min = image_coords[:,1]\n img_x_max = img_x_max[:, np.newaxis]\n img_x_min = img_x_min[:, np.newaxis]\n img_y_max = img_y_max[:, np.newaxis]\n img_y_min = img_y_min[:, np.newaxis]\n blob_x_max = blob_coords[:,0] + blob_coords[:,2]\n blob_x_min = blob_coords[:,0]\n blob_y_max = blob_coords[:,1] + blob_coords[:,3]\n blob_y_min = blob_coords[:,1]\n dx = np.minimum(img_x_max, blob_x_max) - np.maximum(img_x_min, blob_x_min)\n dy = np.minimum(img_y_max, blob_y_max) - np.maximum(img_y_min, blob_y_min)\n # everything negative has no meaningful overlap\n dx = np.maximum(0, dx)\n dy = np.maximum(0, dy)\n return (dx * dy) / blob_areas\n\n##=================================================================\n##Runner Calls\n##=================================================================\nconstructRandomCSVFolds()\nconstructRandomTestSet()\nextractConsensusLabels()\nconstructFloatLabels()\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"numpy.maximum",
"numpy.minimum",
"numpy.matmul",
"pandas.DataFrame",
"numpy.ones",
"numpy.random.shuffle",
"numpy.array",
"numpy.zeros"
]
] |
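get_overlap in data_prep.py above vectorizes box intersection over xywh coordinates with numpy broadcasting. A worked check on a single hand-sized pair, following the same min/max arithmetic (the coordinates are illustrative):

    import numpy as np

    image = np.array([[0, 0, 10, 10]])  # tile: x, y, w, h
    blob = np.array([[5, 5, 10, 10]])   # bounding box: x, y, w, h
    dx = np.minimum(image[:, 0] + image[:, 2], blob[:, 0] + blob[:, 2]) - np.maximum(image[:, 0], blob[:, 0])
    dy = np.minimum(image[:, 1] + image[:, 3], blob[:, 1] + blob[:, 3]) - np.maximum(image[:, 1], blob[:, 1])
    frac = (np.maximum(0, dx) * np.maximum(0, dy)) / (blob[:, 2] * blob[:, 3])
    print(frac)  # [0.25]: a quarter of the blob falls inside the tile

Clamping dx and dy at zero is what turns disjoint boxes into zero overlap instead of a negative area, exactly as in the function above.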
apaniukov/workbench
|
[
"2f2653ecfd0143d2d53e33ad84379f13443fdfaa"
] |
[
"wb/main/dataset_utils/dataset_adapters.py"
] |
[
"\"\"\"\n OpenVINO DL Workbench\n Dataset adapter classes.\n\n Copyright (c) 2018-2021 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport json\nimport os\nimport re\nfrom pathlib import Path\nfrom typing import ClassVar, Optional, Tuple, Union, Callable, List, Dict, Any, Iterator, Type, Generator, TextIO\n\nimport pandas\nfrom pandas import DataFrame\n\n\nfrom config.constants import (LFW_PAIRS_LENGTH, LNDREID_LANDMARK_LENGTH, VGGFACE2_BBOX_LENGTH, VOC_ANNOTATIONS_FOLDER,\n VOC_IMAGESETS_FOLDER, VOC_IMAGES_FOLDER, VOC_MASKS_FOLDER)\nfrom wb.error.inconsistent_upload_error import InconsistentDatasetError\nfrom wb.main.accuracy_utils.yml_abstractions.annotation import Annotation\nfrom wb.main.dataset_utils.dataset_recognizers import COCORecognizer, CommonSemanticSegmentationRecognizer, \\\n CommonSuperResolutionRecognizer, ImagenetRecognizer, LFWRecognizer, NotAnnotatedRecognizer, OpenImagesRecognizer, \\\n VGGFace2Recognizer, \\\n VOCRecognizer, \\\n WiderFaceRecognizer\nfrom wb.main.enumerates import AnnotationConverterEnum, CSVDatasetSeparatorEnum\nfrom wb.main.shared.constants import ALLOWED_EXTENSIONS_IMG\nfrom wb.main.shared.enumerates import TaskEnum\nfrom wb.main.shared.utils import find_all_paths_by_exts\n\n\ndef register_dataset_adapter(task_type: Optional[TaskEnum], recognizer: ClassVar):\n def decorate(cls):\n recognizer.task_type_to_adapter[task_type] = cls\n return cls\n\n return decorate\n\n\nclass BaseImageDatasetAdapter:\n converter = None\n supported_task_specific_variables = []\n\n def __init__(self, dataset_path: str, task_specific_variables: dict = None):\n self.dataset_path = Path(dataset_path)\n if not self.dataset_path.exists():\n raise FileNotFoundError(self.dataset_path)\n if not self.dataset_path.is_dir():\n raise NotADirectoryError(self.dataset_path)\n\n self.task_specific_variables = task_specific_variables or {}\n\n self.images_dir = self.get_images_dir()\n self.params = self.get_params()\n\n def get_images_dir(self) -> Path:\n \"\"\"Return absolute path to the directory containing images.\"\"\"\n raise NotImplementedError\n\n def get_task_specific_constants(self) -> dict:\n \"\"\"\n Return task-specific annotation conversion params embedded in the dataset structure.\n These parameters are not exposed to the user.\n \"\"\"\n raise NotImplementedError\n\n def get_task_specific_variables(self) -> dict:\n \"\"\"\n Return task-specific annotation conversion params provided by the user.\n Since these parameters are exposed to the user, filtering is performed as a sefety measure.\n \"\"\"\n return {\n key: value\n for key, value in self.task_specific_variables.items()\n if key in self.supported_task_specific_variables\n }\n\n def get_params(self) -> dict:\n \"\"\"Return all annotation conversion params.\"\"\"\n params = {\n **self.get_task_specific_constants(),\n **self.get_task_specific_variables(),\n 'converter': self.converter,\n 'images_dir': self.images_dir, # Used by Accuracy Checker for content checking.\n }\n dataset_meta_path = 
self.get_dataset_meta()\n        if dataset_meta_path:\n            params['dataset_meta_file'] = dataset_meta_path\n        return params\n\n    def abs_path(self, relative_path: str) -> Path:\n        absolute_path = self.dataset_path / relative_path\n        if not absolute_path.exists():\n            raise InconsistentDatasetError('Cannot find {}'.format(relative_path))\n        return absolute_path\n\n    def to_annotation(self) -> dict:\n        serializable_params = {\n            key: value if isinstance(value, (str, bool, int, float)) else str(value)\n            for key, value in self.params.items()\n        }\n        return {\n            'data_source': str(self.images_dir),\n            'annotation': Annotation(**serializable_params),\n        }\n\n    def get_dataset_meta(self) -> Union[Path, None]:\n        \"\"\"Return dataset meta file path, if present.\"\"\"\n        try:\n            return next(self.dataset_path.rglob('dataset_meta.json'))\n        except StopIteration:\n            return None\n\n    def get_default_label_data(self) -> Tuple[int, int]:\n        \"\"\"\n        Return a tuple with the number of labels and the max label ID of the dataset.\n\n        Should return hardcoded values that are default for this type of dataset\n        or read labels from the file specific for this type of dataset, if it is present.\n        \"\"\"\n        raise NotImplementedError\n\n    def get_label_data(self) -> Tuple[int, int]:\n        \"\"\"\n        Return a tuple with the number of labels and the max label ID of the dataset.\n\n        Uses values from `dataset_meta.json`, if it is present and contains labels,\n        or uses values from `get_default_labels_number_and_max_id`.\n        \"\"\"\n        dataset_meta_path = self.params.get('dataset_meta_file')\n        if dataset_meta_path:\n            with open(str(dataset_meta_path)) as file:\n                try:\n                    dataset_meta = json.load(file)\n                except json.JSONDecodeError:\n                    raise InconsistentDatasetError('Malformed \"dataset_meta.json\", not JSON.')\n            if 'label_map' in dataset_meta:\n                return len(dataset_meta['label_map']), max(int(key) for key in dataset_meta['label_map'])  # JSON keys are strings; compare label IDs numerically\n            if 'labels' in dataset_meta:\n                labels_number = len(dataset_meta['labels'])\n                return labels_number, labels_number\n        return self.get_default_label_data()\n\n\nclass ImagenetBaseAdapter(BaseImageDatasetAdapter):\n\n    # For imagenet_txt, all images are contained in a single subfolder or dataset root\n    def get_images_dir(self) -> Path:\n        for subpath in self.dataset_path.iterdir():\n            if subpath.is_dir() and all(item.is_file() for item in subpath.iterdir()):\n                return subpath\n        return self.dataset_path\n\n    def get_task_specific_constants(self) -> dict:\n        pass\n\n    def get_default_label_data(self) -> Tuple[int, int]:\n        pass\n\n\n@register_dataset_adapter(TaskEnum.classification, ImagenetRecognizer)\nclass ImagenetClassificationAdapter(ImagenetBaseAdapter):\n    converter = AnnotationConverterEnum.imagenet.value\n    supported_task_specific_variables = ['has_background']\n\n    def get_default_label_data(self) -> Tuple[int, int]:\n        labels_file = self.params.get('labels_file')\n        if labels_file:\n            with open(str(labels_file)) as file:\n                num_labels = sum(1 for line in file if line.strip())  # One label per line.\n            return num_labels, num_labels  # Max ID is always the same as the number of labels for ImageNet.\n        return 1000, 1000\n\n    def get_task_specific_constants(self) -> dict:\n        params = {\n            'annotation_file': self.get_annotation_file_path(),\n        }\n        labels_file = self.get_labels_file()\n        if labels_file:\n            params['labels_file'] = labels_file\n        return params\n\n    def get_annotation_file_path(self) -> Path:\n        annotation_file_paths = [path for path in self.dataset_path.iterdir() if self.is_imagenet_annotation_file(path)]\n        if not annotation_file_paths:\n            raise 
InconsistentDatasetError('Cannot find annotation file.')\n if len(annotation_file_paths) > 1:\n raise InconsistentDatasetError(\n 'Too many annotation files: {}.'.format([path.name for path in annotation_file_paths]))\n return annotation_file_paths[0]\n\n @staticmethod\n def is_imagenet_annotation_file(path: Path) -> bool:\n if not path.is_file() or path.suffix.lower() != '.txt':\n return False\n with open(str(path)) as file:\n return all(re.match(r'^\\S+[ \\t]+[0-9]+$', line.rstrip(' \\t\\r\\n'))\n and len(line.split()) == 2\n for line in file if line.strip('\\r\\n'))\n\n def get_labels_file(self) -> Union[Path, None]:\n try:\n return next(self.dataset_path.rglob('synset_words.txt'))\n except StopIteration:\n return None\n\n\n@register_dataset_adapter(TaskEnum.custom, ImagenetRecognizer)\nclass ImagenetCustomAdapter(ImagenetClassificationAdapter):\n converter = AnnotationConverterEnum.custom.value\n\n\n@register_dataset_adapter(TaskEnum.inpainting, ImagenetRecognizer)\nclass ImagenetInpaintingAdapter(ImagenetBaseAdapter):\n converter = AnnotationConverterEnum.inpainting.value\n\n def get_params(self) -> dict:\n params = super().get_params()\n params.pop('dataset_meta_file', None)\n return params\n\n def get_task_specific_constants(self) -> dict:\n return {}\n\n def get_default_label_data(self) -> Tuple[int, int]:\n return 0, 0\n\n\n@register_dataset_adapter(TaskEnum.style_transfer, ImagenetRecognizer)\nclass ImagenetStyleTransferAdapter(ImagenetBaseAdapter):\n converter = AnnotationConverterEnum.style_transfer.value\n\n def get_params(self) -> dict:\n params = super().get_params()\n params.pop('dataset_meta_file', None)\n return params\n\n def get_task_specific_constants(self) -> dict:\n return {}\n\n def get_default_label_data(self) -> Tuple[int, int]:\n return 0, 0\n\n\nclass VOCBaseAdapter(BaseImageDatasetAdapter):\n imageset_dir = None\n\n @property\n def voc_root(self) -> Optional[Path]:\n \"\"\"Find the root containing 3 essential VOC folders\"\"\"\n voc_folders = {VOC_IMAGES_FOLDER, VOC_ANNOTATIONS_FOLDER, VOC_IMAGESETS_FOLDER}\n folders = [self.dataset_path, *[item for item in self.dataset_path.rglob('*') if item.is_dir()]]\n for folder in folders:\n if voc_folders <= {item.name for item in folder.iterdir() if item.is_dir()}:\n return folder\n raise InconsistentDatasetError('Cannot find VOC root folder')\n\n def get_images_dir(self) -> Path:\n try:\n return self.abs_path(f'{self.voc_root}/{VOC_IMAGES_FOLDER}')\n except InconsistentDatasetError:\n raise InconsistentDatasetError('Cannot find images folder for this dataset')\n\n def get_task_specific_constants(self) -> dict:\n pass\n\n def get_default_label_data(self) -> Tuple[int, int]:\n return 20, 20\n\n def get_labelmap_file(self) -> Optional[Path]:\n target_folder = self.abs_path(f'{self.voc_root}')\n target_path = target_folder / 'labelmap.txt'\n if target_path.is_file():\n return target_path\n return None\n\n def get_imageset_file(self) -> Optional[Path]:\n image_sets_folder = f'{self.voc_root}/{VOC_IMAGESETS_FOLDER}'\n try:\n path_to_dir = self.abs_path(os.path.join(image_sets_folder, self.imageset_dir))\n except InconsistentDatasetError:\n path_to_dir = self.abs_path(image_sets_folder)\n\n for filename in ('val.txt', 'test.txt', 'train.txt', 'trainval.txt'):\n path = path_to_dir / filename\n if path.is_file():\n return path\n raise InconsistentDatasetError('Cannot find an imageset file for this dataset.')\n\n\n@register_dataset_adapter(TaskEnum.object_detection, VOCRecognizer)\nclass VOCDetectionAdapter(VOCBaseAdapter):\n 
converter = AnnotationConverterEnum.voc_detection.value\n supported_task_specific_variables = ['has_background']\n imageset_dir = 'Main'\n\n def get_task_specific_constants(self) -> dict:\n return {\n 'imageset_file': self.get_imageset_file(),\n 'annotations_dir': self.abs_path(f'{self.voc_root}/{VOC_ANNOTATIONS_FOLDER}'),\n }\n\n\n@register_dataset_adapter(TaskEnum.semantic_segmentation, VOCRecognizer)\nclass VOCSegmentationAdapter(VOCBaseAdapter):\n converter = AnnotationConverterEnum.voc_segmentation.value\n imageset_dir = 'Segmentation'\n\n def get_task_specific_constants(self) -> dict:\n result = {\n 'imageset_file': self.get_imageset_file(),\n 'mask_dir': self.abs_path(f'{self.voc_root}/{VOC_MASKS_FOLDER}'),\n }\n labelmap_file = self.get_labelmap_file()\n if labelmap_file:\n result['labelmap_file'] = labelmap_file\n return result\n\n def to_annotation(self) -> dict:\n result = super().to_annotation()\n result['additional_data_source'] = str(self.abs_path(f'{self.voc_root}/{VOC_MASKS_FOLDER}'))\n return result\n\n\n@register_dataset_adapter(TaskEnum.inpainting, VOCRecognizer)\nclass VOCInpaintingAdapter(VOCBaseAdapter):\n converter = AnnotationConverterEnum.inpainting.value\n supported_task_specific_variables = []\n imageset_dir = 'Main'\n\n def get_params(self) -> dict:\n params = super().get_params()\n params.pop('dataset_meta_file', None)\n return params\n\n def get_task_specific_constants(self) -> dict:\n return {}\n\n\n@register_dataset_adapter(TaskEnum.style_transfer, VOCRecognizer)\nclass VOCStyleTransferAdapter(VOCBaseAdapter):\n converter = AnnotationConverterEnum.style_transfer.value\n imageset_dir = 'Main'\n\n def get_params(self) -> dict:\n params = super().get_params()\n params.pop('dataset_meta_file', None)\n return params\n\n def get_task_specific_constants(self) -> dict:\n return {}\n\n\n@register_dataset_adapter(TaskEnum.custom, VOCRecognizer)\nclass VocCustomAdapter(VOCDetectionAdapter):\n converter = AnnotationConverterEnum.custom.value\n\n\nclass COCOBaseAdapter(BaseImageDatasetAdapter):\n annotation_subset = None\n\n def get_images_dir(self) -> Path:\n image_paths = find_all_paths_by_exts(self.dataset_path, ALLOWED_EXTENSIONS_IMG, True, Path)\n try:\n image_path = next(image_paths)\n return image_path.parent\n except StopIteration:\n raise InconsistentDatasetError(\n 'Cannot find any images of supported types: {}.'.format(ALLOWED_EXTENSIONS_IMG))\n\n def get_task_specific_constants(self) -> dict:\n pass\n\n def get_default_label_data(self) -> Tuple[int, int]:\n with open(str(self.get_annotation_file_path())) as file:\n categories = json.load(file)['categories']\n return len(categories), max(c['id'] for c in categories)\n\n def get_annotation_file_path(self) -> Path:\n json_paths = find_all_paths_by_exts(self.dataset_path, ['json'], True, Path)\n annotation_file_paths = {path.name.split('_')[0]: path\n for path in json_paths if self.is_suitable_annotation_file(path)}\n if not annotation_file_paths:\n raise InconsistentDatasetError('Cannot find annotation file.')\n\n if not self.annotation_subset:\n return list(annotation_file_paths.values())[0]\n return annotation_file_paths.get(self.annotation_subset, None)\n\n @classmethod\n def is_suitable_annotation_file(cls, path: Path):\n try:\n with open(str(path)) as file:\n annotation_file_content = json.load(file)\n annotation_keys = annotation_file_content['annotations'][0].keys()\n return (\n {'annotations', 'categories', 'images'} <= set(annotation_file_content.keys())\n and 
cls.check_annotation_file(annotation_keys)\n )\n except (IsADirectoryError, json.JSONDecodeError, KeyError, IndexError, AttributeError):\n return False\n\n @staticmethod\n def check_annotation_file(annotation_keys):\n raise NotImplementedError\n\n\n@register_dataset_adapter(TaskEnum.object_detection, COCORecognizer)\nclass COCODetectionAdapter(COCOBaseAdapter):\n converter = AnnotationConverterEnum.mscoco_detection.value\n supported_task_specific_variables = ['has_background', 'use_full_label_map']\n annotation_subset = 'instances'\n\n def get_task_specific_constants(self) -> dict:\n result = {}\n annotation_file = self.get_annotation_file_path()\n if annotation_file:\n result['annotation_file'] = annotation_file\n\n return result\n\n @staticmethod\n def check_annotation_file(annotation_keys):\n return 'bbox' in annotation_keys and 'keypoints' not in annotation_keys\n\n\n@register_dataset_adapter(TaskEnum.instance_segmentation, COCORecognizer)\nclass COCOSegmentationAdapter(COCOBaseAdapter):\n converter = AnnotationConverterEnum.mscoco_mask_rcnn.value\n supported_task_specific_variables = ['has_background', 'use_full_label_map']\n annotation_subset = 'instances'\n\n def get_task_specific_constants(self) -> dict:\n result = {}\n annotation_file = self.get_annotation_file_path()\n if annotation_file:\n result['annotation_file'] = annotation_file\n\n return result\n\n @staticmethod\n def check_annotation_file(annotation_keys):\n return 'bbox' in annotation_keys and 'segmentation' in annotation_keys and 'keypoints' not in annotation_keys\n\n\n@register_dataset_adapter(TaskEnum.inpainting, COCORecognizer)\nclass COCOInpaintingAdapter(COCOBaseAdapter):\n converter = AnnotationConverterEnum.inpainting.value\n\n def get_params(self) -> dict:\n params = super().get_params()\n params.pop('dataset_meta_file', None)\n return params\n\n def get_task_specific_constants(self) -> dict:\n return {}\n\n @staticmethod\n def check_annotation_file(annotation_keys):\n return True\n\n\n@register_dataset_adapter(TaskEnum.style_transfer, COCORecognizer)\nclass COCOStyleTransferAdapter(COCOBaseAdapter):\n converter = AnnotationConverterEnum.style_transfer.value\n\n def get_params(self) -> dict:\n params = super().get_params()\n params.pop('dataset_meta_file', None)\n return params\n\n def get_task_specific_constants(self) -> dict:\n return {}\n\n @staticmethod\n def check_annotation_file(annotation_keys):\n return True\n\n\n@register_dataset_adapter(TaskEnum.custom, COCORecognizer)\nclass COCOCustomAdapter(COCOBaseAdapter):\n converter = AnnotationConverterEnum.custom.value\n\n def get_task_specific_constants(self) -> dict:\n return {\n 'annotation_file': self.get_annotation_file_path(),\n }\n\n @staticmethod\n def check_annotation_file(annotation_keys):\n return True\n\n\nclass CommonSemanticSegmentationBaseAdapter(BaseImageDatasetAdapter):\n\n def get_images_dir(self) -> Path:\n return self.images_dir_name\n\n @property\n def images_dir_name(self) -> Path:\n \"\"\"Return absolute path to the directory containing images.\"\"\"\n candidate_folders = [folder for folder in self.dataset_path.iterdir() if folder.name.lower() == 'images']\n return next(iter(candidate_folders))\n\n def get_task_specific_constants(self) -> dict:\n pass\n\n def get_extension(self, directory: str):\n paths = find_all_paths_by_exts(self.abs_path(directory), ALLOWED_EXTENSIONS_IMG, False, Path)\n try:\n path = next(paths)\n return path.suffix\n except StopIteration:\n raise InconsistentDatasetError(\n 'Cannot find any images of supported 
types: {} in directory \"{}\"'.format(\n ALLOWED_EXTENSIONS_IMG, directory))\n\n def get_dataset_meta(self) -> Path:\n try:\n dataset_meta_path = next(self.dataset_path.rglob('dataset_meta.json'))\n except StopIteration:\n raise InconsistentDatasetError('Required \"dataset_meta.json\" file not found.')\n with open(str(dataset_meta_path)) as file:\n try:\n dataset_meta = json.load(file)\n except json.JSONDecodeError:\n raise InconsistentDatasetError('Malformed \"dataset_meta.json\", not JSON.')\n dataset_meta_keys = set(dataset_meta.keys())\n if not dataset_meta_keys & {'label_map', 'labels'} or 'segmentation_colors' not in dataset_meta_keys:\n raise InconsistentDatasetError(\n 'Malformed \"dataset_meta.json\", \"labels\" or \"label_map\" and \"segmentation_colors\" keys are required.')\n return dataset_meta_path\n\n def get_default_label_data(self) -> Tuple[int, int]:\n return 0, 0\n\n\n@register_dataset_adapter(TaskEnum.semantic_segmentation, CommonSemanticSegmentationRecognizer)\nclass CSSSegmentationAdapter(CommonSemanticSegmentationBaseAdapter):\n converter = AnnotationConverterEnum.common_semantic_segmentation.value\n\n @property\n def masks_dir_name(self) -> Optional[str]:\n for path in self.dataset_path.iterdir():\n if path.is_dir() and path.name.lower() == 'masks':\n return path.name\n raise InconsistentDatasetError('Malformed dataset: no \"masks\" folder found')\n\n def get_task_specific_constants(self) -> dict:\n return {\n 'masks_dir': self.abs_path(self.masks_dir_name),\n 'image_postfix': self.get_extension(str(self.images_dir_name)),\n 'mask_postfix': self.get_extension(self.masks_dir_name),\n 'dataset_meta_file': self.get_dataset_meta(),\n }\n\n def to_annotation(self) -> dict:\n result = super().to_annotation()\n result['additional_data_source'] = str(self.abs_path(self.masks_dir_name))\n return result\n\n\n@register_dataset_adapter(TaskEnum.inpainting, CommonSemanticSegmentationRecognizer)\nclass CSSInpaintingAdapter(CommonSemanticSegmentationBaseAdapter):\n converter = AnnotationConverterEnum.inpainting.value\n\n def get_params(self) -> dict:\n params = super().get_params()\n params.pop('dataset_meta_file', None)\n return params\n\n def get_task_specific_constants(self) -> dict:\n return {}\n\n\n@register_dataset_adapter(TaskEnum.style_transfer, CommonSemanticSegmentationRecognizer)\nclass CSSStyleTransferAdapter(CommonSemanticSegmentationBaseAdapter):\n converter = AnnotationConverterEnum.style_transfer.value\n\n def get_params(self) -> dict:\n params = super().get_params()\n params.pop('dataset_meta_file', None)\n return params\n\n def get_task_specific_constants(self) -> dict:\n return {}\n\n\n@register_dataset_adapter(TaskEnum.custom, CommonSemanticSegmentationRecognizer)\nclass CSSCustomAdapter(CSSSegmentationAdapter):\n converter = AnnotationConverterEnum.custom.value\n\n\nclass CommonSuperResolutionBaseAdapter(BaseImageDatasetAdapter):\n images_dir_name = ''\n lr_directory = 'LR'\n\n def get_images_dir(self) -> Path:\n \"\"\"Return absolute path to the directory containing the low-res images.\"\"\"\n return self.abs_path(self.images_dir_name)\n\n def get_task_specific_constants(self) -> dict:\n pass\n\n def get_default_label_data(self) -> Tuple[int, int]:\n return 0, 0\n\n\n@register_dataset_adapter(TaskEnum.super_resolution, CommonSuperResolutionRecognizer)\nclass CSRSuperResolutionAdapter(CommonSuperResolutionBaseAdapter):\n converter = AnnotationConverterEnum.super_resolution_dir_based.value\n supported_task_specific_variables = ['two_streams']\n hr_directory = 
'HR'\n upsampled_directory = 'upsampled'\n\n def check_upsample(self) -> bool:\n paths = [item for item in Path(self.dataset_path).iterdir() if item.is_dir()]\n return self.abs_path(self.upsampled_directory) in paths\n\n def get_task_specific_constants(self) -> dict:\n params = {\n 'lr_dir': str(self.abs_path(self.lr_directory)),\n 'hr_dir': str(self.abs_path(self.hr_directory)),\n 'relaxed_names': True\n }\n if self.check_upsample():\n params['upsampled_dir'] = str(self.abs_path(self.upsampled_directory))\n return params\n\n def to_annotation(self) -> dict:\n result = super().to_annotation()\n result['additional_data_source'] = str(self.abs_path(self.hr_directory))\n return result\n\n\n@register_dataset_adapter(TaskEnum.inpainting, CommonSuperResolutionRecognizer)\nclass CSRInpaintingAdapter(CommonSuperResolutionBaseAdapter):\n images_dir_name = 'HR'\n converter = AnnotationConverterEnum.inpainting.value\n\n def get_task_specific_constants(self) -> dict:\n return {}\n\n\n@register_dataset_adapter(TaskEnum.style_transfer, CommonSuperResolutionRecognizer)\nclass CSRStyleTransferAdapter(CommonSuperResolutionBaseAdapter):\n images_dir_name = 'HR'\n converter = AnnotationConverterEnum.style_transfer.value\n\n def get_task_specific_constants(self) -> dict:\n return {}\n\n\n@register_dataset_adapter(TaskEnum.custom, CommonSuperResolutionRecognizer)\nclass CSRCustomAdapter(CSRSuperResolutionAdapter):\n converter = AnnotationConverterEnum.custom.value\n\n\nclass LabelledFaceAdapter(BaseImageDatasetAdapter):\n def get_specific_params_getters(self):\n raise NotImplementedError\n\n @property\n def annotation_path(self) -> Path:\n return next(path for path in self.dataset_path.iterdir() if all(obj.is_file() for obj in path.iterdir()))\n\n def get_images_dir(self) -> Optional[Path]:\n for item in self.dataset_path.iterdir():\n if all(path.is_dir() for path in item.iterdir()):\n return item\n return None\n\n def get_task_specific_constants(self) -> dict:\n parameters = {}\n specific_params_getters = self.get_specific_params_getters()\n for parameter_name, parameter_value in specific_params_getters.items():\n if not parameter_value:\n continue\n parameters[parameter_name] = parameter_value\n return parameters\n\n def get_annotation(self, annotation_path: Path, length: int) -> Optional[Path]:\n for file_path in annotation_path.iterdir():\n annotation_checker = self.get_annotation_file_reader(file_path)\n with open(file_path, 'r') as target_file:\n if not annotation_checker(target_file, length):\n continue\n return file_path\n return None\n\n @staticmethod\n def csv_annotation_file_check(target_file: TextIO, target_length: int) -> bool:\n return len(target_file.readline().rstrip(',').split(',')) == target_length\n\n @staticmethod\n def txt_annotation_file_check(target_file: TextIO, target_length: int) -> bool:\n return len(target_file.readline().rstrip().replace('\\t', ' ').split(' ')) == target_length\n\n def get_annotation_file_reader(self, file_path: Path) -> Callable[[TextIO, int], bool]:\n annotation_file_checkers = {\n '.txt': self.txt_annotation_file_check,\n '.csv': self.csv_annotation_file_check,\n }\n return annotation_file_checkers[file_path.suffix]\n\n def get_default_label_data(self) -> Tuple[int, int]:\n return 0, 0\n\n\nclass LFWBaseAdapter(LabelledFaceAdapter):\n\n def get_specific_params_getters(self):\n return {\n 'pairs_file': self.get_annotation(self.annotation_path, LFW_PAIRS_LENGTH),\n 'landmarks_file': self.get_annotation(self.annotation_path, LNDREID_LANDMARK_LENGTH)\n 
}\n\n\n@register_dataset_adapter(TaskEnum.face_recognition, LFWRecognizer)\nclass LFWFaceDetectionAdapter(LFWBaseAdapter):\n converter = AnnotationConverterEnum.lfw.value\n\n\n@register_dataset_adapter(TaskEnum.custom, LFWRecognizer)\nclass LFWFaceDetectionCustomAdapter(LFWBaseAdapter):\n converter = AnnotationConverterEnum.custom.value\n\n\nclass VGGFace2BaseAdapter(LabelledFaceAdapter):\n\n def get_specific_params_getters(self):\n return {\n 'landmarks_csv_file': self.get_annotation(self.annotation_path, LNDREID_LANDMARK_LENGTH),\n 'bbox_csv_file': self.get_annotation(self.annotation_path, VGGFACE2_BBOX_LENGTH)\n }\n\n\n@register_dataset_adapter(TaskEnum.landmark_detection, VGGFace2Recognizer)\nclass VGGFace2LandmarkAdapter(VGGFace2BaseAdapter):\n converter = AnnotationConverterEnum.vgg_face.value\n\n\n@register_dataset_adapter(TaskEnum.custom, VGGFace2Recognizer)\nclass VGGFace2LandmarkCustomAdapter(VGGFace2BaseAdapter):\n converter = AnnotationConverterEnum.custom.value\n\n\nclass WiderFaceBaseAdapter(BaseImageDatasetAdapter):\n\n def get_images_dir(self) -> Optional[Path]:\n for item in self.dataset_path.iterdir():\n if item.is_dir() and all(path.is_dir()\n and all(subpath.suffix[1:] in ALLOWED_EXTENSIONS_IMG for subpath in path.iterdir())\n for path in item.iterdir()):\n return item\n return None\n\n def get_task_specific_constants(self) -> dict:\n raise NotImplementedError\n\n def get_default_label_data(self) -> Tuple[int, int]:\n # Wider only has 2 classes: 0 for background, 1 for face\n return 2, 1\n\n def get_annotation(self, length: int) -> Optional[Path]:\n annotation_files = [im_path for im_path in find_all_paths_by_exts(self.dataset_path, ['txt'],\n recursive=True,\n result_type=Path)]\n for file_path in annotation_files:\n with file_path.open('r') as target_file:\n if len(target_file.readline().rstrip().replace('\\t', ' ').split(' ')) == length:\n return file_path\n return None\n\n def get_dataset_meta(self) -> Path:\n annotation_files = [im_path for im_path in find_all_paths_by_exts(self.dataset_path, ['json'], True, Path)]\n for file in annotation_files:\n with file.open('r') as outfile:\n contents = json.load(outfile)\n if 'label_map' in contents:\n return file\n\n return self.build_dataset_meta()\n\n def build_dataset_meta(self) -> Path:\n template = {\n 'label_map': {\n '0': '__background__',\n '1': 'face'\n },\n 'background_label': 0\n }\n\n out_path = self.dataset_path / 'dataset_meta.json'\n with out_path.open('w') as outfile:\n json.dump(template, outfile)\n\n return out_path\n\n\n@register_dataset_adapter(TaskEnum.object_detection, WiderFaceRecognizer)\nclass WiderFaceODAdapter(WiderFaceBaseAdapter):\n converter = AnnotationConverterEnum.wider.value\n\n def get_task_specific_constants(self) -> dict:\n return {\n 'annotation_file': self.get_annotation(1)\n }\n\n\n@register_dataset_adapter(TaskEnum.custom, WiderFaceRecognizer)\nclass WiderFaceCustomAdapter(WiderFaceBaseAdapter):\n converter = AnnotationConverterEnum.custom.value\n\n def get_task_specific_constants(self):\n pass\n\n\nclass OpenImagesBaseAdapter(BaseImageDatasetAdapter):\n def get_images_dir(self) -> Path:\n image_paths = [im_path for im_path in find_all_paths_by_exts(self.dataset_path, ALLOWED_EXTENSIONS_IMG,\n True, Path)]\n\n return image_paths[0].parent\n\n def get_task_specific_constants(self) -> dict:\n raise NotImplementedError\n\n def get_default_label_data(self) -> Tuple[int, int]:\n labels_file = self.get_annotation(2)\n with open(labels_file, 'r') as outfile:\n return sum(1 for _ in 
outfile), 0\n\n def get_annotation(self, length: int) -> Optional[Path]:\n annotation_files = [im_path for im_path in find_all_paths_by_exts(self.dataset_path, ['csv'], True, Path)]\n for file_path in annotation_files:\n with open(file_path, 'r') as target_file:\n if len(target_file.readline().rstrip(',').split(',')) == length:\n return file_path\n return None\n\n\n@register_dataset_adapter(TaskEnum.object_detection, OpenImagesRecognizer)\nclass OpenImagesODAdapter(OpenImagesBaseAdapter):\n converter = AnnotationConverterEnum.open_images_detection.value\n\n def get_task_specific_constants(self) -> dict:\n return {\n 'labels_file': self.get_annotation(2),\n 'bbox_csv_file': self.get_annotation(13)\n }\n\n\n@register_dataset_adapter(TaskEnum.custom, OpenImagesRecognizer)\nclass OpenImagesCustomAdapter(OpenImagesBaseAdapter):\n converter = AnnotationConverterEnum.custom.value\n\n def get_task_specific_constants(self) -> dict:\n pass\n\n\n@register_dataset_adapter(None, NotAnnotatedRecognizer)\nclass NotAnnotatedAdapter:\n converter = AnnotationConverterEnum.image_processing\n supported_task_specific_variables = []\n\n # pylint: disable=unused-argument\n def __init__(self, dataset_path: str, model_dependent_params: None = None):\n self.dataset_path = Path(dataset_path)\n if not self.dataset_path.exists():\n raise FileNotFoundError(self.dataset_path)\n if not self.dataset_path.is_dir():\n raise NotADirectoryError(self.dataset_path)\n\n self.images_dir = self.get_images_dir()\n\n def get_images_dir(self) -> Path:\n image_paths = find_all_paths_by_exts(self.dataset_path, ALLOWED_EXTENSIONS_IMG, True, Path)\n try:\n image_path = next(image_paths)\n return image_path.parent\n except StopIteration:\n raise InconsistentDatasetError(\n 'Cannot find any images of supported types: {}.'.format(ALLOWED_EXTENSIONS_IMG))\n\n def to_annotation(self) -> dict:\n return {\n 'data_source': str(self.images_dir),\n 'annotation': Annotation(**{\n 'converter': self.converter.value,\n 'data_dir': str(self.images_dir)\n }),\n }\n\n\n@register_dataset_adapter(TaskEnum.custom, NotAnnotatedRecognizer)\nclass NotAnnotatedCustomAdapter(NotAnnotatedAdapter):\n pass\n\n\n# allow accuracy analysis with parent model on not annotated dataset\n@register_dataset_adapter(TaskEnum.classification, NotAnnotatedRecognizer)\nclass NotAnnotatedClassificationAdapter(NotAnnotatedAdapter):\n pass\n\n\n@register_dataset_adapter(TaskEnum.object_detection, NotAnnotatedRecognizer)\nclass NotAnnotatedODAdapter(NotAnnotatedAdapter):\n pass\n\n\n@register_dataset_adapter(TaskEnum.instance_segmentation, NotAnnotatedRecognizer)\nclass NotAnnotatedISAdapter(NotAnnotatedAdapter):\n pass\n\n\n@register_dataset_adapter(TaskEnum.semantic_segmentation, NotAnnotatedRecognizer)\nclass NotAnnotatedSSAdapter(NotAnnotatedAdapter):\n pass\n\n\n@register_dataset_adapter(TaskEnum.semantic_segmentation, NotAnnotatedRecognizer)\nclass NotAnnotatedSSAdapter(NotAnnotatedAdapter):\n pass\n\n\nclass BaseTextDatasetAdapter:\n _task_type: TaskEnum\n task_type_to_adapter: Dict[TaskEnum, Type['BaseTextDatasetAdapter']] = {}\n\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n cls.task_type_to_adapter[cls._task_type] = cls\n\n @classmethod\n def from_model(cls, dataset_model: 'DatasetsModel', **kwargs) -> 'BaseTextDatasetAdapter':\n task_type = next(iter(dataset_model.task_types)).task_type\n adapter = cls.task_type_to_adapter[task_type]\n return adapter(**adapter.get_init_kwargs(dataset_model), **kwargs)\n\n @staticmethod\n def 
get_init_kwargs(dataset_model: 'DatasetsModel') -> Dict[str, Any]:\n raise NotImplementedError\n\n @classmethod\n def get_dataset_job_data(cls, request: Dict[str, Any]):\n try:\n task_type = TaskEnum(request['taskType'])\n adapter = cls.task_type_to_adapter[task_type]\n except (ValueError, KeyError):\n raise Exception('Unsupported task type!')\n return adapter.get_task_specific_data(request)\n\n @classmethod\n def get_task_specific_data(cls, request: Dict[str, Any]) -> Dict[str, Any]:\n raise NotImplementedError\n\n def __iter__(self):\n raise NotImplementedError\n\n def feature_iter(self) -> Iterator:\n \"\"\"Return data without label\"\"\"\n raise NotImplementedError\n\n\nclass BaseCSVDatasetAdapter(BaseTextDatasetAdapter):\n _task_type = None\n _column_order: List[str]\n\n def __init__(self, file_path: Union[str, Path], number_of_rows: Optional[int] = None):\n self.dataset: DataFrame = pandas.read_csv(\n file_path,\n names=self._column_order,\n index_col=False,\n nrows=number_of_rows,\n )\n\n def __len__(self):\n return len(self.dataset)\n\n def __iter__(self):\n return self.dataset.itertuples(index=False, name=\"CSVDatasetRow\")\n\n @staticmethod\n def get_init_kwargs(dataset_model: 'DatasetsModel') -> Dict[str, Any]:\n return dict(file_path=next(Path(dataset_model.path).iterdir()))\n\n def feature_iter(self) -> Generator[List[str], None, None]:\n \"\"\"Return data without label column\"\"\"\n yield from (list(row[:-1]) if len(row) > 2 else row[0] for row in self)\n\n @classmethod\n def get_task_specific_data(cls, request: Dict[str, Any]) -> Dict[str, Any]:\n return dict(\n columns=[request['columns'][column_name] for column_name in cls._column_order],\n header=request['header'],\n encoding=request['encoding'],\n separator=CSVDatasetSeparatorEnum(request['separator']),\n task_type=cls._task_type,\n )\n\n\nclass CSVTextClassificationDatasetAdapter(BaseCSVDatasetAdapter):\n _column_order = ['text', 'label']\n _task_type = TaskEnum.text_classification\n\n\nclass CSVTextualEntailmentDatasetAdapter(BaseCSVDatasetAdapter):\n _column_order = ['premise', 'hypothesis', 'label']\n _task_type = TaskEnum.textual_entailment\n"
] |
[
[
"pandas.read_csv"
]
] |
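The adapter classes stored above are wired up through a `register_dataset_adapter(TaskEnum..., Recognizer)` class decorator that pairs a task type and a dataset recognizer with the adapter to use. A minimal sketch of how such a registry could work, using hypothetical stand-ins for `TaskEnum` and `COCORecognizer` (the real definitions live elsewhere in the source repo and are not shown here):

# Sketch of a (task, recognizer) -> adapter registry; TaskEnum and
# COCORecognizer below are illustrative stand-ins, not the real classes.
from enum import Enum
from typing import Dict, Optional, Tuple


class TaskEnum(Enum):  # stand-in; the real enum has many more members
    object_detection = 'object_detection'
    custom = 'custom'


ADAPTER_REGISTRY: Dict[Tuple[Optional[TaskEnum], type], type] = {}


def register_dataset_adapter(task_type: Optional[TaskEnum], recognizer: type):
    """Class decorator: map (task_type, recognizer) to the adapter class."""
    def decorator(adapter_cls: type) -> type:
        ADAPTER_REGISTRY[(task_type, recognizer)] = adapter_cls
        return adapter_cls
    return decorator


class COCORecognizer:  # stand-in for the format recognizer
    pass


@register_dataset_adapter(TaskEnum.object_detection, COCORecognizer)
class COCODetectionAdapter:
    converter = 'mscoco_detection'


# Lookup: given the recognized dataset format and the task, pick the adapter.
adapter_cls = ADAPTER_REGISTRY[(TaskEnum.object_detection, COCORecognizer)]
assert adapter_cls is COCODetectionAdapter

Registering every (task, recognizer) pair in one dictionary keeps dispatch to a single lookup, which is why the file above can define many small adapter classes that differ only in `converter` and a few constants.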
carderne/descarteslabs-python
|
[
"757b480efb8d58474a3bf07f1dbd90652b46ed64"
] |
[
"descarteslabs/scenes/tests/test_scene.py"
] |
[
"import pytest\nimport unittest\nimport mock\nimport datetime\nimport collections\nimport textwrap\nimport warnings\nimport shapely.geometry\nimport numpy as np\n\nfrom descarteslabs.common.dotdict import DotDict\nfrom descarteslabs.scenes import Scene, geocontext\nfrom descarteslabs.scenes.scene import _strptime_helper\n\nfrom descarteslabs.client.services.metadata import Metadata\n\nfrom .mock_data import _metadata_get, _metadata_get_bands, _raster_ndarray\n\nmetadata_client = Metadata()\n\n\nclass MockScene(Scene):\n \"Circumvent __init__ method to create a Scene with arbitrary geometry and properties objects\"\n\n def __init__(self, geometry, properties):\n self.geometry = DotDict(geometry)\n self.properties = DotDict(properties)\n\n\nclass TestScene(unittest.TestCase):\n\n MOCK_RGBA_PROPERTIES = {\n \"product\": \"mock_product\",\n \"id\": \"mock_id\",\n \"bands\": {\n \"red\": {\n \"type\": \"spectral\",\n \"dtype\": \"UInt16\",\n \"data_range\": [0, 10000],\n \"default_range\": [0, 4000],\n \"physical_range\": [0.0, 1.0],\n },\n \"green\": {\n \"type\": \"spectral\",\n \"dtype\": \"UInt16\",\n \"data_range\": [0, 10000],\n \"default_range\": [0, 4000],\n \"physical_range\": [0.0, 1.0],\n },\n \"blue\": {\n \"type\": \"spectral\",\n \"dtype\": \"UInt16\",\n \"data_range\": [0, 10000],\n \"default_range\": [0, 4000],\n \"physical_range\": [0.0, 1.0],\n },\n \"alpha\": {\"type\": \"mask\", \"dtype\": \"UInt16\", \"data_range\": [0, 1]},\n },\n }\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n def test_init(self):\n scene_id = \"landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1\"\n metadata = metadata_client.get(scene_id)\n bands = metadata_client.get_bands_by_id(scene_id)\n # Scene constructor expects Feature (as returned by metadata.search)\n metadata = {\n \"type\": \"Feature\",\n \"geometry\": metadata.pop(\"geometry\"),\n \"id\": metadata.pop(\"id\"),\n \"key\": metadata.pop(\"key\"),\n \"properties\": metadata,\n }\n\n scene = Scene(metadata, bands)\n\n assert scene.properties.id == scene_id\n assert scene.properties.product == \"landsat:LC08:PRE:TOAR\"\n assert abs(len(scene.properties.bands) - 24) < 4\n assert isinstance(scene.properties.bands, dict)\n assert scene.properties.crs == \"EPSG:32615\"\n assert isinstance(scene.geometry, shapely.geometry.Polygon)\n assert isinstance(scene.__geo_interface__, dict)\n\n def test_default_ctx(self):\n # test doesn't fail with nothing\n ctx = MockScene({}, {}).default_ctx()\n assert ctx == geocontext.AOI(bounds_crs=None, align_pixels=False)\n\n # no geotrans\n ctx = MockScene({}, {\"crs\": \"EPSG:4326\"}).default_ctx()\n assert ctx == geocontext.AOI(\n crs=\"EPSG:4326\", bounds_crs=None, align_pixels=False\n )\n\n # north-up geotrans - resolution\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\n \"always\"\n ) # otherwise, the duplicate warning is suppressed the second time\n ctx = MockScene(\n {},\n {\n \"crs\": \"EPSG:4326\",\n # origin: (0, 0), pixel size: 2, rotation: 0 degrees\n \"geotrans\": [0, 2, 0, 0, 0, -2],\n },\n ).default_ctx()\n assert len(w) == 0\n assert ctx.resolution == 2\n\n # non-north-up geotrans - resolution\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n ctx = MockScene(\n {},\n {\n \"crs\": \"EPSG:4326\",\n # origin: (0, 0), pixel size: 2, rotation: 30 degrees\n \"geotrans\": (\n 0.0,\n 
1.7320508075688774,\n -1,\n 0.0,\n 1,\n 1.7320508075688774,\n ),\n },\n ).default_ctx()\n warning = w[0]\n assert \"The GeoContext will *not* return this Scene's original data\" in \\\n str(warning.message)\n assert ctx.resolution == 2\n\n # north-up geotrans - bounds\n ctx = MockScene(\n {},\n {\n \"crs\": \"EPSG:4326\",\n # origin: (10, 20), pixel size: 2, rotation: 0 degrees\n \"geotrans\": [10, 2, 0, 20, 0, -2],\n \"raster_size\": [1, 2],\n },\n ).default_ctx()\n assert ctx.bounds == (10, 16, 12, 20)\n\n # non-north-up geotrans - bounds\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n ctx = MockScene(\n {},\n {\n \"crs\": \"EPSG:4326\",\n # origin: (0, 0), pixel size: 2, rotation: 45 degrees\n \"geotrans\": (\n 0.0,\n np.sqrt(2),\n np.sqrt(2),\n 0.0,\n np.sqrt(2),\n -np.sqrt(2),\n ),\n \"raster_size\": [1, 1],\n },\n ).default_ctx()\n warning = w[0]\n assert \"The GeoContext will *not* return this Scene's original data\" in \\\n str(warning.message)\n diagonal = np.sqrt(2 ** 2 + 2 ** 2)\n assert ctx.bounds == (0, -diagonal / 2, diagonal, diagonal / 2)\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n def test_from_id(self):\n scene_id = \"landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1\"\n scene, ctx = Scene.from_id(scene_id)\n\n assert scene.properties.id == scene_id\n assert isinstance(scene.geometry, shapely.geometry.Polygon)\n assert isinstance(ctx, geocontext.AOI)\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", _raster_ndarray)\n def test_load_one_band(self):\n scene, ctx = Scene.from_id(\"landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1\")\n arr, info = scene.ndarray(\"red\", ctx.assign(resolution=1000), raster_info=True)\n\n assert arr.shape == (1, 239, 235)\n assert arr.mask[0, 2, 2]\n assert not arr.mask[0, 115, 116]\n assert len(info[\"geoTransform\"]) == 6\n\n with pytest.raises(TypeError):\n scene.ndarray(\"blue\", ctx, invalid_argument=True)\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n def test_nonexistent_band_fails(self):\n scene, ctx = Scene.from_id(\"landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1\")\n with pytest.raises(ValueError):\n scene.ndarray(\"blue yellow\", ctx)\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", _raster_ndarray)\n def test_different_band_dtypes(self):\n scene, ctx = Scene.from_id(\"landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1\")\n scene.properties.bands[\"green\"][\"dtype\"] = \"Int16\"\n arr, info = scene.ndarray(\n \"red green\", ctx.assign(resolution=600), mask_alpha=False\n )\n assert arr.dtype.type == np.int32\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", 
_raster_ndarray)\n def test_load_multiband(self):\n scene, ctx = Scene.from_id(\"landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1\")\n arr = scene.ndarray(\"red green blue\", ctx.assign(resolution=1000))\n\n assert arr.shape == (3, 239, 235)\n assert (arr.mask[:, 2, 2]).all()\n assert not (arr.mask[:, 115, 116]).all()\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", _raster_ndarray)\n def test_load_multiband_axis_last(self):\n scene, ctx = Scene.from_id(\"landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1\")\n arr = scene.ndarray(\n \"red green blue\", ctx.assign(resolution=1000), bands_axis=-1\n )\n\n assert arr.shape == (239, 235, 3)\n assert (arr.mask[2, 2, :]).all()\n assert not (arr.mask[115, 116, :]).all()\n\n with pytest.raises(ValueError):\n arr = scene.ndarray(\n \"red green blue\", ctx.assign(resolution=1000), bands_axis=3\n )\n with pytest.raises(ValueError):\n arr = scene.ndarray(\n \"red green blue\", ctx.assign(resolution=1000), bands_axis=-3\n )\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", _raster_ndarray)\n def test_load_nomask(self):\n scene, ctx = Scene.from_id(\"landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1\")\n arr = scene.ndarray(\n [\"red\", \"nir\"],\n ctx.assign(resolution=1000),\n mask_nodata=False,\n mask_alpha=False,\n )\n\n assert not hasattr(arr, \"mask\")\n assert arr.shape == (2, 239, 235)\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", _raster_ndarray)\n def test_auto_mask_alpha_false(self):\n scene, ctx = Scene.from_id(\n \"modis:mod11a2:006:meta_MOD11A2.A2017305.h09v05.006.2017314042814_v1\"\n )\n arr = scene.ndarray(\n [\"Clear_sky_days\", \"Clear_sky_nights\"],\n ctx.assign(resolution=1000),\n mask_nodata=False,\n )\n\n assert not hasattr(arr, \"mask\")\n assert arr.shape == (2, 688, 473)\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", _raster_ndarray)\n def test_mask_alpha_string(self):\n scene, ctx = Scene.from_id(\n \"modis:mod11a2:006:meta_MOD11A2.A2017305.h09v05.006.2017314042814_v1\"\n )\n arr = scene.ndarray(\n [\"Clear_sky_days\", \"Clear_sky_nights\"],\n ctx.assign(resolution=1000),\n mask_alpha=\"Clear_sky_nights\",\n mask_nodata=False,\n )\n\n assert hasattr(arr, \"mask\")\n assert arr.shape == (2, 688, 473)\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", _raster_ndarray)\n def test_mask_missing_alpha(self):\n scene, ctx = Scene.from_id(\n \"modis:mod11a2:006:meta_MOD11A2.A2017305.h09v05.006.2017314042814_v1\"\n )\n with pytest.raises(ValueError):\n scene.ndarray(\n [\"Clear_sky_days\", 
\"Clear_sky_nights\"],\n ctx.assign(resolution=1000),\n mask_alpha=True,\n mask_nodata=False,\n )\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", _raster_ndarray)\n def test_mask_missing_band(self):\n scene, ctx = Scene.from_id(\n \"modis:mod11a2:006:meta_MOD11A2.A2017305.h09v05.006.2017314042814_v1\"\n )\n with pytest.raises(ValueError):\n scene.ndarray(\n [\"Clear_sky_days\", \"Clear_sky_nights\"],\n ctx.assign(resolution=1000),\n mask_alpha=\"missing_band\",\n mask_nodata=False,\n )\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", _raster_ndarray)\n def test_auto_mask_alpha_true(self):\n scene, ctx = Scene.from_id(\"landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1\")\n arr = scene.ndarray(\n [\"red\", \"green\", \"blue\"], ctx.assign(resolution=1000), mask_nodata=False\n )\n\n assert hasattr(arr, \"mask\")\n assert arr.shape == (3, 239, 235)\n\n @mock.patch(\"descarteslabs.client.services.metadata.Metadata.get\", _metadata_get)\n @mock.patch(\n \"descarteslabs.client.services.metadata.Metadata.get_bands_by_id\",\n _metadata_get_bands,\n )\n @mock.patch(\"descarteslabs.scenes.scene.Raster.ndarray\", _raster_ndarray)\n def with_alpha(self):\n scene, ctx = Scene.from_id(\"landsat:LC08:PRE:TOAR:meta_LC80270312016188_v1\")\n\n arr = scene.ndarray([\"red\", \"alpha\"], ctx.assign(resolution=1000))\n assert arr.shape == (2, 239, 235)\n assert (arr.mask == (arr.data[1] == 0)).all()\n\n arr = scene.ndarray([\"alpha\"], ctx.assign(resolution=1000), mask_nodata=False)\n assert arr.shape == (1, 239, 235)\n assert (arr.mask == (arr.data == 0)).all()\n\n with pytest.raises(ValueError):\n arr = scene.ndarray(\"alpha red\", ctx.assign(resolution=1000))\n\n def test_bands_to_list(self):\n assert Scene._bands_to_list(\"one\") == [\"one\"]\n assert Scene._bands_to_list([\"one\"]) == [\"one\"]\n assert Scene._bands_to_list(\"one two three\") == [\"one\", \"two\", \"three\"]\n assert Scene._bands_to_list([\"one\", \"two\", \"three\"]) == [\"one\", \"two\", \"three\"]\n with pytest.raises(TypeError):\n Scene._bands_to_list(1)\n with pytest.raises(ValueError):\n Scene._bands_to_list([])\n\n def test_scenes_bands_dict(self):\n meta_bands = {\n \"someproduct:red\": {\"name\": \"red\", \"id\": \"someproduct:red\"},\n \"someproduct:green\": {\"name\": \"green\", \"id\": \"someproduct:green\"},\n \"someproduct:ndvi\": {\"name\": \"ndvi\", \"id\": \"someproduct:ndvi\"},\n \"derived:ndvi\": {\"name\": \"ndvi\", \"id\": \"derived:ndvi\"},\n }\n scenes_bands = Scene._scenes_bands_dict(meta_bands)\n assert set(scenes_bands.keys()) == {\"red\", \"green\", \"ndvi\", \"derived:ndvi\"}\n assert scenes_bands.ndvi == meta_bands[\"someproduct:ndvi\"]\n assert scenes_bands[\"derived:ndvi\"] == meta_bands[\"derived:ndvi\"]\n\n def test_raw_data_type(self):\n mock_properties = {\n \"product\": \"mock_product\",\n \"bands\": {\n \"one\": dict(dtype=\"UInt16\"),\n \"two\": dict(dtype=\"UInt16\"),\n \"derived:three\": dict(dtype=\"UInt16\"),\n \"derived:one\": dict(dtype=\"UInt16\"),\n \"its_a_byte\": dict(dtype=\"Byte\"),\n \"signed\": dict(dtype=\"Int16\"),\n \"future_unknown_type\": 
dict(dtype=\"FutureInt16\"),\n \"alpha\": dict(dtype=\"Byte\"),\n },\n }\n s = MockScene({}, mock_properties)\n assert s.scaling_parameters([\"its_a_byte\"], scaling=None)[1] == \"Byte\"\n assert s.scaling_parameters([\"one\", \"two\"], scaling=None)[1] == \"UInt16\"\n assert s.scaling_parameters([\"its_a_byte\", \"alpha\"], scaling=None)[1] == \"Byte\"\n # alpha ignored from common datatype\n assert s.scaling_parameters([\"one\", \"alpha\"], scaling=None)[1] == \"UInt16\"\n assert s.scaling_parameters([\"alpha\"], scaling=None)[1] == \"Byte\"\n assert s.scaling_parameters(\n [\"one\", \"two\", \"derived:three\", \"derived:one\"], scaling=None\n )[1] == \\\n \"UInt16\"\n assert s.scaling_parameters([\"one\", \"its_a_byte\"], scaling=None)[1] == \"UInt16\"\n assert s.scaling_parameters([\"signed\", \"its_a_byte\"], scaling=None)[1] == \"Int16\"\n assert s.scaling_parameters([\"one\", \"signed\"], scaling=None)[1] == \"Int32\"\n\n with pytest.raises(ValueError, match=\"is not available\"):\n s.scaling_parameters([\"one\", \"woohoo\"], scaling=None)\n with pytest.raises(ValueError, match=\"did you mean\"):\n s.scaling_parameters(\n [\"one\", \"three\"], scaling=None\n ) # should hint that derived:three exists\n with pytest.raises(ValueError, match=\"Invalid data type\"):\n s.scaling_parameters([\"its_a_byte\", \"future_unknown_type\"], scaling=None)\n\n def test__naive_dateparse(self):\n assert _strptime_helper(\"2017-08-31T00:00:00+00:00\") is not None\n assert _strptime_helper(\"2017-08-31T00:00:00.00+00:00\") is not None\n assert _strptime_helper(\"2017-08-31T00:00:00Z\") is not None\n assert _strptime_helper(\"2017-08-31T00:00:00\") is not None\n\n def test_coverage(self):\n scene_geometry = shapely.geometry.Point(0.0, 0.0).buffer(1)\n\n scene = Scene(dict(id=\"foo\", geometry=scene_geometry, properties={}), {})\n\n # same geometry (as a GeoJSON)\n assert scene.coverage(scene_geometry.__geo_interface__) == pytest.approx(1.0, abs=1e-6)\n\n # geom is larger\n geom_larger = shapely.geometry.Point(0.0, 0.0).buffer(2)\n assert scene.coverage(geom_larger) == 0.25\n\n # geom is smaller\n geom_smaller = shapely.geometry.Point(0.0, 0.0).buffer(0.5)\n assert scene.coverage(geom_smaller) == 1.0\n\n @mock.patch(\"descarteslabs.scenes.scene._download._download\")\n def test_download(self, mock_geotiff):\n scene = MockScene(\n {},\n {\n \"id\": \"foo:bar\",\n \"bands\": {\"nir\": {\"dtype\": \"UInt16\"}, \"yellow\": {\"dtype\": \"UInt16\"}},\n },\n )\n ctx = geocontext.AOI(bounds=[30, 40, 50, 60], resolution=2, crs=\"EPSG:4326\")\n scene.download(\"nir yellow\", ctx)\n mock_geotiff.assert_called_once()\n\n def test_scaling_parameters_none(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\"red green blue alpha\")\n assert scales is None\n assert data_type == \"UInt16\"\n\n def test_scaling_parameters_dtype(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", None, \"UInt32\"\n )\n assert scales is None\n assert data_type == \"UInt32\"\n\n def test_scaling_parameters_raw(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\"red green blue alpha\", \"raw\")\n assert scales == [None, None, None, None]\n assert data_type == \"UInt16\"\n\n def test_scaling_parameters_display(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\"red green blue alpha\", \"display\")\n 
assert scales == [(0, 4000, 0, 255), (0, 4000, 0, 255), (0, 4000, 0, 255), None]\n assert data_type == \"Byte\"\n\n def test_scaling_parameters_display_uint16(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", \"display\", \"UInt16\"\n )\n assert scales == [(0, 4000, 0, 255), (0, 4000, 0, 255), (0, 4000, 0, 255), None]\n assert data_type == \"UInt16\"\n\n def test_scaling_parameters_auto(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\"red green blue alpha\", \"auto\")\n assert scales == [(), (), (), None]\n assert data_type == \"Byte\"\n\n def test_scaling_parameters_physical(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\"red green blue alpha\", \"physical\")\n assert scales == [\n (0, 10000, 0.0, 1.0),\n (0, 10000, 0.0, 1.0),\n (0, 10000, 0.0, 1.0),\n None,\n ]\n assert data_type == \"Float64\"\n\n def test_scaling_parameters_physical_int32(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", \"physical\", \"Int32\"\n )\n assert scales == [\n (0, 10000, 0.0, 1.0),\n (0, 10000, 0.0, 1.0),\n (0, 10000, 0.0, 1.0),\n None,\n ]\n assert data_type == \"Int32\"\n\n def test_scaling_parameters_bad_mode(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n with pytest.raises(ValueError):\n scales, data_type = scene.scaling_parameters(\"red green blue alpha\", \"mode\")\n\n def test_scaling_parameters_list(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", [(0, 10000), \"display\", (), None]\n )\n assert scales == [(0, 10000, 0, 255), (0, 4000, 0, 255), (), None]\n assert data_type == \"Byte\"\n\n def test_scaling_parameters_list_alpha(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", [(0, 4000), (0, 4000), (0, 4000), \"raw\"]\n )\n assert scales == [(0, 4000, 0, 255), (0, 4000, 0, 255), (0, 4000, 0, 255), None]\n assert data_type == \"Byte\"\n\n def test_scaling_parameters_list_bad_length(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n with pytest.raises(ValueError):\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", [(0, 10000), \"display\", ()]\n )\n\n def test_scaling_parameters_list_bad_mode(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n with pytest.raises(ValueError):\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", [(0, 10000), \"mode\", (), None]\n )\n\n def test_scaling_parameters_dict(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\",\n {\"red\": \"display\", \"green\": (0, 10000), \"default_\": \"auto\"},\n )\n assert scales == [(0, 4000, 0, 255), (0, 10000, 0, 255), (), None]\n assert data_type == \"Byte\"\n\n def test_scaling_parameters_dict_default(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", {\"red\": (0, 4000, 0, 255), \"default_\": \"raw\"}\n )\n assert scales == [(0, 4000, 0, 255), None, None, None]\n assert data_type == \"UInt16\"\n\n def test_scaling_parameters_dict_default_none(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n 
\"red green blue alpha\", {\"red\": \"display\", \"green\": \"display\"}\n )\n assert scales == [(0, 4000, 0, 255), (0, 4000, 0, 255), None, None]\n assert data_type == \"Byte\"\n\n def test_scaling_parameters_tuple_range(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", [(0, 10000, 0, 255), (0, 4000), (), None]\n )\n assert scales == [(0, 10000, 0, 255), (0, 4000, 0, 255), (), None]\n assert data_type == \"Byte\"\n\n def test_scaling_parameters_tuple_range_uint16(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", [(0, 10000, 0, 10000), (0, 4000), (), None]\n )\n assert scales == [(0, 10000, 0, 10000), (0, 4000, 0, 65535), (), None]\n assert data_type == \"UInt16\"\n\n def test_scaling_parameters_tuple_range_float(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", [(0, 10000, 0, 1.0), (0, 4000), (0, 4000), None]\n )\n assert scales == [(0, 10000, 0, 1), (0, 4000, 0, 1), (0, 4000, 0, 1), None]\n assert data_type == \"Float64\"\n\n def test_scaling_parameters_tuple_pct(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\",\n [(\"0%\", \"100%\", \"0%\", \"100%\"), (\"2%\", \"98%\", \"2%\", \"98%\"), \"display\", None],\n )\n assert scales == [\n (0, 4000, 0, 255),\n (80, 3920, 5, 250),\n (0, 4000, 0, 255),\n None,\n ]\n assert data_type == \"Byte\"\n\n def test_scaling_parameters_tuple_pct_float(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\",\n [\n (\"0%\", \"100%\", \"0%\", \"100%\"),\n (\"2%\", \"98%\", \"2%\", \"98%\"),\n \"physical\",\n None,\n ],\n )\n assert scales == [\n (0, 10000, 0, 1),\n (200, 9800, 0.02, 0.98),\n (0, 10000, 0, 1),\n None,\n ]\n assert data_type == \"Float64\"\n\n def test_scaling_parameters_bad_data_type(self):\n scene = MockScene({}, self.MOCK_RGBA_PROPERTIES)\n with pytest.raises(ValueError):\n scales, data_type = scene.scaling_parameters(\n \"red green blue alpha\", None, \"data_type\"\n )\n\n\nclass TestSceneRepr(unittest.TestCase):\n def setUp(self):\n # date format is locale-dependent, so a hardcoded date string could fail for users from different locales\n date = datetime.datetime(2015, 6, 1, 14, 25, 10)\n self.date_str = date.strftime(\"%c\")\n\n properties = {\n \"id\": \"prod:foo\",\n \"product\": \"prod\",\n \"crs\": \"EPSG:32615\",\n \"date\": date,\n \"bands\": collections.OrderedDict(\n [ # necessary to ensure deterministic order in tests\n (\n \"blue\",\n {\n \"resolution\": 5,\n \"resolution_unit\": \"smoot\",\n \"dtype\": \"UInt16\",\n \"data_range\": [0, 10000],\n \"physical_range\": [0, 1],\n \"data_unit\": \"TOAR\",\n },\n ),\n (\n \"alpha\",\n {\n \"resolution\": 5,\n \"resolution_unit\": \"smoot\",\n \"dtype\": \"UInt8\",\n \"data_range\": [0, 1],\n \"physical_range\": [0, 1],\n },\n ),\n ]\n ),\n }\n properties = DotDict(properties)\n self.scene = MockScene({}, properties)\n\n def test_basic(self):\n repr_str = repr(self.scene)\n match_str = \"\"\"\\\n Scene \"prod:foo\"\n * Product: \"prod\"\n * CRS: \"EPSG:32615\"\n * Date: {}\n * Bands:\n * blue: 5 smoot, UInt16, [0, 10000] -> [0, 1] in units \"TOAR\"\n * alpha: 5 smoot, UInt8, [0, 1] -> [0, 1]\"\"\".format(\n self.date_str\n )\n\n assert repr_str == textwrap.dedent(match_str)\n\n 
def test_missing_band_part(self):\n del self.scene.properties.bands[\"blue\"][\"physical_range\"]\n del self.scene.properties.bands[\"blue\"][\"dtype\"]\n repr_str = repr(self.scene)\n match_str = \"\"\"\\\n Scene \"prod:foo\"\n * Product: \"prod\"\n * CRS: \"EPSG:32615\"\n * Date: {}\n * Bands:\n * blue: 5 smoot, [0, 10000] in units \"TOAR\"\n * alpha: 5 smoot, UInt8, [0, 1] -> [0, 1]\"\"\".format(\n self.date_str\n )\n\n assert repr_str == textwrap.dedent(match_str)\n\n def test_missing_all_band_parts(self):\n self.scene.properties.bands[\"alpha\"] = {}\n repr_str = repr(self.scene)\n match_str = \"\"\"\\\n Scene \"prod:foo\"\n * Product: \"prod\"\n * CRS: \"EPSG:32615\"\n * Date: {}\n * Bands:\n * blue: 5 smoot, UInt16, [0, 10000] -> [0, 1] in units \"TOAR\"\n * alpha\"\"\".format(\n self.date_str\n )\n\n assert repr_str == textwrap.dedent(match_str)\n\n def test_no_bands(self):\n self.scene.properties.bands = {}\n repr_str = repr(self.scene)\n match_str = \"\"\"\\\n Scene \"prod:foo\"\n * Product: \"prod\"\n * CRS: \"EPSG:32615\"\n * Date: {}\"\"\".format(\n self.date_str\n )\n\n assert repr_str == textwrap.dedent(match_str)\n\n def test_truncate_hella_bands(self):\n self.scene.properties.bands.update({str(i): {} for i in range(100)})\n repr_str = repr(self.scene)\n match_str = \"\"\"\\\n Scene \"prod:foo\"\n * Product: \"prod\"\n * CRS: \"EPSG:32615\"\n * Date: {}\n * Bands: 102\"\"\".format(\n self.date_str\n )\n\n assert repr_str == textwrap.dedent(match_str)\n"
] |
[
[
"numpy.sqrt"
]
] |
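The `test_default_ctx` cases above assert that a rotated ("non-north-up") geotransform still reports a resolution of 2 while emitting a warning. A minimal sketch of the underlying arithmetic, assuming the standard GDAL geotransform layout `(x0, a, b, y0, d, e)` that the test's own comments ("origin: (0, 0), pixel size: 2, rotation: 30 degrees") imply; this reproduces the test's numbers, it is not the library's implementation:

# Pixel size from an affine geotransform: (a, d) is the column step
# vector and (b, e) the row step vector, so each axis resolution is the
# Euclidean length of its step vector. Values mirror the 30-degree case.
import numpy as np

geotrans = (0.0, 1.7320508075688774, -1.0, 0.0, 1.0, 1.7320508075688774)
x0, a, b, y0, d, e = geotrans

x_res = np.sqrt(a ** 2 + d ** 2)  # sqrt(3 + 1) == 2
y_res = np.sqrt(b ** 2 + e ** 2)  # sqrt(1 + 3) == 2

assert np.isclose(x_res, 2.0) and np.isclose(y_res, 2.0)

The same identity explains the 45-degree bounds test: with a = b = d = sqrt(2), a 1x1 raster spans a diagonal of length sqrt(2**2 + 2**2), matching the asserted bounds.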
meghbhalerao/cnnormaliztion
|
[
"90cf80d4de5cf86dd8bba625bd757c9540cded48"
] |
[
"code/criterion.py"
] |
[
"import torch\nimport torch.nn.functional as F\ndef marginal_loss(score,labels):\n \"\"\"\n args:\n score:batch * top_k\n labels: batch * top_k\n \"\"\"\n predict = F.softmax(score, dim=-1)\n loss = predict * labels #element-wise\n loss = loss.sum(dim=-1) # sum all positive scores\n loss = loss[loss > 0] # filter sets with at least one positives\n loss = torch.clamp(loss, min=1e-9, max=1) # for numerical stability\n loss = -torch.log(loss) # for negative log likelihood\n if len(loss) == 0:\n loss = loss.sum() # will return zero loss\n else:\n loss = loss.mean()\n return loss\n\n\n "
] |
[
[
"torch.clamp",
"torch.nn.functional.softmax",
"torch.log"
]
] |
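A short usage sketch for the `marginal_loss` stored above: the loss sums the softmax probability mass on the positive candidates, drops rows with no positives, and averages the negative log of what remains. The toy values below are illustrative only; the import path follows this record's `code/criterion.py`:

# Exercising marginal_loss on a toy batch of two examples.
import torch
from criterion import marginal_loss  # the file stored in this record

score = torch.tensor([[2.0, 1.0, 0.5],    # one positive candidate
                      [0.3, 0.2, 0.1]])   # no positive candidates
labels = torch.tensor([[1.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0]])  # all-negative row is filtered out

loss = marginal_loss(score, labels)
# softmax over row 0 puts ~0.63 probability on the positive candidate,
# so loss ~= -log(0.63) ~= 0.46; row 1 is dropped by the loss > 0 filter.
print(loss)  # tensor(~0.46)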
Purg/SMQTK-Indexing
|
[
"24b5f875ec01a93f1c4842381a6de88041166604"
] |
[
"tests/impls/nn_index/test_lsh.py"
] |
[
"import collections\nimport json\nimport random\nimport types\nfrom typing import Any, Callable, Dict, Iterable, Optional, Tuple\nimport unittest.mock as mock\nimport unittest\n\nimport numpy as np\nimport pytest\n\nfrom smqtk_core.configuration import configuration_test_helper\n\nfrom smqtk_dataprovider.exceptions import ReadOnlyError\nfrom smqtk_dataprovider.impls.key_value_store.memory import MemoryKeyValueStore\n\nfrom smqtk_descriptors import DescriptorElement\nfrom smqtk_descriptors.impls.descriptor_element.memory import DescriptorMemoryElement\nfrom smqtk_descriptors.impls.descriptor_set.memory import MemoryDescriptorSet\n\nfrom smqtk_indexing import HashIndex, LshFunctor, NearestNeighborsIndex\nfrom smqtk_indexing.impls.hash_index.linear import LinearHashIndex\nfrom smqtk_indexing.impls.hash_index.sklearn_balltree import SkLearnBallTreeHashIndex\nfrom smqtk_indexing.impls.lsh_functor.itq import ItqFunctor\nfrom smqtk_indexing.impls.nn_index.lsh import LSHNearestNeighborIndex\n\n\nclass DummyHashFunctor (LshFunctor):\n\n @classmethod\n def is_usable(cls) -> bool:\n return True\n\n def get_config(self) -> Dict[str, Any]:\n \"\"\" stub \"\"\"\n\n def get_hash(self, descriptor: np.ndarray) -> np.ndarray:\n \"\"\"\n Dummy function that returns the bits of the integer sum of descriptor\n vector.\n\n :param descriptor: Descriptor vector (float-typed) we should generate\n the hash of.\n\n :return: Generated bit-vector (boolean-typed) as a numpy array of\n booleans.\n\n \"\"\"\n return np.asarray([int(c) for c in bin(int(descriptor.sum()))[2:]],\n bool)\n\n\nclass TestLshIndex (unittest.TestCase):\n\n def test_is_usable(self) -> None:\n # Should always be usable since this is a shell class.\n self.assertTrue(LSHNearestNeighborIndex.is_usable())\n\n def test_findable(self) -> None:\n assert LSHNearestNeighborIndex in NearestNeighborsIndex.get_impls()\n\n def test_configuration(self) -> None:\n i = LSHNearestNeighborIndex(\n lsh_functor=ItqFunctor(), descriptor_set=MemoryDescriptorSet(),\n hash2uuids_kvstore=MemoryKeyValueStore(),\n hash_index=LinearHashIndex(), distance_method='euclidean',\n read_only=True\n )\n for inst in configuration_test_helper(i): # type: LSHNearestNeighborIndex\n assert isinstance(inst.lsh_functor, LshFunctor)\n assert isinstance(inst.descriptor_set, MemoryDescriptorSet)\n assert isinstance(inst.hash_index, LinearHashIndex)\n assert isinstance(inst.hash2uuids_kvstore, MemoryKeyValueStore)\n assert inst.distance_method == 'euclidean'\n assert inst.read_only is True\n\n def test_configuration_none_HI(self) -> None:\n c = LSHNearestNeighborIndex.get_default_config()\n\n # Check that default is in JSON format and is decoded to the same\n # result.\n self.assertEqual(json.loads(json.dumps(c)), c)\n\n # Make a simple configuration\n c['lsh_functor']['type'] = 'smqtk_indexing.impls.lsh_functor.itq.ItqFunctor'\n c['descriptor_set']['type'] = 'smqtk_descriptors.impls.descriptor_set.memory.MemoryDescriptorSet'\n c['hash2uuids_kvstore']['type'] = 'smqtk_dataprovider.impls.key_value_store.memory.MemoryKeyValueStore'\n c['hash_index']['type'] = None\n index = LSHNearestNeighborIndex.from_config(c)\n\n self.assertIsInstance(index.lsh_functor, ItqFunctor)\n self.assertIsInstance(index.descriptor_set, MemoryDescriptorSet)\n self.assertIsNone(index.hash_index)\n self.assertIsInstance(index.hash2uuids_kvstore, MemoryKeyValueStore)\n\n # Can convert instance config to JSON\n self.assertEqual(\n json.loads(json.dumps(index.get_config())),\n index.get_config()\n )\n\n def 
test_get_dist_func_euclidean(self) -> None:\n f = LSHNearestNeighborIndex._get_dist_func('euclidean')\n self.assertIsInstance(f, types.FunctionType)\n self.assertAlmostEqual(\n f(np.array([0, 0]), np.array([0, 1])),\n 1.0\n )\n\n def test_get_dist_func_cosine(self) -> None:\n f = LSHNearestNeighborIndex._get_dist_func('cosine')\n self.assertIsInstance(f, types.FunctionType)\n self.assertAlmostEqual(\n f(np.array([1, 0]), np.array([0, 1])),\n 1.0\n )\n self.assertAlmostEqual(\n f(np.array([1, 0]), np.array([1, 1])),\n 0.5\n )\n\n def test_get_dist_func_hik(self) -> None:\n f = LSHNearestNeighborIndex._get_dist_func('hik')\n self.assertIsInstance(f, types.FunctionType)\n self.assertAlmostEqual(\n f(np.array([0, 0]), np.array([0, 1])),\n 1.0\n )\n self.assertAlmostEqual(\n f(np.array([1, 0]), np.array([0, 1])),\n 1.0\n )\n self.assertAlmostEqual(\n f(np.array([1, 1]), np.array([0, 1])),\n 0.0\n )\n\n def test_get_dist_func_invalid_string(self) -> None:\n self.assertRaises(\n ValueError,\n LSHNearestNeighborIndex._get_dist_func,\n 'not-valid-string'\n )\n\n def test_count_empty_hash2uid(self) -> None:\n \"\"\"\n Test that an empty hash-to-uid mapping results in a 0 return regardless\n of descriptor-set state.\n \"\"\"\n descr_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n self.assertEqual(descr_set.count(), 0)\n self.assertEqual(hash_kvs.count(), 0)\n\n lsh = LSHNearestNeighborIndex(DummyHashFunctor(), descr_set, hash_kvs)\n self.assertEqual(lsh.count(), 0)\n\n # Additions to the descriptor-set should not impact LSH index \"size\"\n lsh.descriptor_set.add_descriptor(DescriptorMemoryElement('t', 0))\n self.assertEqual(lsh.descriptor_set.count(), 1)\n self.assertEqual(lsh.hash2uuids_kvstore.count(), 0)\n self.assertEqual(lsh.count(), 0)\n\n lsh.descriptor_set.add_descriptor(DescriptorMemoryElement('t', 1))\n self.assertEqual(lsh.descriptor_set.count(), 2)\n self.assertEqual(lsh.hash2uuids_kvstore.count(), 0)\n self.assertEqual(lsh.count(), 0)\n\n lsh.hash2uuids_kvstore.add(0, {0})\n self.assertEqual(lsh.descriptor_set.count(), 2)\n self.assertEqual(lsh.count(), 1)\n\n lsh.hash2uuids_kvstore.add(0, {0, 1})\n self.assertEqual(lsh.descriptor_set.count(), 2)\n self.assertEqual(lsh.count(), 2)\n\n lsh.hash2uuids_kvstore.add(0, {0, 1, 2})\n self.assertEqual(lsh.descriptor_set.count(), 2)\n self.assertEqual(lsh.count(), 3)\n\n def test_build_index_read_only(self) -> None:\n index = LSHNearestNeighborIndex(DummyHashFunctor(),\n MemoryDescriptorSet(),\n MemoryKeyValueStore(), read_only=True)\n self.assertRaises(\n ReadOnlyError,\n index._build_index, []\n )\n\n def test_build_index_fresh_build(self) -> None:\n descr_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n index = LSHNearestNeighborIndex(DummyHashFunctor(),\n descr_set, hash_kvs)\n\n descriptors = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n DescriptorMemoryElement('t', 4),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for i, d in enumerate(descriptors):\n d.set_vector(np.ones(1, float) * i)\n index.build_index(descriptors)\n\n # Make sure descriptors are now in attached index and in\n # key-value-store.\n self.assertEqual(descr_set.count(), 5)\n for d in descriptors:\n self.assertIn(d, descr_set)\n # Dummy hash function bins sum of descriptor vectors.\n self.assertEqual(hash_kvs.count(), 5)\n for i in range(5):\n self.assertSetEqual(hash_kvs.get(i), {i})\n\n def 
test_build_index_fresh_build_with_hash_index(self) -> None:\n descr_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n linear_hi = LinearHashIndex() # simplest hash index, heap-sorts.\n index = LSHNearestNeighborIndex(DummyHashFunctor(),\n descr_set, hash_kvs, linear_hi)\n\n descriptors = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n DescriptorMemoryElement('t', 4),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for i, d in enumerate(descriptors):\n d.set_vector(np.ones(1, float) * i)\n index.build_index(descriptors)\n # Hash index should have been built with hash vectors, and linearHI\n # converts those to integers for storage.\n self.assertEqual(linear_hi.index, {0, 1, 2, 3, 4})\n\n def test_update_index_read_only(self) -> None:\n index = LSHNearestNeighborIndex(DummyHashFunctor(),\n MemoryDescriptorSet(),\n MemoryKeyValueStore(), read_only=True)\n self.assertRaises(\n ReadOnlyError,\n index._update_index, []\n )\n\n def test_update_index_no_existing_index(self) -> None:\n # Test that calling update_index with no existing index acts like\n # building the index fresh. This test is basically the same as\n # test_build_index_fresh_build but using update_index instead.\n descr_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n index = LSHNearestNeighborIndex(DummyHashFunctor(),\n descr_set, hash_kvs)\n\n descriptors = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n DescriptorMemoryElement('t', 4),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for d in descriptors:\n d.set_vector(np.ones(1, float) * d.uuid())\n index.update_index(descriptors)\n\n # Make sure descriptors are now in attached index and in key-value-store\n self.assertEqual(descr_set.count(), 5)\n for d in descriptors:\n self.assertIn(d, descr_set)\n # Dummy hash function bins sum of descriptor vectors.\n self.assertEqual(hash_kvs.count(), 5)\n for i in range(5):\n self.assertSetEqual(hash_kvs.get(i), {i})\n\n def test_update_index_add_new_descriptors(self) -> None:\n # Test that calling update index after a build index causes index\n # components to be properly updated.\n descr_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n index = LSHNearestNeighborIndex(DummyHashFunctor(),\n descr_set, hash_kvs)\n descriptors1 = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n DescriptorMemoryElement('t', 4),\n ]\n descriptors2 = [\n DescriptorMemoryElement('t', 5),\n DescriptorMemoryElement('t', 6),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for d in descriptors1 + descriptors2:\n d.set_vector(np.ones(1, float) * d.uuid())\n\n # Build initial index.\n index.build_index(descriptors1)\n self.assertEqual(descr_set.count(), 5)\n for d in descriptors1:\n self.assertIn(d, descr_set)\n for d in descriptors2:\n self.assertNotIn(d, descr_set)\n # Dummy hash function bins sum of descriptor vectors.\n self.assertEqual(hash_kvs.count(), 5)\n for i in range(5):\n self.assertSetEqual(hash_kvs.get(i), {i})\n\n # Update index and check that components have new data.\n index.update_index(descriptors2)\n self.assertEqual(descr_set.count(), 7)\n for d in descriptors1 + descriptors2:\n self.assertIn(d, descr_set)\n # Dummy hash function bins sum of descriptor vectors.\n 
self.assertEqual(hash_kvs.count(), 7)\n for i in range(7):\n self.assertSetEqual(hash_kvs.get(i), {i})\n\n def test_update_index_duplicate_descriptors(self) -> None:\n \"\"\"\n Test that updating a built index with the same descriptors results in\n idempotent behavior.\n \"\"\"\n descr_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n index = LSHNearestNeighborIndex(DummyHashFunctor(),\n descr_set, hash_kvs)\n\n # Identical Descriptors to build and update on (different instances)\n descriptors1 = [\n DescriptorMemoryElement('t', 0).set_vector([0]),\n DescriptorMemoryElement('t', 1).set_vector([1]),\n DescriptorMemoryElement('t', 2).set_vector([2]),\n DescriptorMemoryElement('t', 3).set_vector([3]),\n DescriptorMemoryElement('t', 4).set_vector([4]),\n ]\n descriptors2 = [\n DescriptorMemoryElement('t', 0).set_vector([0]),\n DescriptorMemoryElement('t', 1).set_vector([1]),\n DescriptorMemoryElement('t', 2).set_vector([2]),\n DescriptorMemoryElement('t', 3).set_vector([3]),\n DescriptorMemoryElement('t', 4).set_vector([4]),\n ]\n\n index.build_index(descriptors1)\n index.update_index(descriptors2)\n\n assert descr_set.count() == 5\n # Above descriptors should be considered \"in\" the descriptor set now.\n for d in descriptors1:\n assert d in descr_set\n for d in descriptors2:\n assert d in descr_set\n # Known hashes of the above descriptors should be in the KVS\n assert set(hash_kvs.keys()) == {0, 1, 2, 3, 4}\n assert hash_kvs.get(0) == {0}\n assert hash_kvs.get(1) == {1}\n assert hash_kvs.get(2) == {2}\n assert hash_kvs.get(3) == {3}\n assert hash_kvs.get(4) == {4}\n\n def test_update_index_similar_descriptors(self) -> None:\n \"\"\"\n Test that updating a built index with similar descriptors (same\n vectors, different UUIDs) results in contained structures having an\n expected state.\n \"\"\"\n descr_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n index = LSHNearestNeighborIndex(DummyHashFunctor(),\n descr_set, hash_kvs)\n\n # Similar Descriptors to build and update on (different instances)\n descriptors1 = [\n DescriptorMemoryElement('t', 0).set_vector([0]),\n DescriptorMemoryElement('t', 1).set_vector([1]),\n DescriptorMemoryElement('t', 2).set_vector([2]),\n DescriptorMemoryElement('t', 3).set_vector([3]),\n DescriptorMemoryElement('t', 4).set_vector([4]),\n ]\n descriptors2 = [\n DescriptorMemoryElement('t', 5).set_vector([0]),\n DescriptorMemoryElement('t', 6).set_vector([1]),\n DescriptorMemoryElement('t', 7).set_vector([2]),\n DescriptorMemoryElement('t', 8).set_vector([3]),\n DescriptorMemoryElement('t', 9).set_vector([4]),\n ]\n\n index.build_index(descriptors1)\n index.update_index(descriptors2)\n\n assert descr_set.count() == 10\n # Above descriptors should be considered \"in\" the descriptor set now.\n for d in descriptors1:\n assert d in descr_set\n for d in descriptors2:\n assert d in descr_set\n # Known hashes of the above descriptors should be in the KVS\n assert set(hash_kvs.keys()) == {0, 1, 2, 3, 4}\n assert hash_kvs.get(0) == {0, 5}\n assert hash_kvs.get(1) == {1, 6}\n assert hash_kvs.get(2) == {2, 7}\n assert hash_kvs.get(3) == {3, 8}\n assert hash_kvs.get(4) == {4, 9}\n\n def test_update_index_existing_descriptors_frozenset(self) -> None:\n \"\"\"\n Same as ``test_update_index_similar_descriptors`` but testing that\n we can update the index when seeded with structures with existing\n values.\n \"\"\"\n # Similar Descriptors to build and update on (different instances)\n descriptors1 = [\n DescriptorMemoryElement('t', 
0).set_vector([0]),\n DescriptorMemoryElement('t', 1).set_vector([1]),\n DescriptorMemoryElement('t', 2).set_vector([2]),\n DescriptorMemoryElement('t', 3).set_vector([3]),\n DescriptorMemoryElement('t', 4).set_vector([4]),\n ]\n descriptors2 = [\n DescriptorMemoryElement('t', 5).set_vector([0]),\n DescriptorMemoryElement('t', 6).set_vector([1]),\n DescriptorMemoryElement('t', 7).set_vector([2]),\n DescriptorMemoryElement('t', 8).set_vector([3]),\n DescriptorMemoryElement('t', 9).set_vector([4]),\n ]\n\n descr_set = MemoryDescriptorSet()\n descr_set.add_many_descriptors(descriptors1)\n\n hash_kvs = MemoryKeyValueStore()\n hash_kvs.add(0, frozenset({0}))\n hash_kvs.add(1, frozenset({1}))\n hash_kvs.add(2, frozenset({2}))\n hash_kvs.add(3, frozenset({3}))\n hash_kvs.add(4, frozenset({4}))\n\n index = LSHNearestNeighborIndex(DummyHashFunctor(),\n descr_set, hash_kvs)\n index.update_index(descriptors2)\n\n assert descr_set.count() == 10\n # Above descriptors should be considered \"in\" the descriptor set now.\n for d in descriptors1:\n assert d in descr_set\n for d in descriptors2:\n assert d in descr_set\n # Known hashes of the above descriptors should be in the KVS\n assert set(hash_kvs.keys()) == {0, 1, 2, 3, 4}\n assert hash_kvs.get(0) == {0, 5}\n assert hash_kvs.get(1) == {1, 6}\n assert hash_kvs.get(2) == {2, 7}\n assert hash_kvs.get(3) == {3, 8}\n assert hash_kvs.get(4) == {4, 9}\n\n def test_update_index_with_hash_index(self) -> None:\n # Similar test to `test_update_index_add_new_descriptors` but with a\n # linear hash index.\n descr_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n linear_hi = LinearHashIndex() # simplest hash index, heap-sorts.\n index = LSHNearestNeighborIndex(DummyHashFunctor(),\n descr_set, hash_kvs, linear_hi)\n\n descriptors1 = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n DescriptorMemoryElement('t', 4),\n ]\n descriptors2 = [\n DescriptorMemoryElement('t', 5),\n DescriptorMemoryElement('t', 6),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for d in descriptors1 + descriptors2:\n d.set_vector(np.ones(1, float) * d.uuid())\n\n # Build initial index.\n index.build_index(descriptors1)\n # Initial hash index should only encode hashes for first batch of\n # descriptors.\n self.assertSetEqual(linear_hi.index, {0, 1, 2, 3, 4})\n\n # Update index and check that components have new data.\n index.update_index(descriptors2)\n # Now the hash index should include all descriptor hashes.\n self.assertSetEqual(linear_hi.index, {0, 1, 2, 3, 4, 5, 6})\n\n def test_remove_from_index_read_only(self) -> None:\n d_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n idx = LSHNearestNeighborIndex(DummyHashFunctor(), d_set, hash_kvs,\n read_only=True)\n self.assertRaises(\n ReadOnlyError,\n idx.remove_from_index,\n ['uid1', 'uid2']\n )\n\n def test_remove_from_index_no_existing_index(self) -> None:\n # Test that attempting to remove from an instance with no existing\n # index (meaning empty descriptor-set and key-value-store) results in\n # a key error.\n d_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n idx = LSHNearestNeighborIndex(DummyHashFunctor(), d_set, hash_kvs)\n self.assertRaisesRegex(\n KeyError,\n 'uid1',\n idx.remove_from_index,\n ['uid1']\n )\n\n def test_remove_from_index_invalid_uid(self) -> None:\n # Test that attempting to remove a single invalid UID causes a key\n # error and does not affect index.\n\n # 
Descriptors are 1 dim, value == index.\n descriptors = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n DescriptorMemoryElement('t', 4),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for d in descriptors:\n d.set_vector(np.ones(1, float) * d.uuid())\n # uid -> descriptor\n expected_dset_table = {\n 0: descriptors[0],\n 1: descriptors[1],\n 2: descriptors[2],\n 3: descriptors[3],\n 4: descriptors[4],\n }\n # hash int -> set[uid]\n expected_kvs_table = {\n 0: {0},\n 1: {1},\n 2: {2},\n 3: {3},\n 4: {4},\n }\n\n d_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n idx = LSHNearestNeighborIndex(DummyHashFunctor(), d_set, hash_kvs)\n idx.build_index(descriptors)\n # Assert we have the correct expected values\n assert isinstance(idx.descriptor_set, MemoryDescriptorSet)\n self.assertEqual(idx.descriptor_set._table, expected_dset_table)\n assert isinstance(idx.hash2uuids_kvstore, MemoryKeyValueStore)\n self.assertEqual(idx.hash2uuids_kvstore._table, expected_kvs_table)\n\n # Attempt to remove descriptor with a UID we did not build with.\n self.assertRaisesRegex(\n KeyError, '5',\n idx.remove_from_index, [5]\n )\n # Index should not have been modified.\n self.assertEqual(idx.descriptor_set._table, expected_dset_table)\n self.assertEqual(idx.hash2uuids_kvstore._table, expected_kvs_table)\n\n # Attempt to remove multiple UIDs, one valid and one invalid\n self.assertRaisesRegex(\n KeyError, '5',\n idx.remove_from_index, [2, 5]\n )\n # Index should not have been modified.\n self.assertEqual(idx.descriptor_set._table, expected_dset_table)\n self.assertEqual(idx.hash2uuids_kvstore._table, expected_kvs_table)\n\n def test_remove_from_index(self) -> None:\n # Test that removing by UIDs does the correct thing.\n\n # Descriptors are 1 dim, value == index.\n descriptors = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n DescriptorMemoryElement('t', 4),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for d in descriptors:\n d.set_vector(np.ones(1, float) * d.uuid())\n d_set = MemoryDescriptorSet()\n hash_kvs = MemoryKeyValueStore()\n idx = LSHNearestNeighborIndex(DummyHashFunctor(), d_set, hash_kvs)\n idx.build_index(descriptors)\n\n # Attempt removing 1 uid.\n idx.remove_from_index([3])\n assert isinstance(idx.descriptor_set, MemoryDescriptorSet)\n self.assertEqual(idx.descriptor_set._table, {\n 0: descriptors[0],\n 1: descriptors[1],\n 2: descriptors[2],\n 4: descriptors[4],\n })\n assert isinstance(idx.hash2uuids_kvstore, MemoryKeyValueStore)\n self.assertEqual(idx.hash2uuids_kvstore._table, {\n 0: {0},\n 1: {1},\n 2: {2},\n 4: {4},\n })\n\n def test_remove_from_index_shared_hashes(self) -> None:\n \"\"\"\n Test that removing a descriptor (by UID) that shares a hash with other\n descriptors does not trigger removal of its hash.\n \"\"\"\n # Simulate descriptors all hashing to the same hash value: 0\n hash_func = DummyHashFunctor()\n hash_func.get_hash = mock.Mock(return_value=np.asarray([0], bool)) # type: ignore\n\n d_set = MemoryDescriptorSet()\n hash2uids_kvs = MemoryKeyValueStore()\n idx = LSHNearestNeighborIndex(hash_func, d_set, hash2uids_kvs)\n\n # Descriptors are 1 dim, value == index.\n descriptors = [\n DescriptorMemoryElement('t', 0),\n DescriptorMemoryElement('t', 1),\n DescriptorMemoryElement('t', 2),\n DescriptorMemoryElement('t', 3),\n 
DescriptorMemoryElement('t', 4),\n ]\n # Vectors of length 1 for easy dummy hashing prediction.\n for d in descriptors:\n d.set_vector(np.ones(1, float) * d.uuid())\n idx.build_index(descriptors)\n # We expect the descriptor-set and kvs to look like the following now:\n self.assertDictEqual(d_set._table, {\n 0: descriptors[0],\n 1: descriptors[1],\n 2: descriptors[2],\n 3: descriptors[3],\n 4: descriptors[4],\n })\n self.assertDictEqual(hash2uids_kvs._table, {0: {0, 1, 2, 3, 4}})\n\n # Mock out hash index as if we had an implementation so we can check\n # call to its remove_from_index method.\n idx.hash_index = mock.Mock(spec=HashIndex)\n\n idx.remove_from_index([2, 4])\n\n # Only uid 2 and 4 descriptors should be gone from d-set, kvs should\n # still have the 0 key and its set value should only contain uids 0, 1\n # and 3. `hash_index.remove_from_index` should not be called because\n # no hashes should be marked for removal.\n self.assertDictEqual(d_set._table, {\n 0: descriptors[0],\n 1: descriptors[1],\n 3: descriptors[3],\n })\n self.assertDictEqual(hash2uids_kvs._table, {0: {0, 1, 3}})\n idx.hash_index.remove_from_index.assert_not_called()\n\n def test_remove_from_index_shared_hashes_partial(self) -> None:\n \"\"\"\n Test that only some hashes are removed from the hash index, but not\n others when those hashes still refer to other descriptors.\n \"\"\"\n # Simulate initial state with some descriptor hashed to one value and\n # other descriptors hashed to another.\n\n # Vectors of length 1 for easy dummy hashing prediction.\n descriptors = [\n DescriptorMemoryElement('t', 0).set_vector([0]),\n DescriptorMemoryElement('t', 1).set_vector([1]),\n DescriptorMemoryElement('t', 2).set_vector([2]),\n DescriptorMemoryElement('t', 3).set_vector([3]),\n DescriptorMemoryElement('t', 4).set_vector([4]),\n ]\n\n # Dummy hash function to do the simulated thing\n hash_func = DummyHashFunctor()\n hash_func.get_hash = mock.Mock( # type: ignore\n # Vectors of even sum hash to 0, odd to 1.\n side_effect=lambda vec: [vec.sum() % 2]\n )\n\n d_set = MemoryDescriptorSet()\n d_set._table = {\n 0: descriptors[0],\n 1: descriptors[1],\n 2: descriptors[2],\n 3: descriptors[3],\n 4: descriptors[4],\n }\n\n hash2uid_kvs = MemoryKeyValueStore()\n hash2uid_kvs._table = {\n 0: {0, 2, 4},\n 1: {1, 3},\n }\n\n idx = LSHNearestNeighborIndex(hash_func, d_set, hash2uid_kvs)\n idx.hash_index = mock.Mock(spec=HashIndex)\n\n idx.remove_from_index([1, 2, 3])\n # Check that only one hash vector was passed to hash_index's removal\n # method (deque of hash-code vectors).\n idx.hash_index.remove_from_index.assert_called_once_with(\n collections.deque([\n [1],\n ])\n )\n self.assertDictEqual(d_set._table, {\n 0: descriptors[0],\n 4: descriptors[4],\n })\n self.assertDictEqual(hash2uid_kvs._table, {0: {0, 4}})\n\n\nclass TestLshIndexAlgorithms (unittest.TestCase):\n \"\"\"\n Various tests on the ``nn`` method for different inputs and parameters.\n \"\"\"\n\n RANDOM_SEED: int = 0\n\n def _make_ftor_itq(\n self,\n bits: int = 32\n ) -> Tuple[ItqFunctor, Callable[[Iterable[DescriptorElement]], None]]:\n itq_ftor = ItqFunctor(bit_length=bits, random_seed=self.RANDOM_SEED)\n\n def itq_fit(d_iter: Iterable[DescriptorElement]) -> None:\n itq_ftor.fit(d_iter)\n\n return itq_ftor, itq_fit\n\n # noinspection PyMethodMayBeStatic\n def _make_hi_linear(self) -> LinearHashIndex:\n return LinearHashIndex()\n\n def _make_hi_balltree(self) -> SkLearnBallTreeHashIndex:\n return SkLearnBallTreeHashIndex(random_seed=self.RANDOM_SEED)\n\n #\n # Test 
LSH with random vectors\n #\n def _random_euclidean(\n self,\n hash_ftor: LshFunctor,\n hash_idx: Optional[HashIndex],\n ftor_train_hook: Callable[[Iterable[DescriptorElement]], None] = lambda d: None\n ) -> None:\n # :param hash_ftor: Hash function class for generating hash codes for\n # descriptors.\n # :param hash_idx: Hash index instance to use in local LSH algo\n # instance.\n # :param ftor_train_hook: Function for training functor if necessary.\n\n # make random descriptors\n i = 1000\n dim = 256\n td = []\n np.random.seed(self.RANDOM_SEED)\n for j in range(i):\n d = DescriptorMemoryElement('random', j)\n d.set_vector(np.random.rand(dim))\n td.append(d)\n\n ftor_train_hook(td)\n\n di = MemoryDescriptorSet()\n kvstore = MemoryKeyValueStore()\n index = LSHNearestNeighborIndex(hash_ftor, di, kvstore,\n hash_index=hash_idx,\n distance_method='euclidean')\n index.build_index(td)\n\n # test query from build set -- should return same descriptor when k=1\n q = td[255]\n r, dists = index.nn(q, 1)\n self.assertEqual(r[0], q)\n\n # test query very near a build vector\n td_q = td[0]\n q = DescriptorMemoryElement('query', i)\n td_q_v = td_q.vector()\n assert td_q_v is not None\n v = td_q_v.copy()\n v_min = max(v.min(), 0.1)\n v[0] += v_min\n v[dim-1] -= v_min\n q.set_vector(v)\n r, dists = index.nn(q, 1)\n self.assertFalse(np.array_equal(q.vector(), td_q.vector()))\n self.assertEqual(r[0], td_q)\n\n # random query\n q = DescriptorMemoryElement('query', i+1)\n q.set_vector(np.random.rand(dim))\n\n # for any query of size k, results should at least be in distance order\n r, dists = index.nn(q, 10)\n for j in range(1, len(dists)):\n self.assertGreater(dists[j], dists[j-1])\n r, dists = index.nn(q, i)\n for j in range(1, len(dists)):\n self.assertGreater(dists[j], dists[j-1])\n\n def test_random_euclidean__itq__None(self) -> None:\n ftor, fit = self._make_ftor_itq()\n self._random_euclidean(ftor, None, fit)\n\n def test_random_euclidean__itq__linear(self) -> None:\n ftor, fit = self._make_ftor_itq()\n hi = self._make_hi_linear()\n self._random_euclidean(ftor, hi, fit)\n\n @pytest.mark.skipif(\n not SkLearnBallTreeHashIndex.is_usable(),\n reason=\"SkLearnBallTreeHashIndex is not usable in the current environment.\"\n )\n def test_random_euclidean__itq__balltree(self) -> None:\n ftor, fit = self._make_ftor_itq()\n hi = self._make_hi_balltree()\n self._random_euclidean(ftor, hi, fit)\n\n #\n # Test unit vectors\n #\n def _known_unit(\n self,\n hash_ftor: LshFunctor,\n hash_idx: Optional[HashIndex],\n dist_method: str,\n ftor_train_hook: Callable[[Iterable[DescriptorElement]], None] = lambda d: None\n ) -> None:\n ###\n # Unit vectors - Equal distance\n #\n dim = 5\n test_descriptors = []\n for i in range(dim):\n v = np.zeros(dim, float)\n v[i] = 1.\n test_descriptors.append(\n DescriptorMemoryElement('unit', i).set_vector(v)\n )\n\n ftor_train_hook(test_descriptors)\n\n di = MemoryDescriptorSet()\n kvstore = MemoryKeyValueStore()\n index = LSHNearestNeighborIndex(hash_ftor, di, kvstore,\n hash_index=hash_idx,\n distance_method=dist_method)\n index.build_index(test_descriptors)\n\n # query with zero vector\n # -> all modeled descriptors have no intersection, dists should be 1.0,\n # or maximum distance by histogram intersection\n q = DescriptorMemoryElement('query', 0)\n q.set_vector(np.zeros(dim, float))\n r, dists = index.nn(q, dim)\n # All dists should be 1.0, r order doesn't matter\n for d in dists:\n self.assertEqual(d, 1.)\n\n # query with index element\n q = test_descriptors[3]\n r, dists = 
index.nn(q, 1)\n self.assertEqual(r[0], q)\n self.assertEqual(dists[0], 0.)\n\n r, dists = index.nn(q, dim)\n self.assertEqual(r[0], q)\n self.assertEqual(dists[0], 0.)\n\n def test_known_unit__euclidean__itq__None(self) -> None:\n ftor, fit = self._make_ftor_itq(5)\n self._known_unit(ftor, None, 'euclidean', fit)\n\n def test_known_unit__hik__itq__None(self) -> None:\n ftor, fit = self._make_ftor_itq(5)\n self._known_unit(ftor, None, 'hik', fit)\n\n def test_known_unit__euclidean__itq__linear(self) -> None:\n ftor, fit = self._make_ftor_itq(5)\n hi = self._make_hi_linear()\n self._known_unit(ftor, hi, 'euclidean', fit)\n\n def test_known_unit__hik__itq__linear(self) -> None:\n ftor, fit = self._make_ftor_itq(5)\n hi = self._make_hi_linear()\n self._known_unit(ftor, hi, 'hik', fit)\n\n @pytest.mark.skipif(\n not SkLearnBallTreeHashIndex.is_usable(),\n reason=\"SkLearnBallTreeHashIndex is not usable in the current environment.\"\n )\n def test_known_unit__euclidean__itq__balltree(self) -> None:\n ftor, fit = self._make_ftor_itq(5)\n hi = self._make_hi_balltree()\n self._known_unit(ftor, hi, 'euclidean', fit)\n\n @pytest.mark.skipif(\n not SkLearnBallTreeHashIndex.is_usable(),\n reason=\"SkLearnBallTreeHashIndex is not usable in the current environment.\"\n )\n def test_known_unit__hik__itq__balltree(self) -> None:\n ftor, fit = self._make_ftor_itq(5)\n hi = self._make_hi_balltree()\n self._known_unit(ftor, hi, 'hik', fit)\n\n #\n # Test with known vectors and euclidean dist\n #\n def _known_ordered_euclidean(\n self,\n hash_ftor: LshFunctor,\n hash_idx: Optional[HashIndex],\n ftor_train_hook: Callable[[Iterable[DescriptorElement]], None] = lambda d: None\n ) -> None:\n # make vectors to return in a known euclidean distance order\n i = 1000\n test_descriptors = [\n DescriptorMemoryElement('ordered', j).set_vector(np.array([j, j*2], float))\n for j in range(i)\n ]\n random.shuffle(test_descriptors)\n\n ftor_train_hook(test_descriptors)\n\n di = MemoryDescriptorSet()\n kvstore = MemoryKeyValueStore()\n index = LSHNearestNeighborIndex(hash_ftor, di, kvstore,\n hash_index=hash_idx,\n distance_method='euclidean')\n index.build_index(test_descriptors)\n\n # Since descriptors were built in increasing distance from (0,0),\n # returned descriptors for a query of [0,0] should be in index order.\n q = DescriptorMemoryElement('query', i)\n q.set_vector(np.array([0, 0], float))\n # top result should have UUID == 0 (nearest to query)\n r, dists = index.nn(q, 5)\n self.assertEqual(r[0].uuid(), 0)\n self.assertEqual(r[1].uuid(), 1)\n self.assertEqual(r[2].uuid(), 2)\n self.assertEqual(r[3].uuid(), 3)\n self.assertEqual(r[4].uuid(), 4)\n # global search should be in complete order\n r, dists = index.nn(q, i)\n for j, d, dist in zip(range(i), r, dists):\n self.assertEqual(d.uuid(), j)\n\n def test_known_ordered_euclidean__itq__None(self) -> None:\n ftor, fit = self._make_ftor_itq(1)\n self._known_ordered_euclidean(ftor, None, fit)\n\n def test_known_ordered_euclidean__itq__linear(self) -> None:\n ftor, fit = self._make_ftor_itq(1)\n hi = self._make_hi_linear()\n self._known_ordered_euclidean(ftor, hi, fit)\n\n @pytest.mark.skipif(\n not SkLearnBallTreeHashIndex.is_usable(),\n reason=\"SkLearnBallTreeHashIndex is not usable in the current environment.\"\n )\n def test_known_ordered_euclidean__itq__balltree(self) -> None:\n ftor, fit = self._make_ftor_itq(1)\n hi = self._make_hi_balltree()\n self._known_ordered_euclidean(ftor, hi, fit)\n"
] |
[
[
"numpy.random.seed",
"numpy.asarray",
"numpy.ones",
"numpy.random.rand",
"numpy.array",
"numpy.zeros"
]
] |
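A note on the invariants these tests pin down: `LSHNearestNeighborIndex` keeps a hash-to-UUID bucket map, so re-adding identical descriptors is idempotent, equal vectors share a bucket under different UUIDs, and a hash code is only dropped once its last UUID is removed. Below is a minimal, dependency-free sketch of that bookkeeping; `toy_hash` and `ToyLshIndex` are illustrative stand-ins, not SMQTK APIs.

```python
from collections import defaultdict

def toy_hash(vec):
    # Identity hash on 1-d vectors, like the DummyHashFunctor in the tests.
    return int(vec[0])

class ToyLshIndex:
    def __init__(self):
        self.descriptors = {}               # uid -> vector
        self.hash2uids = defaultdict(set)   # hash code -> {uid, ...}

    def update(self, items):
        # Re-adding an existing (uid, vector) pair changes nothing: idempotent.
        for uid, vec in items:
            self.descriptors[uid] = vec
            self.hash2uids[toy_hash(vec)].add(uid)

    def remove(self, uids):
        # Validate every uid first so an invalid one leaves the index intact.
        for u in uids:
            if u not in self.descriptors:
                raise KeyError(u)
        for u in uids:
            h = toy_hash(self.descriptors.pop(u))
            bucket = self.hash2uids[h]
            bucket.discard(u)
            if not bucket:        # drop the hash only when its bucket empties
                del self.hash2uids[h]

idx = ToyLshIndex()
idx.update([(i, [i]) for i in range(5)])
idx.update([(i, [i]) for i in range(5)])      # duplicate update: no change
idx.update([(i + 5, [i]) for i in range(5)])  # same vectors, new uids
assert idx.hash2uids[0] == {0, 5}
idx.remove([2])
assert idx.hash2uids[2] == {7}                # shared hash survives removal
```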
huimlight/SoftTeacher
|
[
"97064fbcce1ab87b40977544ba7a9c488274d66f"
] |
[
"ssod/datasets/builder.py"
] |
[
"from collections.abc import Mapping, Sequence\nfrom functools import partial\n\nimport torch\nfrom mmcv.parallel import DataContainer\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import Registry, build_from_cfg\nfrom mmdet.datasets.builder import worker_init_fn\nfrom mmdet.datasets.samplers import (\n DistributedGroupSampler,\n DistributedSampler,\n GroupSampler,\n)\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataloader import default_collate\n\nSAMPLERS = Registry(\"sampler\")\n\nSAMPLERS.register_module(module=DistributedGroupSampler)\nSAMPLERS.register_module(module=DistributedSampler)\nSAMPLERS.register_module(module=GroupSampler)\n\n\ndef build_sampler(cfg, dist=False, group=False, default_args=None):\n if cfg and (\"type\" in cfg):\n sampler_type = cfg.get(\"type\")\n else:\n sampler_type = default_args.get(\"type\")\n if group:\n sampler_type = \"Group\" + sampler_type\n if dist:\n sampler_type = \"Distributed\" + sampler_type\n\n if cfg:\n cfg.update(type=sampler_type)\n else:\n cfg = dict(type=sampler_type)\n\n return build_from_cfg(cfg, SAMPLERS, default_args)\n\n\ndef build_dataloader(\n dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n sampler_cfg=None,\n **kwargs,\n):\n rank, world_size = get_dist_info()\n default_sampler_cfg = dict(type=\"Sampler\", dataset=dataset)\n if shuffle:\n default_sampler_cfg.update(samples_per_gpu=samples_per_gpu)\n else:\n default_sampler_cfg.update(shuffle=False)\n if dist:\n default_sampler_cfg.update(num_replicas=world_size, rank=rank, seed=seed)\n sampler = build_sampler(sampler_cfg, dist, shuffle, default_sampler_cfg)\n\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = (\n build_sampler(sampler_cfg, default_args=default_sampler_cfg)\n if shuffle\n else None\n )\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = (\n partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)\n if seed is not None\n else None\n )\n\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu, flatten=True),\n pin_memory=False,\n worker_init_fn=init_fn,\n **kwargs,\n )\n return data_loader\n\n\ndef collate(batch, samples_per_gpu=1, flatten=False):\n \"\"\"Puts each data field into a tensor/DataContainer with outer dimension\n batch size.\n\n Extend default_collate to add support for\n :type:`~mmcv.parallel.DataContainer`. There are 3 cases.\n\n 1. cpu_only = True, e.g., meta data\n 2. cpu_only = False, stack = True, e.g., images tensors\n 3. 
cpu_only = False, stack = False, e.g., gt bboxes\n \"\"\"\n if not isinstance(batch, Sequence):\n raise TypeError(f\"{batch.dtype} is not supported.\")\n\n if isinstance(batch[0], DataContainer):\n stacked = []\n if batch[0].cpu_only:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i : i + samples_per_gpu]]\n )\n return DataContainer(\n stacked, batch[0].stack, batch[0].padding_value, cpu_only=True\n )\n elif batch[0].stack:\n for i in range(0, len(batch), samples_per_gpu):\n assert isinstance(batch[i].data, torch.Tensor)\n\n if batch[i].pad_dims is not None:\n ndim = batch[i].dim()\n assert ndim > batch[i].pad_dims\n max_shape = [0 for _ in range(batch[i].pad_dims)]\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = batch[i].size(-dim)\n for sample in batch[i : i + samples_per_gpu]:\n for dim in range(0, ndim - batch[i].pad_dims):\n assert batch[i].size(dim) == sample.size(dim)\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = max(\n max_shape[dim - 1], sample.size(-dim)\n )\n padded_samples = []\n for sample in batch[i : i + samples_per_gpu]:\n pad = [0 for _ in range(batch[i].pad_dims * 2)]\n for dim in range(1, batch[i].pad_dims + 1):\n pad[2 * dim - 1] = max_shape[dim - 1] - sample.size(-dim)\n padded_samples.append(\n F.pad(sample.data, pad, value=sample.padding_value)\n )\n stacked.append(default_collate(padded_samples))\n elif batch[i].pad_dims is None:\n stacked.append(\n default_collate(\n [sample.data for sample in batch[i : i + samples_per_gpu]]\n )\n )\n else:\n raise ValueError(\"pad_dims should be either None or integers (1-3)\")\n\n else:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i : i + samples_per_gpu]]\n )\n return DataContainer(stacked, batch[0].stack, batch[0].padding_value)\n elif any([isinstance(b, Sequence) for b in batch]):\n if flatten:\n flattened = []\n for b in batch:\n if isinstance(b, Sequence):\n flattened.extend(b)\n else:\n flattened.extend([b])\n return collate(flattened, len(flattened))\n else:\n transposed = zip(*batch)\n return [collate(samples, samples_per_gpu) for samples in transposed]\n elif isinstance(batch[0], Mapping):\n return {\n key: collate([d[key] for d in batch], samples_per_gpu) for key in batch[0]\n }\n else:\n return default_collate(batch)\n"
] |
[
[
"torch.nn.functional.pad",
"torch.utils.data.dataloader.default_collate"
]
] |
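The core of the extended `collate` above is padding each `DataContainer` tensor up to the per-batch maximum along its last `pad_dims` dimensions before handing the group to `default_collate`. A self-contained sketch of just that step, assuming all leading (non-padded) dimensions already agree, as the original asserts; the helper name is illustrative:

```python
import torch
import torch.nn.functional as F

def pad_and_stack(samples, pad_dims=2, padding_value=0):
    # Per-batch maximum extent of each of the last `pad_dims` dimensions.
    max_shape = [max(s.size(-d) for s in samples) for d in range(1, pad_dims + 1)]
    padded = []
    for s in samples:
        # F.pad consumes pads for the *last* dimension first, as
        # (left_-1, right_-1, left_-2, right_-2, ...); only the trailing side
        # is grown here, exactly as in the collate code above.
        pad = [0] * (pad_dims * 2)
        for d in range(1, pad_dims + 1):
            pad[2 * d - 1] = max_shape[d - 1] - s.size(-d)
        padded.append(F.pad(s, pad, value=padding_value))
    return torch.stack(padded)

imgs = [torch.ones(3, 4, 5), torch.ones(3, 2, 7)]   # same channels, ragged H/W
assert pad_and_stack(imgs).shape == (2, 3, 4, 7)
```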
DecretumWang/sfft
|
[
"a0f64a76ff57bae78637df62feb1c6ce7578950f"
] |
[
"sfft/EasyCrowdedPacket.py"
] |
[
"import os\nimport time\nimport numpy as np\nimport os.path as pa\nfrom astropy.io import fits\nfrom tempfile import mkdtemp\nfrom astropy.time import Time\nfrom sfft.utils.meta.FileLockKit import FileLock\nfrom sfft.AutoCrowdedPrep import Auto_CrowdedPrep\n\n__author__ = \"Lei Hu <[email protected]>\"\n__version__ = \"v1.0\"\n\nclass Easy_CrowdedPacket:\n @staticmethod\n def ECP(FITS_REF, FITS_SCI, FITS_DIFF=None, FITS_Solution=None, ForceConv=None, \\\n GKerHW=None, KerHWRatio=2.0, KerHWLimit=(2, 20), KerPolyOrder=2, BGPolyOrder=2, \\\n ConstPhotRatio=True, backend='Pycuda', CUDA_DEVICE='0', NUM_CPU_THREADS=8, \\\n MaskSatContam=False, BACKSIZE_SUPER=128, GAIN_KEY='GAIN', SATUR_KEY='SATURATE', \\\n DETECT_THRESH=5.0, StarExt_iter=2, PriorBanMask=None, GLockFile=None):\n\n \"\"\"\n * Parameters for Crowded-Flavor SFFT\n # ----------------------------- Computing Enviornment --------------------------------- #\n\n -backend ['Pycuda'] # can be 'Pycuda', 'Cupy' and 'Numpy'. \n # Pycuda backend and Cupy backend require GPU device(s), \n # while 'Numpy' is a pure CPU-based backend.\n # Cupy backend is even faster than Pycuda, however, it consume more GPU memory.\n\n -CUDA_DEVICE ['0'] # it specifies certain GPU device to conduct the subtraction task.\n # the GPU devices are usually numbered 0 to N-1 (you may use command nvidia-smi to check).\n # this argument becomes trivial for Numpy backend\n\n -NUM_CPU_THREADS [8] # it specifies the number of CPU threads used for Numpy backend.\n # SFFT of Numpy backend has been implemented with pyfftw and numba, \n # that allow parallel computing on CPUs. However, Numpy backend is \n # generally much slower than GPU backends.\n\n -GLockFile [None] # File path for file locking to avoid GPU to deal with multiple tasks at the same time.\n # -GLockFile = None means SFFT will automatically create a temporary file.\n\n # ----------------------------- Preliminary Image-Masking with Saturation Rejection --------------------------------- #\n\n -GAIN_KEY ['GAIN'] # keyword of GAIN in FITS header (of reference & science), for SExtractor configuration\n # NOTE: we need to use SExtractor check image SEGMENTATION to mask Saturated sources.\n\n -GAIN_KEY ['SATURATE'] # keyword of saturation in FITS header (of reference & science), for SExtractor configuration\n # Remarks: note that Crowded-Flavor SFFT does not require sky-subtracted images as inputs,\n # so the default keyword is the common name for saturation level.\n\n -DETECT_THRESH [5.0] # Detect threshold for SExtractor configuration.\n\n -StarExt_iter [2] # make a further dilation for the masked region initially determined by SExtractor SEGMENTATION.\n # -StarExt_iter means the iteration times of the dilation process. 
\n \n -PriorBanMask [None] # a Numpy boolean array, with shape consistent with reference (science).\n # one can deliver a customized mask that covers known \n # variables/transients/bad-pixels by this argument.\n # SFFT will then merge (Union operation) the user-defined mask and \n # the SExtractor-determined saturation mask as the final mask.\n\n # ----------------------------- SFFT Subtraction --------------------------------- #\n\n -ForceConv [None] # it determines which image will be convolved, can be 'REF', 'SCI' and None.\n # -ForceConv = None means SFFT will determine the convolution direction according to \n # FWHM_SCI and FWHM_REF: the image with better seeing will be convolved to avoid deconvolution.\n\n -GKerHW [None] # The given kernel half-width, None means the kernel size will be \n # automatically determined by -KerHWRatio (to be seeing-related). \n\n -KerHWRatio [2.0] # The ratio between FWHM and the kernel half-width\n # KerHW = int(KerHWRatio * Max(FWHM_REF, FWHM_SCI))\n\n -KerHWLimit [(2, 20)] # The lower & upper bounds for kernel half-width \n # KerHW is updated as np.clip(KerHW, KerHWLimit[0], KerHWLimit[1]) \n # Remarks: this is useful for a survey since it can constrain the peak GPU memory usage.\n\n -KerPolyOrder [2] # Polynomial degree of kernel spatial variation.\n\n -BGPolyOrder [2] # Polynomial degree of background spatial variation.\n # This argument is trivial for Sparse-Flavor SFFT as input images have been sky subtracted.\n\n -ConstPhotRatio [True] # Constant photometric ratio between images ? can be True or False\n # ConstPhotRatio = True: the sum of convolution kernel is restricted to be a \n # constant across the field. \n # ConstPhotRatio = False: the flux scaling between images is modeled by a \n # polynomial with degree -KerPolyOrder.\n\n # ----------------------------- Input & Output --------------------------------- #\n\n -FITS_REF [] # File path of input reference image\n\n -FITS_SCI [] # File path of input science image\n\n -FITS_DIFF [None] # File path of output difference image\n\n -FITS_Solution [None] # File path of the solution of the linear system \n # it is an array of (..., a_ijab, ... 
b_pq, ...)\n\n # Important Notice:\n # a): if reference is convolved in SFFT, then DIFF = SCI - Convolved_REF.\n # [difference image is expected to have PSF & flux zero-point consistent with science image]\n # e.g., -ForceConv='REF' or -ForceConv=None and reference has better seeing.\n # b): if science is convolved in SFFT, then DIFF = Convolved_SCI - REF\n # [difference image is expected to have PSF & flux zero-point consistent with reference image]\n # e.g., -ForceConv='SCI' or -ForceConv=None and science has better seeing.\n #\n # Remarks: this convention is to guarantee that transients emerge on science image always \n # show a positive signal on difference images.\n\n \"\"\"\n\n # * Perform Crowded-Prep [MaskSat]\n SFFTPrepDict = Auto_CrowdedPrep(FITS_REF=FITS_REF, FITS_SCI=FITS_SCI).\\\n AutoMask(BACKSIZE_SUPER=BACKSIZE_SUPER, GAIN_KEY=GAIN_KEY, SATUR_KEY=SATUR_KEY, \\\n DETECT_THRESH=DETECT_THRESH, StarExt_iter=StarExt_iter, PriorBanMask=PriorBanMask)\n \n # * Determine ConvdSide & KerHW\n FWHM_REF = SFFTPrepDict['FWHM_REF']\n FWHM_SCI = SFFTPrepDict['FWHM_SCI']\n\n if ForceConv is None:\n if FWHM_SCI >= FWHM_REF: ConvdSide = 'REF'\n else: ConvdSide = 'SCI'\n else: ConvdSide = ForceConv\n\n if GKerHW is None:\n FWHM_La = np.max([FWHM_REF, FWHM_SCI])\n KerHW = int(np.clip(KerHWRatio * FWHM_La, KerHWLimit[0], KerHWLimit[1]))\n else: KerHW = GKerHW\n\n if GLockFile is None:\n TDIR = mkdtemp(suffix=None, prefix='4lock', dir=None)\n LockFile = pa.join(TDIR, 'tmplock.txt')\n else: LockFile = GLockFile\n\n # * Configure SFFT \n from sfft.sfftcore.SFFTConfigure import SingleSFFTConfigure\n\n PixA_REF = SFFTPrepDict['PixA_REF']\n PixA_SCI = SFFTPrepDict['PixA_SCI']\n SFFTConfig = SingleSFFTConfigure.SSC(NX=PixA_REF.shape[0], NY=PixA_REF.shape[1], KerHW=KerHW, \\\n KerPolyOrder=KerPolyOrder, BGPolyOrder=BGPolyOrder, ConstPhotRatio=ConstPhotRatio, \\\n backend=backend, CUDA_DEVICE=CUDA_DEVICE, NUM_CPU_THREADS=NUM_CPU_THREADS)\n \n with FileLock(LockFile):\n with open(LockFile, \"a\") as f:\n LTIME0 = Time.now()\n\n # * Perform Ultimate Subtraction\n from sfft.sfftcore.SFFTSubtract import GeneralSFFTSubtract\n\n SatMask_REF = SFFTPrepDict['REF-SAT-Mask']\n SatMask_SCI = SFFTPrepDict['SCI-SAT-Mask']\n NaNmask_U = SFFTPrepDict['Union-NaN-Mask']\n PixA_mREF = SFFTPrepDict['PixA_mREF']\n PixA_mSCI = SFFTPrepDict['PixA_mSCI']\n\n if ConvdSide == 'REF':\n PixA_mI, PixA_mJ = PixA_mREF, PixA_mSCI\n if NaNmask_U is not None:\n PixA_I, PixA_J = PixA_REF.copy(), PixA_SCI.copy()\n PixA_I[NaNmask_U] = PixA_mI[NaNmask_U]\n PixA_J[NaNmask_U] = PixA_mJ[NaNmask_U]\n else: PixA_I, PixA_J = PixA_REF, PixA_SCI\n if MaskSatContam: \n ContamMask_I = SatMask_REF\n ContamMask_J = SatMask_SCI\n else: ContamMask_I = None\n\n if ConvdSide == 'SCI':\n PixA_mI, PixA_mJ = PixA_mSCI, PixA_mREF\n if NaNmask_U is not None:\n PixA_I, PixA_J = PixA_SCI.copy(), PixA_REF.copy()\n PixA_I[NaNmask_U] = PixA_mI[NaNmask_U]\n PixA_J[NaNmask_U] = PixA_mJ[NaNmask_U]\n else: PixA_I, PixA_J = PixA_SCI, PixA_REF\n if MaskSatContam: \n ContamMask_I = SatMask_SCI\n ContamMask_J = SatMask_REF\n else: ContamMask_I = None\n \n Tsub_start = time.time()\n _tmp = GeneralSFFTSubtract.GSS(PixA_I=PixA_I, PixA_J=PixA_J, PixA_mI=PixA_mI, PixA_mJ=PixA_mJ, \\\n SFFTConfig=SFFTConfig, ContamMask_I=ContamMask_I, backend=backend, \\\n CUDA_DEVICE=CUDA_DEVICE, NUM_CPU_THREADS=NUM_CPU_THREADS)\n Solution, PixA_DIFF, ContamMask_CI = _tmp\n if MaskSatContam:\n ContamMask_DIFF = np.logical_or(ContamMask_CI, ContamMask_J)\n print('MeLOn Report: Ultimate Subtraction 
Takes [%.3f s]' %(time.time() - Tsub_start))\n \n # * Modifications on difference image\n # a) when REF is convolved, DIFF = SCI - Conv(REF)\n # PSF(DIFF) is coincident with PSF(SCI), transients on SCI are positive signal in DIFF.\n # b) when SCI is convolved, DIFF = Conv(SCI) - REF\n # PSF(DIFF) is coincident with PSF(REF), transients on SCI are still positive signal in DIFF.\n\n if NaNmask_U is not None:\n # ** Mask Union-NaN region\n PixA_DIFF[NaNmask_U] = np.nan\n if MaskSatContam:\n # ** Mask Saturate-Contaminate region \n PixA_DIFF[ContamMask_DIFF] = np.nan\n if ConvdSide == 'SCI': \n # ** Flip difference when science is convolved\n PixA_DIFF = -PixA_DIFF\n \n LTIME1 = Time.now()\n Lmessage = 'FILELOCK | REF = %s & SCI = %s | %s + %.2f s' \\\n %(pa.basename(FITS_REF), pa.basename(FITS_SCI), \\\n LTIME0.isot, (LTIME1.mjd-LTIME0.mjd)*24*3600)\n \n print('\\n---@--- %s ---@---\\n' %Lmessage)\n f.write('EasyCrowdedPacket: %s \\n' %Lmessage)\n f.flush()\n\n if GLockFile is None:\n os.system('rm -rf %s' %TDIR)\n\n # * Save difference image\n if FITS_DIFF is not None:\n _hdl = fits.open(FITS_SCI)\n _hdl[0].data[:, :] = PixA_DIFF.T\n _hdl[0].header['NAME_REF'] = (pa.basename(FITS_REF), 'MeLOn: SFFT')\n _hdl[0].header['NAME_SCI'] = (pa.basename(FITS_SCI), 'MeLOn: SFFT')\n _hdl[0].header['FWHM_REF'] = (FWHM_REF, 'MeLOn: SFFT')\n _hdl[0].header['FWHM_SCI'] = (FWHM_SCI, 'MeLOn: SFFT')\n _hdl[0].header['KERORDER'] = (KerPolyOrder, 'MeLOn: SFFT')\n _hdl[0].header['BGORDER'] = (BGPolyOrder, 'MeLOn: SFFT')\n _hdl[0].header['CPHOTR'] = (str(ConstPhotRatio), 'MeLOn: SFFT')\n _hdl[0].header['KERHW'] = (KerHW, 'MeLOn: SFFT')\n _hdl[0].header['CONVD'] = (ConvdSide , 'MeLOn: SFFT')\n _hdl.writeto(FITS_DIFF, overwrite=True)\n _hdl.close()\n \n # * Save solution array\n if FITS_Solution is not None:\n phdu = fits.PrimaryHDU()\n phdu.header['DK'] = (SFFTConfig[0]['DK'], 'MeLOn: SFFT')\n phdu.header['DB'] = (SFFTConfig[0]['DB'], 'MeLOn: SFFT')\n phdu.header['L0'] = (SFFTConfig[0]['L0'], 'MeLOn: SFFT')\n phdu.header['L1'] = (SFFTConfig[0]['L1'], 'MeLOn: SFFT')\n \n phdu.header['FIJ'] = (SFFTConfig[0]['Fij'], 'MeLOn: SFFT')\n phdu.header['FAB'] = (SFFTConfig[0]['Fab'], 'MeLOn: SFFT')\n phdu.header['FPQ'] = (SFFTConfig[0]['Fpq'], 'MeLOn: SFFT')\n phdu.header['FIJAB'] = (SFFTConfig[0]['Fijab'], 'MeLOn: SFFT')\n\n PixA_Solution = Solution.reshape((-1, 1))\n phdu.data = PixA_Solution.T\n fits.HDUList([phdu]).writeto(FITS_Solution, overwrite=True)\n \n return SFFTPrepDict, Solution, PixA_DIFF\n"
] |
[
[
"numpy.logical_or",
"numpy.max",
"numpy.clip"
]
] |
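Two small decisions in `ECP` drive the whole subtraction setup: the image with better seeing (smaller FWHM) is the one convolved, avoiding deconvolution, and the kernel half-width is `KerHWRatio` times the worse FWHM, clipped to `KerHWLimit`. A standalone sketch of that logic (the function name is illustrative):

```python
import numpy as np

def choose_convolution(fwhm_ref, fwhm_sci, ker_hw_ratio=2.0, ker_hw_limit=(2, 20)):
    # Convolve the sharper image; DIFF then keeps transients on SCI positive.
    convd_side = 'REF' if fwhm_sci >= fwhm_ref else 'SCI'
    ker_hw = int(np.clip(ker_hw_ratio * max(fwhm_ref, fwhm_sci),
                         ker_hw_limit[0], ker_hw_limit[1]))
    return convd_side, ker_hw

# Reference seeing is better (2.0 < 3.5), so REF is convolved and
# KerHW = clip(2.0 * 3.5, 2, 20) = 7.
assert choose_convolution(2.0, 3.5) == ('REF', 7)
```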
mirlomusica/neural_training
|
[
"6864e8bfa39190e92284bff697a419a16d5e6450"
] |
[
"models/definitions/transformer_net.py"
] |
[
"\"\"\"\n Modifications to the original J.Johnson's architecture:\n 1. Instance normalization is used instead of batch normalization *\n 2. Instead of learned up-sampling use nearest-neighbor up-sampling followed by convolution **\n 3. No scaled tanh at the output of the network ***\n\n * Ulyanov showed that this gives better results, checkout the paper here: https://arxiv.org/pdf/1607.08022.pdf\n ** Distill pub blog showed this to have better results: http://distill.pub/2016/deconv-checkerboard/\n *** I tried using it even opened an issue on the original Johnson's repo (written in Lua) - no improvements\n\n Note: checkout the details about original Johnson's architecture here:\n https://cs.stanford.edu/people/jcjohns/papers/fast-style/fast-style-supp.pdf\n\"\"\"\n\nimport torch\n\n\nclass TransformerNet(torch.nn.Module):\n def __init__(self):\n super().__init__()\n # Non-linearity\n self.relu = torch.nn.ReLU()\n\n # Down-sampling convolution layers\n num_of_channels = [3, 32, 64, 128]\n kernel_sizes = [9, 3, 3]\n stride_sizes = [1, 2, 2]\n self.conv1 = ConvLayer(num_of_channels[0], num_of_channels[1], kernel_size=kernel_sizes[0], stride=stride_sizes[0])\n self.in1 = torch.nn.InstanceNorm2d(num_of_channels[1], affine=True)\n self.conv2 = ConvLayer(num_of_channels[1], num_of_channels[2], kernel_size=kernel_sizes[1], stride=stride_sizes[1])\n self.in2 = torch.nn.InstanceNorm2d(num_of_channels[2], affine=True)\n self.conv3 = ConvLayer(num_of_channels[2], num_of_channels[3], kernel_size=kernel_sizes[2], stride=stride_sizes[2])\n self.in3 = torch.nn.InstanceNorm2d(num_of_channels[3], affine=True)\n\n # Residual layers\n res_block_num_of_filters = 128\n self.res1 = ResidualBlock(res_block_num_of_filters)\n self.res2 = ResidualBlock(res_block_num_of_filters)\n self.res3 = ResidualBlock(res_block_num_of_filters)\n self.res4 = ResidualBlock(res_block_num_of_filters)\n self.res5 = ResidualBlock(res_block_num_of_filters)\n\n # Up-sampling convolution layers\n num_of_channels.reverse()\n kernel_sizes.reverse()\n stride_sizes.reverse()\n self.up1 = UpsampleConvLayer(num_of_channels[0], num_of_channels[1], kernel_size=kernel_sizes[0], stride=stride_sizes[0])\n self.in4 = torch.nn.InstanceNorm2d(num_of_channels[1], affine=True)\n self.up2 = UpsampleConvLayer(num_of_channels[1], num_of_channels[2], kernel_size=kernel_sizes[1], stride=stride_sizes[1])\n self.in5 = torch.nn.InstanceNorm2d(num_of_channels[2], affine=True)\n self.up3 = ConvLayer(num_of_channels[2], num_of_channels[3], kernel_size=kernel_sizes[2], stride=stride_sizes[2])\n\n def forward(self, x):\n y = self.relu(self.in1(self.conv1(x)))\n y = self.relu(self.in2(self.conv2(y)))\n y = self.relu(self.in3(self.conv3(y)))\n y = self.res1(y)\n y = self.res2(y)\n y = self.res3(y)\n y = self.res4(y)\n y = self.res5(y)\n y = self.relu(self.in4(self.up1(y)))\n y = self.relu(self.in5(self.up2(y)))\n # No tanh activation here as originally proposed by J.Johnson, I didn't get any improvements by using it,\n # if you get better results using it feel free to make a PR\n return self.up3(y)\n\n\nclass ConvLayer(torch.nn.Module):\n \"\"\"\n A small wrapper around nn.Conv2d, so as to make the code cleaner and allow for experimentation with padding\n \"\"\"\n def __init__(self, in_channels, out_channels, kernel_size, stride):\n super().__init__()\n self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=kernel_size//2, padding_mode='reflect')\n\n def forward(self, x):\n return self.conv2d(x)\n\n\nclass 
ResidualBlock(torch.nn.Module):\n \"\"\"\n Originally introduced in (Microsoft Research Asia, He et al.): https://arxiv.org/abs/1512.03385\n Modified architecture according to suggestions in this blog: http://torch.ch/blog/2016/02/04/resnets.html\n\n The only difference from the original is: There is no ReLU layer after the addition of identity and residual\n \"\"\"\n\n def __init__(self, channels):\n super(ResidualBlock, self).__init__()\n kernel_size = 3\n stride_size = 1\n self.conv1 = ConvLayer(channels, channels, kernel_size=kernel_size, stride=stride_size)\n self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)\n self.conv2 = ConvLayer(channels, channels, kernel_size=kernel_size, stride=stride_size)\n self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n residual = x\n out = self.relu(self.in1(self.conv1(x)))\n out = self.in2(self.conv2(out))\n return out + residual # modification: no ReLu after the addition\n\n\nclass UpsampleConvLayer(torch.nn.Module):\n \"\"\"\n Nearest-neighbor up-sampling followed by a convolution\n Appears to give better results than learned up-sampling aka transposed conv (avoids the checkerboard artifact)\n\n Initially proposed on distill pub: http://distill.pub/2016/deconv-checkerboard/\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, stride):\n super().__init__()\n self.upsampling_factor = stride\n self.conv2d = ConvLayer(in_channels, out_channels, kernel_size, stride=1)\n\n def forward(self, x):\n if self.upsampling_factor > 1:\n x = torch.nn.functional.interpolate(x, scale_factor=self.upsampling_factor, mode='nearest')\n return self.conv2d(x)\n\n"
] |
[
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.functional.interpolate"
]
] |
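`UpsampleConvLayer` above swaps a learned transposed convolution for nearest-neighbor interpolation followed by a stride-1 reflect-padded convolution, which is the checkerboard-artifact fix the module docstring cites. A minimal shape check of that pattern (channel counts are arbitrary):

```python
import torch

conv = torch.nn.Conv2d(128, 64, kernel_size=3, stride=1,
                       padding=1, padding_mode='reflect')

x = torch.randn(1, 128, 32, 32)
# Nearest-neighbor upsampling doubles H and W; the conv then refines without
# the uneven kernel overlap a stride-2 transposed conv would introduce.
x = torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')
y = conv(x)
assert y.shape == (1, 64, 64, 64)
```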
gicsaw/VHTS
|
[
"dd23cbb0216dd628bb95bf4bb7e93ce49514c89d"
] |
[
"bin/sub_dock.py"
] |
[
"#!/usr/bin/env python\nimport sys\nimport os\nimport argparse\nimport vhts.pydock as pydock\nfrom filelock import FileLock\nimport pandas as pd\n\n\ndef get_job_from_list(list_dir):\n list_file = list_dir + '/list.txt'\n if not os.path.exists(list_file):\n job_idx = None\n return job_idx\n freeze_lock = FileLock('{}.lock'.format(list_file))\n with freeze_lock.acquire(timeout=30):\n with open(list_file, 'r') as fp:\n lines = fp.readlines()\n if len(lines) == 0:\n job_idx = None\n else:\n job_idx = lines[0].strip()\n with open(list_file, 'w') as fp:\n for line in lines[1:]:\n fp.write(line)\n return job_idx\n\n\ndef set_job_from_list(job_idx, list_dir):\n list_file = list_dir + '/list.txt'\n freeze_lock = FileLock('{}.lock'.format(list_file))\n with freeze_lock.acquire(timeout=30):\n if os.path.exists(list_file):\n with open(list_file, 'r') as fp:\n lines = fp.readlines()\n else:\n lines = list()\n with open(list_file, 'w') as fp:\n for line in lines:\n fp.write(line)\n line = job_idx + '\\n'\n fp.write(line)\n return\n\n\ndef remove_job_from_list(job_idx, list_dir):\n list_file = list_dir + '/list.txt'\n freeze_lock = FileLock('{}.lock'.format(list_file))\n with freeze_lock.acquire(timeout=30):\n if os.path.exists(list_file):\n with open(list_file, 'r') as fp:\n lines = fp.readlines()\n with open(list_file, 'w') as fp:\n for line in lines:\n if line.strip() != job_idx:\n fp.write(line)\n return\n\n\ndef get_and_set_job(params_dict):\n todo_dir = params_dict['todo_dir']\n current_dir = params_dict['current_dir']\n job_idx = get_job_from_list(todo_dir)\n if job_idx is None:\n return job_idx\n set_job_from_list(job_idx, current_dir)\n smi_file_format = params_dict['smi_file_format']\n job_todo_file = todo_dir + '/' + job_idx + '.' + smi_file_format\n job_current_file = current_dir + '/' + job_idx + '.' + smi_file_format\n\n os.replace(job_todo_file, job_current_file)\n return job_idx\n\n\ndef run_docking(job_idx, docking_vina, params_dict):\n current_dir = params_dict['current_dir']\n done_dir = params_dict['done_dir']\n field_separator = params_dict['field_separator']\n smi_file_format = params_dict['smi_file_format']\n job_current_file = current_dir + '/' + job_idx + '.' + smi_file_format\n job_done_file = done_dir + '/' + job_idx + '.' 
+ smi_file_format\n if smi_file_format == 'pkl':\n df = pd.read_pickle(job_current_file)\n else:\n df = pd.read_csv(job_current_file, sep=field_separator, header=0)\n\n# num_data = df.shape[0]\n# df.rename(columns={0: 'MOL_IDX', 1: 'MOL_ID', 2: 'SMILES'}, inplace=True)\n smiles_list = df[['MOL_ID', 'SMILES']].values.tolist()\n result_dict = docking_vina.predict(smiles_list)\n affinity_list = result_dict['docking']\n docking_min = [x[0] for x in affinity_list]\n# docking = [x for x in affinity_list]\n docking = affinity_list\n df['Docking1'] = docking_min\n df['Docking'] = docking\n if params_dict['rescoring']:\n rescoring = result_dict['docking_re']\n df['Docking_re'] = rescoring\n\n use_my_module = params_dict['use_my_module']\n my_module_path = params_dict['my_module_path']\n docking_params = params_dict['docking_params']\n\n if use_my_module:\n my_module_dir = os.path.dirname(my_module_path)\n sys.path.append(my_module_dir)\n import my_module\n my_module.my_score_to_df(df, docking_params, result_dict)\n\n sep = field_separator\n if sep == '\\s+':\n sep = ' '\n\n if smi_file_format == 'pkl':\n df.to_pickle(job_done_file)\n else:\n df.to_csv(job_done_file, sep=sep, float_format='%.3f',\n header=True, index=False)\n\n return\n\n\ndef move_done(job_idx, params_dict):\n current_dir = params_dict['current_dir']\n done_dir = params_dict['done_dir']\n remove_job_from_list(job_idx, current_dir)\n set_job_from_list(job_idx, done_dir)\n smi_file_format = params_dict['smi_file_format']\n job_current_file = current_dir + '/' + job_idx + '.' + smi_file_format\n os.remove(job_current_file)\n return job_idx\n\n\ndef working(docking_vina, params_dict):\n pid = os.getpid()\n out_log = params_dict['out_log']\n log_file = params_dict['log_file']\n\n line_out = 'Start sub_dock pid: %d' % (pid)\n if out_log == 'file':\n fp_log = open(log_file, 'w')\n fp_log.write(line_out + '\\n')\n fp_log.flush()\n elif out_log == 'print':\n print(line_out, flush=True)\n\n while True:\n job_idx = get_and_set_job(params_dict)\n line_out = 'get a job: %s' % job_idx\n if out_log == 'file':\n fp_log.write(line_out + '\\n')\n fp_log.flush()\n\n elif out_log == 'print':\n print(line_out, flush=True)\n if job_idx is None:\n line_out = 'End sub_dock pid %d' % pid\n if out_log == 'file':\n fp_log.write(line_out + '\\n')\n fp_log.flush()\n\n elif out_log == 'print':\n print(line_out, flush=True)\n break\n run_docking(job_idx, docking_vina, params_dict)\n move_done(job_idx, params_dict)\n line_out = 'done job: %s' % job_idx\n if out_log == 'file':\n fp_log.write(line_out + '\\n')\n fp_log.flush()\n\n elif out_log == 'print':\n print(line_out, flush=True)\n if out_log == 'file':\n fp_log.close()\n\n return\n\n\nclass LoadFromConfig(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n with values as f:\n parser.parse_args(f.read().split(), namespace)\n\n\nclass ExtendAction(argparse.Action):\n\n def __call__(self, parser, namespace, values, option_string=None):\n items = getattr(namespace, self.dest) or []\n items.extend(values)\n setattr(namespace, self.dest, items)\n\n\ndef parser_arg(parser):\n # vina parameter\n\n parser.register('action', 'extend', ExtendAction)\n parser.add_argument('--arg_file', type=open, required=False, default=None,\n action=LoadFromConfig, help='argment file')\n parser.add_argument('--dock_config', type=str, required=False,\n default=None, help='docking config file ')\n parser.add_argument('-v', '--vina_program', type=str, required=False,\n default='qvina02',\n help='select 
vina, qvina02, or smina')\n parser.add_argument('--my_module', type=str, required=False,\n default=None,\n help='set user python module path (for pifinder)')\n parser.add_argument('--neutralize', action='store_true',\n required=False, help='neutralize smiles ')\n parser.add_argument('--pH', type=float, default=None,\n required=False, help='protonate state for pH 7.4 ')\n parser.add_argument('--output_save', action='store_true', required=False,\n help='default output pdbqt is temp file ')\n parser.add_argument('--gen3d_dir', type=str, required=False, default='tmp',\n help='3d initial conformation directory')\n parser.add_argument('--dock_dir', type=str, required=False,\n default='tmp', help='binding conformation directory')\n parser.add_argument('--num_sub_proc', type=int, required=False,\n default=10, help=' --num_sub_proc 10')\n parser.add_argument('--timeout_gen3d', type=int, required=False,\n default=1, help=' --timeout_gen3d 1')\n parser.add_argument('--timeout_dock', type=int, required=False,\n default=120, help=' --timeout_dock 120')\n parser.add_argument('--tlen', type=int, default='7', required=False,\n help='lenth of sub directory name, default: 7')\n parser.add_argument('--pout', type=int, default='0', required=False,\n help='print processing out: 0 or number, default: 0')\n parser.add_argument('--rescoring_program', type=str, required=False,\n default='smina', help='smina path')\n parser.add_argument('--rescoring_config', type=str, required=False,\n default=None, help='docking config file for rescoring')\n\n return\n\n\ndef arg_to_params(parser):\n\n use_my_module = False\n for i, m in enumerate(sys.argv):\n if m == '--my_module':\n my_module_path = sys.argv[i+1]\n use_my_module = True\n my_module_dir = os.path.dirname(my_module_path)\n sys.path.append(my_module_dir)\n import my_module\n my_module.parser_arg(parser)\n\n args = parser.parse_args()\n\n vina_program = args.vina_program\n num_sub_proc = args.num_sub_proc\n timeout_gen3d = args.timeout_gen3d\n timeout_dock = args.timeout_dock\n output_save = args.output_save\n gen3d_dir = args.gen3d_dir\n dock_dir = args.dock_dir\n dock_config_file = args.dock_config\n\n tlen = args.tlen\n pout = args.pout\n neutralize = args.neutralize\n pH = args.pH\n\n rescoring = False\n rescoring_config_file = args.rescoring_config\n rescoring_program = args.rescoring_program\n if rescoring_config_file is not None:\n rescoring = True\n\n docking_params = dict()\n docking_params['vina_program'] = vina_program\n docking_params['gen3d_dir'] = gen3d_dir\n docking_params['dock_dir'] = dock_dir\n docking_params['num_sub_proc'] = num_sub_proc\n docking_params['timeout_gen3d'] = timeout_gen3d\n docking_params['timeout_dock'] = timeout_dock\n docking_params['output_save'] = output_save\n docking_params['tlen'] = tlen\n docking_params['pout'] = pout\n docking_params['neutralize'] = neutralize\n docking_params['pH'] = pH\n docking_params['dock_config_file'] = dock_config_file\n docking_params['rescoring'] = rescoring\n docking_params['rescoring_program'] = rescoring_program\n docking_params['rescoring_config_file'] = rescoring_config_file\n\n my_module_path = args.my_module\n docking_params['use_my_module'] = use_my_module\n docking_params['my_module_path'] = my_module_path\n\n if use_my_module:\n docking_params = my_module.arg_to_params(parser, docking_params)\n\n return args, docking_params\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='worker for docking')\n parser.add_argument('--work_dir', type=str, required=False,\n 
default='workflow', help='workflow directory')\n parser.add_argument('-s', '--smi_file_format', type=str, required=False,\n default='pkl', help='pkl (default), txt, csv, tsv')\n parser.add_argument('--out_log', type=str, required=False,\n default=None,\n help='print : screen, or file : sub_dock_$PID.log' +\n 'default: do not print output')\n\n parser_arg(parser)\n\n args, docking_params = arg_to_params(parser)\n if len(sys.argv) < 2:\n parser.print_usage()\n sys.exit()\n if args.dock_config is None:\n parser.print_usage()\n print('dock_config is missing')\n sys.exit()\n\n work_dir = args.work_dir\n todo_dir = work_dir + '/todo'\n current_dir = work_dir + '/current'\n done_dir = work_dir + '/done'\n smi_file_format = args.smi_file_format\n if smi_file_format == 'txt':\n field_separator = '\\s+'\n elif smi_file_format == 'csv':\n field_separator = ','\n elif smi_file_format == 'tsv':\n field_separator = '\\t'\n else:\n field_separator = None\n\n out_log = args.out_log\n pid = os.getpid()\n log_file = 'sub_dock_%d.log' % (pid)\n\n docking_vina = pydock.DockingVina(docking_params)\n\n params_dict = dict()\n params_dict['work_dir'] = work_dir\n params_dict['todo_dir'] = todo_dir\n params_dict['current_dir'] = current_dir\n params_dict['done_dir'] = done_dir\n params_dict['field_separator'] = field_separator\n params_dict['smi_file_format'] = smi_file_format\n params_dict['out_log'] = out_log\n params_dict['log_file'] = log_file\n\n params_dict['rescoring'] = docking_params['rescoring']\n params_dict['use_my_module'] = docking_params['use_my_module']\n params_dict['my_module_path'] = docking_params['my_module_path']\n params_dict['docking_params'] = docking_params\n\n working(docking_vina, params_dict)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.read_pickle",
"pandas.read_csv"
]
] |
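The worker above coordinates with its peers purely through `list.txt` files in the todo/current/done directories, each mutation wrapped in a `filelock.FileLock`. A condensed sketch of the critical step, popping the next job id atomically so two workers can never claim the same job (same `filelock` package as the script; paths are illustrative):

```python
import os
from filelock import FileLock

def pop_job(list_dir):
    list_file = os.path.join(list_dir, 'list.txt')
    if not os.path.exists(list_file):
        return None
    # Holding the lock across read + rewrite makes the pop atomic.
    with FileLock(list_file + '.lock').acquire(timeout=30):
        with open(list_file) as fp:
            lines = fp.readlines()
        if not lines:
            return None
        with open(list_file, 'w') as fp:
            fp.writelines(lines[1:])
        return lines[0].strip()
```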
JiayinL/Dropout-Prediction
|
[
"fd1de819579b641ff8c7aa416c1fb5cb6c6a7114"
] |
[
"LR_GRU.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport pandas as pd\nimport pandas_profiling\nfrom sklearn.preprocessing import LabelEncoder\nfrom tqdm import tqdm\nfrom joblib import Parallel,delayed\nimport numpy as np\nimport json\nimport re\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.utils.rnn as rnn_utils\nfrom sklearn.utils import shuffle\n\ndef time_transform(t):\n # 先转换为时间数组\n timeArray = time.strptime(t, \"%Y-%m-%d %H:%M:%S\")\n # 转换为时间戳\n timeStamp = int(time.mktime(timeArray))\n return timeStamp\ndef Z_score(mean_, std_,x):\n return (x-mean_)/std_\ndef max_mean_std(data):\n return np.max(data), np.mean(data), np.std(data)\ndef calculate_acc(predictions, truth):\n hit = 0\n for i in range(len(predictions)):\n if predictions[i] == truth[i]:\n hit = hit +1\n return hit/len(predictions)\n\n\ntrain_set_course_vec = pd.read_csv('train_set_course_vec.csv',converters={'label_list': eval, 'course_vecs_CNN':eval, 'course_vecs_LR':eval})\ntest_set_course_vec = pd.read_csv('test_set_course_vec.csv',converters={'label_list': eval, 'course_vecs_CNN':eval, 'course_vecs_LR':eval})\n\n\ndef training_data_prep():\n course_id = []\n video_id = []\n continues_feature1 = []\n data = train_set_course_vec[['label_list','course_vecs_LR','course_vecs_CNN']]\n\n data = shuffle(data) #Shuffle data\n #get y\n labels = data['label_list'].values.tolist()\n y = [ item for elem in labels for item in elem]\n \n \n \n #get x for LR\n course_info_LR = data['course_vecs_LR'].values.tolist()\n\n course_id_LR = []\n continues_feature1 = []\n for i in range(len(course_info_LR)): #get a course\n c = course_info_LR[i]\n course_cat1 = []\n course_con = []\n for j in range(len(c)): #get a subject\n s = c[j]\n cat_feture1 = s[0] #get course_id and video_id\n course_cat1.append(cat_feture1)\n con_feture = s[1:] #get continues features\n course_con.append(con_feture)\n \n course_id_LR.append(course_cat1) \n continues_feature1.append(course_con)\n \n \n #get x for CNN\n course_info_CNN = data['course_vecs_CNN'].values.tolist()\n course_list = [ item for elem in course_info_CNN for item in elem]\n# print(course_list[0][0])\n course_id_CNN = []\n video_id = []\n continues_feature2 = []\n for i in range(len(course_list)): #get a course\n c = course_list[i]\n course_cat1 = []\n course_cat2 = []\n course_con = []\n for j in range(len(c)): #get a subject\n s = c[j]\n cat_feture1 = s[0] #get course_id and video_id\n cat_feture2 = s[1]\n course_cat1.append(cat_feture1)\n course_cat2.append(cat_feture2)\n con_feture = s[2:] #get continues features\n course_con.append(con_feture)\n if len(course_cat1)<sequence_len:\n length = sequence_len - len(course_cat1)\n temp_course_id = [706] * length\n temp_video_id = [38180] * length\n temp2 = [[0,0,0,0,0,0,0,0,0,0,0,0,0]] * length\n course_cat1 = course_cat1 + temp_course_id\n course_cat2 = course_cat2 + temp_video_id\n course_con = course_con + temp2\n\n course_id_CNN.append(course_cat1) \n video_id.append(course_cat2) \n continues_feature2.append(course_con)\n\n # to tensor\n continues_feature1 = [ item for elem in continues_feature1 for item in elem]\n course_id_LR = [ item for elem in course_id_LR for item in elem]\n \n continues_feature1 = torch.tensor(continues_feature1)\n course_id_LR = torch.tensor(course_id_LR)\n \n \n continues_feature2 = torch.tensor(continues_feature2)\n course_id_CNN = torch.tensor(course_id_CNN)\n video_id = 
torch.tensor(video_id)\n\n y = torch.tensor(y)\n return continues_feature1,continues_feature2,course_id_LR,course_id_CNN,video_id,y\n\n\ndef test_data_prep():\n course_id = []\n video_id = []\n continues_feature = []\n data = test_set_course_vec[['label_list','course_vecs_LR','course_vecs_CNN']]\n\n labels = data['label_list'].values.tolist()\n y = [ item for elem in labels for item in elem]\n \n \n #get x for LR\n course_info_LR = data['course_vecs_LR'].values.tolist()\n\n course_id_LR = []\n continues_feature1 = []\n for i in range(len(course_info_LR)): #get a course\n c = course_info_LR[i]\n course_cat1 = []\n course_con = []\n for j in range(len(c)): #get a subject\n s = c[j]\n cat_feture1 = s[0] #get course_id and video_id\n course_cat1.append(cat_feture1)\n con_feture = s[1:] #get continues features\n course_con.append(con_feture)\n \n course_id_LR.append(course_cat1) \n continues_feature1.append(course_con)\n \n \n #get x for CNN\n course_info_CNN = data['course_vecs_CNN'].values.tolist()\n course_list = [ item for elem in course_info_CNN for item in elem]\n# print(course_list[0][0])\n course_id_CNN = []\n video_id = []\n continues_feature2 = []\n for i in range(len(course_list)): #get a course\n c = course_list[i]\n course_cat1 = []\n course_cat2 = []\n course_con = []\n for j in range(len(c)): #get a subject\n s = c[j]\n cat_feture1 = s[0] #get course_id and video_id\n cat_feture2 = s[1]\n course_cat1.append(cat_feture1)\n course_cat2.append(cat_feture2)\n con_feture = s[2:] #get continues features\n course_con.append(con_feture)\n if len(course_cat1)<sequence_len:\n length = sequence_len - len(course_cat1)\n temp_course_id = [706] * length\n temp_video_id = [38180] * length\n temp2 = [[0,0,0,0,0,0,0,0,0,0,0,0,0]] * length\n course_cat1 = course_cat1 + temp_course_id\n course_cat2 = course_cat2 + temp_video_id\n course_con = course_con + temp2\n\n course_id_CNN.append(course_cat1) \n video_id.append(course_cat2) \n continues_feature2.append(course_con)\n\n # to tensor\n continues_feature1 = [ item for elem in continues_feature1 for item in elem]\n course_id_LR = [ item for elem in course_id_LR for item in elem]\n \n continues_feature1 = torch.tensor(continues_feature1)\n course_id_LR = torch.tensor(course_id_LR)\n \n \n continues_feature2 = torch.tensor(continues_feature2)\n course_id_CNN = torch.tensor(course_id_CNN)\n video_id = torch.tensor(video_id)\n\n y = torch.tensor(y)\n return continues_feature1,continues_feature2,course_id_LR,course_id_CNN,video_id,y\n\n\ndef prediction(course_vecs_LR,course_vecs_CNN):\n course_id = []\n video_id = []\n continues_feature = []\n\n \n \n #get x for LR\n course_info_LR = course_vecs_LR\n\n course_id_LR = []\n continues_feature1 = []\n for i in range(len(course_info_LR)): #get a course\n c = course_info_LR[i]\n \n cat_feture1 = c[0] #get course_id and video_id\n\n con_feture = c[1:] #get continues features\n\n \n course_id_LR.append(cat_feture1) \n continues_feature1.append(con_feture)\n \n \n #get x for CNN\n# course_info_CNN = course_vecs_CNN\n course_list = course_vecs_CNN\n# print(course_list[0][0])\n course_id_CNN = []\n video_id = []\n continues_feature2 = []\n for i in range(len(course_list)): #get a course\n c = course_list[i]\n course_cat1 = []\n course_cat2 = []\n course_con = []\n for j in range(len(c)): #get a subject\n s = c[j]\n cat_feture1 = s[0] #get course_id and video_id\n cat_feture2 = s[1]\n course_cat1.append(cat_feture1)\n course_cat2.append(cat_feture2)\n con_feture = s[2:] #get continues features\n 
course_con.append(con_feture)\n if len(course_cat1)<sequence_len:\n length = sequence_len - len(course_cat1)\n temp_course_id = [706] * length\n temp_video_id = [38180] * length\n temp2 = [[0,0,0,0,0,0,0,0,0,0,0,0,0]] * length\n course_cat1 = course_cat1 + temp_course_id\n course_cat2 = course_cat2 + temp_video_id\n course_con = course_con + temp2\n\n course_id_CNN.append(course_cat1) \n video_id.append(course_cat2) \n continues_feature2.append(course_con)\n\n # to tensor\n continues_feature1 = [ item for elem in continues_feature1 for item in elem]\n course_id_LR = [ item for elem in course_id_LR for item in elem]\n \n continues_feature1 = torch.tensor(continues_feature1)\n course_id_LR = torch.tensor(course_id_LR)\n \n \n continues_feature2 = torch.tensor(continues_feature2)\n course_id_CNN = torch.tensor(course_id_CNN)\n video_id = torch.tensor(video_id)\n\n y = torch.tensor(y)\n return continues_feature1,continues_feature2,course_id_LR,course_id_CNN,video_id,y\n\n\nnb_courses = 706+1\ncourse_emb_size = 5\nnb_videos = 38181+1\nvideo_emb_size = 15\nsequence_len = 70\n# in_channel = \nfeature_size2 = course_emb_size + video_emb_size + 13\nfeature_size1 = course_emb_size + 42\nhidden_dim = 64\nnum_of_lstm_layer = 1\n\n\n\nclass LR_GRU(nn.Module):\n \n def __init__(self):\n super(LR_GRU, self).__init__() \n \n self.course_embedding = torch.nn.Embedding(nb_courses, course_emb_size)\n self.video_embedding = torch.nn.Embedding(nb_videos, video_emb_size)\n \n \n self.bi_gru = nn.GRU(input_size = feature_size2, hidden_size = hidden_dim // 2, num_layers=num_of_lstm_layer, bidirectional=True)\n \n \n self.lr_fc = nn.Linear(feature_size1, 1)\n \n self.fc1 = nn.Linear(hidden_dim, 32)\n self.fc2 = nn.Linear(32, 16)\n self.fc3 = nn.Linear(16, 1)\n \n self.ReLU_activation = nn.ReLU()\n self.tanh_activation = nn.Tanh()\n self.sigmoid_activation = nn.Sigmoid() \n \n self.final_fc = nn.Linear(2, 1)\n\n def forward(self, courseid_LR,courseid_CNN,continuesfeature1,continuesfeature2,videoid):\n \n #course_id (batch_size, max_sen_len)\n #continues (batch_size, max_sen_len, feature_size)\n emb1_LR = self.course_embedding(courseid_LR)\n emb1_CNN = self.course_embedding(courseid_CNN)# (batch_size,max_sen_len, embed_size)\n emb2 = self.video_embedding(videoid)\n \n # LR part\n LR_x = torch.cat([emb1_LR,continuesfeature1], 1)\n LR_result = self.sigmoid_activation(self.lr_fc(LR_x))\n \n #GRU part\n \n GRU_x = torch.cat([emb1_CNN,emb2,continuesfeature2], 2)\n GRU_x = GRU_x.permute(1, 0, 2) # Batch_size * (feature_dim) * max_sen_len\n bigru_out, _ = self.bi_gru(GRU_x) \n output = bigru_out[-1] \n \n info_fusion = self.tanh_activation(self.fc1(output))\n info_fusion = self.tanh_activation(self.fc2(info_fusion)) \n final_out = self.fc3(info_fusion)\n GRU_result = self.sigmoid_activation(final_out)\n \n \n #combine two result\n final_input = torch.cat((LR_result, GRU_result), dim=1)\n result = self.sigmoid_activation(self.final_fc(final_input)) \n \n return result,LR_result,GRU_result\n\n\n\nclass LR_single_direc_GRU(nn.Module):\n \n def __init__(self):\n super(LR_single_direc_GRU, self).__init__() \n \n self.course_embedding = torch.nn.Embedding(nb_courses, course_emb_size)\n self.video_embedding = torch.nn.Embedding(nb_videos, video_emb_size)\n \n \n self.bi_gru = nn.GRU(input_size = feature_size2, hidden_size = hidden_dim , num_layers=num_of_lstm_layer, bidirectional=False)\n \n \n self.lr_fc = nn.Linear(feature_size1, 1)\n \n self.fc1 = nn.Linear(hidden_dim, 16)\n self.fc2 = nn.Linear(16, 8)\n self.fc3 = 
nn.Linear(8, 1)\n \n self.ReLU_activation = nn.ReLU()\n self.tanh_activation = nn.Tanh()\n self.sigmoid_activation = nn.Sigmoid() \n \n self.final_fc = nn.Linear(2, 1)\n\n def forward(self, courseid_LR,courseid_CNN,continuesfeature1,continuesfeature2,videoid):\n \n #course_id (batch_size, max_sen_len)\n #continues (batch_size, max_sen_len, feature_size)\n emb1_LR = self.course_embedding(courseid_LR)\n emb1_CNN = self.course_embedding(courseid_CNN)# (batch_size,max_sen_len, embed_size)\n emb2 = self.video_embedding(videoid)\n \n # LR part\n LR_x = torch.cat([emb1_LR,continuesfeature1], 1)\n LR_result = self.sigmoid_activation(self.lr_fc(LR_x))\n \n #GRU part\n \n GRU_x = torch.cat([emb1_CNN,emb2,continuesfeature2], 2)\n GRU_x = GRU_x.permute(1, 0, 2) # Batch_size * (feature_dim) * max_sen_len\n bigru_out, _ = self.bi_gru(GRU_x) \n output = bigru_out[-1] \n \n info_fusion = self.tanh_activation(self.fc1(output))\n info_fusion = self.tanh_activation(self.fc2(info_fusion)) \n final_out = self.fc3(info_fusion)\n GRU_result = self.sigmoid_activation(final_out)\n \n \n #combine two result\n final_input = torch.cat((LR_result, GRU_result), dim=1)\n result = self.sigmoid_activation(self.final_fc(final_input)) \n \n return result,LR_result,GRU_result\n\n\nmodel = LR_single_direc_GRU()\noptimizer = optim.Adam(model.parameters(), lr=0.001)\n# MSELoss = nn.MSELoss()\nMSELoss = nn.BCELoss()\n\nepoach_count = 15 #40\nbatchSize = 512\nloss_value = []\nacc_value = []\ntimes = []\n\n\nfor m in model.modules():\n if isinstance(m, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(m.weight)\n\n\ntest_continues_feature1,test_continues_feature2,test_course_id_LR,test_course_id_CNN,test_video_id,ground_truth = test_data_prep()\nground_truth = ground_truth.detach().numpy().tolist()\n\n\n\nfor epoach in range(epoach_count):\n start = time.time()\n continues_feature1,continues_feature2,course_id_LR,course_id_CNN,video_id,y = training_data_prep()\n numOfMinibatches = int(len(course_id_CNN) / batchSize) + 1\n numOfLastMinibatch = len(course_id_CNN) % batchSize\n# loss_value = []\n for batchID in range(numOfMinibatches):\n if batchID == numOfMinibatches-1:\n numbOfBatches = numOfLastMinibatch\n else:\n numbOfBatches = batchSize\n leftIndex = batchID * batchSize\n rightIndex = leftIndex + numbOfBatches\n \n \n courseid_LR = course_id_LR[leftIndex: rightIndex].clone().long()\n videoid = video_id[leftIndex: rightIndex].clone().long()\n continuesfeature1 = continues_feature1[leftIndex: rightIndex].clone() \n courseid_CNN = course_id_CNN[leftIndex: rightIndex].clone().long()\n continuesfeature2 = continues_feature2[leftIndex: rightIndex].clone()\n \n predictions,LR_result,GRU_result = model(courseid_LR,courseid_CNN,continuesfeature1,continuesfeature2,videoid)\n \n\n predictions = torch.flatten(predictions)\n LR_result = torch.flatten(LR_result)\n GRU_result = torch.flatten(GRU_result)\n\n loss_final = MSELoss(predictions,y[leftIndex: rightIndex].float())\n loss_lr = MSELoss(LR_result,y[leftIndex: rightIndex].float())\n loss_gru = MSELoss(GRU_result,y[leftIndex: rightIndex].float())\n# print('loss: ',loss)\n loss = loss_final + loss_lr +loss_gru\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_value.append(loss.item())\n \n \n #testing\n \n if(batchID%100==0):\n test_numOfMinibatches = int(len(test_course_id_LR) / batchSize) + 1\n test_numOfLastMinibatch = len(test_course_id_LR) % batchSize\n results = []\n for test_batchID in range(test_numOfMinibatches):\n if test_batchID == test_numOfMinibatches-1:\n 
test_numbOfBatches = test_numOfLastMinibatch\n else:\n test_numbOfBatches = batchSize\n test_leftIndex = test_batchID * batchSize\n test_rightIndex = test_leftIndex + test_numbOfBatches\n \n \n \n test_courseid_LR = test_course_id_LR[test_leftIndex: test_rightIndex].clone().long()\n test_videoid = test_video_id[test_leftIndex: test_rightIndex].clone().long()\n test_continuesfeature1 = test_continues_feature1[test_leftIndex: test_rightIndex].clone()\n test_courseid_CNN = test_course_id_CNN[test_leftIndex: test_rightIndex].clone().long()\n test_continuesfeature2 = test_continues_feature2[test_leftIndex: test_rightIndex].clone()\n \n test_predictions,_,_ = model(test_courseid_LR,test_courseid_CNN,test_continuesfeature1,test_continuesfeature2,test_videoid)\n test_predictions = torch.round(torch.flatten(test_predictions))\n results.append(test_predictions.detach().numpy().tolist())\n result = [ item for elem in results for item in elem]\n# ground_truth = ground_truth.detach().numpy().tolist()\n acc = calculate_acc(result,ground_truth)\n acc_value.append(acc)\n print('Epoch[{}/{}],loss:{:.4f},loss_final:{:.4f},loss_LR:{:.4f},loss_GRU:{:.4f},acc:{:.4f}'.format(epoach, epoach_count,loss.item(),loss_final.item(),loss_lr.item(),loss_gru.item(),acc))\n\n# batchIndex = batchList[leftIndex: rightIndex]\n end = time.time()\n interval = end-start \n times.append(interval) \n print('time:{:.4f}'.format(interval))\n\n\n\ntorch.save(model.state_dict(), 'lr_gru.model')\n\n\n\n"
] |
[
[
"pandas.read_csv",
"torch.cat",
"sklearn.utils.shuffle",
"torch.nn.GRU",
"torch.nn.Embedding",
"torch.tensor",
"torch.nn.BCELoss",
"numpy.max",
"numpy.std",
"torch.nn.Linear",
"numpy.mean",
"torch.nn.Tanh",
"torch.nn.Sigmoid",
"torch.nn.init.xavier_uniform_",
"torch.flatten",
"torch.nn.ReLU"
]
] |
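The record above pads every learner's activity sequence to a fixed length before batching: sentinel IDs (706 for courses, 38180 for videos) and zero vectors for the 13 continuous features are appended until the sequence reaches sequence_len = 70. Note also that prediction() returns a y it never assigns, so calling it as written would raise a NameError. A minimal self-contained sketch of the padding step follows; pad_sequence is a hypothetical helper name, not a function from the original script.

import torch

SEQUENCE_LEN = 70
PAD_COURSE_ID = 706
PAD_VIDEO_ID = 38180
NUM_CONT_FEATURES = 13

def pad_sequence(course_ids, video_ids, cont_features):
    """Right-pad one enrollment sequence to SEQUENCE_LEN, as the
    data-prep functions above do with sentinel IDs and zero vectors."""
    missing = SEQUENCE_LEN - len(course_ids)
    if missing > 0:
        course_ids = course_ids + [PAD_COURSE_ID] * missing
        video_ids = video_ids + [PAD_VIDEO_ID] * missing
        cont_features = cont_features + [[0.0] * NUM_CONT_FEATURES] * missing
    return (torch.tensor(course_ids),
            torch.tensor(video_ids),
            torch.tensor(cont_features))

c, v, f = pad_sequence([3, 17], [120, 4512], [[0.1] * 13, [0.2] * 13])
assert c.shape == (70,) and f.shape == (70, 13)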
SamVanhoutte/azure-time-travel
|
[
"a87a6c96025bea4123859d5f739bbd90ea5fcd30"
] |
[
"ml/EngineFailurePrediction/score.py"
] |
[
"import json\nimport numpy as np\nimport pandas as pd\nimport os\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization\nfrom tensorflow.keras.applications.vgg19 import VGG19\nfrom tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.losses import categorical_crossentropy, binary_crossentropy, cosine_similarity\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n\n\nfrom tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose\nfrom tensorflow.keras.models import Model\n\n\nfrom azureml.core.model import Model\nfrom inference_schema.schema_decorators import input_schema, output_schema\nfrom inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n\nseries_model = None\n\ndef init():\n global series_model\n\n series_model_root = Model.get_model_path('EngineFailurePrediction')\n print('Series Model root:', series_model_root)\n #series_model_file = os.path.join(series_model_root, 'model')\n #print('Series Model file:', series_model_file)\n series_model = tf.keras.models.load_model(series_model_root)\n series_model.compile(loss = ('binary_crossentropy'), optimizer='adam', metrics=['acc'])\n print(series_model.summary())\n\n\ninput_sample = np.random.rand(2, 1, 30,24)\noutput_sample = np.array([[0],[1]])\n\n\n@input_schema('data', NumpyParameterType(input_sample, enforce_shape=False))\n@output_schema(NumpyParameterType(output_sample))\ndef run(data):\n print(type(data))\n\n data = np.array(data)\n\n log_data({\"data shape\": str(data.shape)})\n\n # If one sample is given, we'll reshape to have multiple dimensions\n if(len(data.shape)==2):\n log_data({\"message\": \"Reshaping to 3D array\"})\n data = data.reshape(1, data.shape[0], data.shape[1])\n if(len(data.shape)==4):\n log_data({\"message\": \"Reshaping 4D array to 3D array\"})\n data = data.reshape(data.shape[0], data.shape[2], data.shape[3])\n\n if(len(data.shape)!=3):\n log_data({\"exception\": \"An array of shape (n, 30, 24) is expected as input.\"})\n raise ValueError(\"An array of shape (n, 30, 24) is expected as input.\") \n if(data.shape[2] != 24):\n log_data({\"exception\": \"The time windows should contain 24 features\"})\n raise ValueError(\"The time windows should contain 24 features\")\n if(data.shape[1] < 30):\n return [[0]]\n\n if(data.shape[1] != 30):\n log_data({\"message\": \"Padding with zeroed samples\"})\n data = np.pad(data, ((0, 0),(30 - data.shape[1], 0),(0, 0)), 'constant')\n\n # make prediction\n failure_expected = series_model.predict(data)\n log_data({\"predictions\": str(failure_expected)})\n\n return failure_expected.tolist()\ndef log_data(logging_data: dict):\n print(json.dumps(logging_data))\n"
] |
[
[
"tensorflow.keras.models.load_model",
"numpy.array",
"numpy.random.rand",
"numpy.pad"
]
] |
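run() above normalizes whatever array it receives into shape (n, 30, 24) before calling the Keras model: single samples get a batch dimension, windows shorter than 30 steps are front-padded with zeros, and anything else is rejected. A condensed sketch of that normalization, with normalize_window as a hypothetical name:

import numpy as np

def normalize_window(data, window=30, features=24):
    """Reshape/pad an input array to (n, window, features), mirroring run()."""
    data = np.asarray(data)
    if data.ndim == 2:                       # single sample -> add batch dim
        data = data.reshape(1, *data.shape)
    if data.ndim != 3 or data.shape[2] != features:
        raise ValueError(f"expected (n, {window}, {features}), got {data.shape}")
    if data.shape[1] < window:               # front-pad short windows with zeros
        data = np.pad(data, ((0, 0), (window - data.shape[1], 0), (0, 0)), 'constant')
    return data

x = normalize_window(np.random.rand(12, 24))
assert x.shape == (1, 30, 24)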
enesmsahin/ML_Decoder
|
[
"a6aef44c029d6ee69c507e4609a86093a25f219f",
"a6aef44c029d6ee69c507e4609a86093a25f219f"
] |
[
"save_results_oi.py",
"src_files/models/utils/factory.py"
] |
[
"import json\nimport os\nimport os.path as op\nimport argparse\nimport time\n\nimport torch\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data.distributed\n\nfrom src_files.helper_functions.bn_fusion import fuse_bn_recursively\nfrom src_files.models import create_model\nimport matplotlib\n\nfrom src_files.models.tresnet.tresnet import InplacABN_to_ABN\n\nmatplotlib.use('TkAgg')\nmatplotlib.use('TkAgg')\nfrom PIL import Image\nimport numpy as np\nimport glob\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser(description='PyTorch MS_COCO infer')\nparser.add_argument('--num-classes', default=9605, type=int)\nparser.add_argument('--model-path', type=str, default='downloadedModels/tresnet_m_open_images_200_groups_86_8.pth')\nparser.add_argument('--model-name', type=str, default='tresnet_m')\nparser.add_argument('--image-size', type=int, default=224)\n# parser.add_argument('--dataset-type', type=str, default='MS-COCO')\nparser.add_argument('--th', type=float, default=0.20)\nparser.add_argument('--top-k', type=float, default=20)\n# ML-Decoder\nparser.add_argument('--use-ml-decoder', default=1, type=int)\nparser.add_argument('--num-of-groups', default=200, type=int) # full-decoding\nparser.add_argument('--decoder-embedding', default=768, type=int)\nparser.add_argument('--zsl', default=0, type=int)\nparser.add_argument('--allowed_classes_file', type=str, default=\"open_images_class-descriptions-boxable_v5.csv\", help=\"Open Images CSV File indicating boxable classes\")\nparser.add_argument('--trainable_classes_file', type=str, default=\"oidv6-classes-trainable.txt\")\nparser.add_argument('--all_classes_file', type=str, default=\"oidv6-class-descriptions.csv\")\n\nprediction_tsv_files = (\n # \"/home/deepuser/deepnas/DISK4/DISK4/Enes/data/oi/model_0060000/0/predictions.tsv\",\n # \"/home/deepuser/deepnas/DISK4/DISK4/Enes/data/oi/model_0060000/1/predictions.tsv\",\n # \"/home/deepuser/deepnas/DISK4/DISK4/Enes/data/oi/model_0060000/2/predictions.tsv\",\n # \"/home/deepuser/deepnas/DISK4/DISK4/Enes/data/oi/model_0060000/3/predictions.tsv\",\n \"/home/deepuser/deepnas/DISK4/DISK4/Enes/data/oi/model_0060000/4/predictions.tsv\",\n \"/home/deepuser/deepnas/DISK4/DISK4/Enes/data/oi/model_0060000/5/predictions.tsv\",\n \"/home/deepuser/deepnas/DISK4/DISK4/Enes/data/oi/model_0060000/6/predictions.tsv\",\n \"/home/deepuser/deepnas/DISK4/DISK4/Enes/data/oi/model_0060000/7/predictions.tsv\",\n)\nimg_root_dir = \"/home/deepuser/deepnas/DISK4/DISK4/Enes/IMAGES/open_images/train/\"\n\ndef get_allowed_image_tags_for_open_images(image_tag_file_path: str) -> list:\n \"\"\"Read allowed image tags to a list from the csv file in the format of\n Open Images dataset.\n\n Args:\n image_tag_file_path (str): path to csv file containing allowed image tags\n\n Returns:\n list[str]: list of allowed tags\n \"\"\"\n tags_list = []\n tag_ids_list = []\n with open(image_tag_file_path) as f:\n for line in f:\n tag_id, tag_name = line.split(\",\", maxsplit=1)\n tag_name = tag_name.strip()\n # tag_name = tag_name.split(\"(\")[0].strip() # Remove tags with explanatory parentheses\n tags_list.append(tag_name)\n tag_ids_list.append(tag_id)\n\n return tags_list, tag_ids_list\n\ndef get_orig_trainable_class_names(trainable_class_file_path, all_classes_file_path, out_file=None):\n trainable_cls_ids = []\n with open(trainable_class_file_path, \"r\") as trainable_f:\n for line in trainable_f:\n cls_id = line.strip()\n trainable_cls_ids.append(cls_id)\n\n trainable_cls_names = []\n 
ordered_trainable_cls_ids = []\n \n if out_file is not None:\n with open(out_file, \"w\") as out_f:\n with open(all_classes_file_path, \"r\") as all_f:\n for line_all in all_f:\n cls_id, cls_name = line_all.split(\",\", maxsplit=1)\n if cls_id in trainable_cls_ids:\n out_f.write(line_all)\n trainable_cls_names.append(cls_name.strip().strip(\"\\\"\"))\n ordered_trainable_cls_ids.append(cls_id)\n\n else:\n with open(all_classes_file_path, \"r\") as all_f:\n for line_all in all_f:\n cls_id, cls_name = line_all.split(\",\", maxsplit=1)\n if cls_id in trainable_cls_ids:\n trainable_cls_names.append(cls_name.strip().strip(\"\\\"\"))\n ordered_trainable_cls_ids.append(cls_id)\n\n return trainable_cls_names, ordered_trainable_cls_ids\n\n \ndef save_allowed_classes_list(save_path, class_list):\n with open(save_path, \"w\") as f:\n for cls in class_list:\n f.write(cls + \"\\n\")\n\ndef main():\n print('Inference code on a single image')\n\n # parsing args\n args = parser.parse_args()\n\n # Setup model\n print('creating model {}...'.format(args.model_name))\n model = create_model(args, load_head=True).cuda()\n state = torch.load(args.model_path, map_location='cpu')\n model.load_state_dict(state['model'], strict=True)\n ########### eliminate BN for faster inference ###########\n model = model.cpu()\n model = InplacABN_to_ABN(model)\n model = fuse_bn_recursively(model)\n model = model.cuda().half().eval()\n #######################################################\n print('done')\n\n\n classes_list = np.array(list(state['idx_to_class'].values()))\n print('done\\n')\n\n classes_list_orig, class_ids_list_orig = get_orig_trainable_class_names(args.trainable_classes_file, args.all_classes_file)\n allowed_image_tags, allowed_image_tag_ids = get_allowed_image_tags_for_open_images(args.allowed_classes_file)\n\n allowed_indices_in_classes_list = [class_ids_list_orig.index(allowed_cls_id) for allowed_cls_id in allowed_image_tag_ids if allowed_cls_id in class_ids_list_orig]\n classes_list_allowed = classes_list[allowed_indices_in_classes_list]\n\n # save_allowed_classes_list(\"allowed_classes_523.txt\", classes_list_allowed)\n\n classes_list_allowed = np.array(classes_list_allowed)\n # doing inference\n print('loading image and doing inference...')\n for prediction_tsv_file in tqdm(prediction_tsv_files):\n print(f\"Starting {prediction_tsv_file}\")\n with open(prediction_tsv_file, \"r\") as pred_tsv_f:\n out_path = op.dirname(prediction_tsv_file)\n out_path = op.join(out_path, \"labels\")\n os.makedirs(out_path)\n with open(op.join(out_path, \"label.tsv\"), \"w\") as out_label_tsv_f:\n out_line = \"\"\n lines = pred_tsv_f.readlines()\n for line in tqdm(lines):\n img_id, curr_preds = line.split(\"\\t\")\n img_path = op.join(img_root_dir, img_id + \".jpg\")\n try:\n im = Image.open(img_path)\n except FileNotFoundError:\n raise Exception(f\"Image Not Found: {img_path}\")\n\n if im.mode == 'CMYK':\n im = im.convert('RGB')\n \n im_resize = im.resize((args.image_size, args.image_size))\n\n np_img = np.array(im_resize, dtype=np.uint8)\n if im_resize.mode == \"L\":\n np_img = np.repeat(np_img[..., np.newaxis], 3, -1)\n \n if (np_img.shape[-1] != 3):\n raise Exception(f\"Img is not 3-channel: {img_path}. 
{np_img.shape}\")\n\n tensor_img = torch.from_numpy(np_img).permute(2, 0, 1).float() / 255.0 # HWC to CHW\n tensor_batch = torch.unsqueeze(tensor_img, 0).cuda().half() # float16 inference\n output = torch.squeeze(torch.sigmoid(model(tensor_batch)))\n np_output = output.cpu().detach().numpy()\n\n ## Top-k predictions\n # detected_classes = classes_list[np_output > args.th]\n # idx_sort = np.argsort(-np_output)\n # detected_classes = np.array(classes_list)[idx_sort][: args.top_k]\n # scores = np_output[idx_sort][: args.top_k]\n # idx_th = scores > args.th\n # detected_classes = detected_classes[idx_th]\n \n # im.show()\n np_output_allowed = np_output[allowed_indices_in_classes_list]\n idx_sort = np.argsort(-np_output_allowed)\n detected_classes = classes_list_allowed[idx_sort]\n scores = np_output_allowed[idx_sort]\n idx_th = scores > args.th\n final_detected_classes = detected_classes[idx_th]\n final_detected_scores = scores[idx_th]\n\n if len(final_detected_classes) == 0:\n print(\"*\" * 10)\n print(f\"Detected classes is zero for {img_path}.\")\n print(f\"Max score: {scores.max()}\")\n final_detected_classes = [detected_classes[np.argmax(scores)]]\n print(final_detected_classes)\n\n # line = img_id + \"\\t\" + \"[\"\n # for detected_class, score in zip(final_detected_classes, final_detected_scores):\n # line += f\"{{\\\"class\\\":\\\"{detected_class}\\\",\\\"conf\\\":{score}}},\"\n # line = line.rstrip(\",\") + \"]\\n\"\n # out_file.write(line)\n out_json = []\n for detected_class, score in zip(final_detected_classes, final_detected_scores):\n out_dict = {\n \"class\": detected_class,\n \"conf\": score.astype(float)\n }\n out_json.append(out_dict)\n\n out_line += img_id + \"\\t\" + json.dumps(out_json) + \"\\n\"\n \n out_label_tsv_f.write(out_line)\n\n print(f\"Finished {prediction_tsv_file}\")\n\n\nif __name__ == '__main__':\n main()\n",
"import logging\nimport os\nfrom urllib import request\n\nimport torch\n\nfrom ...ml_decoder.ml_decoder import add_ml_decoder_head\n\nlogger = logging.getLogger(__name__)\n\nfrom ..tresnet import TResnetM, TResnetL, TResnetXL\n\n\ndef create_model(args,load_head=False):\n \"\"\"Create a model\n \"\"\"\n model_params = {'args': args, 'num_classes': args.num_classes}\n args = model_params['args']\n args.model_name = args.model_name.lower()\n\n if args.model_name == 'tresnet_m':\n model = TResnetM(model_params)\n elif args.model_name == 'tresnet_l':\n model = TResnetL(model_params)\n elif args.model_name == 'tresnet_xl':\n model = TResnetXL(model_params)\n else:\n print(\"model: {} not found !!\".format(args.model_name))\n exit(-1)\n\n ####################################################################################\n if args.use_ml_decoder:\n model = add_ml_decoder_head(model,num_classes=args.num_classes,num_of_groups=args.num_of_groups,\n decoder_embedding=args.decoder_embedding, zsl=args.zsl)\n ####################################################################################\n # loading pretrain model\n model_path = args.model_path\n if args.model_name == 'tresnet_l' and os.path.exists(\"./tresnet_l.pth\"):\n model_path = \"./tresnet_l.pth\"\n if model_path: # make sure to load pretrained model\n if not os.path.exists(model_path):\n print(\"downloading pretrain model...\")\n request.urlretrieve(args.model_path, \"./tresnet_l.pth\")\n model_path = \"./tresnet_l.pth\"\n print('done')\n state = torch.load(model_path, map_location='cpu')\n if not load_head:\n if 'model' in state:\n key = 'model'\n else:\n key = 'state_dict'\n filtered_dict = {k: v for k, v in state[key].items() if\n (k in model.state_dict() and 'head.fc' not in k)}\n model.load_state_dict(filtered_dict, strict=False)\n else:\n model.load_state_dict(state['model'], strict=True)\n\n return model\n"
] |
[
[
"torch.load",
"matplotlib.use",
"torch.from_numpy",
"torch.unsqueeze",
"numpy.argmax",
"numpy.argsort",
"numpy.repeat",
"numpy.array"
],
[
"torch.load"
]
] |
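The inference loop in save_results_oi.py restricts the sigmoid outputs to the allowed class subset, sorts them, keeps everything above the threshold (--th, default 0.20), and falls back to the single highest-scoring class when nothing clears the bar. The same selection logic in isolation; select_labels is an illustrative name, not part of the repo:

import numpy as np

def select_labels(scores, class_names, threshold=0.20):
    """Sort class scores descending, keep those above threshold; if none
    survive, fall back to the single best class, as the script above does."""
    order = np.argsort(-scores)
    sorted_scores = scores[order]
    keep = sorted_scores > threshold
    if not keep.any():
        return [class_names[order[0]]], [float(sorted_scores[0])]
    return list(np.asarray(class_names)[order][keep]), sorted_scores[keep].tolist()

names = np.array(["cat", "dog", "tree"])
labels, confs = select_labels(np.array([0.05, 0.9, 0.3]), names)
assert labels == ["dog", "tree"]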
rknightly/neural-mnist
|
[
"42189508f6e41b8b8f93d30fc78bd598c7d47dc5"
] |
[
"Train Size MNIST MLP.py"
] |
[
"from __future__ import print_function\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import RMSprop\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Prepare Data\nbatch_size = 128\nnum_classes = 10\nepochs = 20\n\n# the data, split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train = x_train.reshape(60000, 784)\nx_test = x_test.reshape(10000, 784)\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint(x_train.shape[0], 'Total train samples')\nprint(x_test.shape[0], 'Total test samples')\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\ndef test_model(sample_count):\n model = Sequential()\n model.add(Dense(512, activation='relu', input_shape=(784,)))\n model.add(Dense(512, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n\n x_train_small = x_train[:][:sample_count]\n y_train_small = y_train[:][:sample_count]\n print('Training Shape:', x_train_small.shape)\n\n model.summary()\n\n model.compile(loss='categorical_crossentropy',\n optimizer=RMSprop(),\n metrics=['accuracy'])\n\n training = model.fit(x_train_small, y_train_small,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\n\n\n score = model.evaluate(x_test, y_test, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n # Convert to percent\n training.history['acc'] = 100.0 * np.array(training.history['acc']) \n training.history['val_acc'] = 100.0 * np.array(training.history['val_acc']) \n \n # Start the accuracy at 10% before training started\n training.history['acc'] = np.insert(training.history['acc'], 0, 10.0)\n training.history['val_acc'] = np.insert(training.history['val_acc'], 0, 10.0)\n \n return training\n\nsixty = test_model(60)\nsix_hundred = test_model(600)\nsix_thousand = test_model(6000)\nsixty_thousand = test_model(60000)\n\n# Plot Results\nhistories = [sixty.history, six_hundred.history, six_thousand.history, sixty_thousand.history]\n\n# Bar Graphs\nplt.figure(0)\nplt.title('Training Data Size vs. Training Accuracy')\nplt.ylabel('Training Accuracy %')\nplt.grid(axis='y', linestyle='dashed')\nbar_titles = ('60 Samples', '600 Samples', '6,000 Samples', '60,000 Samples')\naccuracy = [history['acc'][-1] for history in histories]\nx_nums = np.arange(len(bar_titles))\nplt.bar(x_nums, accuracy, align='center')\nplt.xticks(x_nums, bar_titles)\n\nplt.figure(1)\nplt.title('Training Data Size vs. Validation Accuracy')\nplt.ylabel('Training Accuracy %')\nplt.grid(axis='y', linestyle='dashed')\nbar_titles = ('60 Samples', '600 Samples', '6,000 Samples', '60,000 Samples')\naccuracy = [history['val_acc'][-1] for history in histories]\nx_nums = np.arange(len(bar_titles))\nplt.bar(x_nums, accuracy, align='center')\nplt.xticks(x_nums, bar_titles)\n\n# Line graphs\nx_data = np.arange(0, epochs+1)\n\nplt.figure(2)\nplt.title(\"Train Accuracy Vs. Epoch\")\nplt.ylabel('Accuracy') \nplt.xlabel('Epoch')\nplt.grid(linestyle='dashed')\nplt.xlim(1, epochs)\nplt.xticks(np.arange(0, epochs+1, step=2.0))\nfor history in histories:\n plt.plot(x_data, history['acc'])\nplt.legend(['60 Samples', '600 Samples', '6,000 Samples', '60,000 Samples'], loc='lower right') \n\nplt.figure(3)\nplt.title(\"Validation Accuracy Vs. 
Epoch\")\nplt.ylabel('Accuracy') \nplt.xlabel('Epoch')\nplt.grid(linestyle='dashed')\nplt.xlim(1, epochs)\nplt.xticks(np.arange(0, epochs+1, step=2.0))\nfor history in histories:\n plt.plot(x_data, history['val_acc'])\nplt.legend(['60 Samples', '600 Samples', '6,000 Samples', '60,000 Samples'], loc='lower right') \n\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"numpy.insert",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"numpy.array",
"matplotlib.pyplot.ylabel"
]
] |
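Before plotting, the script converts the Keras history to percentages and uses np.insert to prepend a 10% chance-level point (one in ten classes) so every curve starts at epoch 0, before any training. The trick in miniature:

import numpy as np

acc = np.array([85.2, 91.0, 93.4])          # per-epoch accuracy, in percent
acc = np.insert(acc, 0, 10.0)               # prepend the untrained baseline
epochs_axis = np.arange(0, len(acc))        # 0..n, aligned with the new point
assert acc[0] == 10.0 and len(epochs_axis) == len(acc)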
NoVarlok/sova-tts-vocoder
|
[
"1d44a7247341e404e503fb0de386af5f16d36806"
] |
[
"convert_model.py"
] |
[
"# *****************************************************************************\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# *****************************************************************************\nimport sys\nimport copy\nimport torch\n\n\ndef _check_model_old_version(model):\n if hasattr(model.WN[0], 'res_layers') or hasattr(model.WN[0], 'cond_layers'):\n return True\n else:\n return False\n\n\ndef _update_model_res_skip(old_model, new_model):\n for idx in range(0, len(new_model.WN)):\n wavenet = new_model.WN[idx]\n n_channels = wavenet.n_channels\n n_layers = wavenet.n_layers\n wavenet.res_skip_layers = torch.nn.ModuleList()\n for i in range(0, n_layers):\n if i < n_layers - 1:\n res_skip_channels = 2 * n_channels\n else:\n res_skip_channels = n_channels\n res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)\n skip_layer = torch.nn.utils.remove_weight_norm(wavenet.skip_layers[i])\n if i < n_layers - 1:\n res_layer = torch.nn.utils.remove_weight_norm(wavenet.res_layers[i])\n res_skip_layer.weight = torch.nn.Parameter(torch.cat([res_layer.weight, skip_layer.weight]))\n res_skip_layer.bias = torch.nn.Parameter(torch.cat([res_layer.bias, skip_layer.bias]))\n else:\n res_skip_layer.weight = torch.nn.Parameter(skip_layer.weight)\n res_skip_layer.bias = torch.nn.Parameter(skip_layer.bias)\n res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')\n wavenet.res_skip_layers.append(res_skip_layer)\n del wavenet.res_layers\n del wavenet.skip_layers\n\n\ndef _update_model_cond(old_model, new_model):\n for idx in range(0, len(new_model.WN)):\n wavenet = new_model.WN[idx]\n n_channels = wavenet.n_channels\n n_layers = wavenet.n_layers\n n_mel_channels = wavenet.cond_layers[0].weight.shape[1]\n cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels * n_layers, 1)\n cond_layer_weight = []\n cond_layer_bias = []\n for i in range(0, n_layers):\n _cond_layer = torch.nn.utils.remove_weight_norm(wavenet.cond_layers[i])\n cond_layer_weight.append(_cond_layer.weight)\n 
cond_layer_bias.append(_cond_layer.bias)\n cond_layer.weight = torch.nn.Parameter(torch.cat(cond_layer_weight))\n cond_layer.bias = torch.nn.Parameter(torch.cat(cond_layer_bias))\n cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')\n wavenet.cond_layer = cond_layer\n del wavenet.cond_layers\n\n\ndef update_model(old_model):\n if not _check_model_old_version(old_model):\n return old_model\n new_model = copy.deepcopy(old_model)\n if hasattr(old_model.WN[0], 'res_layers'):\n _update_model_res_skip(old_model, new_model)\n if hasattr(old_model.WN[0], 'cond_layers'):\n _update_model_cond(old_model, new_model)\n return new_model\n\n\nif __name__ == '__main__':\n old_model_path = sys.argv[1]\n new_model_path = sys.argv[2]\n model = torch.load(old_model_path, map_location='cpu')\n model['model'] = update_model(model['model'])\n torch.save(model, new_model_path)\n"
] |
[
[
"torch.nn.Parameter",
"torch.load",
"torch.cat",
"torch.nn.utils.weight_norm",
"torch.nn.ModuleList",
"torch.nn.utils.remove_weight_norm",
"torch.nn.Conv1d",
"torch.save"
]
] |
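_update_model_cond above folds WaveGlow's per-layer cond_layers into a single Conv1d by concatenating their weights and biases along the output-channel axis. This is sound because every layer convolves the same input, so a fused conv with stacked output channels reproduces the concatenation of the individual layers' outputs. A quick equivalence check (the 80/16/3 sizes here are arbitrary, not taken from the model):

import torch

torch.manual_seed(0)
layers = [torch.nn.Conv1d(80, 16, 1) for _ in range(3)]
fused = torch.nn.Conv1d(80, 16 * 3, 1)
fused.weight = torch.nn.Parameter(torch.cat([l.weight for l in layers]))
fused.bias = torch.nn.Parameter(torch.cat([l.bias for l in layers]))

x = torch.randn(2, 80, 100)
expected = torch.cat([l(x) for l in layers], dim=1)
assert torch.allclose(fused(x), expected, atol=1e-6)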
akshaykurmi/neural-networks-from-scratch
|
[
"54d62d9f5adb102d14267a922a515fa79bf52bd6",
"54d62d9f5adb102d14267a922a515fa79bf52bd6"
] |
[
"nnfs/initializers/random.py",
"nnfs/initializers/he.py"
] |
[
"import numpy as np\n\n\nclass RandomUniform:\n def __init__(self, min_value=-0.05, max_value=0.05, seed=None):\n self.min_value = min_value\n self.max_value = max_value\n np.random.seed(seed)\n\n def initialize(self, shape):\n return np.random.uniform(self.min_value, self.max_value, shape)\n\n\nclass RandomNormal:\n def __init__(self, mean=0.0, standard_deviation=0.05, seed=None):\n self.mean = mean\n self.standard_deviation = standard_deviation\n np.random.seed(seed)\n\n def initialize(self, shape):\n return np.random.normal(self.mean, self.standard_deviation, shape)\n",
"import numpy as np\n\n\nclass HeUniform:\n @staticmethod\n def initialize(shape):\n fan_in = shape[0] if len(shape) == 2 else shape[1] * np.prod(shape[2:])\n scale = np.sqrt(6.0 / fan_in)\n return np.random.uniform(-scale, scale, shape)\n\n\nclass HeNormal:\n @staticmethod\n def initialize(shape):\n fan_in = shape[0] if len(shape) == 2 else shape[1] * np.prod(shape[2:])\n scale = np.sqrt(2.0 / fan_in)\n return np.random.randn(*shape) * scale\n"
] |
[
[
"numpy.random.uniform",
"numpy.random.normal",
"numpy.random.seed"
],
[
"numpy.random.uniform",
"numpy.random.randn",
"numpy.sqrt",
"numpy.prod"
]
] |
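Both He initializers compute fan_in as shape[0] for a 2-D (fan_in, fan_out) dense kernel and as in_channels times the kernel area otherwise, then scale so the output variance stays roughly constant across layers (variance 2/fan_in for the normal variant). A sanity check that the sampled standard deviation matches the target:

import numpy as np

shape = (64, 32, 3, 3)                       # (out, in, kh, kw) conv kernel
fan_in = shape[1] * np.prod(shape[2:])       # 32 * 9 = 288
w = np.random.randn(*shape) * np.sqrt(2.0 / fan_in)
assert abs(w.std() - np.sqrt(2.0 / fan_in)) < 0.01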
ArgentLo/PPNW-KAIS
|
[
"5e69bdd3a3b9c5f90134663a52696094d8b54b15"
] |
[
"pretrain.py"
] |
[
"import argparse\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom util.gmf import PairwiseGMF\nfrom util.helper import BaseConfig\nfrom util.data import Dataset\n\nfrom util.evaluation import evaluate_model, get_eval, get_model_scores\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('-g', '--gpu', help='set gpu device number 0-3', type=str, required=True)\nparser.add_argument('--iters', help='Max iters', type=int, default=15)\nparser.add_argument('-b', '--batch_size', help='Batch Size', type=int, default=128)\nparser.add_argument('-e', '--embedding', help='Embedding Size', type=int, default=50)\nparser.add_argument('--dataset', help='path to npz file', type=str, required=True)\nparser.add_argument('-n', '--neg', help='Negative Samples Count', type=int, default=4) # neg_ratio\nparser.add_argument('--l2', help='l2 Regularization', type=float, default=0.001)\nparser.add_argument('-o', '--output', help='save filename for trained embeddings', type=str,\n required=True)\n\nparser.add_argument('--use_unpop_weight', default=False, type=bool, help='Use unpopularity matching.')\nFLAGS = parser.parse_args()\nos.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu\n\nclass Config(BaseConfig):\n filename = FLAGS.dataset\n embed_size = FLAGS.embedding\n batch_size = FLAGS.batch_size\n l2 = FLAGS.l2\n user_count = -1\n item_count = -1\n optimizer = 'adam'\n neg_count = FLAGS.neg\n learning_rate = 0.001\n use_unpop_weight = FLAGS.use_unpop_weight\n \nconfig = Config()\ndataset = Dataset(config.filename, config.use_unpop_weight)\nconfig.item_count = dataset.item_count\nconfig.user_count = dataset.user_count\nitem_popularity = dataset._item_popularity ####\nitem_unpop = dataset.item_unpop ####\nuser_items = dataset.user_items ####\ntf.logging.info(msg=\"\\n\\n%s\\n\\n\" % config)\n\nmodel = PairwiseGMF(config)\nsv = tf.train.Supervisor(logdir=None, save_model_secs=0, save_summaries_secs=0)\nsess = sv.prepare_or_wait_for_session(\n config=tf.ConfigProto(gpu_options=tf.GPUOptions(\n per_process_gpu_memory_fraction=0.1,\n allow_growth=True)))\n\nfor i in range(FLAGS.iters):\n if sv.should_stop():\n break\n progress = tqdm(enumerate(dataset.get_data(FLAGS.batch_size, False, FLAGS.neg)),\n dynamic_ncols=True, total=(dataset.train_size * FLAGS.neg) // FLAGS.batch_size)\n loss = []\n \n # for eval\n item_users_list, input_neighborhoods, input_neighborhood_lengths, dropout, max_neighbors = None, None, None, None, None\n evaluate_model(sess, dataset.test_data, item_users_list, model.input_users, model.input_items,\n input_neighborhoods, input_neighborhood_lengths,\n dropout, model.score, max_neighbors, EVAL_AT=[1, 5, 10],\n item_popularity=item_popularity, item_unpop=item_unpop, user_items=user_items)\n \n ### In GMF, only batch_example outputted from Dataset \n ### example (Triplet) : [user_idx, item_idx, neg_item_idx]\n for k, example in progress:\n \n ratings, pos_unpop_mtc, neg_unpop_mtc = example \n\n feed = {\n model.input_users: ratings[:, 0], # user_id\n model.input_items: ratings[:, 1], # item_id\n model.input_items_negative: ratings[:, 2], # neg_item_id\n \n model.pos_unpop_mtc: pos_unpop_mtc,\n model.neg_unpop_mtc: neg_unpop_mtc,\n }\n batch_loss, _ = sess.run([model.loss, model.train], feed)\n loss.append(batch_loss)\n progress.set_description(u\"[{}] Loss: {:,.4f} » » » » \".format(i, batch_loss))\n\n print(\"Epoch {}: Avg Loss/Batch {:<20,.6f}\".format(i, np.mean(loss)))\n 
\n############################################### EVALUATION ##################################################\n\nEVAL_AT = [1, 5, 10]\nhrs, ndcgs, longtails, coverages, unpop_scores = [], [], [], [], []\ns = '\\n===> Novelty-Adaptive Weighting\\n'\npra_hrs, pra_ndcgs, pra_longtails, pra_coverages, pra_unpop_scores= [], [], [], [], []\npra_s = '\\n===> PRA_Re-Ranking\\n'\n\n# for eval\nitem_users_list, input_neighborhoods, input_neighborhood_lengths, dropout, max_neighbors = None, None, None, None, None \n\nscores, out, item_indices, usr_pra_samples = get_model_scores(sess, dataset.test_data, item_users_list, model.input_users, model.input_items,\n input_neighborhoods, input_neighborhood_lengths,\n dropout, model.score, max_neighbors, user_items, True)\n\nfor k in EVAL_AT:\n hr, ndcg, longtail, coverage, unpop_score, pra_hr, pra_ndcg, pra_longtail, pra_coverage, pra_unpop_score = get_eval(scores, len(scores[0]) - 1, k, \n item_indices=item_indices, \n item_popularity=item_popularity, \n item_unpop=item_unpop,\n usr_pra_samples=usr_pra_samples)\n hrs.append(hr)\n ndcgs.append(ndcg)\n longtails.append(longtail)\n coverages.append(coverage)\n unpop_scores.append(unpop_score)\n pra_hrs.append(pra_hr)\n pra_ndcgs.append(pra_ndcg)\n pra_longtails.append(pra_longtail)\n pra_coverages.append(pra_coverage)\n pra_unpop_scores.append(pra_unpop_score)\n\n s += \"{:<9} {:<9.4f}{:<9} {:<9.4f}{:<9} {:<9.4f}{:<9} {:<9.4f}{:<9} {:<9.4f}\\n\".format('HR@%s'%k, hr, 'NDCG@%s'%k, ndcg, \n 'L_Tail@%s'%k, longtail, 'Coverage@%s'%k, coverage,\n 'Unpop_score@%s'%k, unpop_score)\n \n pra_s += \"{:<9} {:<9.4f}{:<9} {:<9.4f}{:<9} {:<9.4f}{:<9} {:<9}{:<9} {:<9.4f}\\n\".format('HR@%s'%k, pra_hr, 'NDCG@%s'%k, pra_ndcg, \n 'L_Tail@%s'%k, pra_longtail, 'Coverage@%s'%k, 'NA',\n 'Unpop_score@%s'%k, pra_unpop_score)\n\nprint(s, pra_s)\n\nuser_embed, item_embed, v = sess.run([model.user_memory.embeddings, model.item_memory.embeddings, model.v.w])\nnp.savez(FLAGS.output, user=user_embed, item=item_embed, v=v)\nprint('Saving to: %s' % FLAGS.output)\nsv.request_stop()\n"
] |
[
[
"numpy.savez",
"numpy.mean",
"tensorflow.GPUOptions",
"tensorflow.logging.info",
"tensorflow.train.Supervisor"
]
] |
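pretrain.py feeds PairwiseGMF batches of (user_id, item_id, neg_item_id) triplets produced by the Dataset class with FLAGS.neg negatives per positive. The Dataset implementation is not shown here, so the following is only a generic sketch of how such triplets are commonly sampled, producing the same column layout as the ratings array above:

import numpy as np

def sample_triplets(user_items, item_count, neg_per_pos=4, rng=None):
    """Yield (user, pos_item, neg_item) rows; user_items maps a user id to
    the set of item ids that user has interacted with."""
    rng = rng or np.random.default_rng(0)
    rows = []
    for user, items in user_items.items():
        for pos in items:
            for _ in range(neg_per_pos):
                neg = rng.integers(item_count)
                while neg in items:              # resample until truly negative
                    neg = rng.integers(item_count)
                rows.append((user, pos, neg))
    return np.array(rows)

triplets = sample_triplets({0: {1, 2}, 1: {0}}, item_count=10)
assert triplets.shape[1] == 3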
ctralie/Math412S2017
|
[
"27b32eabf079a7f8bc5dadf1acbc1e8442cfa639"
] |
[
"MusicSpeech.py"
] |
[
"\"\"\"\nProgrammer: Chris Tralie ([email protected])\nPurpose: To show how TDA can be used to quantify how periodic\nan audio clip is. Simple example with music versus speech.\nShow how doing a delay embedding on raw audio is a bad idea when\nthe length of the period is on the order of seconds, and how\n\"audio novelty functions\" come in handy\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nimport scipy.interpolate as interp\n\nfrom TDA import *\nfrom SlidingWindow import *\nfrom MusicFeatures import *\nimport scipy.io.wavfile\n\nif __name__ == '__main__':\n #Don't Stop Believing\n FsMusic, XMusic = scipy.io.wavfile.read(\"journey.wav\") \n FsSpeech, XSpeech = scipy.io.wavfile.read(\"speech.wav\")\n \n #Step 1: Try a raw delay embedding\n #Note that dim*Tau here spans a half a second of audio, \n #since Fs is the sample rate\n dim = round(FsMusic/200)\n Tau = 100\n dT = FsMusic/100\n Y = getSlidingWindowInteger(XMusic[0:FsMusic*3], dim, Tau, dT)\n #Mean-center and normalize\n Y = Y - np.mean(Y, 1)[:, None]\n Y = Y/np.sqrt(np.sum(Y**2, 1))[:, None]\n\n PDs = doRipsFiltration(Y, 1)\n pca = PCA()\n Z = pca.fit_transform(Y)\n\n plt.figure(figsize=(12, 6))\n plt.subplot(121)\n plt.title(\"2D PCA Raw Audio Embedding\")\n plt.scatter(Z[:, 0], Z[:, 1])\n plt.subplot(122)\n plotDGM(PDs[1])\n plt.title(\"Persistence Diagram\")\n \n \n #Step 2: Do sliding window on audio novelty functions\n #(sliding window of sliding windows!)\n hopSize = 512\n \n #First do audio novelty function on music\n novFnMusic = getAudioNovelty(XMusic, FsMusic, hopSize)\n dim = 20\n #Make sure the window size is half of a second, noting that\n #the audio novelty function has been downsampled by a \"hopSize\" factor\n Tau = (FsMusic/2)/(float(hopSize)*dim)\n dT = 1\n Y = getSlidingWindowInteger(novFnMusic, dim, Tau, dT)\n print(\"Y.shape = \", Y.shape)\n #Mean-center and normalize\n Y = Y - np.mean(Y, 1)[:, None]\n Y = Y/np.sqrt(np.sum(Y**2, 1))[:, None]\n\n PDs = doRipsFiltration(Y, 1)\n pca = PCA()\n Z = pca.fit_transform(Y)\n\n plt.figure(figsize=(12, 6))\n plt.subplot(121)\n plt.title(\"2D PCA Music Novelty Function Sliding Window\")\n plt.scatter(Z[:, 0], Z[:, 1])\n plt.subplot(122)\n plotDGM(PDs[1])\n plt.title(\"Persistence Diagram\")\n\n \n #Now do audio novelty function on speech\n novFnSpeech = getAudioNovelty(XSpeech, FsSpeech, hopSize)\n dim = 20\n #Make sure the window size is half of a second, noting that\n #the audio novelty function has been downsampled by a \"hopSize\" factor\n Tau = (FsSpeech/2)/(float(hopSize)*dim)\n dT = 1\n Y = getSlidingWindowInteger(novFnSpeech, dim, Tau, dT)\n print(\"Y.shape = \", Y.shape)\n #Mean-center and normalize\n Y = Y - np.mean(Y, 1)[:, None]\n Y = Y/np.sqrt(np.sum(Y**2, 1))[:, None]\n\n PDs = doRipsFiltration(Y, 1)\n pca = PCA()\n Z = pca.fit_transform(Y)\n\n plt.figure(figsize=(12, 6))\n plt.subplot(121)\n plt.title(\"2D PCA Speech Novelty Function Sliding Window\")\n plt.scatter(Z[:, 0], Z[:, 1])\n plt.subplot(122)\n plotDGM(PDs[1])\n plt.title(\"Persistence Diagram\")\n plt.show()\n \n"
] |
[
[
"numpy.sum",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.mean",
"matplotlib.pyplot.show",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.figure"
]
] |
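The script relies on getSlidingWindowInteger from the SlidingWindow module (not shown) to build the delay embedding, then mean-centers each window and projects it onto the unit sphere before the Rips filtration. A plain-numpy stand-in under the assumption of integer tau and dt; the original presumably also handles the fractional Tau values computed above:

import numpy as np

def sliding_window(x, dim, tau, dt):
    """Delay embedding: row i holds x[i*dt + k*tau] for k = 0..dim-1,
    mean-centered and normalized to the unit sphere."""
    n = (len(x) - (dim - 1) * tau - 1) // dt + 1   # number of full windows
    idx = np.arange(dim) * tau                     # delays within one window
    starts = np.arange(n) * dt
    Y = x[starts[:, None] + idx[None, :]].astype(float)
    Y = Y - Y.mean(axis=1, keepdims=True)
    return Y / np.sqrt((Y ** 2).sum(axis=1, keepdims=True))

Y = sliding_window(np.sin(np.linspace(0, 20 * np.pi, 2000)), dim=20, tau=5, dt=10)
assert np.allclose((Y ** 2).sum(axis=1), 1.0)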
kourgeorge/deep_anomalies
|
[
"04352a2a49197ec8cf3a2165014d79e3cf0511be"
] |
[
"auto_encoder/train_autoencoder_all.py"
] |
[
"import auto_encoder.network as network\nimport tensorflow as tf\nimport utils\nimport numpy as np\n\nnum_epochs = 30\nbatch_size = 100\nmodel_path = \"./model/model.ckpt\"\n\nsaver = tf.train.Saver()\n\n\n\nwith tf.Session() as sess:\n sess.run(network.init)\n num_iterations = int(utils.mnist.train.num_examples / batch_size)\n\n for epoch in range(num_epochs):\n ep_error = 0\n for i in range(num_iterations):\n xs, ys = utils.mnist2.train.next_batch(batch_size)\n\n sess.run(network.opt, feed_dict={network.X: xs})\n\n test_error, summary, cons_error, l2_regul_error = sess.run(\n [network.cost, network.merged_summary_op, network.cons_error, network.l2_regul_error],\n feed_dict={network.X: utils.mnist2.test.images})\n train_error = sess.run(network.cost,\n feed_dict={network.X: utils.mnist2.train.images})\n\n # Write logs at every iteration\n network.summary_writer.add_summary(summary, epoch)\n\n print(\"Epoch: \", str(epoch), \" Training error: \", train_error / utils.mnist.train.num_examples, \" Testing error: \",\n test_error / utils.mnist.test.num_examples, \" Cons error: \", cons_error, \" l2_regularization error: \",\n l2_regul_error)\n\n save_path = saver.save(sess, model_path)\n print(\"Model saved in file: %s\" % save_path)\n"
] |
[
[
"tensorflow.train.Saver",
"tensorflow.Session"
]
] |
swapnil96/ML-NeuralNetworks
|
[
"025b6990e4a43989e865ca72425958cfd33a06b5"
] |
[
"Convolutional Neural Networks/ensemble/ensemble.py"
] |
[
"import numpy as np\nfrom keras.models import Sequential, Model, Input\nfrom keras.models import load_model\nfrom sklearn.utils import shuffle\nfrom keras.utils.np_utils import to_categorical\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Average\nfrom keras.optimizers import Adam\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.utils import np_utils\nfrom keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.preprocessing.image import ImageDataGenerator\nimport os\nimport keras.backend as k\n\n# Striving for Simplicity: The All Convolutional Net\ndef conv_pool_cnn(model_input):\n\n x = Conv2D(96, kernel_size=(3, 3), activation='relu', padding='same')(model_input)\n x = Conv2D(96, (3, 3), activation='relu', padding='same')(x)\n x = Conv2D(96, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)\n x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)\n x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)\n x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)\n x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)\n x = Conv2D(192, (1, 1), activation='relu')(x)\n x = Conv2D(20, (1, 1))(x)\n x = GlobalAveragePooling2D()(x)\n x = Activation(activation='softmax')(x)\n\n model = Model(model_input, x, name='conv_pool_cnn')\n\n return model\n\n# Striving for Simplicity: The All Convolutional Net\ndef all_cnn(model_input):\n\n x = Conv2D(\n 96, kernel_size=(3, 3), activation='relu', padding='same')(model_input)\n x = Conv2D(96, (3, 3), activation='relu', padding='same')(x)\n x = Conv2D(96, (3, 3), activation='relu', padding='same', strides=2)(x)\n x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)\n x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)\n x = Conv2D(192, (3, 3), activation='relu', padding='same', strides=2)(x)\n x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)\n x = Conv2D(192, (1, 1), activation='relu')(x)\n x = Conv2D(20, (1, 1))(x)\n x = GlobalAveragePooling2D()(x)\n x = Activation(activation='softmax')(x)\n\n model = Model(model_input, x, name='all_cnn')\n\n return model\n\n# Striving for Simplicity: The All Convolutional Net\ndef nin_cnn(model_input):\n\n #mlpconv block 1\n x = Conv2D(32, (5, 5), activation='relu',padding='valid')(model_input)\n x = Conv2D(32, (1, 1), activation='relu')(x)\n x = Conv2D(32, (1, 1), activation='relu')(x)\n x = MaxPooling2D((2,2))(x)\n x = Dropout(0.5)(x)\n\n #mlpconv block2\n x = Conv2D(64, (3, 3), activation='relu',padding='valid')(x)\n x = Conv2D(64, (1, 1), activation='relu')(x)\n x = Conv2D(64, (1, 1), activation='relu')(x)\n x = MaxPooling2D((2,2))(x)\n x = Dropout(0.5)(x)\n\n #mlpconv block3\n x = Conv2D(128, (3, 3), activation='relu',padding='valid')(x)\n x = Conv2D(32, (1, 1), activation='relu')(x)\n x = Conv2D(20, (1, 1))(x)\n\n x = GlobalAveragePooling2D()(x)\n x = Activation(activation='softmax')(x)\n\n model = Model(model_input, x, name='nin_cnn')\n\n return model\n\n# https://medium.com/randomai/ensemble-and-store-models-in-keras-2-x-b881a6d7693f\ndef ensembleModels(models, model_input):\n # collect outputs of models in a list\n yModels=[model(model_input) for model in models]\n\n # averaging outputs\n yAvg=Average()(yModels)\n\n # build model from same input and avg output\n modelEns = Model(inputs=model_input, outputs=yAvg, 
name='ensemble')\n\n return modelEns\n\nfinal = {}\n\ndef read_data(link):\n\n files = os.listdir(link)\n files.sort()\n idx = 0\n for file1 in files:\n now = link + file1\n final[idx] = file1.split(\".\")[0]\n if idx == 0:\n train = np.load(now)\n m_lbl = np.array([idx] * train.shape[0])\n\n else:\n temp1 = np.load(now)\n temp3 = np.array([idx] * temp1.shape[0])\n train = np.vstack([train, temp1])\n m_lbl = np.hstack([m_lbl, temp3])\n\n idx += 1\n\n print(final)\n print(train.shape)\n return train, m_lbl\n\n\ntrain, m_lbl = read_data(\"../../col-774-spring-2018/train/\")\ntest = np.load(\"../../col-774-spring-2018/test/test.npy\")\n\ntrain_x, m_lbl = shuffle(train, m_lbl, random_state=0)\ntrain_y = to_categorical(m_lbl, num_classes=20)\n\n# train_x -= 255\n# test -= 255\n\ntrain_x = np.divide(train_x, 255)\ntest_x = np.divide(test, 255)\n\ntrain_x.resize(train_x.shape[0], 28, 28, 1)\ntest_x.resize(test_x.shape[0], 28, 28, 1)\n\nmodel_input = Input(shape=(28, 28, 1))\n\n#\tc_model = conv_pool_cnn(model_input)\n#\tall_cnn_model = all_cnn(model_input)\n\nconv_pool_model = load_model('conv_pool.h5')\nall_cnn_model = load_model('all_cnn.h5')\nvgg_aug_model = load_model('vgg_aug.h5')\nvgg_model = load_model('vgg.h5')\n\nmodels=[]\n\nmodels.append(conv_pool_model)\nmodels.append(all_cnn_model)\nmodels.append(vgg_aug_model)\nmodels.append(vgg_model)\n\nmodel_input = Input(shape=models[0].input_shape[1:]) # c*h*w\nmodelEns = ensembleModels(models, model_input)\nmodelEns.summary()\n\n\npred = modelEns.predict(train_x, verbose=1)\npred = np.argmax(pred, axis=1)\nerror = np.sum(np.not_equal(pred, train_y))\nprint(error)\n\npredicted_classes = modelEns.predict(test_x, verbose=1)\npredicted_classes = np.argmax(predicted_classes, axis=-1)\nwith open(\"submit_fire.csv\", \"w\") as outfile:\n outfile.writelines(\"ID,CATEGORY\\n\")\n for i in range(predicted_classes.shape[0]):\n outfile.writelines(str(i) + ',' + final[predicted_classes[i]] + '\\n')\n\n print(\"done\")\n"
] |
[
[
"numpy.hstack",
"sklearn.utils.shuffle",
"numpy.vstack",
"numpy.argmax",
"numpy.not_equal",
"numpy.load",
"numpy.array",
"numpy.divide"
]
] |
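ensembleModels above averages the member networks' softmax outputs with a keras Average() layer; the same operation in plain numpy is an element-wise mean across models, which remains a valid probability distribution. One caveat in the script: pred holds argmax class indices while train_y is one-hot, so np.not_equal(pred, train_y) compares mismatched shapes; comparing against np.argmax(train_y, axis=1) would give the intended training-error count.

import numpy as np

preds = [np.array([[0.7, 0.2, 0.1]]),        # per-model softmax outputs,
         np.array([[0.5, 0.4, 0.1]]),        # shape (n_samples, n_classes)
         np.array([[0.6, 0.3, 0.1]])]
avg = np.mean(preds, axis=0)                 # element-wise mean across models
assert np.allclose(avg.sum(axis=1), 1.0)     # still a valid distribution
labels = np.argmax(avg, axis=1)              # final ensemble prediction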
michiboo/jax
|
[
"7083b0a78edd8f2e88abe3f395ee0f51ac915082"
] |
[
"jax/abstract_arrays.py"
] |
[
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as onp\nimport six\n\nfrom . import core\nfrom . import ad_util\nfrom . import dtypes\nfrom . util import prod, partialmethod\n\n\ndef concretization_err_msg(fun):\n fname = getattr(fun, \"__name__\", fun)\n msg = (\"Abstract value passed to `{}`, which requires a concrete value. \"\n \"The function to be transformed can't be traced at the required level \"\n \"of abstraction. If using `jit`, try using `static_argnums` or \"\n \"applying `jit` to smaller subfunctions instead.\")\n return msg.format(fname)\n\ndef concretization_function_error(fun):\n def error(self, *args):\n raise TypeError(concretization_err_msg(fun))\n return error\n\n\nclass UnshapedArray(core.AbstractValue):\n __slots__ = ['dtype', 'weak_type']\n array_abstraction_level = 2\n\n def __init__(self, dtype, weak_type=False):\n self.dtype = onp.dtype(dtypes.canonicalize_dtype(dtype))\n self.weak_type = weak_type\n\n def __eq__(self, other):\n return (type(self) is type(other) and self.dtype == other.dtype and\n self.weak_type == other.weak_type)\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n # can use hash(self.dtype) and rely on the fact that numpy reuses base dtype\n # objects, e.g. 
`onp.zeros(3).dtype is onp.zeros(4).dtype`, or we can use\n # the unique character code via hash(self.dtype.char)\n return hash((self.dtype, self.weak_type))\n\n def __repr__(self):\n return '{}({}{})'.format(self.__class__.__name__, self.str_short(),\n \", weak_type=True\" if self.weak_type else \"\")\n\n _bool = _nonzero = concretization_function_error(bool)\n _float = concretization_function_error(float)\n _int = concretization_function_error(int)\n if six.PY2:\n _long = concretization_function_error(long) # noqa: F821\n _complex = concretization_function_error(complex)\n _hex = concretization_function_error(hex)\n _oct = concretization_function_error(oct)\n\n def at_least_vspace(self):\n return self\n\n def join(self, other):\n if self.dtype == other.dtype:\n if self.weak_type == other.weak_type:\n return self\n else:\n return UnshapedArray(self.dtype, weak_type=False)\n else:\n raise TypeError(self, other)\n\n def str_short(self):\n return self.dtype.name\n\n def strip_weak_type(self):\n \"\"\"Returns a copy of the aval with weak_type=False.\"\"\"\n return UnshapedArray(self.dtype) if self.weak_type else self\n\n\nclass ShapedArray(UnshapedArray):\n __slots__ = ['shape']\n array_abstraction_level = 1\n\n def __init__(self, shape, dtype, weak_type=False):\n super(ShapedArray, self).__init__(dtype, weak_type=weak_type)\n self.shape = shape\n\n ndim = property(lambda self: len(self.shape))\n size = property(lambda self: prod(self.shape))\n\n def __eq__(self, other):\n return (type(self) is type(other)\n and self.dtype == other.dtype and self.shape == other.shape\n and self.weak_type == other.weak_type)\n\n def __hash__(self):\n # can use hash(self.dtype) and rely on the fact that numpy reuses base dtype\n # objects, e.g. `onp.zeros(3).dtype is onp.zeros(4).dtype`, or we can use\n # the unique character code via hash(self.dtype.char)\n return hash((self.shape, self.dtype, self.weak_type))\n\n def at_least_vspace(self):\n return self\n\n def join(self, other):\n if self.shape == other.shape and self.dtype == other.dtype:\n if self.weak_type == other.weak_type:\n return self\n else:\n return ShapedArray(self.shape, self.dtype, weak_type=False)\n elif self.dtype == other.dtype:\n return UnshapedArray(self.dtype)\n else:\n raise TypeError(self, other)\n\n def str_short(self):\n shapestr = ','.join(map(str, self.shape))\n return '{}[{}]'.format(self.dtype.name, shapestr)\n\n def __len__(self):\n try:\n return self.shape[0]\n except IndexError:\n raise TypeError(\"len() of unsized object\") # same as numpy error\n\n def _len(self, ignored_tracer):\n return len(self)\n\n def strip_weak_type(self):\n return ShapedArray(self.shape, self.dtype) if self.weak_type else self\n\ndef _forward_to_value(self, fun, ignored_tracer, *args):\n return fun(self.val, *args)\n\nclass ConcreteArray(ShapedArray):\n __slots__ = ['val']\n array_abstraction_level = 0\n\n def __init__(self, val, weak_type=False):\n super(ConcreteArray, self).__init__(onp.shape(val), onp.result_type(val),\n weak_type=weak_type)\n # Note: canonicalized self.dtype doesn't necessarily match self.val\n self.val = val\n assert self.dtype != onp.dtype('O')\n\n def __eq__(self, other):\n return (type(self) is type(other) and self.dtype == other.dtype\n and self.shape == other.shape and self.weak_type == other.weak_type\n and onp.all(self.val == other.val))\n\n def __hash__(self):\n return id(self.val)\n\n def at_least_vspace(self):\n return ShapedArray(self.shape, self.dtype, weak_type=self.weak_type)\n\n def join(self, other):\n if self == 
other:\n return self\n elif self.shape == other.shape and self.dtype == other.dtype:\n return ShapedArray(self.shape, self.dtype,\n weak_type=self.weak_type and other.weak_type)\n elif self.dtype == other.dtype:\n return UnshapedArray(self.dtype,\n weak_type=self.weak_type and other.weak_type)\n else:\n raise TypeError(self, other)\n\n def str_short(self):\n return str(self.val)\n\n def strip_weak_type(self):\n return ConcreteArray(self.val) if self.weak_type else self\n\n _bool = _nonzero = partialmethod(_forward_to_value, bool)\n _float = partialmethod(_forward_to_value, float)\n _int = partialmethod(_forward_to_value, int)\n if six.PY2:\n _long = partialmethod(_forward_to_value, long) # noqa: F821\n _complex = partialmethod(_forward_to_value, complex)\n _hex = partialmethod(_forward_to_value, hex)\n _oct = partialmethod(_forward_to_value, oct)\n\nclass AbstractToken(core.AbstractValue): pass\n\nabstract_token = AbstractToken()\n\n\ndef make_shaped_array(x):\n dtype = dtypes.canonicalize_dtype(dtypes.result_type(x))\n return ShapedArray(onp.shape(x), dtype)\n\ndef zeros_like_array(x):\n dtype = dtypes.canonicalize_dtype(dtypes.result_type(x))\n return onp.broadcast_to(onp.array(0, dtype), onp.shape(x))\n\narray_types = {onp.ndarray, onp.bool_,\n onp.int8, onp.int16, onp.int32, onp.int64,\n onp.uint8, onp.uint16, onp.uint32, onp.uint64,\n dtypes.bfloat16, onp.float16, onp.float32, onp.float64,\n onp.complex64, onp.complex128,\n onp.longlong}\n\nfor t in array_types:\n core.pytype_aval_mappings[t] = ConcreteArray\n ad_util.jaxval_zeros_likers[t] = zeros_like_array\n\n\ndef zeros_like_shaped_array(aval):\n assert isinstance(aval, ShapedArray)\n return onp.zeros(aval.shape, dtype=aval.dtype)\n\nad_util.aval_zeros_likers[ShapedArray] = zeros_like_shaped_array\n\ndef raise_to_shaped(aval):\n if isinstance(aval, ShapedArray):\n return ShapedArray(aval.shape, aval.dtype)\n elif aval is core.abstract_unit:\n return core.abstract_unit\n elif aval is abstract_token:\n return abstract_token\n else:\n raise TypeError(type(aval))\n\ncore.literalable_types.update(array_types)\n\ndef make_abstract_python_scalar(x):\n return ShapedArray((), dtypes.python_scalar_dtypes[type(x)],\n weak_type=True)\n\ndef _zeros_like_python_scalar(x):\n return onp.array(0, dtypes.python_scalar_dtypes[type(x)])\n\ndef _make_concrete_python_scalar(x):\n return ConcreteArray(\n onp.array(x, dtype=dtypes.python_scalar_dtypes[type(x)]),\n weak_type=True)\n\nfor t in dtypes.python_scalar_dtypes.keys():\n core.pytype_aval_mappings[t] = _make_concrete_python_scalar\n ad_util.jaxval_zeros_likers[t] = _zeros_like_python_scalar\n\ncore.literalable_types.update(dtypes.python_scalar_dtypes.keys())\n"
] |
[
[
"numpy.dtype",
"numpy.all",
"numpy.result_type",
"numpy.shape",
"numpy.array",
"numpy.zeros"
]
] |
PidgeyBE/ray
|
[
"7a2d7964d8f944bd60c1f03d58d6cc190c7a7015"
] |
[
"rllib/agents/ddpg/tests/test_td3.py"
] |
[
"import numpy as np\nimport unittest\n\nimport ray.rllib.agents.ddpg.td3 as td3\nfrom ray.rllib.utils.framework import try_import_tf\nfrom ray.rllib.utils.test_utils import check, check_compute_single_action, \\\n framework_iterator\n\ntf1, tf, tfv = try_import_tf()\n\n\nclass TestTD3(unittest.TestCase):\n def test_td3_compilation(self):\n \"\"\"Test whether a TD3Trainer can be built with both frameworks.\"\"\"\n config = td3.TD3_DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 0 # Run locally.\n\n # Test against all frameworks.\n for _ in framework_iterator(config, frameworks=[\"tf\"]):\n trainer = td3.TD3Trainer(config=config, env=\"Pendulum-v0\")\n num_iterations = 2\n for i in range(num_iterations):\n results = trainer.train()\n print(results)\n check_compute_single_action(trainer)\n\n def test_td3_exploration_and_with_random_prerun(self):\n \"\"\"Tests TD3's Exploration (w/ random actions for n timesteps).\"\"\"\n config = td3.TD3_DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 0 # Run locally.\n obs = np.array([0.0, 0.1, -0.1])\n\n # Test against all frameworks.\n for _ in framework_iterator(config, frameworks=\"tf\"):\n lcl_config = config.copy()\n # Default GaussianNoise setup.\n trainer = td3.TD3Trainer(config=lcl_config, env=\"Pendulum-v0\")\n # Setting explore=False should always return the same action.\n a_ = trainer.compute_action(obs, explore=False)\n for _ in range(50):\n a = trainer.compute_action(obs, explore=False)\n check(a, a_)\n # explore=None (default: explore) should return different actions.\n actions = []\n for _ in range(50):\n actions.append(trainer.compute_action(obs))\n check(np.std(actions), 0.0, false=True)\n trainer.stop()\n\n # Check randomness at beginning.\n lcl_config[\"exploration_config\"] = {\n # Act randomly at beginning ...\n \"random_timesteps\": 30,\n # Then act very closely to deterministic actions thereafter.\n \"stddev\": 0.001,\n \"initial_scale\": 0.001,\n \"final_scale\": 0.001,\n }\n trainer = td3.TD3Trainer(config=lcl_config, env=\"Pendulum-v0\")\n # ts=1 (get a deterministic action as per explore=False).\n deterministic_action = trainer.compute_action(obs, explore=False)\n # ts=2-5 (in random window).\n random_a = []\n for _ in range(29):\n random_a.append(trainer.compute_action(obs, explore=True))\n check(random_a[-1], deterministic_action, false=True)\n self.assertTrue(np.std(random_a) > 0.5)\n\n # ts > 30 (a=deterministic_action + scale * N[0,1])\n for _ in range(50):\n a = trainer.compute_action(obs, explore=True)\n check(a, deterministic_action, rtol=0.1)\n\n # ts >> 30 (BUT: explore=False -> expect deterministic action).\n for _ in range(50):\n a = trainer.compute_action(obs, explore=False)\n check(a, deterministic_action)\n trainer.stop()\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n"
] |
[
[
"numpy.std",
"numpy.array"
]
] |
denisuzhva/ML_task2
|
[
"49220c370256be66a7e3eb98ae069259aa2f48fc"
] |
[
"Src/Evaluator.py"
] |
[
"import numpy as np\r\n\r\n\r\n\r\ndef mseMetric(fx_batch, z_batch):\r\n batch_size = fx_batch.shape[0]\r\n metric = np.sum(np.square(z_batch - fx_batch)) / batch_size\r\n return metric\r\n\r\n\r\ndef rmseMetric(fx_batch, z_batch):\r\n batch_size = fx_batch.shape[0]\r\n metric = np.sum(np.square(z_batch - fx_batch)) / batch_size\r\n return np.sqrt(metric)\r\n\r\n\r\ndef r2Metric(fx_batch, z_batch):\r\n batch_size = fx_batch.shape[0]\r\n mse_metric = np.sum(np.square(z_batch - fx_batch)) / batch_size\r\n metric = 1 - mse_metric / (np.sum(np.square(z_batch - np.mean(z_batch))) / batch_size)\r\n return metric\r\n\r\n\r\n# with regularization\r\ndef mseMetricReg(fx_batch, z_batch, weights, order=2):\r\n batch_size = fx_batch.shape[0]\r\n metric = np.sum(np.square(z_batch - fx_batch)) / batch_size\r\n metric = metric + np.linalg.norm(weights, order)\r\n return metric\r\n\r\n\r\ndef rmseMetricReg(fx_batch, z_batch, weights, order=2):\r\n batch_size = fx_batch.shape[0]\r\n metric = np.sum(np.square(z_batch - fx_batch)) / batch_size\r\n metric = metric + np.linalg.norm(weights, order)\r\n return np.sqrt(metric)\r\n\r\n\r\ndef r2MetricReg(fx_batch, z_batch, weights, order=2):\r\n batch_size = fx_batch.shape[0]\r\n mse_metric = np.sum(np.square(z_batch - fx_batch)) / batch_size\r\n mse_metric = mse_metric + np.linalg.norm(weights, order)\r\n metric = 1 - mse_metric / (np.sum(np.square(z_batch - np.mean(z_batch))) / batch_size)\r\n return metric\r\n\r\n\r\n"
] |
[
[
"numpy.square",
"numpy.mean",
"numpy.linalg.norm",
"numpy.sqrt"
]
] |
Corie96/fairseq-dev
|
[
"24be7a63db5de078f81487544385feb51aea9453"
] |
[
"generate.py"
] |
[
"#!/usr/bin/env python3 -u\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\"\"\"\nTranslate pre-processed data with a trained model.\n\"\"\"\n\nimport os\nimport torch\n\nfrom fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils\nfrom fairseq.meters import StopwatchMeter, TimeMeter\n\n\ndef main(args):\n assert args.path is not None, '--path required for generation!'\n assert not args.sampling or args.nbest == args.beam, \\\n '--sampling requires --nbest to be equal to --beam'\n assert args.replace_unk is None or args.raw_text, \\\n '--replace-unk requires a raw text dataset (--raw-text)'\n\n utils.import_user_module(args)\n\n if args.max_tokens is None and args.max_sentences is None:\n args.max_tokens = 12000\n print(args)\n\n use_cuda = torch.cuda.is_available() and not args.cpu\n\n # Load dataset splits\n task = tasks.setup_task(args)\n task.load_dataset(args.gen_subset)\n\n # Set dictionaries\n try:\n src_dict = getattr(task, 'source_dictionary', None)\n except NotImplementedError:\n src_dict = None\n tgt_dict = task.target_dictionary\n\n # Load ensemble\n print('| loading model(s) from {}'.format(args.path))\n models, _model_args = checkpoint_utils.load_model_ensemble(\n args.path.split(':'),\n arg_overrides=eval(args.model_overrides),\n task=task,\n )\n\n # Optimize ensemble for generation\n for model in models:\n model.make_generation_fast_(\n beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,\n need_attn=args.print_alignment,\n )\n if args.fp16:\n model.half()\n if use_cuda:\n model.cuda()\n\n # Load alignment dictionary for unknown word replacement\n # (None if no unknown word replacement, empty if no path to align dictionary)\n align_dict = utils.load_align_dict(args.replace_unk)\n\n # Load dataset (possibly sharded)\n itr = task.get_batch_iterator(\n dataset=task.dataset(args.gen_subset),\n max_tokens=args.max_tokens,\n max_sentences=args.max_sentences,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n *[model.max_positions() for model in models]\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n num_shards=args.num_shards,\n shard_id=args.shard_id,\n num_workers=args.num_workers,\n ).next_epoch_itr(shuffle=False)\n\n # Initialize generator\n gen_timer = StopwatchMeter()\n generator = task.build_generator(args)\n\n # Generate and compute BLEU score\n if args.sacrebleu_zh:\n scorer = bleu.SacrebleuScorer(zh=True)\n elif args.sacrebleu:\n scorer = bleu.SacrebleuScorer()\n else:\n scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())\n num_sentences = 0\n has_target = True\n with progress_bar.build_progress_bar(args, itr) as t:\n wps_meter = TimeMeter()\n for sample in t:\n sample = utils.move_to_cuda(sample) if use_cuda else sample\n if 'net_input' not in sample:\n continue\n\n prefix_tokens = None\n if args.prefix_size > 0:\n prefix_tokens = sample['target'][:, :args.prefix_size]\n\n gen_timer.start()\n hypos = task.inference_step(generator, models, sample, prefix_tokens)\n num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)\n gen_timer.stop(num_generated_tokens)\n\n for i, sample_id in enumerate(sample['id'].tolist()):\n has_target = sample['target'] is not None\n\n # 
Remove padding\n src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad())\n target_tokens = None\n if has_target:\n target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu()\n\n # Either retrieve the original sentences or regenerate them from tokens.\n if align_dict is not None:\n src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)\n target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)\n else:\n if src_dict is not None:\n src_str = src_dict.string(src_tokens, args.remove_bpe)\n else:\n src_str = \"\"\n if has_target:\n target_str = tgt_dict.string(target_tokens, args.remove_bpe, escape_unk=True)\n\n if not args.quiet:\n if src_dict is not None:\n print('S-{}\\t{}'.format(sample_id, src_str))\n if has_target:\n print('T-{}\\t{}'.format(sample_id, target_str))\n\n # Process top predictions\n for i, hypo in enumerate(hypos[i][:min(len(hypos), args.nbest)]):\n hypo_tokens, hypo_str, alignment = utils.post_process_prediction(\n hypo_tokens=hypo['tokens'].int().cpu(),\n src_str=src_str,\n alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,\n align_dict=align_dict,\n tgt_dict=tgt_dict,\n remove_bpe=args.remove_bpe,\n )\n\n if not args.quiet:\n print('H-{}\\t{}\\t{}'.format(sample_id, hypo['score'], hypo_str))\n print('P-{}\\t{}'.format(\n sample_id,\n ' '.join(map(\n lambda x: '{:.4f}'.format(x),\n hypo['positional_scores'].tolist(),\n ))\n ))\n\n if args.print_alignment:\n print('A-{}\\t{}'.format(\n sample_id,\n ' '.join(map(lambda x: str(utils.item(x)), alignment))\n ))\n\n # Score only the top hypothesis\n if has_target and i == 0:\n if align_dict is not None or args.remove_bpe is not None:\n # Convert back to tokens for evaluation with unk replacement and/or without BPE\n target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)\n if hasattr(scorer, 'add_string'):\n scorer.add_string(target_str, hypo_str)\n else:\n scorer.add(target_tokens, hypo_tokens)\n\n wps_meter.update(num_generated_tokens)\n t.log({'wps': round(wps_meter.avg)})\n num_sentences += sample['nsentences']\n\n print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(\n num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))\n if has_target:\n bleu_obj = scorer.result_string()\n print('| Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, bleu_obj))\n print('{0:.{1}f}'.format(bleu_obj.score, 1))\n if args.rename:\n os.rename(args.path, args.path[:-3] + '_{0:.{1}f}'.format(bleu_obj.score, 1) + '.pt')\n return scorer\n\n\ndef cli_main():\n parser = options.get_generation_parser()\n args = options.parse_args_and_arch(parser)\n main(args)\n\n\nif __name__ == '__main__':\n cli_main()\n"
] |
[
[
"torch.cuda.is_available"
]
] |
ZichaoGuo/PaddleSlim
|
[
"2550fb4ec86aee6155c1c8a2c9ab174e239918a3"
] |
[
"demo/unstructured_prune/train.py"
] |
[
"import os\nimport sys\nimport logging\nimport paddle\nimport argparse\nimport functools\nimport time\nimport numpy as np\nimport paddle.fluid as fluid\nfrom paddleslim.prune.unstructured_pruner import UnstructuredPruner, GMPUnstructuredPruner\nfrom paddleslim.common import get_logger\nsys.path.append(os.path.join(os.path.dirname(\"__file__\"), os.path.pardir))\nimport models\nfrom utility import add_arguments, print_arguments\nimport paddle.vision.transforms as T\nfrom paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy\nfrom paddle.fluid.incubate.fleet.base import role_maker\n\n_logger = get_logger(__name__, level=logging.INFO)\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('use_gpu', bool, True, \"Whether to use gpu for traning or not. Defauly: True\")\nadd_arg('batch_size', int, 64, \"Minibatch size. Default: 64\")\nadd_arg('batch_size_for_validation', int, 64, \"Minibatch size for validation. Default: 64\")\nadd_arg('model', str, \"MobileNet\", \"The target model.\")\nadd_arg('pretrained_model', str, None, \"Whether to use pretrained model. Default: None\")\nadd_arg('checkpoint', str, None, \"The model to load for resuming training. Default: None\")\nadd_arg('lr', float, 0.1, \"The learning rate used to fine-tune pruned model. Default: 0.1\")\nadd_arg('lr_strategy', str, \"piecewise_decay\", \"The learning rate decay strategy. Default: piecewise_decay\")\nadd_arg('l2_decay', float, 3e-5, \"The l2_decay parameter. Default: 3e-5\")\nadd_arg('momentum_rate', float, 0.9, \"The value of momentum_rate. Default: 0.9\")\nadd_arg('pruning_strategy', str, 'base', \"The pruning strategy, currently we support base and gmp. Default: base\")\nadd_arg('threshold', float, 0.01, \"The threshold to set zeros, the abs(weights) lower than which will be zeros. Default: 0.01\")\nadd_arg('pruning_mode', str, 'ratio', \"the pruning mode: whether by ratio or by threshold. Default: ratio\")\nadd_arg('ratio', float, 0.55, \"The ratio to set zeros, the smaller portion will be zeros. Default: 0.55\")\nadd_arg('num_epochs', int, 120, \"The number of total epochs. Default: 120\")\nparser.add_argument('--step_epochs', nargs='+', type=int, default=[30, 60, 90], help=\"piecewise decay step\")\nadd_arg('data', str, \"imagenet\", \"Which data to use. 'mnist' or 'imagenet'. Default: imagenet\")\nadd_arg('log_period', int, 100, \"Log period in batches. Default: 100\")\nadd_arg('test_period', int, 5, \"Test period in epoches. Default: 5\")\nadd_arg('model_path', str, \"./models\", \"The path to save model. Default: ./models\")\nadd_arg('model_period', int, 10, \"The period to save model in epochs. Default: 10\")\nadd_arg('last_epoch', int, -1, \"The last epoch we could train from. Default: -1\")\nadd_arg('stable_epochs', int, 0, \"The epoch numbers used to stablize the model before pruning. Default: 0\")\nadd_arg('pruning_epochs', int, 60, \"The epoch numbers used to prune the model by a ratio step. Default: 60\")\nadd_arg('tunning_epochs', int, 60, \"The epoch numbers used to tune the after-pruned models. Default: 60\")\nadd_arg('pruning_steps', int, 120, \"How many times you want to increase your ratio during training. Default: 120\")\nadd_arg('initial_ratio', float, 0.15, \"The initial pruning ratio used at the start of pruning stage. Default: 0.15\")\nadd_arg('prune_params_type', str, None, \"Which kind of params should be pruned, we only support None (all but norms) and conv1x1_only for now. 
Default: None\")\n# yapf: enable\n\nmodel_list = models.__all__\n\n\ndef piecewise_decay(args, step_per_epoch):\n bd = [step_per_epoch * e for e in args.step_epochs]\n lr = [args.lr * (0.1**i) for i in range(len(bd) + 1)]\n last_iter = (1 + args.last_epoch) * step_per_epoch\n learning_rate = paddle.optimizer.lr.PiecewiseDecay(\n boundaries=bd, values=lr, last_epoch=last_iter)\n\n optimizer = paddle.optimizer.Momentum(\n learning_rate=learning_rate,\n momentum=args.momentum_rate,\n weight_decay=paddle.regularizer.L2Decay(args.l2_decay))\n return optimizer, learning_rate\n\n\ndef cosine_decay(args, step_per_epoch):\n last_iter = (1 + args.last_epoch) * step_per_epoch\n learning_rate = paddle.optimizer.lr.CosineAnnealingDecay(\n learning_rate=args.lr,\n T_max=args.num_epochs * step_per_epoch,\n last_epoch=last_iter)\n optimizer = paddle.optimizer.Momentum(\n learning_rate=learning_rate,\n momentum=args.momentum_rate,\n weight_decay=paddle.regularizer.L2Decay(args.l2_decay))\n return optimizer, learning_rate\n\n\ndef create_optimizer(args, step_per_epoch):\n if args.lr_strategy == \"piecewise_decay\":\n return piecewise_decay(args, step_per_epoch)\n elif args.lr_strategy == \"cosine_decay\":\n return cosine_decay(args, step_per_epoch)\n\n\ndef create_unstructured_pruner(train_program, args, place, configs):\n if configs is None:\n return UnstructuredPruner(\n train_program,\n mode=args.pruning_mode,\n ratio=args.ratio,\n threshold=args.threshold,\n prune_params_type=args.prune_params_type,\n place=place)\n else:\n return GMPUnstructuredPruner(\n train_program,\n ratio=args.ratio,\n prune_params_type=args.prune_params_type,\n place=place,\n configs=configs)\n\n\ndef compress(args):\n env = os.environ\n num_trainers = int(env.get('PADDLE_TRAINERS_NUM', 1))\n use_data_parallel = num_trainers > 1\n\n if use_data_parallel:\n # Fleet step 1: initialize the distributed environment\n role = role_maker.PaddleCloudRoleMaker(is_collective=True)\n fleet.init(role)\n\n train_reader = None\n test_reader = None\n if args.data == \"mnist\":\n transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])\n train_dataset = paddle.vision.datasets.MNIST(\n mode='train', backend=\"cv2\", transform=transform)\n val_dataset = paddle.vision.datasets.MNIST(\n mode='test', backend=\"cv2\", transform=transform)\n class_dim = 10\n image_shape = \"1,28,28\"\n args.pretrained_model = False\n elif args.data == \"imagenet\":\n import imagenet_reader as reader\n train_dataset = reader.ImageNetDataset(mode='train')\n val_dataset = reader.ImageNetDataset(mode='val')\n class_dim = 1000\n image_shape = \"3,224,224\"\n else:\n raise ValueError(\"{} is not supported.\".format(args.data))\n image_shape = [int(m) for m in image_shape.split(\",\")]\n assert args.model in model_list, \"{} is not in lists: {}\".format(args.model,\n model_list)\n if args.use_gpu:\n places = paddle.static.cuda_places()\n else:\n places = paddle.static.cpu_places()\n place = places[0]\n exe = paddle.static.Executor(place)\n\n image = paddle.static.data(\n name='image', shape=[None] + image_shape, dtype='float32')\n label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')\n\n batch_size_per_card = args.batch_size\n batch_sampler = paddle.io.DistributedBatchSampler(\n train_dataset,\n batch_size=batch_size_per_card,\n shuffle=True,\n drop_last=True)\n\n train_loader = paddle.io.DataLoader(\n train_dataset,\n places=place,\n batch_sampler=batch_sampler,\n feed_list=[image, label],\n return_list=False,\n use_shared_memory=True,\n 
num_workers=32)\n\n valid_loader = paddle.io.DataLoader(\n val_dataset,\n places=place,\n feed_list=[image, label],\n drop_last=False,\n return_list=False,\n use_shared_memory=True,\n batch_size=args.batch_size_for_validation,\n shuffle=False)\n\n step_per_epoch = int(\n np.ceil(len(train_dataset) * 1. / args.batch_size / num_trainers))\n\n # model definition\n model = models.__dict__[args.model]()\n out = model.net(input=image, class_dim=class_dim)\n cost = paddle.nn.functional.loss.cross_entropy(input=out, label=label)\n avg_cost = paddle.mean(x=cost)\n acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)\n acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)\n\n val_program = paddle.static.default_main_program().clone(for_test=True)\n\n opt, learning_rate = create_optimizer(args, step_per_epoch)\n\n # Fleet step 2: distributed strategy\n if use_data_parallel:\n dist_strategy = DistributedStrategy()\n dist_strategy.sync_batch_norm = False\n dist_strategy.exec_strategy = paddle.static.ExecutionStrategy()\n dist_strategy.fuse_all_reduce_ops = False\n\n train_program = paddle.static.default_main_program()\n\n if args.pruning_strategy == 'gmp':\n # GMP pruner step 0: define configs for GMP, no need to define configs for the base training.\n configs = {\n 'stable_iterations': args.stable_epochs * step_per_epoch,\n 'pruning_iterations': args.pruning_epochs * step_per_epoch,\n 'tunning_iterations': args.tunning_epochs * step_per_epoch,\n 'resume_iteration': (args.last_epoch + 1) * step_per_epoch,\n 'pruning_steps': args.pruning_steps,\n 'initial_ratio': args.initial_ratio,\n }\n elif args.pruning_strategy == 'base':\n configs = None\n\n # GMP pruner step 1: initialize a pruner object by calling entry function.\n pruner = create_unstructured_pruner(\n train_program, args, place, configs=configs)\n\n if use_data_parallel:\n # Fleet step 3: decorate the origial optimizer and minimize it\n opt = fleet.distributed_optimizer(opt, strategy=dist_strategy)\n opt.minimize(avg_cost, no_grad_set=pruner.no_grad_set)\n\n exe.run(paddle.static.default_startup_program())\n if args.last_epoch > -1:\n assert args.checkpoint is not None and os.path.exists(\n args.checkpoint), \"Please specify a valid checkpoint path.\"\n paddle.fluid.io.load_persistables(\n executor=exe, dirname=args.checkpoint, main_program=train_program)\n\n elif args.pretrained_model:\n assert os.path.exists(\n args.\n pretrained_model), \"Pretrained model path {} doesn't exist\".format(\n args.pretrained_model)\n\n def if_exist(var):\n return os.path.exists(os.path.join(args.pretrained_model, var.name))\n\n _logger.info(\"Load pretrained model from {}\".format(\n args.pretrained_model))\n # NOTE: We are using fluid.io.load_vars() because the pretrained model is from an older version which requires this API. 
\n # Please consider using paddle.static.load(program, model_path) when possible\n paddle.fluid.io.load_vars(\n exe, args.pretrained_model, predicate=if_exist)\n\n def test(epoch, program):\n acc_top1_ns = []\n acc_top5_ns = []\n\n _logger.info(\n \"The current sparsity of the inference model is {}%\".format(\n round(100 * UnstructuredPruner.total_sparse(\n paddle.static.default_main_program()), 2)))\n for batch_id, data in enumerate(valid_loader):\n start_time = time.time()\n acc_top1_n, acc_top5_n = exe.run(\n program, feed=data, fetch_list=[acc_top1.name, acc_top5.name])\n end_time = time.time()\n if batch_id % args.log_period == 0:\n _logger.info(\n \"Eval epoch[{}] batch[{}] - acc_top1: {}; acc_top5: {}; time: {}\".\n format(epoch, batch_id,\n np.mean(acc_top1_n),\n np.mean(acc_top5_n), end_time - start_time))\n acc_top1_ns.append(np.mean(acc_top1_n))\n acc_top5_ns.append(np.mean(acc_top5_n))\n\n _logger.info(\"Final eval epoch[{}] - acc_top1: {}; acc_top5: {}\".format(\n epoch,\n np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))\n\n def train(epoch, program):\n train_reader_cost = 0.0\n train_run_cost = 0.0\n total_samples = 0\n reader_start = time.time()\n for batch_id, data in enumerate(train_loader):\n train_reader_cost += time.time() - reader_start\n train_start = time.time()\n loss_n, acc_top1_n, acc_top5_n = exe.run(\n program,\n feed=data,\n fetch_list=[avg_cost.name, acc_top1.name, acc_top5.name])\n # GMP pruner step 2: step() to update ratios and other internal states of the pruner.\n pruner.step()\n\n train_run_cost += time.time() - train_start\n total_samples += args.batch_size\n loss_n = np.mean(loss_n)\n acc_top1_n = np.mean(acc_top1_n)\n acc_top5_n = np.mean(acc_top5_n)\n if batch_id % args.log_period == 0:\n _logger.info(\n \"epoch[{}]-batch[{}] lr: {:.6f} - loss: {}; acc_top1: {}; acc_top5: {}; avg_reader_cost: {:.5f} sec, avg_batch_cost: {:.5f} sec, avg_samples: {:.5f}, ips: {:.5f} images/sec\".\n format(epoch, batch_id,\n learning_rate.get_lr(), loss_n, acc_top1_n,\n acc_top5_n, train_reader_cost / args.log_period, (\n train_reader_cost + train_run_cost\n ) / args.log_period, total_samples / args.log_period,\n total_samples / (train_reader_cost + train_run_cost\n )))\n train_reader_cost = 0.0\n train_run_cost = 0.0\n total_samples = 0\n learning_rate.step()\n reader_start = time.time()\n\n if use_data_parallel:\n # Fleet step 4: get the compiled program from fleet\n compiled_train_program = fleet.main_program\n else:\n compiled_train_program = paddle.static.CompiledProgram(\n paddle.static.default_main_program())\n\n for i in range(args.last_epoch + 1, args.num_epochs):\n train(i, compiled_train_program)\n # GMP pruner step 3: update params before summrizing sparsity, saving model or evaluation. \n pruner.update_params()\n\n _logger.info(\"The current sparsity of the pruned model is: {}%\".format(\n round(100 * UnstructuredPruner.total_sparse(\n paddle.static.default_main_program()), 2)))\n\n if (i + 1) % args.test_period == 0:\n test(i, val_program)\n if (i + 1) % args.model_period == 0:\n if use_data_parallel:\n fleet.save_persistables(executor=exe, dirname=args.model_path)\n else:\n paddle.fluid.io.save_persistables(\n executor=exe, dirname=args.model_path)\n\n\ndef main():\n paddle.enable_static()\n args = parser.parse_args()\n print_arguments(args)\n compress(args)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array",
"numpy.mean"
]
] |
wenxichen/donkeycar
|
[
"5a23b0fee170596e29c80826c3db0d3a4c4c5392"
] |
[
"donkeycar/parts/camera.py"
] |
[
"import os\nimport time\nimport numpy as np\nfrom PIL import Image\nimport glob\nfrom donkeycar.utils import rgb2gray\n\nclass BaseCamera:\n\n def run_threaded(self):\n return self.frame\n\nclass PiCamera(BaseCamera):\n def __init__(self, image_w=160, image_h=120, image_d=3, framerate=20, vflip=False, hflip=False):\n from picamera.array import PiRGBArray\n from picamera import PiCamera\n \n resolution = (image_w, image_h)\n # initialize the camera and stream\n self.camera = PiCamera() #PiCamera gets resolution (height, width)\n self.camera.resolution = resolution\n self.camera.framerate = framerate\n self.camera.vflip = vflip\n self.camera.hflip = hflip\n self.rawCapture = PiRGBArray(self.camera, size=resolution)\n self.stream = self.camera.capture_continuous(self.rawCapture,\n format=\"rgb\", use_video_port=True)\n\n # initialize the frame and the variable used to indicate\n # if the thread should be stopped\n self.frame = None\n self.on = True\n self.image_d = image_d\n\n print('PiCamera loaded.. .warming camera')\n time.sleep(2)\n\n def run(self):\n f = next(self.stream)\n frame = f.array\n self.rawCapture.truncate(0)\n if self.image_d == 1:\n frame = rgb2gray(frame)\n return frame\n\n def update(self):\n # keep looping infinitely until the thread is stopped\n for f in self.stream:\n # grab the frame from the stream and clear the stream in\n # preparation for the next frame\n self.frame = f.array\n self.rawCapture.truncate(0)\n\n if self.image_d == 1:\n self.frame = rgb2gray(self.frame)\n\n # if the thread indicator variable is set, stop the thread\n if not self.on:\n break\n\n def shutdown(self):\n # indicate that the thread should be stopped\n self.on = False\n print('Stopping PiCamera')\n time.sleep(.5)\n self.stream.close()\n self.rawCapture.close()\n self.camera.close()\n\n\nclass Webcam(BaseCamera):\n def __init__(self, image_w=160, image_h=120, image_d=3, framerate = 20, iCam = 0):\n import pygame\n import pygame.camera\n\n super().__init__()\n resolution = (image_w, image_h)\n pygame.init()\n pygame.camera.init()\n l = pygame.camera.list_cameras()\n print('cameras', l)\n self.cam = pygame.camera.Camera(l[iCam], resolution, \"RGB\")\n self.resolution = resolution\n self.cam.start()\n self.framerate = framerate\n\n # initialize variable used to indicate\n # if the thread should be stopped\n self.frame = None\n self.on = True\n self.image_d = image_d\n\n print('WebcamVideoStream loaded.. 
.warming camera')\n\n time.sleep(2)\n\n def update(self):\n from datetime import datetime, timedelta\n import pygame.image\n while self.on:\n start = datetime.now()\n\n if self.cam.query_image():\n # snapshot = self.cam.get_image()\n # self.frame = list(pygame.image.tostring(snapshot, \"RGB\", False))\n snapshot = self.cam.get_image()\n snapshot1 = pygame.transform.scale(snapshot, self.resolution)\n self.frame = pygame.surfarray.pixels3d(pygame.transform.rotate(pygame.transform.flip(snapshot1, True, False), 90))\n if self.image_d == 1:\n self.frame = rgb2gray(self.frame)\n\n stop = datetime.now()\n s = 1 / self.framerate - (stop - start).total_seconds()\n if s > 0:\n time.sleep(s)\n\n self.cam.stop()\n\n def run_threaded(self):\n return self.frame\n\n def shutdown(self):\n # indicate that the thread should be stopped\n self.on = False\n print('stoping Webcam')\n time.sleep(.5)\n\n\nclass CSICamera(BaseCamera):\n '''\n Camera for Jetson Nano IMX219 based camera\n Credit: https://github.com/feicccccccc/donkeycar/blob/dev/donkeycar/parts/camera.py\n gstreamer init string from https://github.com/NVIDIA-AI-IOT/jetbot/blob/master/jetbot/camera.py\n '''\n def gstreamer_pipeline(self, capture_width=3280, capture_height=2464, output_width=224, output_height=224, framerate=21, flip_method=0) : \n return 'nvarguscamerasrc ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv flip-method=%d ! nvvidconv ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink' % (\n capture_width, capture_height, framerate, flip_method, output_width, output_height)\n \n def __init__(self, image_w=160, image_h=120, image_d=3, capture_width=3280, capture_height=2464, framerate=60, gstreamer_flip=0):\n '''\n gstreamer_flip = 0 - no flip\n gstreamer_flip = 1 - rotate CCW 90\n gstreamer_flip = 2 - flip vertically\n gstreamer_flip = 3 - rotate CW 90\n '''\n self.w = image_w\n self.h = image_h\n self.running = True\n self.frame = None\n self.flip_method = gstreamer_flip\n self.capture_width = capture_width\n self.capture_height = capture_height\n self.framerate = framerate\n\n def init_camera(self):\n import cv2\n\n # initialize the camera and stream\n self.camera = cv2.VideoCapture(\n self.gstreamer_pipeline(\n capture_width =self.capture_width,\n capture_height =self.capture_height,\n output_width=self.w,\n output_height=self.h,\n framerate=self.framerate,\n flip_method=self.flip_method),\n cv2.CAP_GSTREAMER)\n\n self.poll_camera()\n print('CSICamera loaded.. 
.warming camera')\n time.sleep(2)\n \n def update(self):\n self.init_camera()\n while self.running:\n self.poll_camera()\n\n def poll_camera(self):\n import cv2\n self.ret , frame = self.camera.read()\n self.frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n def run(self):\n self.poll_camera()\n return self.frame\n\n def run_threaded(self):\n return self.frame\n \n def shutdown(self):\n self.running = False\n print('stoping CSICamera')\n time.sleep(.5)\n del(self.camera)\n\n\nclass V4LCamera(BaseCamera):\n '''\n uses the v4l2capture library from this fork for python3 support: https://github.com/atareao/python3-v4l2capture\n sudo apt-get install libv4l-dev\n cd python3-v4l2capture\n python setup.py build\n pip install -e .\n '''\n def __init__(self, image_w=160, image_h=120, image_d=3, framerate=20, dev_fn=\"/dev/video0\", fourcc='MJPG'):\n\n self.running = True\n self.frame = None\n self.image_w = image_w\n self.image_h = image_h\n self.dev_fn = dev_fn\n self.fourcc = fourcc\n\n def init_video(self):\n import v4l2capture\n\n self.video = v4l2capture.Video_device(self.dev_fn)\n\n # Suggest an image size to the device. The device may choose and\n # return another size if it doesn't support the suggested one.\n self.size_x, self.size_y = self.video.set_format(self.image_w, self.image_h, fourcc=self.fourcc)\n\n print(\"V4L camera granted %d, %d resolution.\" % (self.size_x, self.size_y))\n\n # Create a buffer to store image data in. This must be done before\n # calling 'start' if v4l2capture is compiled with libv4l2. Otherwise\n # raises IOError.\n self.video.create_buffers(30)\n\n # Send the buffer to the device. Some devices require this to be done\n # before calling 'start'.\n self.video.queue_all_buffers()\n\n # Start the device. This lights the LED if it's a camera that has one.\n self.video.start()\n\n def update(self):\n import select\n from donkeycar.parts.image import JpgToImgArr\n\n self.init_video()\n jpg_conv = JpgToImgArr()\n\n while self.running:\n # Wait for the device to fill the buffer.\n select.select((self.video,), (), ())\n image_data = self.video.read_and_queue()\n self.frame = jpg_conv.run(image_data)\n\n def shutdown(self):\n self.running = False\n time.sleep(0.5)\n\n\nclass MockCamera(BaseCamera):\n '''\n Fake camera. Returns only a single static frame\n '''\n def __init__(self, image_w=160, image_h=120, image_d=3, image=None):\n if image is not None:\n self.frame = image\n else:\n self.frame = np.array(Image.new('RGB', (image_w, image_h)))\n\n def update(self):\n pass\n\n def shutdown(self):\n pass\n\n\nclass ImageListCamera(BaseCamera):\n '''\n Use the images from a tub as a fake camera output\n '''\n def __init__(self, path_mask='~/mycar/data/**/images/*.jpg'):\n self.image_filenames = glob.glob(os.path.expanduser(path_mask), recursive=True)\n \n def get_image_index(fnm):\n sl = os.path.basename(fnm).split('_')\n return int(sl[0])\n\n '''\n I feel like sorting by modified time is almost always\n what you want. but if you tared and moved your data around,\n sometimes it doesn't preserve a nice modified time.\n so, sorting by image index works better, but only with one path.\n '''\n self.image_filenames.sort(key=get_image_index)\n #self.image_filenames.sort(key=os.path.getmtime)\n self.num_images = len(self.image_filenames)\n print('%d images loaded.' 
% self.num_images)\n print( self.image_filenames[:10])\n self.i_frame = 0\n self.frame = None\n self.update()\n\n def update(self):\n pass\n\n def run_threaded(self): \n if self.num_images > 0:\n self.i_frame = (self.i_frame + 1) % self.num_images\n self.frame = Image.open(self.image_filenames[self.i_frame]) \n\n return np.asarray(self.frame)\n\n def shutdown(self):\n pass\n"
] |
[
[
"numpy.asarray"
]
] |
aiwithqasim/tensorflow-specialization
|
[
"4355d00bcb88aa39ca5dfa2daed5f306d2043d55"
] |
[
"Pycharm code/C1/W2/C1_W2_Assignment_Solution.py"
] |
[
"import tensorflow as tf\nprint(tf.__version__)\n\n\n# GRADED FUNCTION: train_mnist\ndef train_mnist():\n # Please write your code only where you are indicated.\n # please do not remove # model fitting inline comments.\n\n # YOUR CODE SHOULD START HERE\n class myCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n if logs.get('accuracy') is not None and logs.get('accuracy') > 0.99:\n print(\"\\nReached 99% accuracy so cancelling training!\")\n self.model.stop_training = True\n\n # YOUR CODE SHOULD END HERE\n mnist = tf.keras.datasets.mnist\n\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n # YOUR CODE SHOULD START HERE\n mnist = tf.keras.datasets.mnist\n\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train, x_test = x_train / 255.0, x_test / 255.0\n\n callbacks = myCallback()\n\n # YOUR CODE SHOULD END HERE\n model = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)), # YOUR CODE HERE\n tf.keras.layers.Dense(512, activation=tf.nn.relu), # YOUR CODE HERE\n tf.keras.layers.Dense(10, activation=tf.nn.softmax) # YOUR CODE HERE\n ])\n\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n # model fitting\n history = model.fit(x_train, y_train, epochs=10, callbacks=[callbacks] # YOUR CODE HERE\n )\n # model fitting\n return history.epoch, history.history['accuracy'][-1]\n\nif __name__ == \"__main__\":\n train_mnist()\n"
] |
[
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Flatten"
]
] |
Jasplet/SKS_Splitting
|
[
"d5c3ea442e75fd9643c31575158cf350aa41a5e2"
] |
[
"src/plot_splitting.py"
] |
[
"#! /usr/bin/env python\n### Script containing varous plotting functions for splitting Measurements\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cartopy.crs as cart\nimport cartopy\nimport matplotlib.gridspec as gridspec\nimport obspy\nfrom obspy import taup\n\ndef load(stat,phase):\n\n data = pd.read_csv('/Users/ja17375/Shear_Wave_Splitting/Python/Measurements/{}_{}_Splitting.txt'.format(stat,phase),delim_whitespace=True)\n a = data['FAST']\n d = data.index[np.isnan(data['FAST']) == True].tolist() # Find any rows which contain NaNs\n data = data.drop(d)\n data = data[(data.QUAL != 'x')]\n\n stat_loc = data.STLA[0],data.STLO[0] # sta and stlo are the same for all events (same station). In future maybe read this data in from Station list?)\n\n\n #Parse events by whether they are 'null' or 'split'\n null = (data[(data.QUAL == 'n')].BAZ,data[(data.QUAL == 'n')].FAST,data[(data.QUAL == 'n')].DFAST,data[(data.QUAL == 'n')].TLAG,data[(data.QUAL == 'n')].DTLAG,data[(data.QUAL == 'n')].EVLO,data[(data.QUAL == 'n')].EVLA)\n split =(data[(data.QUAL != 'n')].BAZ,data[(data.QUAL != 'n')].FAST,data[(data.QUAL != 'n')].DFAST,data[(data.QUAL != 'n')].TLAG,data[(data.QUAL != 'n')].DTLAG,data[(data.QUAL != 'n')].EVLO,data[(data.QUAL != 'n')].EVLA)\n return data,stat_loc,null,split # also return data so not to break SKS_plot\n\ndef SKS_plot(stat,title1=None,phase='SKS'):\n \"\"\"\n Function to make diagnostice plots for a given file of splitting measuremtns\n \"\"\"\n data, stat_loc, null, split= load(stat,phase)\n\n fig,axs = plt.subplots(2, 2,sharex='col',figsize=(10,10))\n\n\n axs[0,0].errorbar(null[0],null[1],yerr=null[2],fmt='kx',elinewidth=0.5,label='Null Measurement')\n axs[0,0].errorbar(split[0],split[1],yerr=split[2],fmt='ko',elinewidth=0.5,label='Split')\n axs[0,0].legend(loc=2)\n\n axs[0,0].set_ylabel('Fast Direction (deg)')\n axs[0,0].set_ylim([-90,90])\n axs[0,0].set_yticks(np.arange(-90,91,30))\n axs[0,0].set_title('Splitting from Broadband Data - Fast Direction')\n axs[0,1].errorbar(data[(data.QUAL == 'n')].BAZ,data[(data.QUAL == 'n')].WL_FAST,yerr=data[(data.QUAL == 'n')].WL_DFAST,fmt='kx',elinewidth=0.5)\n axs[0,1].errorbar(data[(data.QUAL != 'n')].BAZ,data[(data.QUAL != 'n')].WL_FAST,yerr=data[(data.QUAL != 'n')].WL_DFAST,fmt='ko',elinewidth=0.5)\n axs[0,1].set_ylim([-90,90])\n axs[0,1].set_yticks(np.arange(-90,91,30))\n axs[0,1].set_title('Walpole et al. (2014)- Fast Direction')\n axs[0,1].set_xlabel('Back Azimuth')\n axs[0,1].set_ylabel('Fast Direction (deg)')\n\n plot_lag(axs[1,0],null[0],null[3],null[4],fmt='kx')\n plot_lag(axs[1,0],split[0],split[3],split[4],fmt='ko')\n # axs[1,0].set_ylabel('Tlag (s)')\n # axs[1,0].set_ylim([0,4])\n axs[1,0].set_title('Splitting from Broadband Data - Lag Time'.format(title1))\n\n axs[1,1].errorbar(data[(data.QUAL == 'n')].BAZ,data[(data.QUAL == 'n')].WL_TLAG,yerr=data[(data.QUAL == 'n')].WL_DTLAG,fmt='kx',elinewidth=0.5)\n axs[1,1].errorbar(data[(data.QUAL != 'n')].BAZ,data[(data.QUAL != 'n')].WL_TLAG,yerr=data[(data.QUAL != 'n')].WL_DTLAG,fmt='ko',elinewidth=0.5)\n axs[1,1].set_ylim([0,4])\n axs[1,1].set_ylabel('Tlag (s)')\n axs[1,1].set_xlabel('Back Azimuth')\n axs[1,1].set_title('Walpole et al. 
(2014) - Lag Time')\n\n plt.tight_layout()\n plt.show()\n\n\n\ndef SKKS_plot(stat,phase):\n \"\"\"\n Creates a 3 panel plot showing measured fast direction and lag time vs back azimuth at a given station along with the coverage of the events\n stat - station Code [STRING]\n phase - phase to plot [STRING]\n \"\"\"\n data = load(stat,phase)\n\n fig = plt.figure(figsize = [20,20])\n axs = []\n gs = gridspec.GridSpec(2,3)\n proj = cart.AzimuthalEquidistant(central_longitude=data.STLO[0],central_latitude=data.STLA[0])\n\n\n ax1 = plt.subplot(gs[0,0])\n ax2 = plt.subplot(gs[1,0])\n ax3 = fig.add_subplot(gs[:,1:],projection = proj)\n\n ax1.errorbar(data.BAZ,data.TLAG,yerr = data.DTLAG,fmt='kx',elinewidth=0.5)\n ax1.set_ylim([0,4])\n ax1.set_ylabel('Lag (s)')\n ax1.set_xlabel('Back Azimuth (deg)')\n # _lag(ax1,data.BAZ,data.TLAG,data.DTLAG,'kx')\n # _fast(ax2,data.BAZ,data.FAST,data.DFAST,'kx')\n # coverage(ax3,data.EVLA,data.EVLO,data.STLA[0],data.STLO[0],stat)\n # plt.show()\n ax2.errorbar(data.BAZ,data.FAST,yerr=data.DFAST,fmt='kx',elinewidth=0.5)\n ax2.set_ylim([-90,90])\n ax2.set_ylabel('Fast Direction (s)')\n ax2.set_xlabel('Back Azimuth (deg)')\n ### Plot Coverage on third axis object\n\n ax3.set_global() #This sets the axes extent to its maximum possible setting, so we can find these darn events\n ax3.coastlines(resolution = '110m') # Coastline data is downloaded by Cartopy from Natural Earth (http://www.naturalearthdata.com)\n #Resolution options are 100m,50m and 10m\n ax3.add_feature(cartopy.feature.OCEAN, zorder=0)\n ax3.add_feature(cartopy.feature.LAND, zorder=0, edgecolor='black')\n # Now add observed events\n ax3.plot(data.EVLO,data.EVLA,'ro',markersize = 5,transform = cart.Geodetic(),label='Event Location')\n # ax.set_xticks([-130,-125,-120,-115,-110,-105,-100], crs=proj)\n # ax.set_yticks([30,35,40,45,50,55,60], crs=proj)\n ax3.plot(data.STLO,data.STLA,'kv',transform=cart.Geodetic(),markersize=10,label='Station Loction')\n ax3.set_title('{} coverage for Station {}'.format(phase,stat))\n ax3.legend()\n plt.show()\n\ndef SKS_SKKS_plot(stat,save=False):\n \"\"\"\n Creates a 3 panel plot showing measured fast direction and lag time vs back azimuth at a given station along with the coverage of the events\n for BOTH SKS and SKKS\n stat - station Code [STRING]\n phase - phase to plot [STRING]\n \"\"\"\n SKS_data,stat_loc,SKS_null,SKS_split = load(stat,phase='SKS')\n SKKS_data,stat_loc,SKKS_null,SKKS_split = load(stat,phase='SKKS')\n\n fig = plt.figure()\n axs = []\n gs = gridspec.GridSpec(2,4) # Creates a 2x3 grid in the figure space for plotting in\n proj = cart.AzimuthalEquidistant(central_longitude=SKKS_data.STLO[0],central_latitude=SKKS_data.STLA[0])\n\n\n ax1 = plt.subplot(gs[0,0:2])\n ax2 = plt.subplot(gs[1,0:2])\n ax3 = fig.add_subplot(gs[:,2:],projection = proj) # have to use add_subplots in order to add a different projection\n\n ax1.errorbar(SKS_null[0],SKS_null[1],yerr = SKS_null[2],fmt='ro',elinewidth=0.5,label='SKS (null)')\n ax1.errorbar(SKKS_null[0],SKKS_null[1],yerr = SKKS_null[2],fmt='bo',elinewidth=0.5,label='SKKS (null)')\n ax1.errorbar(SKS_split[0],SKS_split[1],yerr = SKS_split[2],fmt='rx',elinewidth=0.5,label='SKS (split)')\n ax1.errorbar(SKKS_split[0],SKKS_split[1],yerr=SKKS_split[2],fmt = 'bx',elinewidth=0.5,label='SKKS (split)')\n ax1.set_ylim([-90,90])\n ax1.set_ylabel('Fast Direction (deg)')\n ax1.set_xlabel('Back Azimuth (deg)')\n ax1.legend()\n # _lag(ax1,data.BAZ,data.TLAG,data.DTLAG,'kx')\n # _fast(ax2,data.BAZ,data.FAST,data.DFAST,'kx')\n 
#coverage(ax3,data.EVLA,data.EVLO,data.STLA[0],data.STLO[0],stat)\n # plt.show()\n ax2.errorbar(SKS_null[0],SKS_null[3],yerr = SKS_null[4],fmt='ro',elinewidth=0.5,label='SKS (null)')\n ax2.errorbar(SKKS_null[0],SKKS_null[3],yerr = SKKS_null[4],fmt='bo',elinewidth=0.5,label='SKKS (null)')\n ax2.errorbar(SKS_split[0],SKS_split[3],yerr = SKS_split[4],fmt='rx',elinewidth=0.5,label='SKS (split)')\n ax2.errorbar(SKKS_split[0],SKKS_split[3],yerr=SKKS_split[4],fmt = 'bx',elinewidth=0.5,label='SKKS (split)')\n ax2.set_ylim([0,4])\n ax2.set_ylabel('Lag (s)')\n ax2.set_xlabel('Back Azimuth (deg)')\n ### Plot Coverage on third axis object\n #ax3.set_extent([-180,180,-60,90])\n ax3.set_global() #This sets the axes extent to its maximum possible setting, so we can find these darn events\n ax3.coastlines(resolution = '110m') # Coastline data is downloaded by Cartopy from Natural Earth (http://www.naturalearthdata.com)\n #Resolution options are 100m,50m and 10m\n #ax3.add_feature(cartopy.feature.OCEAN, zorder=0)\n #ax3.add_feature(cartopy.feature.LAND, zorder=0, edgecolor='black')\n # Now add observed events\n ax3.plot(SKS_null[5],SKS_null[6],'ro',markersize = 5,transform = cart.Geodetic(),label='Null SKS Locations')\n ax3.plot(SKKS_null[5],SKKS_null[6],'bo',markersize = 5,transform = cart.Geodetic(),label='Null SKKS Locations')\n ax3.plot(SKS_split[5],SKS_split[6],'rx',markersize = 5,transform = cart.Geodetic(),label='Null SKS Locations')\n ax3.plot(SKKS_split[5],SKKS_split[6],'bx',markersize = 5,transform = cart.Geodetic(),label='Null SKKS Locations')\n # ax.set_xticks([-130,-125,-120,-115,-110,-105,-100], crs=proj)\n # ax.set_yticks([30,35,40,45,50,55,60], crs=proj)\n ax3.plot([SKS_data.STLO,SKS_data.EVLO],[SKS_data.STLA,SKS_data.EVLA],'-k',transform = cart.Geodetic())\n ax3.plot(SKS_data.STLO,SKS_data.STLA,'kv',transform=cart.Geodetic(),markersize=10,label='Station {}'.format(stat))\n ax3.set_title('SKS/SKKS coverage for Station {}'.format(stat))\n ax3.legend()\n\n\n if save is True:\n plt.savefig('/Users/ja17375/Shear_Wave_Splitting/Python/Figures/{}_SKS_SKKS_plot'.format(stat))\n print('Saving')\n else:\n plt.show()\n\ndef plot_lag(ax,baz,lag1,dlag1,fmt):\n\n ax.errorbar(baz,lag1,yerr=dlag1,fmt=fmt,elinewidth=0.5)\n\n ax.set_ylim([0,4])\n ax.set_ylabel('Lag (s)')\n ax.set_xlabel('Back Azimuth (deg)')\n\ndef plot_fast(ax,baz,fast1,dfast1,fmt):\n\n ax.errorbar(baz,fast1,yerr=dfast1,fmt = fmt,elinewidth=0.5)\n ax.set_ylim([-90,90])\n ax.set_ylabel('Fast Direction (s)')\n ax.set_xlabel('Back Azimuth (deg)')\n\ndef coverage(evla,evlo,stla,stlo,stat):\n \"\"\"\n Creates a map showing the locations of a seismic station and the associated events using a AzimuthalEquidistant projection centered on the Station\n evla - event longitude(s) [deg] can be 1 or more\n evlo - event latitude(s) [deg] can be 1 or more\n stla - station latitude [deg]\n stlo - station longitude [deg]\n stat - Station Code (string)\n \"\"\"\n # We can either do and AzimuthalEquidistant projection centered on the staiton or a nice, Pacific-centered one\n # proj = cart.PlateCarree(central_longitude=180)\n proj = cart.AzimuthalEquidistant(central_longitude=stlo,central_latitude=stla)\n ax = plt.axes(projection=proj)\n ax.set_global() #This sets the axes extent to its maximum possible setting, so we can find these darn events\n\n ax.coastlines(resolution = '110m') # Coastline data is downloaded by Cartopy from Natural Earth (http://www.naturalearthdata.com)\n #Resolution options are 100m,50m and 10m\n\n ax.add_feature(cartopy.feature.OCEAN, 
zorder=0)\n ax.add_feature(cartopy.feature.LAND, zorder=0, edgecolor='black')\n\n # Now add observed events\n ax.plot(evlo,evla,'ro',markersize = 5,transform = cart.Geodetic(),label='Event Location')\n\n # ax.set_xticks([-130,-125,-120,-115,-110,-105,-100], crs=proj)\n # ax.set_yticks([30,35,40,45,50,55,60], crs=proj)\n stat = 'NEW'\n ax.plot(stlo,stla,'kv',transform=cart.Geodetic(),markersize=10,label='Station Loction')\n ax.set_title('Coverage for Station {}'.format(stat))\n ax.legend()\n plt.show()\n\n######################## Section for SDB plotting\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"numpy.isnan",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
KamalGalrani/h2o-3
|
[
"5d5b6b319569b03ad1bca004a6d1a5dc6972d433"
] |
[
"h2o-py/h2o/model/model_base.py"
] |
[
"# -*- encoding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport traceback\nimport warnings\n\nimport h2o\nfrom h2o.base import Keyed\nfrom h2o.exceptions import H2OValueError\nfrom h2o.job import H2OJob\nfrom h2o.utils.backward_compatibility import backwards_compatible\nfrom h2o.utils.compatibility import * # NOQA\nfrom h2o.utils.compatibility import viewitems\nfrom h2o.utils.shared_utils import can_use_pandas\nfrom h2o.utils.typechecks import I, assert_is_type, assert_satisfies, Enum, is_type\n\n\nclass ModelBase(backwards_compatible(Keyed)):\n \"\"\"Base class for all models.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new model instance.\"\"\"\n super(ModelBase, self).__init__()\n self._id = None\n self._model_json = None\n self._metrics_class = None\n self._is_xvalidated = False\n self._xval_keys = None\n self._parms = {} # internal, for object recycle\n self.parms = {} # external\n self._estimator_type = \"unsupervised\"\n self._future = False # used by __repr__/show to query job state\n self._job = None # used when _future is True\n self._have_pojo = False\n self._have_mojo = False\n self._start_time = None\n self._end_time = None\n self._run_time = None\n\n\n @property\n def key(self):\n return self._id\n\n @property\n def model_id(self):\n \"\"\"Model identifier.\"\"\"\n return self._id\n\n @model_id.setter\n def model_id(self, newid):\n oldid = self._id\n self._id = newid\n h2o.rapids(\"(rename '%s' '%s')\" % (oldid, newid))\n\n\n @property\n def params(self):\n \"\"\"\n Get the parameters and the actual/default values only.\n\n :returns: A dictionary of parameters used to build this model.\n \"\"\"\n params = {}\n for p in self.parms:\n params[p] = {\"default\": self.parms[p][\"default_value\"],\n \"actual\": self.parms[p][\"actual_value\"]}\n return params\n\n\n @property\n def default_params(self):\n \"\"\"Dictionary of the default parameters of the model.\"\"\"\n params = {}\n for p in self.parms:\n params[p] = self.parms[p][\"default_value\"]\n return params\n\n\n @property\n def actual_params(self):\n \"\"\"Dictionary of actual parameters of the model.\"\"\"\n params_to_select = {\"model_id\": \"name\",\n \"response_column\": \"column_name\",\n \"training_frame\": \"name\",\n \"validation_frame\": \"name\"}\n params = {}\n for p in self.parms:\n if p in params_to_select.keys():\n params[p] = self.parms[p][\"actual_value\"].get(params_to_select[p], None)\n else:\n params[p] = self.parms[p][\"actual_value\"]\n return params\n\n\n @property\n def full_parameters(self):\n \"\"\"Dictionary of the full specification of all parameters.\"\"\"\n return self.parms\n\n\n @property\n def type(self):\n \"\"\"The type of model built: ``\"classifier\"`` or ``\"regressor\"`` or ``\"unsupervised\"``\"\"\"\n return self._estimator_type\n\n @property\n def have_pojo(self):\n \"\"\"True, if export to POJO is possible\"\"\"\n return self._have_pojo\n\n @property\n def have_mojo(self):\n \"\"\"True, if export to MOJO is possible\"\"\"\n return self._have_mojo\n\n @property\n def start_time(self):\n \"\"\"Timestamp (milliseconds since 1970) when the model training was started.\"\"\"\n return self._start_time\n\n @property\n def end_time(self):\n \"\"\"Timestamp (milliseconds since 1970) when the model training was ended.\"\"\"\n return self._end_time\n\n @property\n def run_time(self):\n \"\"\"Model training time in milliseconds\"\"\"\n return self._run_time\n\n\n def __repr__(self):\n # PUBDEV-2278: using <method>? 
from IPython caused everything to dump\n stk = traceback.extract_stack()\n if not (\"IPython\" in stk[-2][0] and \"info\" == stk[-2][2]):\n self.show()\n return \"\"\n\n\n def predict_leaf_node_assignment(self, test_data, type=\"Path\"):\n \"\"\"\n Predict on a dataset and return the leaf node assignment (only for tree-based models).\n\n :param H2OFrame test_data: Data on which to make predictions.\n :param Enum type: How to identify the leaf node. Nodes can be either identified by a path from to the root node\n of the tree to the node or by H2O's internal node id. One of: ``\"Path\"``, ``\"Node_ID\"`` (default: ``\"Path\"``).\n\n :returns: A new H2OFrame of predictions.\n \"\"\"\n if not isinstance(test_data, h2o.H2OFrame): raise ValueError(\"test_data must be an instance of H2OFrame\")\n assert_is_type(type, None, Enum(\"Path\", \"Node_ID\"))\n j = h2o.api(\"POST /3/Predictions/models/%s/frames/%s\" % (self.model_id, test_data.frame_id),\n data={\"leaf_node_assignment\": True, \"leaf_node_assignment_type\": type})\n return h2o.get_frame(j[\"predictions_frame\"][\"name\"])\n\n def staged_predict_proba(self, test_data):\n \"\"\"\n Predict class probabilities at each stage of an H2O Model (only GBM models).\n\n The output structure is analogous to the output of function predict_leaf_node_assignment. For each tree t and\n class c there will be a column Tt.Cc (eg. T3.C1 for tree 3 and class 1). The value will be the corresponding\n predicted probability of this class by combining the raw contributions of trees T1.Cc,..,TtCc. Binomial models\n build the trees just for the first class and values in columns Tx.C1 thus correspond to the the probability p0.\n\n :param H2OFrame test_data: Data on which to make predictions.\n\n :returns: A new H2OFrame of staged predictions.\n \"\"\"\n if not isinstance(test_data, h2o.H2OFrame): raise ValueError(\"test_data must be an instance of H2OFrame\")\n j = h2o.api(\"POST /3/Predictions/models/%s/frames/%s\" % (self.model_id, test_data.frame_id),\n data={\"predict_staged_proba\": True})\n return h2o.get_frame(j[\"predictions_frame\"][\"name\"])\n\n def predict_contributions(self, test_data):\n \"\"\"\n Predict feature contributions - SHAP values on an H2O Model (only GBM and XGBoost models).\n \n Returned H2OFrame has shape (#rows, #features + 1) - there is a feature contribution column for each input\n feature, the last column is the model bias (same value for each row). The sum of the feature contributions\n and the bias term is equal to the raw prediction of the model. Raw prediction of tree-based model is the sum \n of the predictions of the individual trees before before the inverse link function is applied to get the actual\n prediction. For Gaussian distribution the sum of the contributions is equal to the model prediction. 
\n\n Note: Multinomial classification models are currently not supported.\n\n :param H2OFrame test_data: Data on which to calculate contributions.\n\n :returns: A new H2OFrame made of feature contributions.\n \"\"\"\n if not isinstance(test_data, h2o.H2OFrame): raise ValueError(\"test_data must be an instance of H2OFrame\")\n j = h2o.api(\"POST /3/Predictions/models/%s/frames/%s\" % (self.model_id, test_data.frame_id),\n data={\"predict_contributions\": True})\n return h2o.get_frame(j[\"predictions_frame\"][\"name\"])\n\n def feature_frequencies(self, test_data):\n \"\"\"\n Retrieve the number of occurrences of each feature for given observations \n on their respective paths in a tree ensemble model.\n Available for GBM, Random Forest and Isolation Forest models.\n\n :param H2OFrame test_data: Data on which to calculate feature frequencies.\n\n :returns: A new H2OFrame made of feature contributions.\n \"\"\"\n if not isinstance(test_data, h2o.H2OFrame): raise ValueError(\"test_data must be an instance of H2OFrame\")\n j = h2o.api(\"POST /3/Predictions/models/%s/frames/%s\" % (self.model_id, test_data.frame_id),\n data={\"feature_frequencies\": True})\n return h2o.get_frame(j[\"predictions_frame\"][\"name\"])\n\n def predict(self, test_data, custom_metric = None, custom_metric_func = None):\n \"\"\"\n Predict on a dataset.\n\n :param H2OFrame test_data: Data on which to make predictions.\n :param custom_metric: custom evaluation function defined as class reference, the class get uploaded\n into the cluster\n :param custom_metric_func: custom evaluation function reference, e.g, result of upload_custom_metric\n\n :returns: A new H2OFrame of predictions.\n \"\"\"\n # Upload evaluation function into DKV\n if custom_metric:\n assert_satisfies(custom_metric_func, custom_metric_func is None,\n \"The argument 'eval_func_ref' cannot be specified when eval_func is specified, \")\n eval_func_ref = h2o.upload_custom_metric(custom_metric)\n if not isinstance(test_data, h2o.H2OFrame): raise ValueError(\"test_data must be an instance of H2OFrame\")\n j = H2OJob(h2o.api(\"POST /4/Predictions/models/%s/frames/%s\" % (self.model_id, test_data.frame_id), data = {'custom_metric_func': custom_metric_func}),\n self._model_json[\"algo\"] + \" prediction\")\n j.poll()\n return h2o.get_frame(j.dest_key)\n\n\n def is_cross_validated(self):\n \"\"\"Return True if the model was cross-validated.\"\"\"\n return self._is_xvalidated\n\n\n def xval_keys(self):\n \"\"\"Return model keys for the cross-validated model.\"\"\"\n return self._xval_keys\n\n\n def get_xval_models(self, key=None):\n \"\"\"\n Return a Model object.\n\n :param key: If None, return all cross-validated models; otherwise return the model that key points to.\n\n :returns: A model or list of models.\n \"\"\"\n return h2o.get_model(key) if key is not None else [h2o.get_model(k) for k in self._xval_keys]\n\n\n @property\n def xvals(self):\n \"\"\"\n Return a list of the cross-validated models.\n\n :returns: A list of models.\n \"\"\"\n return self.get_xval_models()\n\n\n def detach(self):\n self._id = None\n\n\n def deepfeatures(self, test_data, layer):\n \"\"\"\n Return hidden layer details.\n\n :param test_data: Data to create a feature space on\n :param layer: 0 index hidden layer\n \"\"\"\n if test_data is None: raise ValueError(\"Must specify test data\")\n if str(layer).isdigit():\n j = H2OJob(h2o.api(\"POST /4/Predictions/models/%s/frames/%s\" % (self._id, test_data.frame_id),\n data={\"deep_features_hidden_layer\": layer}), \"deepfeatures\")\n 
else:\n j = H2OJob(h2o.api(\"POST /4/Predictions/models/%s/frames/%s\" % (self._id, test_data.frame_id),\n data={\"deep_features_hidden_layer_name\": layer}), \"deepfeatures\")\n j.poll()\n return h2o.get_frame(j.dest_key)\n\n\n def weights(self, matrix_id=0):\n \"\"\"\n Return the frame for the respective weight matrix.\n\n :param matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.\n\n :returns: an H2OFrame which represents the weight matrix identified by matrix_id\n \"\"\"\n num_weight_matrices = len(self._model_json[\"output\"][\"weights\"])\n if matrix_id not in list(range(num_weight_matrices)):\n raise ValueError(\n \"Weight matrix does not exist. Model has {0} weight matrices (0-based indexing), but matrix {1} \"\n \"was requested.\".format(num_weight_matrices, matrix_id))\n return h2o.get_frame(self._model_json[\"output\"][\"weights\"][matrix_id][\"URL\"].split(\"/\")[3])\n\n\n def biases(self, vector_id=0):\n \"\"\"\n Return the frame for the respective bias vector.\n\n :param: vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.\n\n :returns: an H2OFrame which represents the bias vector identified by vector_id\n \"\"\"\n num_bias_vectors = len(self._model_json[\"output\"][\"biases\"])\n if vector_id not in list(range(num_bias_vectors)):\n raise ValueError(\n \"Bias vector does not exist. Model has {0} bias vectors (0-based indexing), but vector {1} \"\n \"was requested.\".format(num_bias_vectors, vector_id))\n return h2o.get_frame(self._model_json[\"output\"][\"biases\"][vector_id][\"URL\"].split(\"/\")[3])\n\n\n def normmul(self):\n \"\"\"Normalization/Standardization multipliers for numeric predictors.\"\"\"\n return self._model_json[\"output\"][\"normmul\"]\n\n\n def normsub(self):\n \"\"\"Normalization/Standardization offsets for numeric predictors.\"\"\"\n return self._model_json[\"output\"][\"normsub\"]\n\n\n def respmul(self):\n \"\"\"Normalization/Standardization multipliers for numeric response.\"\"\"\n return self._model_json[\"output\"][\"normrespmul\"]\n\n\n def respsub(self):\n \"\"\"Normalization/Standardization offsets for numeric response.\"\"\"\n return self._model_json[\"output\"][\"normrespsub\"]\n\n\n def catoffsets(self):\n \"\"\"Categorical offsets for one-hot encoding.\"\"\"\n return self._model_json[\"output\"][\"catoffsets\"]\n\n\n def model_performance(self, test_data=None, train=False, valid=False, xval=False):\n \"\"\"\n Generate model metrics for this model on test_data.\n\n :param H2OFrame test_data: Data set for which model metrics shall be computed against. All three of train,\n valid and xval arguments are ignored if test_data is not None.\n :param bool train: Report the training metrics for the model.\n :param bool valid: Report the validation metrics for the model.\n :param bool xval: Report the cross-validation metrics for the model. If train and valid are True, then it\n defaults to True.\n\n :returns: An object of class H2OModelMetrics.\n \"\"\"\n if test_data is None:\n if not train and not valid and not xval: train = True # default to train\n if train: return self._model_json[\"output\"][\"training_metrics\"]\n if valid: return self._model_json[\"output\"][\"validation_metrics\"]\n if xval: return self._model_json[\"output\"][\"cross_validation_metrics\"]\n\n else: # cases dealing with test_data not None\n if not isinstance(test_data, h2o.H2OFrame):\n raise ValueError(\"`test_data` must be of type H2OFrame. 
Got: \" + type(test_data))\n if (self._model_json[\"response_column_name\"] != None) and not(self._model_json[\"response_column_name\"] in test_data.names):\n print(\"WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.\")\n return\n res = h2o.api(\"POST /3/ModelMetrics/models/%s/frames/%s\" % (self.model_id, test_data.frame_id))\n\n # FIXME need to do the client-side filtering... (PUBDEV-874)\n raw_metrics = None\n for mm in res[\"model_metrics\"]:\n if mm[\"frame\"] is not None and mm[\"frame\"][\"name\"] == test_data.frame_id:\n raw_metrics = mm\n break\n return self._metrics_class(raw_metrics, algo=self._model_json[\"algo\"])\n\n\n def scoring_history(self):\n \"\"\"\n Retrieve Model Score History.\n\n :returns: The score history as an H2OTwoDimTable or a Pandas DataFrame.\n \"\"\"\n model = self._model_json[\"output\"]\n if \"scoring_history\" in model and model[\"scoring_history\"] is not None:\n return model[\"scoring_history\"].as_data_frame()\n print(\"No score history for this model\")\n\n\n def cross_validation_metrics_summary(self):\n \"\"\"\n Retrieve Cross-Validation Metrics Summary.\n\n :returns: The cross-validation metrics summary as an H2OTwoDimTable\n \"\"\"\n model = self._model_json[\"output\"]\n if \"cross_validation_metrics_summary\" in model and model[\"cross_validation_metrics_summary\"] is not None:\n return model[\"cross_validation_metrics_summary\"]\n print(\"No cross-validation metrics summary for this model\")\n\n\n def summary(self):\n \"\"\"Print a detailed summary of the model.\"\"\"\n model = self._model_json[\"output\"]\n if \"model_summary\" in model and model[\"model_summary\"] is not None:\n return model[\"model_summary\"]\n print(\"No model summary for this model\")\n\n\n def show(self):\n \"\"\"Print innards of model, without regards to type.\"\"\"\n if self._future:\n self._job.poll_once()\n return\n if self._model_json is None:\n print(\"No model trained yet\")\n return\n if self.model_id is None:\n print(\"This H2OEstimator has been removed.\")\n return\n model = self._model_json[\"output\"]\n print(\"Model Details\")\n print(\"=============\")\n\n print(self.__class__.__name__, \": \", self._model_json[\"algo_full_name\"])\n print(\"Model Key: \", self._id)\n print()\n\n summary = self.summary()\n if summary:\n print(summary)\n\n # training metrics\n tm = model[\"training_metrics\"]\n if tm: tm.show()\n vm = model[\"validation_metrics\"]\n if vm: vm.show()\n xm = model[\"cross_validation_metrics\"]\n if xm: xm.show()\n xms = model[\"cross_validation_metrics_summary\"]\n if xms: xms.show()\n\n if \"scoring_history\" in model and model[\"scoring_history\"]:\n model[\"scoring_history\"].show()\n if \"variable_importances\" in model and model[\"variable_importances\"]:\n model[\"variable_importances\"].show()\n\n\n def varimp(self, use_pandas=False):\n \"\"\"\n Pretty print the variable importances, or return them in a list.\n\n :param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.\n\n :returns: A list or Pandas DataFrame.\n \"\"\"\n model = self._model_json[\"output\"]\n if self.algo=='glm' or \"variable_importances\" in list(model.keys()) and model[\"variable_importances\"]:\n if self.algo=='glm':\n tempvals = model[\"standardized_coefficient_magnitudes\"].cell_values\n maxVal = 0\n sum=0\n for item in tempvals:\n sum=sum+item[1]\n if item[1]>maxVal:\n maxVal = item[1]\n vals = []\n for item in tempvals:\n tempT = (item[0], 
item[1], item[1]/maxVal, item[1]/sum)\n vals.append(tempT)\n header = [\"variable\", \"relative_importance\", \"scaled_importance\", \"percentage\"]\n else:\n vals = model[\"variable_importances\"].cell_values\n header = model[\"variable_importances\"].col_header\n \n if use_pandas and can_use_pandas():\n import pandas\n return pandas.DataFrame(vals, columns=header)\n else:\n return vals\n else:\n print(\"Warning: This model doesn't have variable importances\")\n\n\n def residual_deviance(self, train=False, valid=False, xval=None):\n \"\"\"\n Retreive the residual deviance if this model has the attribute, or None otherwise.\n\n :param bool train: Get the residual deviance for the training set. If both train and valid are False, then\n train is selected by default.\n :param bool valid: Get the residual deviance for the validation set. If both train and valid are True, then\n train is selected by default.\n\n :returns: Return the residual deviance, or None if it is not present.\n \"\"\"\n if xval: raise H2OValueError(\"Cross-validation metrics are not available.\")\n if not train and not valid: train = True\n if train and valid: train = True\n if train:\n return self._model_json[\"output\"][\"training_metrics\"].residual_deviance()\n else:\n return self._model_json[\"output\"][\"validation_metrics\"].residual_deviance()\n\n\n def residual_degrees_of_freedom(self, train=False, valid=False, xval=False):\n \"\"\"\n Retreive the residual degress of freedom if this model has the attribute, or None otherwise.\n\n :param bool train: Get the residual dof for the training set. If both train and valid are False, then train\n is selected by default.\n :param bool valid: Get the residual dof for the validation set. If both train and valid are True, then train\n is selected by default.\n\n :returns: Return the residual dof, or None if it is not present.\n \"\"\"\n if xval: raise H2OValueError(\"Cross-validation metrics are not available.\")\n if not train and not valid: train = True\n if train and valid: train = True\n if train:\n return self._model_json[\"output\"][\"training_metrics\"].residual_degrees_of_freedom()\n else:\n return self._model_json[\"output\"][\"validation_metrics\"].residual_degrees_of_freedom()\n\n\n def null_deviance(self, train=False, valid=False, xval=False):\n \"\"\"\n Retreive the null deviance if this model has the attribute, or None otherwise.\n\n :param bool train: Get the null deviance for the training set. If both train and valid are False, then train\n is selected by default.\n :param bool valid: Get the null deviance for the validation set. If both train and valid are True, then train\n is selected by default.\n\n :returns: Return the null deviance, or None if it is not present.\n \"\"\"\n if xval: raise H2OValueError(\"Cross-validation metrics are not available.\")\n if not train and not valid: train = True\n if train and valid: train = True\n if train:\n return self._model_json[\"output\"][\"training_metrics\"].null_deviance()\n else:\n return self._model_json[\"output\"][\"validation_metrics\"].null_deviance()\n\n\n def null_degrees_of_freedom(self, train=False, valid=False, xval=False):\n \"\"\"\n Retreive the null degress of freedom if this model has the attribute, or None otherwise.\n\n :param bool train: Get the null dof for the training set. If both train and valid are False, then train is\n selected by default.\n :param bool valid: Get the null dof for the validation set. 
If both train and valid are True, then train is\n selected by default.\n\n :returns: Return the null dof, or None if it is not present.\n \"\"\"\n if xval: raise H2OValueError(\"Cross-validation metrics are not available.\")\n if not train and not valid: train = True\n if train and valid: train = True\n if train:\n return self._model_json[\"output\"][\"training_metrics\"].null_degrees_of_freedom()\n else:\n return self._model_json[\"output\"][\"validation_metrics\"].null_degrees_of_freedom()\n\n\n def pprint_coef(self):\n \"\"\"Pretty print the coefficents table (includes normalized coefficients).\"\"\"\n print(self._model_json[\"output\"][\"coefficients_table\"]) # will return None if no coefs!\n\n\n def coef(self):\n \"\"\"\n Return the coefficients which can be applied to the non-standardized data.\n\n Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly.\n \"\"\"\n tbl = self._model_json[\"output\"][\"coefficients_table\"]\n if tbl is None:\n return None\n return {name: coef for name, coef in zip(tbl[\"names\"], tbl[\"coefficients\"])}\n\n\n def coef_norm(self):\n \"\"\"\n Return coefficients fitted on the standardized data (requires standardize = True, which is on by default).\n\n These coefficients can be used to evaluate variable importance.\n \"\"\"\n if self._model_json[\"output\"][\"model_category\"]==\"Multinomial\":\n tbl = self._model_json[\"output\"][\"standardized_coefficient_magnitudes\"]\n if tbl is None:\n return None\n return {name: coef for name, coef in zip(tbl[\"names\"], tbl[\"coefficients\"])}\n else:\n tbl = self._model_json[\"output\"][\"coefficients_table\"]\n if tbl is None:\n return None\n return {name: coef for name, coef in zip(tbl[\"names\"], tbl[\"standardized_coefficients\"])}\n\n\n def r2(self, train=False, valid=False, xval=False):\n \"\"\"\n Return the R squared for this regression model.\n\n Will return R^2 for GLM Models and will return NaN otherwise.\n\n The R^2 value is defined to be 1 - MSE/var, where var is computed as sigma*sigma.\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the R^2 value for the training data.\n :param bool valid: If valid is True, then return the R^2 value for the validation data.\n :param bool xval: If xval is True, then return the R^2 value for the cross validation data.\n\n :returns: The R squared for this regression model.\n \"\"\"\n tm = ModelBase._get_metrics(self, train, valid, xval)\n m = {}\n for k, v in viewitems(tm):\n m[k] = None if v is None else v.r2()\n return list(m.values())[0] if len(m) == 1 else m\n\n\n def mse(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the Mean Square Error.\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the MSE value for the training data.\n :param bool valid: If valid is True, then return the MSE value for the validation data.\n :param bool xval: If xval is True, then return the MSE value for the cross validation data.\n\n :returns: The MSE for this regression model.\n \"\"\"\n tm = ModelBase._get_metrics(self, train, valid, xval)\n m = {}\n for k, v in viewitems(tm):\n m[k] = None if v is None 
else v.mse()\n return list(m.values())[0] if len(m) == 1 else m\n\n\n def rmse(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the Root Mean Square Error.\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the RMSE value for the training data.\n :param bool valid: If valid is True, then return the RMSE value for the validation data.\n :param bool xval: If xval is True, then return the RMSE value for the cross validation data.\n\n :returns: The RMSE for this regression model.\n \"\"\"\n tm = ModelBase._get_metrics(self, train, valid, xval)\n m = {}\n for k, v in viewitems(tm):\n m[k] = None if v is None else v.rmse()\n return list(m.values())[0] if len(m) == 1 else m\n\n\n def mae(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the Mean Absolute Error.\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the MAE value for the training data.\n :param bool valid: If valid is True, then return the MAE value for the validation data.\n :param bool xval: If xval is True, then return the MAE value for the cross validation data.\n\n :returns: The MAE for this regression model.\n \"\"\"\n tm = ModelBase._get_metrics(self, train, valid, xval)\n m = {}\n for k, v in viewitems(tm):\n m[k] = None if v is None else v.mae()\n return list(m.values())[0] if len(m) == 1 else m\n\n\n def rmsle(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the Root Mean Squared Logarithmic Error.\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the RMSLE value for the training data.\n :param bool valid: If valid is True, then return the RMSLE value for the validation data.\n :param bool xval: If xval is True, then return the RMSLE value for the cross validation data.\n\n :returns: The RMSLE for this regression model.\n \"\"\"\n tm = ModelBase._get_metrics(self, train, valid, xval)\n m = {}\n for k, v in viewitems(tm): m[k] = None if v is None else v.rmsle()\n return list(m.values())[0] if len(m) == 1 else m\n\n\n def logloss(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the Log Loss.\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the log loss value for the training data.\n :param bool valid: If valid is True, then return the log loss value for the validation data.\n :param bool xval: If xval is True, then return the log loss value for the cross validation data.\n\n :returns: The log loss for this regression model.\n \"\"\"\n tm = ModelBase._get_metrics(self, train, valid, xval)\n m = {}\n for k, v in viewitems(tm): m[k] = None if v is None else v.logloss()\n return list(m.values())[0] if len(m) == 1 else m\n\n\n def mean_residual_deviance(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the Mean Residual Deviances.\n\n If all are False 
(default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the Mean Residual Deviance value for the training data.\n :param bool valid: If valid is True, then return the Mean Residual Deviance value for the validation data.\n :param bool xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.\n\n :returns: The Mean Residual Deviance for this regression model.\n \"\"\"\n tm = ModelBase._get_metrics(self, train, valid, xval)\n m = {}\n for k, v in viewitems(tm): m[k] = None if v is None else v.mean_residual_deviance()\n return list(m.values())[0] if len(m) == 1 else m\n\n\n def auc(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the AUC (Area Under Curve).\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the AUC value for the training data.\n :param bool valid: If valid is True, then return the AUC value for the validation data.\n :param bool xval: If xval is True, then return the AUC value for the validation data.\n\n :returns: The AUC.\n \"\"\"\n tm = ModelBase._get_metrics(self, train, valid, xval)\n m = {}\n for k, v in viewitems(tm): m[k] = None if v is None else v.auc()\n return list(m.values())[0] if len(m) == 1 else m\n\n\n def aic(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the AIC (Akaike Information Criterium).\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the AIC value for the training data.\n :param bool valid: If valid is True, then return the AIC value for the validation data.\n :param bool xval: If xval is True, then return the AIC value for the validation data.\n\n :returns: The AIC.\n \"\"\"\n tm = ModelBase._get_metrics(self, train, valid, xval)\n m = {}\n for k, v in viewitems(tm): m[k] = None if v is None else v.aic()\n return list(m.values())[0] if len(m) == 1 else m\n\n\n def gini(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the Gini coefficient.\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\"\n\n :param bool train: If train is True, then return the Gini Coefficient value for the training data.\n :param bool valid: If valid is True, then return the Gini Coefficient value for the validation data.\n :param bool xval: If xval is True, then return the Gini Coefficient value for the cross validation data.\n\n :returns: The Gini Coefficient for this binomial model.\n \"\"\"\n tm = ModelBase._get_metrics(self, train, valid, xval)\n m = {}\n for k, v in viewitems(tm): m[k] = None if v is None else v.gini()\n return list(m.values())[0] if len(m) == 1 else m\n\n def download_pojo(self, path=\"\", get_genmodel_jar=False, genmodel_name=\"\"):\n \"\"\"\n Download the POJO for this model to the directory specified by path.\n\n If path is an empty string, then dump the output to screen.\n\n :param path: An absolute path to the directory where POJO 
should be saved.\n :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.\n :param genmodel_name: Custom name of genmodel jar\n :returns: name of the POJO file written.\n \"\"\"\n assert_is_type(path, str)\n assert_is_type(get_genmodel_jar, bool)\n path = path.rstrip(\"/\")\n return h2o.download_pojo(self, path, get_jar=get_genmodel_jar, jar_name=genmodel_name)\n\n\n def download_mojo(self, path=\".\", get_genmodel_jar=False, genmodel_name=\"\"):\n \"\"\"\n Download the model in MOJO format.\n\n :param path: the path where MOJO file should be saved.\n :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.\n :param genmodel_name: Custom name of genmodel jar\n :returns: name of the MOJO file written.\n \"\"\"\n assert_is_type(path, str)\n assert_is_type(get_genmodel_jar, bool)\n\n if not self.have_mojo:\n raise H2OValueError(\"Export to MOJO not supported\")\n\n if get_genmodel_jar:\n if genmodel_name == \"\":\n h2o.api(\"GET /3/h2o-genmodel.jar\", save_to=os.path.join(path, \"h2o-genmodel.jar\"))\n else:\n h2o.api(\"GET /3/h2o-genmodel.jar\", save_to=os.path.join(path, genmodel_name))\n return h2o.api(\"GET /3/Models/%s/mojo\" % self.model_id, save_to=path)\n\n def save_mojo(self, path=\"\", force=False):\n \"\"\"\n Save an H2O Model as MOJO (Model Object, Optimized) to disk.\n\n :param model: The model object to save.\n :param path: a path to save the model at (hdfs, s3, local)\n :param force: if True overwrite destination directory in case it exists, or throw exception if set to False.\n\n :returns str: the path of the saved model\n \"\"\"\n assert_is_type(path, str)\n assert_is_type(force, bool)\n if not self.have_mojo:\n raise H2OValueError(\"Export to MOJO not supported\")\n path = os.path.join(os.getcwd() if path == \"\" else path, self.model_id + \".zip\")\n return h2o.api(\"GET /99/Models.mojo/%s\" % self.model_id, data={\"dir\": path, \"force\": force})[\"dir\"]\n\n def save_model_details(self, path=\"\", force=False):\n \"\"\"\n Save Model Details of an H2O Model in JSON Format to disk.\n\n :param model: The model object to save.\n :param path: a path to save the model details at (hdfs, s3, local)\n :param force: if True overwrite destination directory in case it exists, or throw exception if set to False.\n\n :returns str: the path of the saved model details\n \"\"\"\n assert_is_type(path, str)\n assert_is_type(force, bool)\n path = os.path.join(os.getcwd() if path == \"\" else path, self.model_id + \".json\")\n return h2o.api(\"GET /99/Models/%s/json\" % self.model_id, data={\"dir\": path, \"force\": force})[\"dir\"]\n\n @staticmethod\n def _get_metrics(o, train, valid, xval):\n # noinspection PyProtectedMember\n output = o._model_json[\"output\"]\n metrics = {}\n if train: metrics[\"train\"] = output[\"training_metrics\"]\n if valid: metrics[\"valid\"] = output[\"validation_metrics\"]\n if xval: metrics[\"xval\"] = output[\"cross_validation_metrics\"]\n if len(metrics) == 0: metrics[\"train\"] = output[\"training_metrics\"]\n return metrics\n\n\n # Delete from cluster as model goes out of scope\n # def __del__(self):\n # h2o.remove(self._id)\n\n def _plot(self, timestep, metric, server=False):\n plt = _get_matplotlib_pyplot(server)\n if not plt: return\n\n scoring_history = self.scoring_history()\n # Separate functionality for GLM since its output is different from other algos\n if self._model_json[\"algo\"] == \"glm\":\n # GLM has only one timestep option, which is `iteration`\n 
timestep = \"iteration\"\n if metric == \"AUTO\":\n metric = \"log_likelihood\"\n elif metric not in (\"log_likelihood\", \"objective\"):\n raise H2OValueError(\"for GLM, metric must be one of: log_likelihood, objective\")\n plt.xlabel(timestep)\n plt.ylabel(metric)\n plt.title(\"Validation Scoring History\")\n plt.plot(scoring_history[timestep], scoring_history[metric])\n\n elif self._model_json[\"algo\"] in (\"deeplearning\", \"deepwater\", \"xgboost\", \"drf\", \"gbm\"):\n # Set timestep\n if self._model_json[\"algo\"] in (\"gbm\", \"drf\", \"xgboost\"):\n assert_is_type(timestep, \"AUTO\", \"duration\", \"number_of_trees\")\n if timestep == \"AUTO\":\n timestep = \"number_of_trees\"\n else: # self._model_json[\"algo\"] == \"deeplearning\":\n # Delete first row of DL scoring history since it contains NAs & NaNs\n if scoring_history[\"samples\"][0] == 0:\n scoring_history = scoring_history[1:]\n assert_is_type(timestep, \"AUTO\", \"epochs\", \"samples\", \"duration\")\n if timestep == \"AUTO\":\n timestep = \"epochs\"\n\n training_metric = \"training_{}\".format(metric)\n validation_metric = \"validation_{}\".format(metric)\n if timestep == \"duration\":\n dur_colname = \"duration_{}\".format(scoring_history[\"duration\"][1].split()[1])\n scoring_history[dur_colname] = [str(x).split()[0] for x in scoring_history[\"duration\"]]\n timestep = dur_colname\n\n if can_use_pandas():\n valid = validation_metric in list(scoring_history)\n ylim = (scoring_history[[training_metric, validation_metric]].min().min(),\n scoring_history[[training_metric, validation_metric]].max().max()) if valid \\\n else (scoring_history[training_metric].min(), scoring_history[training_metric].max())\n else:\n valid = validation_metric in scoring_history.col_header\n ylim = (min(min(scoring_history[[training_metric, validation_metric]])),\n max(max(scoring_history[[training_metric, validation_metric]]))) if valid \\\n else (min(scoring_history[training_metric]), max(scoring_history[training_metric]))\n if ylim[0] == ylim[1]: ylim = (0, 1)\n\n if valid: # Training and validation scoring history\n plt.xlabel(timestep)\n plt.ylabel(metric)\n plt.title(\"Scoring History\")\n plt.ylim(ylim)\n plt.plot(scoring_history[timestep], scoring_history[training_metric], label=\"Training\")\n plt.plot(scoring_history[timestep], scoring_history[validation_metric], color=\"orange\",\n label=\"Validation\")\n plt.legend()\n else: # Training scoring history only\n plt.xlabel(timestep)\n plt.ylabel(training_metric)\n plt.title(\"Training Scoring History\")\n plt.ylim(ylim)\n plt.plot(scoring_history[timestep], scoring_history[training_metric])\n\n else: # algo is not glm, deeplearning, drf, gbm, xgboost\n raise H2OValueError(\"Plotting not implemented for this type of model\")\n if not server: plt.show()\n\n\n def partial_plot(self, data, cols=None, destination_key=None, nbins=20, weight_column=None,\n plot=True, plot_stddev = True, figsize=(7, 10), server=False, include_na=False, user_splits=None,\n col_pairs_2dpdp=None, save_to_file=None, row_index=None):\n \"\"\"\n Create partial dependence plot which gives a graphical depiction of the marginal effect of a variable on the\n response. 
The effect of a variable is measured in change in the mean response.\n\n :param H2OFrame data: An H2OFrame object used for scoring and constructing the plot.\n :param cols: Feature(s) for which partial dependence will be calculated.\n :param destination_key: An key reference to the created partial dependence tables in H2O.\n :param nbins: Number of bins used. For categorical columns make sure the number of bins exceed the level count. If you enable add_missing_NA, the returned length will be nbin+1.\n :param weight_column: A string denoting which column of data should be used as the weight column.\n :param plot: A boolean specifying whether to plot partial dependence table.\n :param plot_stddev: A boolean specifying whether to add std err to partial dependence plot.\n :param figsize: Dimension/size of the returning plots, adjust to fit your output cells.\n :param server: Specify whether to activate matplotlib \"server\" mode. In this case, the plots are saved to a file instead of being rendered.\n :param include_na: A boolean specifying whether missing value should be included in the Feature values.\n :param user_splits: a dictionary containing column names as key and user defined split values as value in a list.\n :param col_pairs_2dpdp: list containing pairs of column names for 2D pdp\n :param save_to_file: Fully qualified name to an image file the resulting plot should be saved to, e.g. '/home/user/pdpplot.png'. The 'png' postfix might be omitted. If the file already exists, it will be overridden. Plot is only saved if plot = True.\n :param row_index: Row for which partial dependence will be calculated instead of the whole input frame.\n :returns: Plot and list of calculated mean response tables for each feature requested.\n \"\"\"\n if not isinstance(data, h2o.H2OFrame): raise ValueError(\"data must be an instance of H2OFrame\")\n num_1dpdp = 0\n num_2dpdp = 0\n if not(cols==None):\n assert_is_type(cols, [str])\n num_1dpdp = len(cols)\n if not(col_pairs_2dpdp==None):\n assert_is_type(col_pairs_2dpdp, [[str, str]])\n num_2dpdp=len(col_pairs_2dpdp)\n \n if (cols==None) and (col_pairs_2dpdp==None):\n raise ValueError(\"must specify either cols or col_pairs_2dpd to generate partial dependency plots\")\n \n assert_is_type(destination_key, None, str)\n assert_is_type(nbins, int)\n assert_is_type(plot, bool)\n assert_is_type(figsize, (int, int))\n\n # Check cols specified exist in frame data\n if not(cols==None):\n for xi in cols:\n if xi not in data.names:\n raise H2OValueError(\"Column %s does not exist in the training frame\" % xi)\n if not(col_pairs_2dpdp==None):\n for oneP in col_pairs_2dpdp:\n if oneP[0] not in data.names:\n raise H2OValueError(\"Column %s does not exist in the training frame\" % oneP[0])\n if oneP[1] not in data.names:\n raise H2OValueError(\"Column %s does not exist in the training frame\" % oneP[1])\n if oneP[0]==oneP[1]:\n raise H2OValueError(\"2D pdp must be with different columns.\")\n if isinstance(weight_column, int) and not (weight_column == -1):\n raise H2OValueError(\"Weight column should be a column name in your data frame.\")\n elif isinstance(weight_column, str): # index is a name\n if weight_column not in data.names:\n raise H2OValueError(\"Column %s does not exist in the data frame\" % weight_column)\n weight_column = data.names.index(weight_column)\n \n if row_index:\n if not isinstance(row_index, int):\n raise H2OValueError(\"Row index should be of type int.\")\n else:\n row_index = -1\n \n kwargs = {}\n kwargs[\"cols\"] = cols\n kwargs[\"model_id\"] = 
self.model_id\n kwargs[\"frame_id\"] = data.frame_id\n kwargs[\"nbins\"] = nbins\n kwargs[\"destination_key\"] = destination_key\n kwargs[\"weight_column_index\"] = weight_column\n kwargs[\"add_missing_na\"] = include_na\n kwargs[\"row_index\"] = row_index\n kwargs[\"col_pairs_2dpdp\"] = col_pairs_2dpdp\n\n self.__generate_user_splits(user_splits, data, kwargs)\n json = H2OJob(h2o.api(\"POST /3/PartialDependence/\", data=kwargs), job_type=\"PartialDependencePlot\").poll()\n json = h2o.api(\"GET /3/PartialDependence/%s\" % json.dest_key)\n\n # Extract partial dependence data from json response\n pps = json[\"partial_dependence_data\"]\n\n # Plot partial dependence plots using matplotlib\n self.__generate_partial_plots(num_1dpdp, num_2dpdp, plot, server, pps, figsize, col_pairs_2dpdp, data, nbins,\n kwargs[\"user_cols\"], kwargs[\"num_user_splits\"], plot_stddev, cols, save_to_file)\n return pps\n\n def __generate_user_splits(self, user_splits, data, kwargs):\n # extract user defined split points from dict user_splits into an integer array of column indices\n # and a double array of user define values for the corresponding columns\n if not(user_splits == None) and (len(user_splits) > 0):\n if not(isinstance(user_splits, dict)):\n raise H2OValueError(\"user_splits must be a Python dict.\")\n else:\n user_cols = []\n user_values = []\n user_num_splits = []\n data_ncol = data.ncol\n column_names = data.names\n for colKey,val in user_splits.items():\n if is_type(colKey, str) and colKey in column_names:\n user_cols.append(colKey)\n elif isinstance(colKey, int) and colKey < data_ncol:\n user_cols.append(column_names[colKey])\n else:\n raise H2OValueError(\"column names/indices used in user_splits are not valid. They \"\n \"should be chosen from the columns of your data set.\")\n\n if data[colKey].isfactor()[0] or data[colKey].isnumeric()[0]: # replace enum string with actual value\n nVal = len(val)\n if data[colKey].isfactor()[0]:\n domains = data[colKey].levels()[0]\n\n numVal = [0]*nVal\n for ind in range(nVal):\n if (val[ind] in domains):\n numVal[ind] = domains.index(val[ind])\n else:\n raise H2OValueError(\"Illegal enum value {0} encountered. 
To include missing\"\n \" values in your feature values, set include_na to \"\n \"True\".format(val[ind]))\n\n user_values.extend(numVal)\n else:\n user_values.extend(val)\n user_num_splits.append(nVal)\n else:\n raise H2OValueError(\"Partial dependency plots are generated for numerical and categorical \"\n \"columns only.\")\n kwargs[\"user_cols\"] = user_cols\n kwargs[\"user_splits\"] = user_values\n kwargs[\"num_user_splits\"] = user_num_splits\n else:\n kwargs[\"user_cols\"] = None\n kwargs[\"user_splits\"] = None\n kwargs[\"num_user_splits\"] = None\n\n def __generate_partial_plots(self, num_1dpdp, num_2dpdp, plot, server, pps, figsize, col_pairs_2dpdp, data, nbins,\n user_cols, user_num_splits, plot_stddev, cols, save_to_file):\n # Plot partial dependence plots using matplotlib\n totFig = num_1dpdp+num_2dpdp\n if plot and totFig>0: # plot 1d pdp for now\n plt = _get_matplotlib_pyplot(server)\n if not plt: return pps\n import matplotlib.gridspec as gridspec\n fig = plt.figure(figsize=figsize)\n gxs = gridspec.GridSpec(totFig, 1)\n if num_2dpdp>0: # 2d pdp requested\n axes3D = _get_mplot3d_pyplot(\"2D partial plots\")\n cm = _get_matplotlib_cm(\"2D partial plots\")\n figPlotted = False # indicated number of figures plotted\n for i, pp in enumerate(pps):\n if (i >= num_1dpdp): # plot 2D pdp\n if (axes3D==None) or (cm==None) or (plt==None): # quit if cannot find toolbox\n break\n figPlotted = self.__plot_2dpdp(fig, col_pairs_2dpdp, gxs, num_1dpdp, data, pp, nbins, user_cols,\n user_num_splits, plot_stddev, cm, i)\n else: # plot 1D pdp\n figPlotted = self.__plot_1dpdp(cols, i, data, pp, fig, gxs, plot_stddev)\n\n if figPlotted:\n fig.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n else:\n print(\"No partial plot is generated and/or saved. You may be missing toolboxes like \"\n \"mpl_toolkits.mplot3d, matplotlib\")\n if (save_to_file is not None) and figPlotted: # only save when a figure is actually plotted\n plt.savefig(save_to_file)\n\n def __plot_2dpdp(self, fig, col_pairs_2dpdp, gxs, num_1dpdp, data, pp, nbins, user_cols, user_num_splits, plot_stddev, cm, i):\n ax = fig.add_subplot(gxs[i], projection='3d')\n colPairs = col_pairs_2dpdp[i-num_1dpdp]\n x = self.__grabValues(pp, 0, data, colPairs[0], ax) # change to numpy 2d_array\n y = self.__grabValues(pp, 1, data, colPairs[1], ax)\n X,Y,Z = self.__predFor3D(x,y,pp[2], colPairs, nbins, user_cols, user_num_splits)\n\n zupper = [a + b for a, b in zip(pp[2], pp[3]) ] # pp[1] is mean, pp[2] is std\n zlower = [a - b for a, b in zip(pp[2], pp[3]) ]\n _,_,Zupper = self.__predFor3D(x,y,zupper, colPairs, nbins, user_cols, user_num_splits)\n _,_,Zlower = self.__predFor3D(x,y,zlower, colPairs, nbins, user_cols, user_num_splits)\n ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,linewidth=1, antialiased=False, alpha=0.5, edgecolor='k')\n if plot_stddev:\n ax.plot_surface(X, Y, Zupper, cmap=cm.coolwarm,linewidth=0.2, antialiased=False, alpha=0.3, edgecolor='y')\n ax.plot_surface(X, Y, Zlower, cmap=cm.coolwarm,linewidth=0.2, antialiased=False, alpha=0.3, edgecolor='g')\n ax.set_xlabel(colPairs[0])\n ax.set_xlim(min(x), max(x))\n ax.set_ylabel(colPairs[1])\n ax.set_ylim(min(y), max(y))\n ax.set_zlim(min([min(zupper), min(zlower), min(pp[2])]), max([max(zupper), max(zlower), max(pp[2])]))\n ax.set_zlabel('Partial dependence')\n titles = '2D partial dependence plot for '+colPairs[0] + ' and '+colPairs[1]\n ax.set_title(titles)\n return True\n \n def __plot_1dpdp(self, cols, i, data, pp, fig, gxs, plot_stddev):\n col = cols[i]\n cat = data[col].isfactor()[0]\n 
upper = [a + b for a, b in zip(pp[1], pp[2])]  # pp[1] is mean, pp[2] is std\n        lower = [a - b for a, b in zip(pp[1], pp[2])]\n        axs = fig.add_subplot(gxs[i])\n        self.__setAxs1D(axs, upper, lower, plot_stddev, cat, pp, 0, col)  # set up graph, axes, labels, ...\n        return True\n\n    # change x, y, z to be 2-D numpy arrays in order to plot them.\n    # note that x stays at one value for the duration of y value changes.\n    def __predFor3D(self, x, y, z, colPairs, nbins, user_cols, user_num_splits):\n        # deal with y axis first\n        np = _get_numpy(\"2D partial plots\")\n        if np is None:\n            print(\"Numpy not found.  Cannot plot 2D partial plots.\")\n            return None, None, None  # bail out early instead of crashing below\n        ycol = colPairs[1]\n        nBins = nbins\n        if ycol in user_cols:\n            ind = user_cols.index(ycol)\n            nBins = user_num_splits[ind]\n        nrow = int(len(x) / nBins)\n        X = np.transpose(np.array(x).reshape(nrow, nBins))\n        Y = np.transpose(np.array(y).reshape(nrow, nBins))\n        Z = np.transpose(np.array(z).reshape(nrow, nBins))\n        return X, Y, Z\n\n    def __grabValues(self, pp, index, data, col, axs):\n        cat = data[col].isfactor()[0]\n        if cat:\n            labels = pp[index]\n            uniqueL = list(set(labels))\n            x = range(len(uniqueL))\n            xlab = [None] * len(uniqueL)\n            for ind in range(len(uniqueL)):\n                xlab[ind] = labels[labels.index(uniqueL[ind])]\n\n            # replace string enum labels with integer values\n            xext = [None] * len(labels)\n            for ind in range(len(labels)):\n                xext[ind] = uniqueL.index(labels[ind])  # position of the label among the ticks\n\n            if index == 0:  # x-axis\n                axs.set_xticks(x)\n                axs.set_xticklabels(xlab)\n            else:  # y-axis\n                axs.set_yticks(x)\n                axs.set_yticklabels(xlab)\n            axs.margins(0.2)\n\n            return xext\n        else:\n            return pp[index]\n\n    def __setAxs1D(self, axs, upper, lower, plot_stddev, cat, pp, pp_start_index, col):\n        if cat:\n            labels = pp[pp_start_index]  # 1d pdp, this is 0\n            x = range(len(labels))\n            y = pp[pp_start_index + 1]\n            axs.plot(x, y, \"ro\")\n            if plot_stddev:\n                axs.plot(x, lower, 'b--')\n                axs.plot(x, upper, 'b--')\n            axs.set_ylim(min(lower) - 0.1 * abs(min(lower)), max(upper) + 0.1 * abs(max(upper)))\n            axs.set_xticks(x)\n            axs.set_xticklabels(labels)\n            axs.margins(0.2)\n        else:\n            x = pp[pp_start_index]\n            y = pp[pp_start_index + 1]\n            axs.plot(x, y, \"r-\")\n            if plot_stddev:\n                axs.plot(x, lower, 'b--')\n                axs.plot(x, upper, 'b--')\n            axs.set_xlim(min(x), max(x))\n            axs.set_ylim(min(lower) - 0.1 * abs(min(lower)), max(upper) + 0.1 * abs(max(upper)))\n\n        axs.set_title(\"Partial Dependence Plot For {}\".format(col))\n        axs.set_xlabel(pp.col_header[pp_start_index])\n        axs.set_ylabel(pp.col_header[pp_start_index + 1])\n        axs.xaxis.grid()\n        axs.yaxis.grid()\n\n    def varimp_plot(self, num_of_features=None, server=False):\n        \"\"\"\n        Plot the variable importance for a trained model.\n\n        :param num_of_features: the number of features shown in the plot (default is 10 or all if less than 10).\n        :param server: if True, use matplotlib \"server\" mode (Agg backend; the plot is not shown).\n\n        :returns: None.\n        \"\"\"\n        assert_is_type(num_of_features, None, int)\n        assert_is_type(server, bool)\n\n        plt = _get_matplotlib_pyplot(server)\n        if not plt: return\n\n        # get the variable importances as a list of tuples, do not use pandas dataframe\n        importances = self.varimp(use_pandas=False)\n        # feature labels correspond to the first value of each tuple in the importances list\n        feature_labels = [tup[0] for tup in importances]\n        # scaled importances correspond to the third value of each tuple in the importances list\n        scaled_importances = [tup[2] for tup in importances]\n        # specify bar centers on the y axis, but flip the order so largest bar appears at top\n        pos = range(len(feature_labels))[::-1]\n        # specify the bar lengths\n        val = scaled_importances\n\n        
# # check that num_of_features is an integer\n # if num_of_features is None:\n # num_of_features = len(val)\n\n # default to 10 or less features if num_of_features is not specified\n if num_of_features is None:\n num_of_features = min(len(val), 10)\n\n fig, ax = plt.subplots(1, 1, figsize=(14, 10))\n # create separate plot for the case where num_of_features == 1\n if num_of_features == 1:\n plt.barh(pos[0:num_of_features], val[0:num_of_features], align=\"center\",\n height=0.8, color=\"#1F77B4\", edgecolor=\"none\")\n # Hide the right and top spines, color others grey\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_color(\"#7B7B7B\")\n ax.spines[\"left\"].set_color(\"#7B7B7B\")\n # Only show ticks on the left and bottom spines\n ax.yaxis.set_ticks_position(\"left\")\n ax.xaxis.set_ticks_position(\"bottom\")\n plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])\n ax.margins(None, 0.5)\n\n else:\n plt.barh(pos[0:num_of_features], val[0:num_of_features], align=\"center\",\n height=0.8, color=\"#1F77B4\", edgecolor=\"none\")\n # Hide the right and top spines, color others grey\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_color(\"#7B7B7B\")\n ax.spines[\"left\"].set_color(\"#7B7B7B\")\n # Only show ticks on the left and bottom spines\n ax.yaxis.set_ticks_position(\"left\")\n ax.xaxis.set_ticks_position(\"bottom\")\n plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])\n plt.ylim([min(pos[0:num_of_features])- 1, max(pos[0:num_of_features])+1])\n # ax.margins(y=0.5)\n\n # check which algorithm was used to select right plot title\n if self._model_json[\"algo\"] == \"gbm\":\n plt.title(\"Variable Importance: H2O GBM\", fontsize=20)\n if not server: plt.show()\n elif self._model_json[\"algo\"] == \"drf\":\n plt.title(\"Variable Importance: H2O DRF\", fontsize=20)\n if not server: plt.show()\n elif self._model_json[\"algo\"] == \"xgboost\":\n plt.title(\"Variable Importance: H2O XGBoost\", fontsize=20)\n if not server: plt.show()\n # if H2ODeepLearningEstimator has variable_importances == True\n elif self._model_json[\"algo\"] == \"deeplearning\":\n plt.title(\"Variable Importance: H2O Deep Learning\", fontsize=20)\n if not server: plt.show()\n elif self._model_json[\"algo\"] == \"glm\":\n plt.title(\"Variable Importance: H2O GLM\", fontsize=20)\n if not server: plt.show() \n else:\n raise H2OValueError(\"A variable importances plot is not implemented for this type of model\")\n\n\n def std_coef_plot(self, num_of_features=None, server=False):\n \"\"\"\n Plot a GLM model\"s standardized coefficient magnitudes.\n\n :param num_of_features: the number of features shown in the plot.\n :param server: ?\n\n :returns: None.\n \"\"\"\n assert_is_type(num_of_features, None, I(int, lambda x: x > 0))\n\n # check that model is a glm\n if self._model_json[\"algo\"] != \"glm\":\n raise H2OValueError(\"This function is available for GLM models only\")\n\n plt = _get_matplotlib_pyplot(server)\n if not plt: return\n\n # get unsorted tuple of labels and coefficients\n unsorted_norm_coef = self.coef_norm().items()\n # drop intercept value then sort tuples by the coefficient\"s absolute value\n drop_intercept = [tup for tup in unsorted_norm_coef if tup[0] != \"Intercept\"]\n norm_coef = sorted(drop_intercept, key=lambda x: abs(x[1]), reverse=True)\n\n signage = []\n for element in norm_coef:\n # if positive including zero, color blue, else color orange (use same 
colors as Flow)\n if element[1] >= 0:\n signage.append(\"#1F77B4\") # blue\n else:\n signage.append(\"#FF7F0E\") # dark orange\n\n # get feature labels and their corresponding magnitudes\n feature_labels = [tup[0] for tup in norm_coef]\n norm_coef_magn = [abs(tup[1]) for tup in norm_coef]\n # specify bar centers on the y axis, but flip the order so largest bar appears at top\n pos = range(len(feature_labels))[::-1]\n # specify the bar lengths\n val = norm_coef_magn\n\n # check number of features, default is all the features\n if num_of_features is None:\n num_of_features = len(val)\n\n # plot horizontal plot\n fig, ax = plt.subplots(1, 1, figsize=(14, 10))\n # create separate plot for the case where num_of_features = 1\n if num_of_features == 1:\n plt.barh(pos[0], val[0],\n align=\"center\", height=0.8, color=signage[0], edgecolor=\"none\")\n # Hide the right and top spines, color others grey\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_color(\"#7B7B7B\")\n ax.spines[\"left\"].set_color(\"#7B7B7B\")\n # Only show ticks on the left and bottom spines\n ax.yaxis.set_ticks_position(\"left\")\n ax.xaxis.set_ticks_position(\"bottom\")\n plt.yticks([0], feature_labels[0])\n ax.margins(None, 0.5)\n\n else:\n plt.barh(pos[0:num_of_features], val[0:num_of_features],\n align=\"center\", height=0.8, color=signage[0:num_of_features], edgecolor=\"none\")\n # Hide the right and top spines, color others grey\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_color(\"#7B7B7B\")\n ax.spines[\"left\"].set_color(\"#7B7B7B\")\n # Only show ticks on the left and bottom spines\n ax.yaxis.set_ticks_position(\"left\")\n ax.xaxis.set_ticks_position(\"bottom\")\n plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])\n ax.margins(None, 0.05)\n\n # generate custom fake lines that will be used as legend entries:\n # check if positive and negative values exist\n # if positive create positive legend\n if \"#1F77B4\" in signage[0:num_of_features] and \"#FF7F0E\" not in signage[0:num_of_features]:\n color_ids = (\"Positive\",)\n markers = [plt.Line2D([0, 0], [0, 0], color=color, marker=\"s\", linestyle=\"\")\n for color in signage[0:num_of_features]]\n lgnd = plt.legend(markers, color_ids, numpoints=1, loc=\"best\", frameon=False, fontsize=13)\n lgnd.legendHandles[0]._legmarker.set_markersize(10)\n # if neg create neg legend\n elif \"#FF7F0E\" in signage[0:num_of_features] and \"#1F77B4\" not in signage[0:num_of_features]:\n color_ids = (\"Negative\",)\n markers = [plt.Line2D([0, 0], [0, 0], color=color, marker=\"s\", linestyle=\"\")\n for color in set(signage[0:num_of_features])]\n lgnd = plt.legend(markers, color_ids, numpoints=1, loc=\"best\", frameon=False, fontsize=13)\n lgnd.legendHandles[0]._legmarker.set_markersize(10)\n # if both provide both colors in legend\n else:\n color_ids = (\"Positive\", \"Negative\")\n markers = [plt.Line2D([0, 0], [0, 0], color=color, marker=\"s\", linestyle=\"\")\n for color in ['#1F77B4', '#FF7F0E']] # blue should always be positive, orange negative\n lgnd = plt.legend(markers, color_ids, numpoints=1, loc=\"best\", frameon=False, fontsize=13)\n lgnd.legendHandles[0]._legmarker.set_markersize(10)\n lgnd.legendHandles[1]._legmarker.set_markersize(10)\n\n # Hide the right and top spines, color others grey\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_color(\"#7B7B7B\")\n 
ax.spines[\"left\"].set_color(\"#7B7B7B\")\n\n # Only show ticks on the left and bottom spines\n # ax.yaxis.set_ticks_position(\"left\")\n # ax.xaxis.set_ticks_position(\"bottom\")\n plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])\n plt.tick_params(axis=\"x\", which=\"minor\", bottom=\"off\", top=\"off\", labelbottom=\"off\")\n plt.title(\"Standardized Coef. Magnitudes: H2O GLM\", fontsize=20)\n # plt.axis(\"tight\")\n # show plot\n if not server: plt.show()\n\n\n @staticmethod\n def _check_targets(y_actual, y_predicted):\n \"\"\"Check that y_actual and y_predicted have the same length.\n\n :param H2OFrame y_actual:\n :param H2OFrame y_predicted:\n\n :returns: None\n \"\"\"\n if len(y_actual) != len(y_predicted):\n raise ValueError(\"Row mismatch: [{},{}]\".format(len(y_actual), len(y_predicted)))\n\n\n def cross_validation_models(self):\n \"\"\"\n Obtain a list of cross-validation models.\n\n :returns: list of H2OModel objects.\n \"\"\"\n cvmodels = self._model_json[\"output\"][\"cross_validation_models\"]\n if cvmodels is None: return None\n m = []\n for p in cvmodels: m.append(h2o.get_model(p[\"name\"]))\n return m\n\n\n def cross_validation_predictions(self):\n \"\"\"\n Obtain the (out-of-sample) holdout predictions of all cross-validation models on their holdout data.\n\n Note that the predictions are expanded to the full number of rows of the training data, with 0 fill-in.\n\n :returns: list of H2OFrame objects.\n \"\"\"\n preds = self._model_json[\"output\"][\"cross_validation_predictions\"]\n if preds is None: return None\n m = []\n for p in preds: m.append(h2o.get_frame(p[\"name\"]))\n return m\n\n\n def cross_validation_holdout_predictions(self):\n \"\"\"\n Obtain the (out-of-sample) holdout predictions of all cross-validation models on the training data.\n\n This is equivalent to summing up all H2OFrames returned by cross_validation_predictions.\n\n :returns: H2OFrame\n \"\"\"\n preds = self._model_json[\"output\"][\"cross_validation_holdout_predictions_frame_id\"]\n if preds is None: return None\n return h2o.get_frame(preds[\"name\"])\n\n\n def cross_validation_fold_assignment(self):\n \"\"\"\n Obtain the cross-validation fold assignment for all rows in the training data.\n\n :returns: H2OFrame\n \"\"\"\n fid = self._model_json[\"output\"][\"cross_validation_fold_assignment_frame_id\"]\n if fid is None: return None\n return h2o.get_frame(fid[\"name\"])\n\n def rotation(self):\n \"\"\"\n Obtain the rotations (eigenvectors) for a PCA model\n\n :return: H2OFrame\n \"\"\"\n if self._model_json[\"algo\"] != \"pca\":\n raise H2OValueError(\"This function is available for PCA models only\")\n return self._model_json[\"output\"][\"eigenvectors\"]\n\n def score_history(self):\n \"\"\"DEPRECATED. 
Use :meth:`scoring_history` instead.\"\"\"\n return self.scoring_history()\n\n\n # Deprecated functions; left here for backward compatibility\n _bcim = {\n \"giniCoef\": lambda self, *args, **kwargs: self.gini(*args, **kwargs),\n }\n\n\n\n\ndef _get_matplotlib_pyplot(server):\n try:\n # noinspection PyUnresolvedReferences\n import matplotlib\n if server: matplotlib.use(\"Agg\", warn=False)\n # noinspection PyUnresolvedReferences\n import matplotlib.pyplot as plt\n return plt\n except ImportError:\n print(\"`matplotlib` library is required for this function!\")\n return None\n\ndef _get_mplot3d_pyplot(functionName):\n try:\n # noinspection PyUnresolvedReferences\n from mpl_toolkits.mplot3d import Axes3D\n return Axes3D\n except ImportError:\n print(\"`mpl_toolkits.mplot3d` library is required for function {0}!\".format(functionName))\n return None\n\ndef _get_numpy(functionName):\n try:\n import numpy as np\n return np\n except ImportError:\n print(\"`numpy` library is required for function {0}!\".format(functionName))\n return None\n\ndef _get_matplotlib_cm(functionName):\n try:\n from matplotlib import cm\n return cm\n except ImportError:\n print('matplotlib library is required for 3D plots for function {0}'.format(functionName))\n return None\n \n"
] |
[
[
"matplotlib.use",
"matplotlib.gridspec.GridSpec",
"pandas.DataFrame"
]
] |
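Editor's note: the H2O metric accessors in the file above (mse, rmse, mae, rmsle, logloss, auc, aic, gini) all reuse a single dispatch idiom: _get_metrics collects the requested train/valid/xval metric objects into a dict, and the caller unwraps the dict when exactly one metric was requested. Below is a minimal, self-contained sketch of that idiom; the plain `output` dict and its "MSE" key are illustrative stand-ins for H2O's real metric objects, not the H2O API.

# Sketch of the train/valid/xval dispatch shared by the metric accessors above.
def _get_metrics(output, train=False, valid=False, xval=False):
    metrics = {}
    if train: metrics["train"] = output["training_metrics"]
    if valid: metrics["valid"] = output["validation_metrics"]
    if xval: metrics["xval"] = output["cross_validation_metrics"]
    if len(metrics) == 0: metrics["train"] = output["training_metrics"]  # default to train
    return metrics

def mse(output, train=False, valid=False, xval=False):
    tm = _get_metrics(output, train, valid, xval)
    m = {k: (None if v is None else v["MSE"]) for k, v in tm.items()}
    # a single requested metric is unwrapped; several come back as a dict
    return list(m.values())[0] if len(m) == 1 else m

output = {"training_metrics": {"MSE": 0.10},
          "validation_metrics": {"MSE": 0.12},
          "cross_validation_metrics": None}
print(mse(output))                          # 0.1 (defaults to train)
print(mse(output, train=True, valid=True))  # {'train': 0.1, 'valid': 0.12}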
tkhirianov/TopicNet
|
[
"41318897d25b622da16f20d68fe9c315cd177b3c"
] |
[
"topicnet/viewers/spectrum.py"
] |
[
"\"\"\"\nA few ways to obtain \"decent\" solution to TSP problem\nwhich returns a spectre of topics in our case. \nIf speed is the essence I recommend to use functions providing\ngood initial solution. Which are, get_nearest_neighbour_init. \nIf that solution is not good enough use annealing heuristic (get_annealed_spectrum). \nAnother good but time-heavy option is full check with get_three_opt_path. \nPerforms well on < 50 topics. \nWithin a few runs with right temperature selected it can provide a\nsolution better than the initial.\n\"\"\" # noqa: W291\nimport numpy as np\nimport warnings\nfrom scipy.spatial import distance\nfrom tqdm import tqdm\nfrom .base_viewer import BaseViewer\n\n\ndef get_nearest_neighbour_init(phi_matrix, metric='jensenshannon', start_topic=0):\n \"\"\"\n Given the matrix calculates the initial path by nearest neighbour heuristic.\n\n Parameters\n ----------\n phi_matrix : np.array of float\n a matrix of N topics x M tokens from the model\n metric : str\n name of a metric to compute distances (Default value = 'jensenshannon')\n start_topic : int\n an index of a topic to start and end the path with (Default value = 0)\n\n Returns\n -------\n init_path : list of int\n order of initial topic distribution\n\n \"\"\"\n init_path = [start_topic, ]\n connection_candidates = [int(topic) for topic in np.arange(phi_matrix.shape[0])\n if topic not in init_path]\n neighbour_vectors = phi_matrix[connection_candidates, :]\n\n while len(connection_candidates) > 0:\n last_connection = phi_matrix[[init_path[-1]]]\n nearest_index = distance.cdist(last_connection, neighbour_vectors, metric=metric).argmin()\n init_path.append(connection_candidates[nearest_index])\n connection_candidates = [int(topic) for topic in np.arange(phi_matrix.shape[0])\n if topic not in init_path]\n neighbour_vectors = np.delete(neighbour_vectors, nearest_index, axis=0)\n\n init_path.append(start_topic)\n init_path = [int(topic) for topic in init_path]\n return init_path\n\n\ndef generate_all_segments(n):\n \"\"\"\n Generates all segments combinations for 3-opt swap operation.\n\n Parameters\n ----------\n n : int > 5\n length of path for fixed endpoint\n\n Yields\n -------\n list of int\n\n \"\"\"\n for i in range(n-1):\n for j in range(i + 2, n - 1):\n for k in range(j + 2, n - 1): # + (i > 0)\n yield [i, j, k]\n\n\ndef generate_three_opt_candidates(path, sequence):\n \"\"\"\n Generates all possible tour connections and filters out a trivial one.\n\n Parameters\n ----------\n path : np.array of float\n square matrix of distances between all topics\n sequence : list of int\n list of indices to perform swap on\n\n Yields\n ------\n list of int\n possible tour\n\n \"\"\"\n chunk_start = path[:sequence[0] + 1]\n chunk_one = path[sequence[0] + 1:sequence[1] + 1]\n chunk_two = path[sequence[1] + 1:sequence[2] + 1]\n chunk_end = path[sequence[2] + 1:]\n\n for change_chunks in [True, False]:\n middle_chunks = [chunk_two, chunk_one] if change_chunks else [chunk_one, chunk_two]\n\n for reverse_first_chunk in [True, False]:\n if reverse_first_chunk:\n first_chunk = middle_chunks[0][::-1]\n else:\n first_chunk = middle_chunks[0]\n\n for reverse_second_chunk in [True, False]:\n\n if reverse_second_chunk:\n second_chunk = middle_chunks[1][::-1]\n else:\n second_chunk = middle_chunks[1]\n\n if change_chunks or reverse_first_chunk or reverse_second_chunk:\n tour = chunk_start + first_chunk + second_chunk + chunk_end\n yield tour\n\n\ndef make_three_opt_swap(path, distance_m, sequence, temperature=None):\n \"\"\"\n 
Performs swap based on the selection candidates,\n allows for non-optimal solution to be accepted\n based on Boltzman distribution.\n\n Parameters\n ----------\n path : list of int\n current path\n distance_m : np.array of float\n square matrix of distances between all topics\n sequence : list of int\n list of indices to perform swap on\n temperature : float\n \"temperature\" parameter regulates strictness of\n the new candidate choice (Default value = None)\n if None - works in a regime when only better solutions are chosen \n This regime is used for 3-opt heuristic\n\n Returns\n -------\n path : list of int\n best path after the permutation\n val : float\n a value gained after the path permutation\n\n \"\"\" # noqa: W291\n\n cut_connections = sum([[path[ind], path[ind + 1]] for ind in sequence], [])\n baseline = np.sum(distance_m[cut_connections[:-1], cut_connections[1:]])\n\n # 6 == len(cut_connections) always\n new_connections = list(generate_three_opt_candidates(cut_connections,\n generate_index_candidates(6)))\n\n candidates = list(generate_three_opt_candidates(path, sequence))\n scores = [np.sum(distance_m[new[:-1], new[1:]]) - baseline for new in new_connections]\n best_score = np.min(scores)\n\n if best_score < 0.0:\n path = candidates[np.argmin(scores)]\n val = best_score\n else:\n if temperature is None:\n val = 0.0\n else:\n # 1e-8 saves from division by 0\n boltzman = np.exp(- best_score / temperature)\n val = 0.0\n if np.random.rand() > boltzman:\n path = candidates[np.argmin(scores)]\n val = best_score\n\n return path, val\n\n\ndef get_three_opt_path(path, distance_m, max_iter=20):\n \"\"\"\n Iterative improvement based on 3 opt exchange.\n\n Parameters\n ----------\n path : list of int\n path to optimize\n distance_m : np.array of float\n square matrix of distances between all topics, \n attempt at optimizing path from the other end\n max_iter : int\n maximum iteration number (Default value = 20)\n\n Returns\n -------\n path : list of int\n end optimization of the route\n\n \"\"\" # noqa: W291\n count_iter = 0\n while True and count_iter <= max_iter:\n delta = 0\n\n for segment in generate_all_segments(len(path)):\n path, d = make_three_opt_swap(path, distance_m, segment)\n delta += d\n count_iter += 1\n if count_iter >= max_iter:\n warnings.warn('Reached maximum iterations', UserWarning)\n if delta >= 0:\n break\n\n return path\n\n\ndef generate_index_candidates(n):\n \"\"\"\n Randomly chooses 3 indexes from the path. 
\n Does not swap the first or the last point because they fixed.\n\n Parameters\n ----------\n n : int > 5\n length of the path\n\n Returns\n -------\n segment: list of int\n sorted list of candidates for 3 opt swap optimization\n\n \"\"\" # noqa: W291\n segment = np.zeros(3, dtype='int')\n\n first_interval = np.arange(n - 5)\n segment[0] = np.random.choice(first_interval)\n\n second_interval = np.arange(segment[0] + 2, n - 3)\n segment[1] = np.random.choice(second_interval)\n\n third_interval = np.arange(segment[1] + 2, n - 1)\n segment[2] = np.random.choice(third_interval, 1)\n\n return segment\n\n\ndef get_annealed_spectrum(phi_matrix,\n t_coeff,\n start_topic=0,\n metric='jensenshannon',\n init_path=None,\n max_iter=1000000,\n early_stopping=100000,):\n \"\"\"\n Returns annealed spectrum for the topics in the Phi matrix\n with default metrics being Jensen-Shannon.\n\n Parameters\n ----------\n phi_matrix : np.array of float\n Phi matrix of N topics x M tokens from the model\n t_coeff : float\n coefficient that brings ambiguity to the process,\n bigger coefficient allows to jump from local minima.\n start_topic : int\n index of a topic to start and end the path with (Default value = 0)\n metric : str\n name of a metric to compute distances (Default value = 'jensenshannon')\n init_path : list of int\n initial route, contains all numbers from 0 to N-1,\n starts and ends with the same number from the given range (Default value = None)\n max_iter : int\n number of iterations for annealing (Default value = 1000000)\n early_stopping : int\n number of iterations without improvement before stop (Default value = 100000)\n\n Returns\n -------\n best_path : list of int\n best path obtained during the run\n best_score : float\n length of the best path during the run\n\n \"\"\" # noqa: W291\n distance_m = distance.squareform(distance.pdist(phi_matrix, metric=metric))\n np.fill_diagonal(distance_m, 10 * np.max(distance_m))\n if init_path is None:\n current_path = get_nearest_neighbour_init(phi_matrix,\n metric=metric,\n start_topic=start_topic)\n else:\n current_path = init_path\n\n if len(current_path) < 6:\n warnings.warn('The path is too short, returning nearest neighbour solution.',\n UserWarning)\n return current_path, np.sum(distance_m[current_path[:-1], current_path[1:]])\n\n best_score = np.sum(distance_m[current_path[:-1], current_path[1:]])\n best_path = current_path\n running_score = best_score\n\n no_progress_steps = 0\n for i in tqdm(range(max_iter), total=max_iter, leave=False):\n temperature_iter = t_coeff * (max_iter / (i + 1))\n sequence = generate_index_candidates(len(current_path))\n current_path, score = make_three_opt_swap(current_path,\n distance_m,\n sequence,\n temperature=temperature_iter)\n running_score += score\n\n if running_score < best_score:\n best_path = current_path\n best_score = running_score\n no_progress_steps = 0\n else:\n no_progress_steps += 1\n if no_progress_steps >= early_stopping:\n break\n return best_path, best_score\n\n\nclass TopicSpectrumViewer(BaseViewer):\n def __init__(\n self,\n model,\n t_coeff=1e5,\n start_topic=0,\n metric='jensenshannon',\n init_path=None,\n max_iter=1000000,\n early_stopping=100000,\n verbose=False,\n class_ids=None\n ):\n \"\"\"\n Class providing wrap around for functions\n that allow to view a collection of topics\n in order of their similarity to each other.\n\n Parameters\n ----------\n model : TopicModel\n topic model from TopicNet library\n t_coeff : float\n coefficient for annealing, value should be chosen\n 
start_topic : int\n number of model topic to start from\n metric : string or function\n name of the default metric implemented in scipy or function \n that calculates metric based on the input matrix\n init_path : list of int\n initial tour that could be provided by the user\n max_iter : int\n number of iterations for annealing\n early_stopping : int\n number of iterations without improvement before stop\n verbose : boolean\n if print the resulting length of the tour\n class_ids : list of str\n parameter for model.get_phi method\n contains list of modalities to obtain from the model\n (Default value = None)\n \"\"\" # noqa: W291\n super().__init__(model=model)\n self.metric = metric\n self.start_topic = start_topic\n self.t_coeff = t_coeff\n self.init_path = init_path\n self.verbose = verbose\n self.early_stopping = early_stopping\n self.max_iter = max_iter\n self.class_ids = class_ids\n\n def view(self, class_ids=None):\n \"\"\"\n The class method returning ordered spectrum of\n the topics.\n\n Parameters\n ----------\n class_ids : list of str\n parameter for model.get_phi method\n contains list of modalities to obtain from the model (Default value = None)\n ordered_topics : list of str\n topic names from the model ordered as spectrum\n\n \"\"\" # noqa: W291\n # default get_phi returns N x T matrix while we implemented T x N\n if class_ids is None:\n class_ids = self.class_ids\n model_phi = self.model.get_phi(class_ids=class_ids).T\n spectrum, distance = get_annealed_spectrum(model_phi.values,\n self.t_coeff,\n metric=self.metric,\n start_topic=self.start_topic,\n init_path=self.init_path,\n max_iter=self.max_iter,\n early_stopping=self.early_stopping,)\n if self.verbose:\n print('the resulting path length: ', distance)\n ordered_topics = (\n model_phi\n .iloc[spectrum]\n .index.values\n )\n return ordered_topics\n"
] |
[
[
"numpy.random.choice",
"numpy.min",
"numpy.arange",
"scipy.spatial.distance.cdist",
"numpy.max",
"numpy.delete",
"scipy.spatial.distance.pdist",
"numpy.argmin",
"numpy.random.rand",
"numpy.exp",
"numpy.zeros",
"numpy.sum"
]
] |
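A note on the acceptance rule in `make_three_opt_swap` above: improving candidates (negative `best_score`) are always taken, and when a `temperature` is set, a non-improving candidate can pass a Boltzmann-style test. Below is a minimal, self-contained mirror of that step; the function name `accept_swap` and the `rng` argument are ours, not the file's. Note that the file accepts when the random draw exceeds the Boltzmann factor, whereas textbook simulated annealing accepts when the draw falls below exp(-Δ/T).

```python
import numpy as np

def accept_swap(delta, temperature, rng=None):
    """Mirror of the acceptance step in make_three_opt_swap above."""
    rng = rng or np.random.default_rng(0)
    if delta < 0.0:          # improving move: always accepted
        return True
    if temperature is None:  # greedy regime used by get_three_opt_path
        return False
    boltzmann = np.exp(-delta / temperature)
    # The source accepts when the draw exceeds the Boltzmann factor;
    # classical annealing would accept when it falls below it.
    return rng.random() > boltzmann
```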
lem0nle/PyGCL
|
[
"340b0201a5edf4236fef4c96b958ff373ceb7f28"
] |
[
"GCL/models/samplers.py"
] |
[
"import torch\nfrom abc import ABC, abstractmethod\nfrom torch_scatter import scatter\n\n\nclass Sampler(ABC):\n def __init__(self, intraview_negs=False):\n self.intraview_negs = intraview_negs\n\n def __call__(self, anchor, sample, *args, **kwargs):\n ret = self.sample(anchor, sample, *args, **kwargs)\n if self.intraview_negs:\n ret = self.add_intraview_negs(*ret)\n return ret\n\n @abstractmethod\n def sample(self, anchor, sample, *args, **kwargs):\n pass\n\n @staticmethod\n def add_intraview_negs(anchor, sample, pos_mask, neg_mask):\n num_nodes = anchor.size(0)\n device = anchor.device\n intraview_pos_mask = torch.zeros_like(pos_mask, device=device)\n intraview_neg_mask = torch.ones_like(pos_mask, device=device) - torch.eye(num_nodes, device=device)\n new_sample = torch.cat([sample, anchor], dim=0) # (M+N) * K\n new_pos_mask = torch.cat([pos_mask, intraview_pos_mask], dim=1) # M * (M+N)\n new_neg_mask = torch.cat([neg_mask, intraview_neg_mask], dim=1) # M * (M+N)\n return anchor, new_sample, new_pos_mask, new_neg_mask\n\n\nclass SameScaleSampler(Sampler):\n def __init__(self, *args, **kwargs):\n super(SameScaleSampler, self).__init__(*args, **kwargs)\n\n def sample(self, anchor, sample, *args, **kwargs):\n assert anchor.size(0) == sample.size(0)\n num_nodes = anchor.size(0)\n device = anchor.device\n pos_mask = torch.eye(num_nodes, dtype=torch.float32, device=device)\n neg_mask = 1. - pos_mask\n return anchor, sample, pos_mask, neg_mask\n\n\nclass CrossScaleSampler(Sampler):\n def __init__(self, *args, **kwargs):\n super(CrossScaleSampler, self).__init__(*args, **kwargs)\n\n def sample(self, anchor, sample, batch=None, neg_sample=None, use_gpu=True, *args, **kwargs):\n num_graphs = anchor.shape[0] # M\n num_nodes = sample.shape[0] # N\n device = sample.device\n\n if neg_sample is not None:\n assert num_graphs == 1 # only one graph, explicit negative samples are needed\n assert sample.shape == neg_sample.shape\n pos_mask1 = torch.ones((num_graphs, num_nodes), dtype=torch.float32, device=device)\n pos_mask0 = torch.zeros((num_graphs, num_nodes), dtype=torch.float32, device=device)\n pos_mask = torch.cat([pos_mask1, pos_mask0], dim=1) # M * 2N\n sample = torch.cat([sample, neg_sample], dim=0) # 2N * K\n else:\n assert batch is not None\n if use_gpu:\n ones = torch.eye(num_nodes, dtype=torch.float32, device=device) # N * N\n pos_mask = scatter(ones, batch, dim=0, reduce='sum') # M * N\n else:\n pos_mask = torch.zeros((num_graphs, num_nodes), dtype=torch.float32).to(device)\n for node_idx, graph_idx in enumerate(batch):\n pos_mask[graph_idx][node_idx] = 1. # M * N\n\n neg_mask = 1. - pos_mask\n return anchor, sample, pos_mask, neg_mask\n\n\ndef get_sampler(mode: str, intraview_negs: bool) -> Sampler:\n if mode in {'L2L', 'G2G'}:\n return SameScaleSampler(intraview_negs=intraview_negs)\n elif mode == 'G2L':\n return CrossScaleSampler(intraview_negs=intraview_negs)\n else:\n raise RuntimeError(f'unsupported mode: {mode}')\n"
] |
[
[
"torch.ones",
"torch.cat",
"torch.zeros",
"torch.eye",
"torch.zeros_like",
"torch.ones_like"
]
] |
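For reference, a minimal usage sketch of the `get_sampler` factory from the file above. Tensor sizes are illustrative; it assumes `torch` is installed (and `torch_scatter` for the `G2L` path) and that the module is importable under its repository path.

```python
import torch
from GCL.models.samplers import get_sampler  # module shown above

anchor = torch.randn(4, 16)  # 4 node embeddings from view 1
sample = torch.randn(4, 16)  # the matching embeddings from view 2

sampler = get_sampler('L2L', intraview_negs=False)  # SameScaleSampler
anchor, sample, pos_mask, neg_mask = sampler(anchor, sample)

# positives sit on the diagonal, everything else is a negative
assert torch.equal(pos_mask, torch.eye(4))
assert torch.equal(neg_mask, 1.0 - torch.eye(4))
```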
Arctickirillas/Rubrication
|
[
"35d2d7362aaf4776dcb28d13d7e07942ac8bad85"
] |
[
"competition.py"
] |
[
"# coding: utf-8\n__author__ = 'Kirill Rudakov'\n\nimport read as r\nimport quantify as q\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk import word_tokenize\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.corpus import stopwords\nfrom sklearn import cross_validation\n# from quantification import Quantification\nimport subprocess\nimport numpy as np\nfrom time import sleep\n\n\ndef obtainSVMperfTwoPoint(sentiment,tweets,name = 'noname'):\n path = 'SemEval/data/two-point/'\n file = open(path+str(name)+'.txt','w')\n for i in range(len(sentiment)):\n if sentiment[i] == 'positive':\n file.write('1 ')\n for j,tw in enumerate(tweets[i]):\n if tw != 0.0:\n file.write(str(j+1)+':'+str(tw)+' ')\n file.write('\\n')\n elif sentiment[i] == 'negative':\n file.write('-1 ')\n for j,tw in enumerate(tweets[i]):\n if tw != 0.0:\n file.write(str(j+1)+':'+str(tw)+' ')\n file.write('\\n')\n else:\n file.write('0 ')\n for j,tw in enumerate(tweets[i]):\n if tw != 0.0:\n file.write(str(j+1)+':'+str(tw)+' ')\n file.write('\\n')\n file.close()\n\ndef obtainSVMperfFivePoint(sentiment,tweets,name = 'noname'):\n path = 'SemEval/data/five-point/'\n for m in range(-2,3,1):\n file = open(path+str(name)+'('+str(m)+')'+'.txt','w')\n for i in range(len(sentiment)):\n if str(sentiment[i]) == str(m):\n file.write('1 ')\n for j,tw in enumerate(tweets[i]):\n if tw != 0.0:\n file.write(str(j+1)+':'+str(tw)+' ')\n file.write('\\n')\n elif str(sentiment[i]) == 'UNKNOWN':\n file.write('0 ')\n for j,tw in enumerate(tweets[i]):\n if tw != 0.0:\n file.write(str(j+1)+':'+str(tw)+' ')\n file.write('\\n')\n else:\n file.write('-1 ')\n for j,tw in enumerate(tweets[i]):\n if tw != 0.0:\n file.write(str(j+1)+':'+str(tw)+' ')\n file.write('\\n')\n file.close()\n\n\ndef SVMperfForTwoPoint(train=None,test=None,model=None,predictions=None):\n train = \"SemEval/data/two-point/train.txt\"\n test = \"SemEval/data/two-point/test.txt\"\n model = \"SemEval/SVMperf/two-point/model.txt\"\n predictions = \"SemEval/SVMperf/two-point/predictions.txt\"\n subprocess.Popen([\"svm-perf-original/svm_perf_learn\",\"-c\",\"20\",train,model], stdout=subprocess.PIPE)\n sleep(1)\n predict = subprocess.Popen([\"svm-perf-original/svm_perf_classify\",test,model,predictions], stdout=subprocess.PIPE)\n sleep(1)\n return predict.communicate()[0]\n\n# Проблема с моделями!!! 
Нужно несколько раз запускать\ndef SVMperfForFivePoint(type=None):\n for i in range(-2,3,1):\n train = \"SemEval/data/five-point/train(\"+str(i)+').txt'\n model = \"SemEval/SVMperf/five-point/model(\"+str(i)+\").txt\"\n subprocess.Popen([\"svm-perf-original/svm_perf_learn\",\"-c\",\"20\",train,model], stdout=subprocess.PIPE)\n sleep(1)\n for j in range(-2,3,1):\n test = \"SemEval/data/five-point/test(\"+str(j)+\").txt\"\n predictions = \"SemEval/SVMperf/five-point/predictions(\"+str(i)+\")_(\"+str(j)+\").txt\"\n predict = subprocess.Popen([\"svm-perf-original/svm_perf_classify\",test,model,predictions], stdout=subprocess.PIPE)\n sleep(1)\n file = open('SemEval/SVMperf/five-point/report('+str(i)+\")_(\"+str(j)+\").txt\" ,'wr')\n file.write(predict.communicate()[0])\n\ndef SVMperf():\n learn = subprocess.Popen([\"svm-perf-original/svm_perf_learn\",\"-c\",\"20\",\"SemEval/train.txt\",\"SemEval/model.txt\"], stdout=subprocess.PIPE)\n sleep(1)\n predict = subprocess.Popen([\"svm-perf-original/svm_perf_classify\",\"SemEval/test.txt\",\"SemEval/model.txt\",\"SemEval/predictions.txt\"], stdout=subprocess.PIPE)\n sleep(1)\n print(predict.communicate()[0])\n\n\ndef obtainPredictedAndTweets(tweetData):\n _predicted = []\n _tweets = []\n _topic = []\n file = open(tweetData, 'r')\n for line in file:\n data = line.split('\\t')\n if data[3] != 'Not Available\\n':\n _tweets.append(data[3])\n _predicted.append(data[2])\n _topic.append(data[1])\n return _predicted, _tweets, _topic\n\ndef kld(p, q):\n \"\"\"Kullback-Leibler divergence D(P || Q) for discrete distributions when Q is used to approximate P\n Parameters\n p, q : array-like, dtype=float, shape=n\n Discrete probability distributions.\"\"\"\n p = np.asarray(p, dtype=np.float)\n q = np.asarray(q, dtype=np.float)\n return np.sum(np.where(p != 0,p * np.log(p / q), 0))\n\ndef getPredictions(predictions):\n q = []\n f = open(predictions,'r')\n for line in f:\n if float(line) >= 0.61 :\n q.append(1)\n elif float(line) < 0.61:\n q.append(-1)\n f.close()\n return np.array(q)\n\ndef getPredictionsOption(predictions, option):\n q = []\n f = open(predictions,'r')\n for line in f:\n if float(line) >= option :\n q.append(1)\n elif float(line) < option:\n q.append(-1)\n f.close()\n return np.array(q)\n\ndef getRealValue(data_test):\n q = []\n f = open(data_test,'r')\n for line in f:\n q.append(int(line.split(' ')[0]))\n f.close()\n return np.array(q)\n\n\n\n\n\n\n# MAIN\n# stop = stopwords.words('english')\n\npredicted = []\ntweets = []\ntopic = []\n\npredicted,tweets, topic = obtainPredictedAndTweets('TweetsDownloaded/five-point/downloadedTrain.tsv')\npredicted.reverse()\n\npredictedTest, tweetsTest, topicTest = obtainPredictedAndTweets('TweetsDownloaded/testData/test.txt')\n\n\n# http://www.nltk.org/api/nltk.tokenize.html\ntknzr = TweetTokenizer()\nstemmer = SnowballStemmer(\"english\")\nvectorizer = TfidfVectorizer(analyzer = \"word\",\n tokenizer = None,\n preprocessor = None,\n stop_words = 'english'\n )\n\n\ntw = []\nfor t,statement in enumerate(tweets):\n tw.append(' '.join(stemmer.stem(i) for i in tknzr.tokenize(statement)))\n\ntwt = []\nfor t,statement in enumerate(tweetsTest):\n twt.append(' '.join(stemmer.stem(i) for i in tknzr.tokenize(statement)))\n\ntrain = tw\nsentimentTrain = predicted\ntest = twt\nsentimentTest = predictedTest\n\n\ntrain, test, sentimentTrain, sentimentTest = cross_validation.train_test_split(tw, predicted, test_size=0.3, random_state=15)\n\ntrain_data_features= vectorizer.fit_transform(train)\ntrain_data_features = 
train_data_features.toarray()\n\ntest_data_features = vectorizer.transform(test)\ntest_data_features = test_data_features.toarray()\n\n\n\n\ndef obtainTwoPointData():\n obtainSVMperfTwoPoint(sentimentTrain,train_data_features,'train')\n obtainSVMperfTwoPoint(sentimentTest,test_data_features,'test')\n\n\ndef obtainFivePointdata():\n obtainSVMperfFivePoint(sentimentTrain,train_data_features,'train')\n obtainSVMperfFivePoint(sentimentTest,test_data_features,'test')\n\n\n# obtainFivePointdata()\n# SVMperfForFivePoint()\n\n# p = q.sentimentTest()\n# qq = getPredictions()\n\n\n\nqq = []\nf = open('SemEval/SVMperf/two-point/predictions.txt','r')\nfor line in f:\n if float(line)>=1 :\n qq.append(1)\n elif float(line)<1:\n qq.append(-1)\n\ndef getOutData(q, topic, name = 'out'):\n top = ''\n countOfTop = 1.\n neg = 0.\n pos = 0.\n file = open (str(name)+'.output','w')\n prev_list={}\n for i in range(len(q)):\n if top==topic[i]:\n if q[i]==1:\n pos += 1\n else:\n neg += 1\n countOfTop += 1\n else:\n if top!='':\n file.write(str(top)+'\\t'+str(float(pos/countOfTop))+'\\t'+str(float(neg/countOfTop)) + '\\n')\n prev_list[str(top)]=[float(pos/countOfTop),float(neg/countOfTop)]\n top = topic[i]\n pos = 0.\n neg =0.\n if q[i]==1:\n pos += 1\n else:\n neg += 1\n countOfTop = 1\n file.write(str(top)+'\\t'+str(float(pos/countOfTop))+'\\t'+str(float(neg/countOfTop)) + '\\n')\n prev_list[str(top)]=[float(pos/countOfTop),float(neg/countOfTop)]\n file.close()\n return prev_list\n\n\n# getOutData(qq,topicTest,'test_out')\n\n\n\n\n\n# qq = getPredictions(\"SemEval/SVMperf/five-point/predictions(0)_(0).txt\")\n# qq = q.CCforDouble(qq)\n# print kld(p,qq)\n# option = 0.61 # the best two-point\n# option = -0.25 # the best five-point\n\noption = -0.2\np = getRealValue(\"SemEval/data/five-point/test(2).txt\")\np = q.CCforDouble(p)\nqq = getPredictionsOption(\"SemEval/SVMperf/five-point/predictions(2)_(2).txt\",option)\nqq = q.CCforDouble(qq)\nprint(kld(p,qq))\noption = -1.\n_kld = 1.\ndef preFunc():\n for i in range(-2,3,1):\n print(i)\npreFunc()\n# while option<1.3:\n#\n# qq = getPredictionsOption(\"SemEval/SVMperf/five-point/predictions(0)_(0).txt\",option)\n# qq = q.CCforDouble(qq)\n#\n# # file = open('SemEval/SVMperf/five-point/report(0)_(0).txt','r')\n# # for line in file:\n# # print line\n# if kld(p,qq)<_kld:\n#\n# _kld = kld(p,qq)\n# print option,':',kld(p,qq)\n# option += 0.01\nexit()\n\n# print 'Random Forest'\n# print 'Classification:'\n# sentimentTestEst,sentimentTestProbability = r.randomForest(train_data_features,sentimentTrain,test_data_features)\n# print(r.metrics.classification_report(sentimentTest,sentimentTestEst))\n# print 'Quantification:'\n# q.CCforDouble(sentimentTestEst)\n# q.CCforDouble(sentimentTestEst,sentimentTestProbability)\n\n\n# print 'simpleSVM'\n# print 'Classification:'\nsentimentTestEst,sentimentTestProbability = r.simpleSVM(train_data_features,sentimentTrain,test_data_features, 100)\n# print(r.metrics.classification_report(sentimentTest,sentimentTestEst))\n# print 'Quantification:'\nqq = q.CCforDouble(sentimentTestEst)\nqq_prob = q.CCforDouble(sentimentTestEst,sentimentTestProbability)\np = q.CCforDouble(sentimentTest)\n\n\n\n\n\n\n\n\n\n#\nprint(kld(p,qq))\nprint(kld(p,qq_prob))\n\n\n\n# print 'multiClassSVM'\n# sentimentTestEst = r.multiClassSVM(train_data_features,sentimentTrain,test_data_features)\n# print(r.metrics.classification_report(sentimentTest,sentimentTestEst))\n\n\n\n\nqq = []\nf = open('SemEval/predictions.txt','r')\nfor line in f:\n if float(line)>0 and float(line)<0.5 
or float(line)>1 :\n qq.append(1)\n elif float(line)<0 and float(line)>-0.5 or float(line)<-1:\n qq.append(-1)\n else:\n qq.append(int(round(float(line))))\n\nqq = q.CCforDouble(qq)\n\nprint(qq)\n\nprint(kld(p,qq))"
] |
[
[
"numpy.log",
"sklearn.cross_validation.train_test_split",
"numpy.asarray",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
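The `kld` helper above implements discrete KL divergence, D(P||Q) = sum_i p_i * log(p_i / q_i), skipping zero-probability entries of p. A standalone check follows; note the file uses `np.asarray(..., dtype=np.float)`, and `np.float` was removed in NumPy 1.24, so this sketch substitutes the builtin `float`.

```python
import numpy as np

def kld(p, q):
    # Same formula as in competition.py above, with np.float -> float
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))

print(kld([0.5, 0.5], [0.5, 0.5]))  # 0.0: identical distributions
print(kld([0.5, 0.5], [0.9, 0.1]))  # ~0.51 nats: q approximates p poorly
```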
alejandroviegener/copy-test
|
[
"c9b32380ce85ec11488092d9f692c63ce51bd9eb",
"c9b32380ce85ec11488092d9f692c63ce51bd9eb"
] |
[
"tests/test_plotting.py",
"muttlib/forecast.py"
] |
[
"\"\"\"muttlib.plotting test suite.\n\n`muttlib` uses `pytest-mpl` to plots testing.\n\nTo use, you simply need to mark the function where you want to compare images using\[email protected]_image_compare, and make sure that the function returns\na Matplotlib figure (or any figure object that has a savefig method):\n\n```python\nimport pytest\nimport matplotlib.pyplot as plt\n\[email protected]_image_compare\ndef test_succeeds():\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.plot([1,2,3])\n return fig\n```\n\nTo generate the baseline images, run the tests with the --mpl-generate-path option\nwith the name of the directory where the generated images should be placed:\n\n```python\npytest --mpl-generate-path=baseline\n```\n\nMore info about `pytest-mpl` library: https://github.com/matplotlib/pytest-mpl#using\n\"\"\"\nfrom copy import deepcopy\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom muttlib.plotting import plot\nfrom muttlib.plotting.constants import (\n DAILY_TIME_GRANULARITY,\n HOURLY_TIME_GRANULARITY,\n PLOT_CONFIG,\n DS_COL,\n Y_COL,\n YHAT_COL,\n)\n\n\[email protected]\ndef sample_data_df():\n # Taken from https://raw.githubusercontent.com/facebook/prophet/master/examples/example_retail_sales.csv\n return pd.DataFrame.from_records(\n np.array(\n [\n ('2013-02-01T00:00:00.000000000', 373938),\n ('2013-03-01T00:00:00.000000000', 421638),\n ('2013-04-01T00:00:00.000000000', 408381),\n ('2013-05-01T00:00:00.000000000', 436985),\n ('2013-06-01T00:00:00.000000000', 414701),\n ('2013-07-01T00:00:00.000000000', 422357),\n ('2013-08-01T00:00:00.000000000', 434950),\n ('2013-09-01T00:00:00.000000000', 396199),\n ('2013-10-01T00:00:00.000000000', 415740),\n ('2013-11-01T00:00:00.000000000', 423611),\n ('2013-12-01T00:00:00.000000000', 477205),\n ('2014-01-01T00:00:00.000000000', 383399),\n ('2014-02-01T00:00:00.000000000', 380315),\n ('2014-03-01T00:00:00.000000000', 432806),\n ('2014-04-01T00:00:00.000000000', 431415),\n ('2014-05-01T00:00:00.000000000', 458822),\n ('2014-06-01T00:00:00.000000000', 433152),\n ('2014-07-01T00:00:00.000000000', 443005),\n ('2014-08-01T00:00:00.000000000', 450913),\n ('2014-09-01T00:00:00.000000000', 420871),\n ('2014-10-01T00:00:00.000000000', 437702),\n ('2014-11-01T00:00:00.000000000', 437910),\n ('2014-12-01T00:00:00.000000000', 501232),\n ('2015-01-01T00:00:00.000000000', 397252),\n ('2015-02-01T00:00:00.000000000', 386935),\n ('2015-03-01T00:00:00.000000000', 444110),\n ('2015-04-01T00:00:00.000000000', 438217),\n ('2015-05-01T00:00:00.000000000', 462615),\n ('2015-06-01T00:00:00.000000000', 448229),\n ('2015-07-01T00:00:00.000000000', 457710),\n ('2015-08-01T00:00:00.000000000', 456340),\n ('2015-09-01T00:00:00.000000000', 430917),\n ('2015-10-01T00:00:00.000000000', 444959),\n ('2015-11-01T00:00:00.000000000', 444507),\n ('2015-12-01T00:00:00.000000000', 518253),\n ('2016-01-01T00:00:00.000000000', 400928),\n ('2016-02-01T00:00:00.000000000', 413554),\n ('2016-03-01T00:00:00.000000000', 460093),\n ('2016-04-01T00:00:00.000000000', 450935),\n ('2016-05-01T00:00:00.000000000', 471421),\n ],\n dtype=[('ds', '<M8[ns]'), ('y', '<i8')],\n ),\n )\n\n\[email protected]\ndef sample_data_yhat_df():\n # Taken from `sample_data_df`\n return pd.DataFrame.from_records(\n np.array(\n [\n ('2013-02-01T00:00:00.000000000', 3.7394, 0.3739, 7.4788, 1),\n ('2013-03-01T00:00:00.000000000', 4.2164, 0.4216, 8.4328, 1),\n ('2013-04-01T00:00:00.000000000', 4.0838, 0.4084, 8.1676, 1),\n ('2013-05-01T00:00:00.000000000', 4.3699, 0.4370, 8.7397, 1),\n 
('2013-06-01T00:00:00.000000000', 4.1470, 0.4147, 8.2940, 1),\n ('2013-07-01T00:00:00.000000000', 4.2236, 0.4224, 8.4471, 1),\n ('2013-08-01T00:00:00.000000000', 4.3495, 0.4350, 8.6990, 1),\n ('2013-09-01T00:00:00.000000000', 3.9620, 0.3962, 7.9240, 1),\n ('2013-10-01T00:00:00.000000000', 4.1574, 0.4157, 8.3148, 1),\n ('2013-11-01T00:00:00.000000000', 4.2361, 0.4236, 8.4722, 1),\n ('2013-12-01T00:00:00.000000000', 4.7721, 0.4772, 9.5441, 1),\n ('2014-01-01T00:00:00.000000000', 3.8340, 0.3834, 7.6680, 1),\n ('2014-02-01T00:00:00.000000000', 3.8032, 0.3803, 7.6063, 1),\n ('2014-03-01T00:00:00.000000000', 4.3281, 0.4328, 8.6561, 1),\n ('2014-04-01T00:00:00.000000000', 4.3142, 0.4314, 8.6283, 1),\n ('2014-05-01T00:00:00.000000000', 4.5882, 0.4588, 9.1764, 1),\n ('2014-06-01T00:00:00.000000000', 4.3315, 0.4332, 8.6630, 1),\n ('2014-07-01T00:00:00.000000000', 4.4301, 0.4430, 8.8601, 1),\n ('2014-08-01T00:00:00.000000000', 4.5091, 0.4509, 9.0183, 1),\n ('2014-09-01T00:00:00.000000000', 4.2087, 0.4209, 8.4174, 1),\n ('2014-10-01T00:00:00.000000000', 4.3770, 0.4377, 8.7540, 1),\n ('2014-11-01T00:00:00.000000000', 4.3791, 0.4379, 8.7582, 1),\n ('2014-12-01T00:00:00.000000000', 5.0123, 0.5012, 10.0246, 1),\n ('2015-01-01T00:00:00.000000000', 3.9725, 0.3973, 7.9450, 1),\n ('2015-02-01T00:00:00.000000000', 3.8694, 0.3869, 7.7387, 1),\n ('2015-03-01T00:00:00.000000000', 4.4411, 0.4441, 8.8822, 1),\n ('2015-04-01T00:00:00.000000000', 4.3822, 0.4382, 8.7643, 1),\n ('2015-05-01T00:00:00.000000000', 4.6262, 0.4626, 9.2523, 1),\n ('2015-06-01T00:00:00.000000000', 4.4823, 0.4482, 8.9646, 1),\n ('2015-07-01T00:00:00.000000000', 4.5771, 0.4577, 9.1542, 1),\n ('2015-08-01T00:00:00.000000000', 4.5634, 0.4563, 9.1268, 1),\n ('2015-09-01T00:00:00.000000000', 4.3092, 0.4309, 8.6183, 1),\n ('2015-10-01T00:00:00.000000000', 4.4496, 0.4450, 8.8992, 1),\n ('2015-11-01T00:00:00.000000000', 4.4451, 0.4445, 8.8901, 1),\n ('2015-12-01T00:00:00.000000000', 5.1825, 0.5183, 10.3651, 1),\n ('2016-01-01T00:00:00.000000000', 4.0093, 0.4009, 8.0186, 1),\n ('2016-02-01T00:00:00.000000000', 4.1355, 0.4136, 8.2711, 1),\n ('2016-03-01T00:00:00.000000000', 4.6009, 0.4601, 9.2019, 1),\n ('2016-04-01T00:00:00.000000000', 4.5094, 0.4509, 9.0187, 1),\n ('2016-05-01T00:00:00.000000000', 4.7142, 0.4714, 9.4284, 1),\n ],\n dtype=[\n ('ds', '<M8[ns]'),\n ('y', '<i8'),\n ('yhat_lower', '<i8'),\n ('yhat_upper', '<i8'),\n ('sign', '<i8'),\n ],\n ),\n )\n\n\ndef perturb_ts(df, col, scale=1):\n \"\"\"Add noise to ts\n \"\"\"\n mean = df[col].mean() * scale\n df[col] += np.random.default_rng(42).uniform(\n low=-mean / 2, high=mean / 2, size=len(df)\n )\n return df\n\n\[email protected]_image_compare\ndef test_create_forecast_figure(sample_data_df):\n time_series = sample_data_df.iloc[:30]\n predictions = sample_data_df.iloc[30:]\n predictions = predictions.rename(columns={Y_COL: YHAT_COL})\n full_series = pd.concat([predictions, time_series])\n full_series[DS_COL] = pd.to_datetime(full_series[DS_COL])\n end_date = pd.to_datetime(predictions[DS_COL]).min()\n forecast_window = (pd.to_datetime(predictions[DS_COL]).max() - end_date).days\n fig = plot.create_forecast_figure(\n full_series,\n 'test',\n end_date,\n forecast_window,\n time_granularity=DAILY_TIME_GRANULARITY,\n plot_config=deepcopy(PLOT_CONFIG),\n )\n return fig\n\n\[email protected]_image_compare\ndef test_create_forecast_figure_overlapping(sample_data_yhat_df):\n time_series = sample_data_yhat_df\n predictions = sample_data_yhat_df.iloc[30:]\n predictions = 
predictions.rename(columns={Y_COL: YHAT_COL})\n predictions = perturb_ts(predictions, YHAT_COL, scale=0.1)\n full_series = pd.concat([predictions, time_series])\n full_series[DS_COL] = pd.to_datetime(full_series[DS_COL])\n end_date = pd.to_datetime(predictions[DS_COL]).min()\n forecast_window = (pd.to_datetime(predictions[DS_COL]).max() - end_date).days\n fig = plot.create_forecast_figure(\n full_series,\n 'test',\n end_date,\n forecast_window,\n anomaly_window=0.5,\n time_granularity=HOURLY_TIME_GRANULARITY,\n )\n return fig\n",
"\"\"\"Module to give FBProphet a common interface to Sklearn and general utilities\nfor forecasting problems like limiting the datasets to the last n days,\nallowing wider grid search for hyperparameters not available using standard\nFBProphet and Sklearn libraries.\n\nClasses:\n - SkProphet: a wrapper around FBProphet to provide a scikit learn compatible\n API.\n - StepsSelectorEstimator: a scikit learn metaestimator to limit the amount of\n days used to fit a forecast. Wraps another estimator.\n\nThese two classes can be combined to perform gridsearch using FBProphet while\nalso exploring the amount of training days to use in the dataset.\n\nThe most relevant docstrings are on:\n - SkProphet.__init__\n - SkProphet.fit\n - StepsSelectorEstimator.__init__\n\nSimple examples can be taken from the tests.\nA complex example doing a grid search can be seen here:\n\n.. code-block:: python\n\n import pandas as pd\n from sklearn.model_selection import GridSearchCV, ParameterGrid\n from muttlib.forecast import SkProphet, StepsSelectorEstimator\n\n # The grid has to be turned into a list if used in a StepsSelectorEstimator\n # as it has to be copyable for get / set params\n prophet_grid = list(ParameterGrid({\n 'sk_date_column': ['date'],\n 'sk_yhat_only': [True],\n 'sk_extra_regressors': [\n [],\n [{'name': 'b'}],\n ],\n 'prophet_kwargs': [\n dict(daily_seasonality='auto'),\n dict(daily_seasonality=True),\n ],\n }))\n\n days_selector_grid = {\n 'estimator_class': [SkProphet],\n 'amount_of_steps': [90, 120],\n 'sort_col': ['date'],\n 'estimator_kwargs': prophet_grid,\n }\n\n # To instance GridSearchCV, we need to pass an initialized estimator\n # (for example, a `StepsSelectorEstimator`)\n initial_estimator = StepsSelectorEstimator(\n SkProphet,\n days_selector_grid['amount_of_steps'][0],\n prophet_grid[0])\n cv = GridSearchCV(\n initial_estimator,\n days_selector_grid,\n cv=2,\n scoring='r2')\n\n X = pd.DataFrame({'date': [0, 2, 3, 4, 5], 'b': [1, 4, 5, 0, 9]})\n y = pd.Series([1, 1, 0, 1, 0])\n cv.fit(X, y)\n\n\nTODO:\n - At the moment, given FBProphet's current version we have that the model's\n parameter for *extra_regressors* is not set on initialization but rather it\n is set by using a specific prophet method. Thus, we have that our current\n SKProphet class handles this parameter by setting it manually and knowing\n about this implicitly. 
If, for some future reason, prophet's API changes to\n include a variety of other/new parameters that are added _not-on-init_,\n then it would probably be a good idea to keep an internal dictionary of the\n parameter's dtype and prophet's method used to set it, so as to iterate and\n set these in a \"programmatic\" way.\n - Evaluate if SkProphet.fit and SkProphet.copy default value should be False\n to save memory and cpu by default, at the risk of modifying the input data as a\n side effect of the function.\n\"\"\"\nfrom copy import deepcopy\nfrom inspect import isclass, signature\n\nimport numpy as np\nimport pandas as pd\n\nfrom fbprophet import Prophet\nfrom sklearn.base import BaseEstimator\n\n\nclass SkProphet(Prophet):\n\n DS = 'ds'\n\n def __init__(\n self,\n sk_date_column=DS,\n sk_yhat_only=True,\n sk_extra_regressors=None,\n prophet_kwargs=None,\n ):\n \"\"\"Scikit learn compatible interface for FBProphet.\n\n Parameters\n ----------\n sk_date_column: str\n Name of the column to use as date in Prophet.\n\n sk_yhat_only: Boolean\n True to return only the yhat from Prophet predictions.\n False to return everything.\n\n sk_extra_regressors: [] or [str] or [dict()]\n List with extra regressors to use. The list can have:\n\n * strings: column names (default prophet arguments for extra\n regressors will be used).\n * dicts: {name: *column_name*, prior_scale: _, standardize: _,\n mode: _}\n\n For more information see Prophet.add_regressor.\n\n prophet_kwargs: dict\n Keyword arguments to forward to Prophet.\n \"\"\"\n if sk_extra_regressors is None:\n sk_extra_regressors = []\n if prophet_kwargs is None:\n prophet_kwargs = {}\n\n super().__init__(**prophet_kwargs)\n self.sk_date_column = sk_date_column\n self.sk_yhat_only = sk_yhat_only\n self.sk_extra_regressors = sk_extra_regressors\n self.prophet_kwargs = prophet_kwargs\n self._set_my_extra_regressors()\n\n def fit(\n self, X, y=None, copy=True, **fit_params\n ): # pylint: disable=arguments-differ\n \"\"\"Scikit-learn-like fit on the Prophet model.\n\n Parameters\n ----------\n X: pd.DataFrame\n A dataframe with the data to fit.\n It is expected to have a column with datetime values named as\n *self.sk_date_column*.\n y: None or str or (list, tuple, numpy.ndarray, pandas.Series/DataFrame)\n The label values to fit. 
If y is:\n - None: the column 'y' should be contained in X.\n - str: the name of the column to use in X.\n - list, tuple, ndarray, etc: the values to fit.\n If the values have two dimensions (a matrix instead of a vector)\n the first column will be used.\n E.g.: [1, 3] -> [1, 3] will be used.\n E.g.: [[1], [3]] -> [1, 3] will be used.\n E.g.: [[1, 2], [3, 4]] -> [1, 3] will be used.\n copy: Boolean\n True to copy the input dataframe before working with it to avoid\n modifying the original one.\n If True is set, X should contain the `ds` and `y` columns for\n prophet with those names.\n If False is provided, the input data will be copied and the copy\n modified if required.\n fit_params: keyword arguments\n Keyword arguments to forward to Prophet's fit.\n \"\"\"\n if not isinstance(X, pd.DataFrame):\n raise TypeError('Arg \"X\" passed can only be of pandas.DataFrame type.')\n if copy:\n X = X.copy()\n if self.sk_date_column != self.DS and self.sk_date_column in X.columns:\n X = X.rename({self.sk_date_column: self.DS}, axis=1)\n if y is not None:\n if isinstance(y, str) and y in X.columns:\n X = X.rename({y: 'y'}, axis=1)\n else:\n X['y'] = self._as_np_vector(y)\n return super().fit(X, **fit_params)\n\n def predict(self, X, copy=True): # pylint: disable=arguments-differ\n \"\"\"Scikit learn's predict (returns predicted values).\n\n Parameters\n ----------\n X: pandas.DataFrame\n Input data for predictions.\n copy: Boolean\n True to copy the input dataframe before working with it to avoid\n modifying the original one.\n If True is set, X should contain the `ds` and `y` columns for\n prophet with those names.\n If False is provided, the input data will be copied and the copy\n modified if required.\n \"\"\"\n if copy:\n X = X.copy()\n if self.sk_date_column != self.DS and self.sk_date_column in X.columns:\n X = X.rename({self.sk_date_column: self.DS}, axis=1)\n predictions = super().predict(X)\n if self.sk_yhat_only:\n predictions = predictions.yhat.values\n return predictions\n\n def get_params(self, deep=True):\n \"\"\"Scikit learn's get_params (returns the estimator's params).\"\"\"\n prophet_attrs = [\n attr for attr in signature(Prophet.__init__).parameters if attr != 'self'\n ]\n sk_attrs = [\n attr for attr in signature(self.__init__).parameters if attr != 'self'\n ]\n prophet_params = {a: getattr(self, a, None) for a in prophet_attrs}\n sk_params = {a: getattr(self, a, None) for a in sk_attrs}\n if deep:\n sk_params = deepcopy(sk_params)\n prophet_params = deepcopy(prophet_params)\n sk_params['prophet_kwargs'].update(prophet_params)\n return sk_params\n\n def set_params(self, **params):\n \"\"\"Scikit learn's set_params (sets the parameters provided).\n Note on prophet keyword arguments precedence; this applies:\n - First, if some argument is explicitly provided, this value will be kept.\n - If not, but provided inside a 'prophet_kwargs' dict, the last is kept.\n - Lastly, if not provided in neither way but currently set, the value is not erased.\n \"\"\"\n sk_kws = [\n attr for attr in signature(self.__init__).parameters if attr != 'self'\n ]\n current_prophet_kws = getattr(self, 'prophet_kwargs', {})\n explicit_prophet_kws = {}\n args_passed_prophet_kws = {}\n for attr, value in params.items():\n if attr == 'prophet_kwargs':\n explicit_prophet_kws = value\n elif attr not in sk_kws:\n args_passed_prophet_kws[attr] = value\n else:\n setattr(self, attr, value)\n prophet_kws = current_prophet_kws\n prophet_kws.update(explicit_prophet_kws)\n prophet_kws.update(args_passed_prophet_kws)\n 
for attr, value in prophet_kws.items():\n setattr(self, attr, value)\n setattr(self, 'prophet_kwargs', prophet_kws)\n self._set_my_extra_regressors()\n return self\n\n def _set_my_extra_regressors(self):\n \"\"\"Adds the regressors defined in self.sk_extra_regressors.\n It is meant to be used at initialization.\n \"\"\"\n if self.extra_regressors:\n self.extra_regressors = self.extra_regressors.__class__()\n for regressor in self.sk_extra_regressors:\n if isinstance(regressor, str):\n self.add_regressor(regressor)\n elif isinstance(regressor, dict):\n self.add_regressor(**regressor)\n else:\n raise TypeError(\n 'Invalid extra_regressor in SkProphet.'\n 'Extra regressors must be strings or dicts with '\n '{name: *column_name*, prior_scale: _, standardize: _, '\n 'mode: _}'\n )\n\n def _as_np_vector(self, y):\n \"\"\"Ensures a list, tuple, pandas.Series, pandas.DataFrame\n or numpy.ndarray is returned as a numpy.ndarray of dimension 1.\n\n Parameters\n ----------\n y: list, tuple, numpy.ndarray, pandas.Series, pandas.DataFrame\n The object containing the y values to fit.\n If y is multidimensional, e.g.: [[1, 2], [3, 4]], the first column\n will be returned as y value, continuining the example: [1, 3].\n\n Returns\n -------\n numpy.ndarray of dimension 1\n The values as a numpy array of dimension 1.\n \"\"\"\n if isinstance(y, (list, tuple)):\n y = np.asarray(y)\n elif isinstance(y, (pd.Series, pd.DataFrame)):\n y = y.values\n if isinstance(y, np.ndarray):\n if len(y.shape) > 1:\n y = y[:, 0]\n return y\n\n def __repr__(self):\n \"\"\"Text representation of the object to look it nicely in the\n interpreter.\n \"\"\"\n return (\n f'{self.__class__.__name__}('\n f'sk_date_column=\"{self.sk_date_column}\", '\n f'sk_yhat_only={self.sk_yhat_only}, '\n f'sk_extra_regressors={self.extra_regressors}'\n f'prophet_kwargs={self.prophet_kwargs})'\n )\n\n __str__ = __repr__\n\n\nclass StepsSelectorEstimator(BaseEstimator):\n def __init__(\n self, estimator_class, amount_of_steps, estimator_kwargs=None, sort_col='date'\n ):\n \"\"\"An estimator that only uses a certain amount of rows on fit.\n\n Parameters\n ----------\n estimator_class: Classer or Estimator Class or estimator instance\n Estimator class to use to fit, if an Estimator Class is provided\n it will be wrapped with a metaestimator.Classer, if an instance\n is provided, its classed will be wrapped.\n examples:\n - Classer(sklearn.ensemble.RandomForestRegressor)\n - sklearn.ensemble.RandomForestRegressor\n - sklearn.ensemble.RandomForestRegressor()\n amount_of_steps: int\n The amount of time steps to use for training.\n sort_col: str\n Name of the column which will be used for sorting if X is a\n dataframe and has the column.\n estimator_kwargs: dict\n Keyword arguments to initialize EstimatorClass\n\n E.g.:\n\n > StepsSelectorEstimator(RandomForestRegressor(), 100)\n \"\"\"\n if estimator_kwargs is None:\n estimator_kwargs = {}\n\n self.amount_of_steps = amount_of_steps\n self.sort_col = sort_col\n self.estimator_kwargs = estimator_kwargs\n self.estimator_class = Classer.from_obj(estimator_class)\n self._estimator = self.estimator_class.new(**self.estimator_kwargs)\n\n def fit(self, X, y):\n \"\"\"Fits self.estimator only to the last self.amount_of_steps rows.\n Tries to sort X first.\n\n Parameters\n ----------\n X: pd.DataFrame\n A dataframe to fit.\n y: vector like\n Labels\n \"\"\"\n if self.sort_col in X.columns:\n X = X.sort_values(self.sort_col, axis=0)\n index_to_drop = X.iloc[: -self.amount_of_steps].index\n y = 
y.drop(index_to_drop).reset_index(drop=True)\n X = X.drop(index_to_drop).reset_index(drop=True)\n self._estimator.fit(X, y)\n return self\n\n def predict(self, X):\n \"\"\"Scikit's learn like predict.\"\"\"\n return self._estimator.predict(X)\n\n def get_params(self, deep=True):\n \"\"\"Get estimator params.\"\"\"\n kwargs = self.estimator_kwargs\n if deep:\n kwargs = deepcopy(kwargs)\n return {\n 'estimator_class': self.estimator_class,\n 'amount_of_steps': self.amount_of_steps,\n 'sort_col': self.sort_col,\n 'estimator_kwargs': kwargs,\n }\n\n def set_params(self, **params):\n \"\"\"Sets the estimator's params to \\*\\*params.\"\"\" # pylint: disable=anomalous-backslash-in-string\n self.estimator_class = Classer.from_obj(params['estimator_class'])\n self.amount_of_steps = params['amount_of_steps']\n self.sort_col = params['sort_col']\n self.estimator_kwargs = params['estimator_kwargs']\n self._estimator = self.estimator_class.new(**self.estimator_kwargs)\n return self\n\n def __repr__(self): # pylint: disable=signature-differs\n \"\"\"Text representation of the object to look it nicely in the\n interpreter.\n \"\"\"\n return (\n f'{self.__class__.__name__}('\n f'estimator_class={Classer.from_obj(self.estimator_class)}, '\n f'amount_of_steps={self.amount_of_steps}, '\n f'estimator_kwargs={self.estimator_kwargs})'\n )\n\n __str__ = __repr__\n\n\nclass Classer:\n def __init__(self, EstimatorClass):\n \"\"\"Wraps an EstimatorClass to avoid sklearn.base.clone exploting when\n called against an EstimatorClass during grid search of metaestimators.\n\n Parameters\n ----------\n EstimatorClass: class\n A Sklearn compatible estimator class.\n \"\"\"\n self._class = EstimatorClass\n\n def new(self, *args, **kwargs):\n \"\"\"Returns a new instance of the wrapped class initialized with the\n args and kwargs.\n \"\"\"\n return self._class(*args, **kwargs)\n\n @classmethod\n def from_obj(cls, obj):\n \"\"\"Initializes a new classer from an object, which can be another\n Classer, a class or an instance.\n \"\"\"\n if isinstance(obj, Classer):\n return obj\n elif isclass(obj):\n return Classer(obj)\n else:\n return Classer(obj.__class__)\n\n def __eq__(self, other):\n \"\"\"Equality checks inner class wrapped.\"\"\"\n return self.__class__ == other.__class__ and self._class == other._class\n\n def __repr__(self):\n \"\"\"Text representation of the object to look it nicely in the\n interpreter.\n \"\"\"\n return f'{self.__class__.__name__}({self._class.__name__})'\n\n __str__ = __repr__\n"
] |
[
[
"numpy.array",
"pandas.concat",
"pandas.to_datetime",
"numpy.random.default_rng"
],
[
"numpy.asarray"
]
] |
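One detail worth calling out in `muttlib/forecast.py` above: `Classer.from_obj` normalizes a class, an instance, or another `Classer` to the same wrapper, which is what lets `StepsSelectorEstimator` accept any of the three spellings for its `estimator_class` argument. A small sketch, assuming `muttlib` and scikit-learn are importable:

```python
from sklearn.linear_model import LinearRegression
from muttlib.forecast import Classer  # class shown above

a = Classer.from_obj(LinearRegression)    # a class
b = Classer.from_obj(LinearRegression())  # an instance
c = Classer.from_obj(a)                   # an existing Classer

assert a == b == c                 # __eq__ compares the wrapped class
est = a.new(fit_intercept=False)   # instantiates the wrapped class
```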
kperrynrel/bifacial_radiance
|
[
"cf5ae46b4ef93990e3e1619956a186376cb4fd8a"
] |
[
"bifacial_radiance/HPCScripts/simulate_improvedArray_Oct2127.py"
] |
[
"import numpy as np\nimport os\nimport pandas as pd\nimport time\nimport math\nfrom itertools import chain\nfrom itertools import product\n\nfrom bifacial_radiance import AnalysisObj, load, MetObj, RadianceObj\nfrom bifacial_radiance.spectral_utils import (spectral_property,\n spectral_irradiance_smarts,\n spectral_albedo_smarts)\n\nfrom dask.distributed import Client\n\n#from multitask_worker.worker import run_partial\n#from multitask_worker.slurm_utils import slurm_worker_id, slurm_worker_cnt\n\nfrom math import sin, cos, radians\n\n# Generate spectra for DNI, DHI and albedo using smarts\n\n# Run simulation using the given timestamp and wavelength\ndef simulate_single(idx=None, wavelength=None, \n test_folder_fmt=None, best_data_file=None, data_folder=None): \n \n # Verify test_folder exists before creating radiance obj\n test_folder = test_folder_fmt.format(f'{idx:04}',f'{wavelength:04}')\n if not os.path.exists(test_folder):\n os.makedirs(test_folder)\n\n ### NEW FOR SPECTRA \n \n # Create radiance obj\n radiance_name = 'BEST'\n rad_obj = RadianceObj(radiance_name, str(test_folder))\n \n # Set ground\n rad_obj.readWeatherFile(best_data_file, label = 'center')\n \n # Check to see if file exists\n foo=rad_obj.metdata.datetime[idx]\n\n # If a wavelength was specified, assume this is a spectral simulation and\n # try to load spectra files.\n # Determine file suffix\n suffix = f'_{idx}.txt'\n \n # Generate/Load albedo\n alb_file = os.path.join(data_folder, \"alb\"+suffix)\n spectral_alb = spectral_property.load_file(alb_file)\n \n # Generate/Load dni and dhi\n dni_file = os.path.join(data_folder, \"dni\"+suffix)\n dhi_file = os.path.join(data_folder, \"dhi\"+suffix)\n ghi_file = os.path.join(data_folder, \"ghi\"+suffix)\n spectral_dni = spectral_property.load_file(dni_file)\n spectral_dhi = spectral_property.load_file(dhi_file)\n spectral_ghi = spectral_property.load_file(ghi_file)\n \n weighted_albedo = False\n if wavelength:\n alb = spectral_alb[wavelength]\n dni = spectral_dni[wavelength]\n dhi = spectral_dhi[wavelength]\n elif weighted_albedo:\n _alb = np.array(spectral_alb[range(300, 2501, 10)])\n _dni = np.array(spectral_dni[range(300, 2501, 10)])\n _dhi = np.array(spectral_dhi[range(300, 2501, 10)])\n _ghi = np.array(spectral_ghi[range(300, 2501, 10)])\n \n alb_scale = np.sum(_alb * (_ghi))/np.sum(alb * (_ghi))\n alb *= alb_scale\n print(f'For IDX {idx}, albedo scaled by {alb_scale}')\n \n\n res_name = \"irr_Hydra_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)+'.csv'\n \n rad_obj.setGround(alb)\n # Set sky\n solpos = rad_obj.metdata.solpos.iloc[idx]\n zen = float(solpos.zenith)\n azm = float(solpos.azimuth) - 180\n rad_obj.gendaylit2manual(dni, dhi, 90 - zen, azm)\n\n \n lat=39.742 # NREL SSRL location\n lon=-105.179 # NREL SSRL location\n elev=1829\n timezone=-7\n axis_tilt=0\n axis_azimuth=180\n limit_angle=60\n backtrack=True # Set to false since it's only 1 row, no shading.\n gcr=0.35\n angledelta=0 # rounding to ints\n numpanels=1\n torquetube=False # We are going to add it separately\n diameter = 0.130175 # 5 1/8 in\n torqueTubeMaterial='Metal_Grey'\n tubetype='Round'\n axisofrotationTorqueTube = True\n azimuth=90\n material = 'Metal_Grey'\n hub_height = 1.5#0.927\n postdiamy = 0.1016 # N-S measurement, 4 \"\n postdiamx = 0.1524 # E-W measurement, 6 \"\n ttedgeoffset = -1.07 # south edge 42 in. 
negative because that's how I coded the trnaslation.\n ttedgeoffsetNorth = 0.10795 # North edge $ 4 1/4 inches\n length = 21.64-ttedgeoffset+ttedgeoffsetNorth # map goes from beginning of south post, but there is a bit more post to hold the sensor\n decimate = True\n zgap = 0.05 + diameter/2 # 1 inch of arm, + 1 3/16 of panel width on average ~ 0.055 m\n decimateinterval = '15Min'\n pitch=5.7 # distance between rows\n ypostlist=[0, 4.199, 10.414, 16.63, 21.64]\n ymods=[0.589, 1.596, 2.603, 3.610, 4.788, 5.795, 6.803, 7.810, 8.818, 9.825, 11.003, 12.011, 13.018, 14.026, 15.034, 16.041, 17.220, 18.230, 19.240, 20.250]\n \n numcellsx = 6\n numcellsy = 12\n xcell = 0.142\n ycell = 0.142 \n xcellgap = 0.02\n ycellgap = 0.02\n module_type = 'Bi60' \n \n xgap = 0.046\n ygap=0\n glass = False\n\n # Set tracker information\n try:\n tilt = round(rad_obj.getSingleTimestampTrackerAngle(rad_obj.metdata, idx, gcr, limit_angle=65),1)\n except: \n print(\"Night time !!!!!!!!!\")\n print(\"\")\n print(\"\")\n return None\n\n if math.isnan(tilt):\n return None\n \n sazm = 90 \n \n cellLevelModuleParams = {'numcellsx': numcellsx, 'numcellsy':numcellsy, \n 'xcell': xcell, 'ycell': ycell, 'xcellgap': xcellgap, 'ycellgap': ycellgap}\n \n # Running make module on HPC can cause issues if too many works try to \n # write to the module file at the same time. If something goes wrong,\n # assume the module has already been created.\n '''\n try:\n \n mymodule = rad_obj.makeModule(name=module_type, torquetube=torquetube, diameter=diameter, tubetype=tubetype, material=material, \n xgap=xgap, ygap=ygap, zgap=zgap, numpanels=numpanels,# x=0.952, y=1.924,\n cellLevelModuleParams=cellLevelModuleParams, \n axisofrotationTorqueTube=axisofrotationTorqueTube, glass=glass, z=0.0002)\n rad_obj.makeModule(name='sensor', x=0.15, y=0.15, z=0.04)\n\n except:\n print('Failed to make module.')\n '''\n radname = \"Bi60_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)+\"_\"\n \n sceneDict1 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*0, 'originy': ymods[0]} \n sceneObj1 = rad_obj.makeScene(moduletype=module_type, sceneDict=sceneDict1, radname = radname) \n \n sceneDict2 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*1, 'originy': ymods[0]} \n sceneObj2 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict2, radname = radname) \n \n sceneDict3 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*2, 'originy': ymods[0]} \n sceneObj3 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict3, radname = radname) \n \n sceneDict4 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*3, 'originy': ymods[0]} \n sceneObj4 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict4, radname = radname) \n \n sceneDict5 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*4, 'originy': ymods[0]} \n sceneObj5 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict5, radname = radname) \n \n sceneDict6 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*5, 'originy': ymods[0]} \n sceneObj6 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict6, radname = radname) \n \n sceneDict7 = {'tilt': 
tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*6, 'originy': ymods[0]} \n sceneObj7 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict7, radname = radname) \n \n sceneDict8 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*7, 'originy': ymods[0]} \n sceneObj8 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict8, radname = radname) \n \n sceneDict9 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*8, 'originy': ymods[0]} \n sceneObj9 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict9, radname = radname) \n \n sceneDict10 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*9, 'originy': ymods[0]} \n sceneObj10 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict10, radname = radname) \n \n #sceneObjects[tilt] = {'Obj1': sceneObj1, 'Obj2': sceneObj2, 'Obj3': sceneObj3, 'Obj4': sceneObj4, 'Obj5': sceneObj5, 'Obj6': sceneObj6, 'Obj7': sceneObj7, 'Obj8': sceneObj8, 'Obj9': sceneObj9, 'Obj10': sceneObj10}\n\n modulesArray = []\n fieldArray = [] \n\n modulesArray.append(sceneObj1)\n modulesArray.append(sceneObj2)\n modulesArray.append(sceneObj3)\n modulesArray.append(sceneObj4)\n modulesArray.append(sceneObj5)\n modulesArray.append(sceneObj6)\n modulesArray.append(sceneObj7)\n modulesArray.append(sceneObj8)\n modulesArray.append(sceneObj9)\n modulesArray.append(sceneObj10) \n fieldArray.append(modulesArray)\n \n \n textrow1 = ''\n textrow2 = sceneObj2.text + '\\r\\n'\n textrow3 = sceneObj3.text + '\\r\\n'\n textrow4 = sceneObj4.text + '\\r\\n'\n textrow5 = sceneObj5.text + '\\r\\n'\n textrow6 = sceneObj6.text + '\\r\\n'\n textrow7 = sceneObj7.text + '\\r\\n'\n textrow8 = sceneObj8.text + '\\r\\n'\n textrow9 = sceneObj9.text + '\\r\\n'\n textrow10 = sceneObj10.text + '\\r\\n'\n\n # Row 1 \n for i in range(1, 20): \n modulesArray = []\n\n sceneDict1 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*0, 'originy': ymods[i]} \n sceneObj1 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict1, radname = radname) \n \n sceneDict2 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*1, 'originy': ymods[i]} \n sceneObj2 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict2, radname = radname) \n \n sceneDict3 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*2, 'originy': ymods[i]} \n sceneObj3 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict3, radname = radname) \n \n sceneDict4 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*3, 'originy': ymods[i]} \n sceneObj4 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict4, radname = radname) \n \n sceneDict5 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*4, 'originy': ymods[i]} \n sceneObj5 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict5, radname = radname) \n \n sceneDict6 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*5, 'originy': ymods[i]} \n sceneObj6 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict6, radname = 
radname) \n \n sceneDict7 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*6, 'originy': ymods[i]} \n sceneObj7 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict7, radname = radname) \n \n sceneDict8 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*7, 'originy': ymods[i]} \n sceneObj8 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict8, radname = radname) \n \n sceneDict9 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*8, 'originy': ymods[i]} \n sceneObj9 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict9, radname = radname) \n \n sceneDict10 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*9, 'originy': ymods[i]} \n sceneObj10 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict10, radname = radname) \n \n textrow1 += sceneObj1.text + '\\r\\n' \n textrow2 += sceneObj2.text + '\\r\\n'\n textrow3 += sceneObj3.text + '\\r\\n'\n textrow4 += sceneObj4.text + '\\r\\n'\n textrow5 += sceneObj5.text + '\\r\\n'\n textrow6 += sceneObj6.text + '\\r\\n'\n textrow7 += sceneObj7.text + '\\r\\n'\n textrow8 += sceneObj8.text + '\\r\\n'\n textrow9 += sceneObj9.text + '\\r\\n'\n textrow10 += sceneObj10.text + '\\r\\n'\n \n modulesArray.append(sceneObj1)\n modulesArray.append(sceneObj2)\n modulesArray.append(sceneObj3)\n modulesArray.append(sceneObj4)\n modulesArray.append(sceneObj5)\n modulesArray.append(sceneObj6)\n modulesArray.append(sceneObj7)\n modulesArray.append(sceneObj8)\n modulesArray.append(sceneObj9)\n modulesArray.append(sceneObj10)\n \n fieldArray.append(modulesArray)\n\n\n # Redoing the first module to append everything to it.\n sceneDict1 = {'tilt': tilt, 'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': pitch*0, 'originy': ymods[0]} \n sceneObj1 = rad_obj.makeScene(moduletype=module_type,sceneDict=sceneDict1, radname = radname) \n \n rad_obj.appendtoScene(sceneObj1.radfiles, '', textrow1)\n rad_obj.appendtoScene(sceneObj1.radfiles, '', textrow2)\n rad_obj.appendtoScene(sceneObj1.radfiles, '', textrow3)\n rad_obj.appendtoScene(sceneObj1.radfiles, '', textrow4)\n rad_obj.appendtoScene(sceneObj1.radfiles, '', textrow5)\n rad_obj.appendtoScene(sceneObj1.radfiles, '', textrow6)\n rad_obj.appendtoScene(sceneObj1.radfiles, '', textrow7)\n rad_obj.appendtoScene(sceneObj1.radfiles, '', textrow8)\n rad_obj.appendtoScene(sceneObj1.radfiles, '', textrow9)\n rad_obj.appendtoScene(sceneObj1.radfiles, '', textrow10)\n \n # Custom BSA Geometry\n # Bottom posttubes and torquetube:\n for i in range (0, 10):\n xpost = i*pitch \n \n # adding torquetube\n torquetube = '\\n\\r! genrev Metal_Grey torquetube{} t*{} {} 32 | xform -rx -90 -t {} {} {}'.format(i, length, diameter/2.0, xpost, ttedgeoffset, hub_height-zgap)\n rad_obj.appendtoScene(sceneObj1.radfiles, '', torquetube)\n \n for j in range (0,5):\n ypost = ypostlist[j]\n \n post1='! 
genbox Metal_Grey pile{} {} {} {} | xform -t {} {} 0 '.format((str(i)+\",\"+str(j)),postdiamx, postdiamy, hub_height, -postdiamx/2.0+xpost, -postdiamy+ypost) \n rad_obj.appendtoScene(sceneObj1.radfiles, '', post1)\n \n ###########################\n # Create sensor objects #\n ###########################\n \n \n # West Sensors\n shhw = 1.5 + (1 - 0.226/2)*sin(radians(tilt)) + (0.130175/2 + 0.05 - 0.02)*cos(radians(tilt))\n sxw = pitch*2 - (1 - 0.226/2)*cos(radians(tilt)) + (0.130175/2 + 0.05 - 0.02)*sin(radians(tilt))\n syw = ymods[9] + 0.5 + 0.226/2\n sensorw_scene = {'tilt': tilt, 'pitch':1,'hub_height': shhw, 'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': sxw, 'originy': syw,'appendRadfile':True} \n res_name = \"SensorW_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n sensorw_sceneObj = rad_obj.makeScene(moduletype='sensor',sceneDict=sensorw_scene, radname = res_name) \n\n syw = ymods[15] + 0.5 + 0.226/2\n sensorIMTw_scene = {'tilt': tilt, 'pitch':1,'hub_height': shhw, 'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': sxw, 'originy': syw,'appendRadfile':True} \n res_name = \"SensorIMTW_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n sensorIMTw_sceneObj = rad_obj.makeScene(moduletype='sensor',sceneDict=sensorIMTw_scene, radname=res_name) \n\n # East Sensors\n shhe = 1.5 - (1 - 0.226/2)*sin(radians(tilt)) + (0.130175/2 + 0.05 - 0.02)*cos(radians(tilt))\n sxe = pitch*2 + (1 - 0.226/2)*cos(radians(tilt)) + (0.130175/2 + 0.05 - 0.02)*sin(radians(tilt))\n sye = ymods[9] + 0.5 + 0.226/2\n sensore_scene = {'tilt': tilt, 'pitch':1,'hub_height': shhe, 'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': sxe, 'originy': sye,'appendRadfile':True} \n res_name = \"SensorE_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n sensore_sceneObj = rad_obj.makeScene(moduletype='sensor',sceneDict=sensore_scene, radname=res_name) \n\n sye = ymods[15] + 0.5 + 0.226/2\n sensorIMTe_scene = {'tilt': tilt, 'pitch':1,'hub_height': shhe, 'azimuth':azimuth,'nMods': 1, 'nRows': 1, 'originx': sxe, 'originy': sye,'appendRadfile':True} \n res_name = \"SensorIMTE_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n sensorIMTe_sceneObj = rad_obj.makeScene(moduletype='sensor',sceneDict=sensorIMTe_scene, radname=res_name) \n \n # Build oct file \n sim_name = \"BEST_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n octfile = rad_obj.makeOct(rad_obj.getfilelist(), octname=sim_name)\n \n #################\n # Run analysis #\n #################\n \n #Row 3 Module 10 sensors \n analysis = AnalysisObj(octfile, rad_obj.basename) \n\n frontscan, backscan = analysis.moduleAnalysis(sensorw_sceneObj, sensorsy=1)#, frontsurfaceoffset=0.021)#, backsurfaceoffset = 0.02)\n res_name = \"SensorW_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n frontdict, backdict = analysis.analysis(octfile, res_name, frontscan, backscan)\n\n frontscan, backscan = analysis.moduleAnalysis(sensore_sceneObj, sensorsy=1)#, frontsurfaceoffset=0.021)#, backsurfaceoffset = 0.02)\n res_name = \"SensorE_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n frontdict, backdict = analysis.analysis(octfile, res_name, frontscan, backscan)\n\n #IMT Sensors Row 3 Module 5 \n '''\n frontscan, backscan = 
analysis.moduleAnalysis(sensorIMTw_sceneObj, sensorsy=1)#, frontsurfaceoffset=0.021)#, backsurfaceoffset = 0.02)\n res_name = \"SensorIMTW_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n frontdict, backdict = analysis.analysis(octfile, res_name, frontscan, backscan)\n\n frontscan, backscan = analysis.moduleAnalysis(sensorIMTe_sceneObj, sensorsy=1)#, frontsurfaceoffset=0.021)#, backsurfaceoffset = 0.02)\n res_name = \"SensorIMTE_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n frontdict, backdict = analysis.analysis(octfile, res_name, frontscan, backscan)\n \n #fieldARray[module][row]\n\n #HYDRA\n modmod = 16\n rowrow = 1\n frontscan, backscan = analysis.moduleAnalysis(fieldArray[modmod][rowrow], sensorsy=12)#, frontsurfaceoffset=0.021)#, backsurfaceoffset = 0.02) \n res_name = \"Hydra_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n frontdict, backdict = analysis.analysis(octfile, res_name, frontscan, backscan)\n\n '''\n \n #LOCATION_APOGEES\n modmod = 9\n rowrow = 2\n frontscan, backscan = analysis.moduleAnalysis(fieldArray[modmod][rowrow], sensorsy=4)#, frontsurfaceoffset=0.021)#, backsurfaceoffset = 0.02) \n frontscan['ystart'] = frontscan['ystart'] + 0.45\n backscan['ystart'] = backscan['ystart'] + 0.45\n res_name = \"Apogee_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n frontdict, backdict = analysis.analysis(octfile, res_name, frontscan, backscan)\n\n\n '''\n # SCAN FULL ROWS\n for rowrow in range(5, 7):\n for modmod in range(0, 20):\n frontscan, backscan = analysis.moduleAnalysis(fieldArray[modmod][rowrow], sensorsy=12)#, frontsurfaceoffset=0.021)#, backsurfaceoffset = 0.02) \n res_name = \"Row_\"+str(rowrow)+\"_Mod_\"+str(modmod)+\"_\"+str(foo.year)+\"_\"+str(foo.month)+\"_\"+str(foo.day)+\"_\"+str(foo.hour)+\"_\"+str(foo.minute)\n frontdict, backdict = analysis.analysis(octfile, res_name, frontscan, backscan)\n '''\n # Read in results\n #results_file = os.path.join('results', f'irr_sensor_{sim_name}.csv')\n #results = load.read1Result(results_file)\n results = 1\n \n # Format output\n #tracker_theta = tilt\n #front = ','.join([ str(f) for f in results['Wm2Front'] ])\n #back = ','.join([ str(r) for r in results['Wm2Back'] ])\n print(\"***** Finished simulation for \"+ str(foo))\n #time_str = metdata.datetime[idx]\n# print(f\"sim_results,{idx},{time_str},{wavelength},{dni},{dhi},{alb},\" \\\n# f\"{tracker_theta},{front},{back}\")\n return results\n\n\n\ndef run_simulations_dask(arraysimulations, kwargs):\n # Create client\n \n scheduler_file = '/scratch/sayala/dask_testing/scheduler.json'\n client = Client(scheduler_file=scheduler_file)\n \n # Iterate over inputs\n futures = []\n \n for ii in range(0, len(arraysimulations)):\n idx = arraysimulations.iloc[ii].idx \n wavelength = arraysimulations.iloc[ii].wavelength\n test_folder = test_folder_fmt.format(f'{idx:04}',f'{wavelength:04}')\n if not os.path.exists(test_folder):\n futures.append(client.submit(simulate_single, idx=idx, wavelength=wavelength, **kwargs))\n else:\n print(\"\\n\\nAlready simulated ***********************\\n\\n\", idx, wavelength)\n\n # Get results for all simulations\n res = client.gather(futures)\n \n # Close all dask workers and scheduler\n try:\n \tclient.shutdown()\n except:\n pass\n\n # Close client\n client.close()\n\n res = 'FINISHED!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n return res\n\ndef 
compileResults(testfolder, resultsfile):\n    # Assumes there is an overarching folder, where\n    # Folder > customname_idx_XXXX > customname_Spectra_XXXX > results\n    # and result files are irr_customname_YEAR_MONTH_DAY_HOUR_MINUTE.CSV\n    #\n    # i.e. folder: r'/scratch/sayala/BEST_SpectraMarch/BEST_APOGGEE_Spectra_Mark_NotScaled'\n    # testfolder = r'C:\\Users\\sayala\\Documents\\HPC_Scratch\\BEST_Spectra'\n    # Reading results in the results/ folder, like: irr_SensorW_2020_10_30_8_30.csv\n    # Saving into resultsfile = r'C:\\Users\\sayala\\Desktop\\Dask\\Compiled_SPECTRA_Results\\TEMP\\March15_Results.csv'\n    \n    \n    arrayWm2Front = []\n    arrayWm2Back = []\n    arrayMatFront = []\n    arrayMatBack = []\n    monthlist=[]\n    daylist=[]\n    hourlist=[]\n    yearlist=[]\n    minutelist=[]\n    #faillist=[]\n    addresslist=[]\n    wavlist = []\n    sensorlist = []\n    indexlist = []\n    \n    # List all IDX folders\n    idxlist = sorted(os.listdir(testfolder))\n    print('{} Indexes in the directory'.format(len(idxlist)))\n\n    # Loop over Timestamps\n    for ii in range(0, len(idxlist)):\n        idx = int(idxlist[ii][-4:])\n\n        spectralist = sorted(os.listdir(os.path.join(testfolder, idxlist[ii])))\n\n        # Loop over Spectra\n        for jj in range(0, len(spectralist)):\n            wav = int(spectralist[jj][-4:]) \n\n            resultslist = sorted(os.listdir(os.path.join(testfolder, idxlist[ii], spectralist[jj], 'results')))\n            \n            # Loop over Sensors\n            for kk in range(0, len(resultslist)): \n                \n                try:\n                    resultfile=os.path.join(testfolder, idxlist[ii], spectralist[jj], 'results', resultslist[kk])\n                    sensorname = resultslist[kk].split('_')[1]\n                    year = resultslist[kk].split('_')[2]\n                    month = resultslist[kk].split('_')[3]\n                    day = resultslist[kk].split('_')[4]\n                    hour = resultslist[kk].split('_')[5]\n                    try:\n                        minute = int(resultslist[kk].split('_')[6].split('.')[0])\n                    except Exception:\n                        minute = 0\n\n#                    resultsDF = bifacial_radiance.load.read1Result(resultfile)\n                    resultsDF = load.read1Result(resultfile)\n                    wavlist.append(wav)\n                    indexlist.append(idx)\n                    arrayWm2Front.append(list(resultsDF['Wm2Front']))\n                    arrayWm2Back.append(list(resultsDF['Wm2Back']))\n                    arrayMatFront.append(list(resultsDF['mattype']))\n                    arrayMatBack.append(list(resultsDF['rearMat']))\n                    yearlist.append(year)\n                    monthlist.append(month)\n                    daylist.append(day)\n                    hourlist.append(hour)\n                    minutelist.append(minute)\n                    sensorlist.append(sensorname)\n                    addresslist.append(resultfile)\n                except Exception:\n                    print(\" FAILED index \", idx, \" wav \", wav, \" file \", resultslist[kk] )\n    \n    resultsdf = pd.DataFrame(list(zip(arrayWm2Front, arrayWm2Back, \n                                      arrayMatFront, arrayMatBack)),\n                             columns = ['br_Wm2Front', 'br_Wm2Back', \n                                        'br_MatFront', 'br_MatBack'])\n    resultsdf['minute'] = minutelist\n    resultsdf['hour'] = hourlist\n    resultsdf['day'] = daylist\n    resultsdf['month'] = monthlist\n    resultsdf['year'] = yearlist\n    resultsdf['wavelength'] = wavlist\n    resultsdf['sensor'] = sensorlist\n    resultsdf['file'] = addresslist\n    resultsdf['idx'] = indexlist \n    \n    fmt = '%Y-%m-%d %H:%M:00'\n    datesread = pd.to_datetime(resultsdf[['year','month','day','hour','minute']], format=fmt)\n    resultsdf['timestamp'] = datesread \n    \n    resultsdf.to_csv(resultsfile)\n    \n\ndef findMissingSimulationValues(resultsfile, idxs=None, wavs=None, sensors=None):\n    \n    data = pd.read_csv(resultsfile)\n    #data['timestamp']= pd.to_datetime(data['timestamp'])\n\n    if idxs is None:\n        idxs = list(data.idx.unique())\n    \n    if wavs is None:\n        wavs = list(data.wavelength.unique())\n    \n    if sensors is None:\n        sensors = list(data.sensor.unique())\n    \n    # Make Ideal Dataframe\n    ideal = pd.DataFrame(\n        list(product(idxs, 
wavs, sensors)),\n        columns=['idx', 'wavelength', 'sensor'])\n    \n    ideal.idx = ideal.idx.astype('int64')\n    ideal.wavelength = ideal.wavelength.astype('int64')\n    \n    # Set idx, wavelengths and sensors as indexes\n    ideal.set_index(['idx','wavelength','sensor'], inplace=True)\n    foo = data.copy()\n    foo.set_index(['idx','wavelength','sensor'], inplace=True)\n\n    # Concatenate to generate missing values (nan)\n    result = pd.concat([foo, ideal], axis=1, sort=False)\n    \n    # Select missing values (na)\n    result = result[result['br_Wm2Back'].isna()]\n    \n    # Reset index and generate subset dataframe to return\n    result.reset_index(inplace=True)\n    missing = result[['idx','wavelength','sensor']]\n    \n    return missing\n\n\nif __name__ == \"__main__\":\n    # Define locations within file system\n\n    #best_data_file = '/scratch/sayala/WeatherFiles/spectral_experiment_TMY.csv'\n    best_data_file = '/scratch/sayala/SPECTRAS_Used/spctrutils_Oct2127/spectratimesTMY.csv'\n\n    cases = ['Mark_NotScaled', 'Mark_Scaled', 'SRRL_NotScaled', 'SRRL_Scaled']\n    i = 2\n    \n    test_folder_fmt = '/scratch/sayala/BEST_SpectraMarch/BEST_APOGGEE_Spectra_'+cases[i]+'/BEST_idx_{}/Spectra_{}'\n    data_folder = '/scratch/sayala/SPECTRAS_Used/spctrutils_Oct2127/'+cases[i]\n    \n    # Define inputs \n    kwargs = {\n        'best_data_file': best_data_file,\n        'test_folder_fmt': test_folder_fmt,\n        'data_folder': data_folder\n    }\n    \n    wavelengths = np.array(list(chain(range(300, 1101, 5), range(1110, 2501, 10))))\n    indices = np.array(list(range(10, 43)))\n    \n\n    try:\n        # Make Dataframe with missing entries only\n        resultsfile = r'/scratch/sayala/BEST_SpectraMarch/March15_Results.csv' \n        currenttestfolder = r'/scratch/sayala/BEST_SpectraMarch/BEST_APOGGEE_Spectra_'+cases[i]\n        compileResults(testfolder=currenttestfolder, resultsfile=resultsfile)\n        arraysimulations = findMissingSimulationValues(resultsfile, idxs=indices, wavs=wavelengths)\n    except Exception:\n        # Nothing compiled yet: build the ideal dataframe and start from scratch.\n        # Column names must match what run_simulations_dask reads (.idx, .wavelength).\n        print(\"No files simulated yet, starting from 0\")\n        arraysimulations = pd.DataFrame(\n            list(product(indices, wavelengths)),\n            columns=['idx', 'wavelength'])\n\n    run_simulations_dask(arraysimulations, kwargs)\n"
] |
[
[
"pandas.concat",
"pandas.to_datetime",
"numpy.sum",
"pandas.read_csv"
]
] |
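The findMissingSimulationValues helper above finds unfinished runs by outer-aligning the compiled results against the full (idx, wavelength, sensor) grid and keeping the rows that come back NaN. A minimal, self-contained sketch of that pattern follows; the toy values and the 'br_Wm2Back' stand-in column are illustrative, not taken from the dataset:

import pandas as pd
from itertools import product

# Hypothetical completed results: one row per finished simulation.
done = pd.DataFrame({'idx': [0, 0, 1], 'wavelength': [300, 305, 300],
                     'br_Wm2Back': [1.0, 2.0, 3.0]})

# Ideal grid of every combination that should exist.
ideal = pd.DataFrame(list(product([0, 1], [300, 305])),
                     columns=['idx', 'wavelength'])

# Outer-align on the shared index; combinations absent from `done`
# come back with NaN in the result column.
merged = pd.concat([done.set_index(['idx', 'wavelength']),
                    ideal.set_index(['idx', 'wavelength'])],
                   axis=1, sort=False)

missing = merged[merged['br_Wm2Back'].isna()].reset_index()[['idx', 'wavelength']]
print(missing)  # the one unfinished combination: idx=1, wavelength=305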
iturov/rov2018
|
[
"ca1949806d105a2caddf2cf7a1361e2d3f6a1246"
] |
[
"groundstation/ROV/OCR/old-studies/atahan.py"
] |
[
"import pyscreenshot as ImageGrab\nimport cv2\nimport numpy as np\nimport pytesseract\nfrom PIL import Image\n\nsrc_path = \"C:\\\\Users\\\\Public\\\\ROV\\\\OCR\\\\\"\nif __name__ == \"__main__\":\n # fullscreen\n im=ImageGrab.grab()\n im.show()\n im.save('init.png')\n\ntext=[]\na = pytesseract.image_to_string(Image.open('init.png'))\ntext.append(a)\nimg = cv2.imread('init.png')\n\nimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\nkernel = np.ones((1, 1), np.uint8)\nimg = cv2.dilate(img, kernel, iterations=1)\nimg = cv2.erode(img, kernel, iterations=1)\n\ncv2.imwrite(src_path + \"gray.png\", img)\nb = pytesseract.image_to_string(Image.open('gray.png'))\ntext.append(b)\n\n\n\nimg = cv2.adaptiveThreshold(img, 255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,115,1)\n\ncv2.imwrite(src_path + \"thres.png\", img)\n\nc = pytesseract.image_to_string(Image.open('thres.png'))\n\ntext.append(c)\n\ntexta = len(text)\nf= open(\"C:\\\\Users\\\\Public\\\\ROV\\\\OCR\\\\model.txt\",\"w+\")\nfor i in range(0,texta):\n f.write(text[i])\nf.close()\n"
] |
[
[
"numpy.ones"
]
] |
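The OCR study above runs pytesseract three times, once per preprocessing stage, round-tripping each stage through a PNG on disk. A compact sketch of the same grayscale, morphology, adaptive-threshold pipeline kept in memory instead; 'init.png' is a placeholder input, and the 2x2 kernel is an assumption (the original's 1x1 kernel leaves the image unchanged):

import cv2
import numpy as np
import pytesseract
from PIL import Image

img = cv2.imread('init.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Dilate then erode (morphological closing) to smooth strokes before OCR.
kernel = np.ones((2, 2), np.uint8)
cleaned = cv2.erode(cv2.dilate(gray, kernel, iterations=1), kernel, iterations=1)

thresh = cv2.adaptiveThreshold(cleaned, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 115, 1)

# pytesseract accepts PIL images, so no intermediate files are needed.
texts = [pytesseract.image_to_string(Image.fromarray(stage))
         for stage in (gray, cleaned, thresh)]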
ChengIC/ryan-sad
|
[
"09a93245ae6917911bd0f9d39d533d825c23c259"
] |
[
"networks/layers/standard.py"
] |
[
"import torch\r\n\r\nfrom torch.nn import Module\r\nfrom torch.nn import init\r\nfrom torch.nn.parameter import Parameter\r\n\r\n\r\n# Acknowledgements: https://github.com/wohlert/semi-supervised-pytorch\r\nclass Standardize(Module):\r\n \"\"\"\r\n Applies (element-wise) standardization with trainable translation parameter μ and scale parameter σ, i.e. computes\r\n (x - μ) / σ where '/' is applied element-wise.\r\n\r\n Args:\r\n in_features: size of each input sample\r\n out_features: size of each output sample\r\n bias: If set to False, the layer will not learn a translation parameter μ.\r\n Default: ``True``\r\n\r\n Attributes:\r\n mu: the learnable translation parameter μ.\r\n std: the learnable scale parameter σ.\r\n \"\"\"\r\n __constants__ = ['mu']\r\n\r\n def __init__(self, in_features, bias=True, eps=1e-6):\r\n super(Standardize, self).__init__()\r\n self.in_features = in_features\r\n self.out_features = in_features\r\n self.eps = eps\r\n self.std = Parameter(torch.Tensor(in_features))\r\n if bias:\r\n self.mu = Parameter(torch.Tensor(in_features))\r\n else:\r\n self.register_parameter('mu', None)\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n init.constant_(self.std, 1)\r\n if self.mu is not None:\r\n init.constant_(self.mu, 0)\r\n\r\n def forward(self, x):\r\n if self.mu is not None:\r\n x -= self.mu\r\n x = torch.div(x, self.std + self.eps)\r\n return x\r\n\r\n def extra_repr(self):\r\n return 'in_features={}, out_features={}, bias={}'.format(\r\n self.in_features, self.out_features, self.mu is not None\r\n )\r\n"
] |
[
[
"torch.nn.init.constant_",
"torch.div",
"torch.Tensor"
]
] |
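A quick usage check for the Standardize layer, assuming it is importable from the repo's networks/layers/standard.py. Since forward subtracts mu in place (x -= self.mu), pass a clone if you still need the original tensor afterwards:

import torch
from networks.layers.standard import Standardize  # assumed import path

layer = Standardize(in_features=4)
x = torch.randn(8, 4)
y = layer(x.clone())  # clone: forward mutates its input in place

print(layer)     # Standardize(in_features=4, out_features=4, bias=True)
# With freshly initialized parameters (mu=0, std=1), the layer is
# the identity up to the eps guard in the denominator.
assert torch.allclose(y, x / (1 + layer.eps))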
SchneiderDaniel/data
|
[
"741f5d912eb9a62b77ec3cecf4fc54f21133784c"
] |
[
"flask/Dashapps/world/Dash_App13.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom dash import Dash\nfrom dash.dependencies import Input, Output, ALL, State, MATCH, ALLSMALLER, ClientsideFunction\nfrom Dashapps.Dash_fun import apply_layout_with_auth, load_object, save_object\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objs as go\nfrom plotly.subplots import make_subplots\nfrom Dashapps.Dash_base import warning_card, colors, cite_card, description_card, draft_template\nimport dash_table\nfrom datetime import datetime\nimport numpy as np\nfrom flask import request\nimport locale\nimport pycountry_convert as pc\n\n\nurl_base = '/dash/app13/' \n\n\n\ndata_sources = [\n \"https://www.kaggle.com/ajaypalsinghlo/world-happiness-report-2021\",\n \"https://www.kaggle.com/danielkorth/eda-world-happiness-report-2021\",\n] \n\ndata_licenses = [\n \"https://creativecommons.org/publicdomain/zero/1.0/\"\n]\n\nsourced_date = \"03/21/2021\"\n\ncite_text = '\"Freedom is not worth having if it does not include the freedom to make mistakes\"'\ncite_author = \"Mahatma Gandhi\"\ncite_link = \"https://en.wikipedia.org/wiki/Mahatma_Gandhi\"\ndescription_text = '''On the map below you see the a score that measures how people have the feeling to be able to make life coices in the individual countries. The score ranges from 0 to 1. The results are gathered from the Gallup World Poll. Below the map you will also find a list of the Top 20 countries based on the score.'''\n\ndf = pd.read_csv('app_data/processed/0009.csv')\n\n# temp = df.sort_values(by=['Ladder score'], ascending=False)\n\n\n\ndf2 = df.set_index('Country name')\ntemp = pd.DataFrame(df2['Freedom to make life choices']).reset_index()\n\n#ADAPTING TO THE ISO 3166 STANDARD\ntemp.loc[temp['Country name'] == 'Taiwan Province of China', 'Country name'] = 'Taiwan, Province of China' \ntemp.loc[temp['Country name'] == 'Hong Kong S.A.R. 
of China', 'Country name'] = 'Hong Kong' \ntemp.loc[temp['Country name'] == 'Congo (Brazzaville)','Country name'] = 'Congo' \ntemp.loc[temp['Country name'] == 'Palestinian Territories','Country name'] = 'Palestine, State of' \n\ntemp.drop(index=temp[temp['Country name'] == 'Kosovo'].index, inplace=True) # Kosovo Code agreed on not to use by ISO 3166\ntemp.drop(index=temp[temp['Country name'] == 'North Cyprus'].index, inplace=True) # Not part of the ISO 3166 standard\n\n\ntemp['iso_alpha'] = temp['Country name'].apply(lambda x:pc.country_name_to_country_alpha3(x,))\ntemp2 = temp.sort_values(by=['Freedom to make life choices'], ascending=False)[:20]\nfig = px.choropleth(temp, locations='iso_alpha',\n color='Freedom to make life choices',\n hover_name='Country name',\n color_continuous_scale=px.colors.diverging.RdYlGn,\n )\nfig.update_layout(\n showlegend=False,\n template=draft_template,\n annotations=[\n dict(\n templateitemname=\"draft watermark\",\n text=\"www.blackandwhitedata.com\",\n )\n ],\n paper_bgcolor='rgb(248, 248, 255)',\n geo_bgcolor='rgb(248, 248, 255)',\n geo_showframe=False,\n height=600,\n legend={\"xanchor\":\"center\", \"yanchor\":\"top\"},\n margin=dict(\n l=0,\n r=0,\n b=50,\n t=50\n )\n)\n###\n\nt1 = temp.nlargest(20, 'Freedom to make life choices')[::-1]\nfig2 = make_subplots(rows=1, cols=2, \n column_widths=[0.65, 0.35],\n subplot_titles=['Top 20 Countries', 'All countries'])\nfig2.append_trace(go.Bar(x=t1['Freedom to make life choices'],\n y=t1['Country name'],\n orientation='h',\n \n marker=dict(\n color=colors['gray'],\n line=dict(color=colors['black'], width=1)\n ),\n name=''\n ), 1,1\n )\n\nfig2.append_trace(go.Box(y=df['Freedom to make life choices'],\n marker_color=colors['lightgray'],\n name=''), 1,2)\nfig2.add_vline(x=9,\n col=1,\n )\n\nfig2.update_layout(\n xaxis_range=(0.8,1),\n yaxis2_range=(0.35,1),\n xaxis = { \n 'showgrid': True,\n 'gridcolor' :colors['gray'],\n 'tickfont': {\n 'color': '#333',\n 'size': 12\n },\n },\n yaxis2 = { \n 'showgrid': False,\n # 'mirror': True,\n # 'automargin':False,\n 'side':'right',\n 'anchor': 'free',\n 'position': 0.95,\n 'gridcolor' :colors['gray'],\n 'tickfont': {\n 'color': '#333',\n 'size': 12\n },\n },\n margin=dict(\n l=0,\n r=0,\n b=50,\n t=100\n ),\n # yaxis2_tickvals=[45,50,55,60,65,70,75],\n paper_bgcolor='rgb(248, 248, 255)',\n plot_bgcolor='rgb(248, 248, 255)',\n showlegend=False,\n title_text='Top 20 Countries',\n title_font_size=22),\n \nfig2.update_annotations(yshift=5)\n\n\n# The Layout\nlayout = html.Div(style={'font-family':'\"Poppins\", sans-serif', 'backgroundColor': colors['background']}, children=[\n html.H1(\n children='Having the freedom to make life choices',\n style={\n 'textAlign': 'center',\n 'color': colors['text'],\n 'backgroundColor': colors['background']\n }\n ),\n html.Div(children=description_card(description_text), style={\n 'textAlign': 'center',\n 'color': colors['text'],\n 'backgroundColor': colors['background']\n }),\n html.Div(children=cite_card(cite_text,cite_author,cite_link), style={\n 'textAlign': 'center',\n 'color': colors['text'],\n 'backgroundColor': colors['background']\n }),\n html.Br(),\n dcc.Graph(\n id='ty-figure',\n figure=fig\n ),\n dcc.Graph(\n id='ty-figure2',\n figure=fig2\n ),\n html.Br(),\n html.Hr(className=\"my-2\"),\n html.Br(),\n html.Div(children=warning_card(data_sources,data_licenses,sourced_date), style={\n 'textAlign': 'left',\n 'color': colors['text'],\n 'backgroundColor': colors['background']\n })\n])\n\ndef cast_int(val):\n if val is None: return 1\n 
return int(val)\n\ndef cast_float(val):\n if val is None: return 1.0\n return float(val)\n\ndef Add_Dash(server):\n app = Dash(server=server, url_base_pathname=url_base, external_stylesheets = [dbc.themes.BOOTSTRAP], external_scripts = [\"https://cdn.plot.ly/plotly-locale-de-latest.js\"], meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}])\n \n apply_layout_with_auth(app, layout)\n\n return app.server"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
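The block of temp.loc renames above exists because pycountry_convert resolves only exact ISO 3166 country names, so non-standard names must be remapped (or dropped, as with Kosovo) before looking up alpha-3 codes. A minimal sketch of that rename-map-plot pattern, with illustrative data in place of the happiness-report CSV:

import pandas as pd
import plotly.express as px
import pycountry_convert as pc

df = pd.DataFrame({'Country name': ['Germany', 'Congo (Brazzaville)', 'Kosovo'],
                   'score': [0.9, 0.6, 0.7]})

# Map non-standard names onto ISO 3166 names; drop entries with no code.
df.loc[df['Country name'] == 'Congo (Brazzaville)', 'Country name'] = 'Congo'
df = df[df['Country name'] != 'Kosovo']  # no agreed ISO 3166 code

df['iso_alpha'] = df['Country name'].apply(pc.country_name_to_country_alpha3)

fig = px.choropleth(df, locations='iso_alpha', color='score',
                    hover_name='Country name',
                    color_continuous_scale=px.colors.diverging.RdYlGn)
fig.show()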
koya-ken/pose-ae-train
|
[
"e6147778ac4da079db03abb286becaae29653dac"
] |
[
"task/pose.py"
] |
[
"\"\"\"\n__config__ contains the options for training and testing\nBasically all of the variables related to training are put in __config__['train'] \n\"\"\"\nimport torch\nimport numpy as np\nfrom torch import nn\nimport os\nfrom torch.nn import DataParallel\nfrom utils.misc import make_input, make_output, importNet\n\n__config__ = {\n 'data_provider': 'data.coco_pose.dp',\n 'network': 'models.posenet.PoseNet',\n 'inference': {\n 'nstack': 4,\n 'inp_dim': 256,\n 'oup_dim': 68,\n 'num_parts': 17,\n 'increase': 128,\n 'keys': ['imgs']\n },\n\n 'train': {\n 'batchsize': 32,\n 'input_res': 512,\n 'output_res': 128,\n 'train_iters': 1000,\n 'valid_iters': 10,\n 'learning_rate': 2e-4,\n 'num_loss': 4,\n\n 'loss': [\n ['push_loss', 1e-3],\n ['pull_loss', 1e-3],\n ['detection_loss', 1],\n ],\n\n 'max_num_people': 30,\n 'num_workers': 2,\n 'use_data_loader': True,\n },\n}\n\nclass Trainer(nn.Module):\n \"\"\"\n The wrapper module that will behave differetly for training or testing\n inference_keys specify the inputs for inference\n \"\"\"\n def __init__(self, model, inference_keys, calc_loss):\n super(Trainer, self).__init__()\n self.model = model\n self.keys = inference_keys\n self.calc_loss = calc_loss\n\n def forward(self, imgs, **inputs):\n inps = {}\n labels = {}\n\n for i in inputs:\n if i in self.keys:\n inps[i] = inputs[i]\n else:\n labels[i] = inputs[i]\n\n if not self.training:\n return self.model(imgs, **inps)\n else:\n res = self.model(imgs, **inps)\n if type(res)!=list and type(res)!=tuple:\n res = [res]\n return list(res) + list(self.calc_loss(*res, **labels))\n\ndef make_network(configs):\n PoseNet = importNet(configs['network'])\n train_cfg = configs['train']\n config = configs['inference']\n\n poseNet = PoseNet(**config)\n\n forward_net = DataParallel(poseNet.cuda())\n def calc_loss(*args, **kwargs):\n return poseNet.calc_loss(*args, **kwargs)\n\n config['net'] = Trainer(forward_net, configs['inference']['keys'], calc_loss)\n train_cfg['optimizer'] = torch.optim.Adam(config['net'].parameters(), train_cfg['learning_rate'])\n\n exp_path = os.path.join('exp', configs['opt'].exp)\n if not os.path.exists(exp_path):\n os.mkdir(exp_path)\n logger = open(os.path.join(exp_path, 'log'), 'a+')\n\n def make_train(batch_id, config, phase, **inputs):\n for i in inputs:\n inputs[i] = make_input(inputs[i])\n\n net = config['inference']['net']\n config['batch_id'] = batch_id\n\n if phase == 'train':\n net = net.train()\n else:\n net = net.eval()\n\n if phase != 'inference':\n result = net(inputs['imgs'], **{i:inputs[i] for i in inputs if i!='imgs'})\n\n num_loss = len(config['train']['loss'])\n\n ## I use the last outputs as the loss\n ## the weights of the loss are controlled by config['train']['loss'] \n losses = {i[0]: result[-num_loss + idx]*i[1] for idx, i in enumerate(config['train']['loss'])}\n\n loss = 0\n toprint = '\\n{}: '.format(batch_id)\n for i in losses:\n loss = loss + torch.mean(losses[i])\n\n my_loss = make_output( losses[i] )\n my_loss = my_loss.mean(axis = 0)\n\n if my_loss.size == 1:\n toprint += ' {}: {}'.format(i, format(my_loss.mean(), '.8f'))\n else:\n toprint += '\\n{}'.format(i)\n for j in my_loss:\n toprint += ' {}'.format(format(j.mean(), '.8f'))\n\n logger.write(toprint)\n logger.flush()\n\n if batch_id == 200000:\n ## decrease the learning rate after 200000 iterations\n for param_group in optimizer.param_groups:\n param_group['lr'] = 1e-5\n\n if phase == 'train':\n optimizer = train_cfg['optimizer']\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n 
return None\n else:\n out = {}\n net = net.eval()\n result = net(**inputs)\n if type(result)!=list and type(result)!=tuple:\n result = [result]\n out['preds'] = [make_output(i) for i in result]\n return out\n return make_train\n"
] |
[
[
"torch.mean"
]
] |
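The make_train closure above reduces the network's named losses by scaling each one with its weight from config['train']['loss'] and summing the means. A stripped-down sketch of that reduction, with random tensors standing in for the per-sample losses the network would return:

import torch

# (name, weight) pairs, as in __config__['train']['loss'].
loss_spec = [('push_loss', 1e-3), ('pull_loss', 1e-3), ('detection_loss', 1)]

# Dummy per-sample losses for a batch of 4.
raw = {name: torch.rand(4, requires_grad=True) for name, _ in loss_spec}

# Scale each loss by its configured weight, then sum the means
# (the same reduction make_train performs before calling backward).
weighted = {name: raw[name] * weight for name, weight in loss_spec}
total = sum(torch.mean(v) for v in weighted.values())
total.backward()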
patalen/daqhats
|
[
"90d5a47f5cf5dd8103632df70a9266c4245d9862"
] |
[
"daqhats/mcc172.py"
] |
[
"# pylint: disable=too-many-lines\n\"\"\"\nWraps all of the methods from the MCC 172 library for use in Python.\n\"\"\"\nimport sys\nfrom collections import namedtuple\nfrom ctypes import c_ubyte, c_int, c_ushort, c_ulong, c_long, c_double, \\\n POINTER, c_char_p, byref, create_string_buffer\nfrom enum import IntEnum, unique\nfrom daqhats.hats import Hat, HatError\n\n@unique\nclass SourceType(IntEnum):\n \"\"\"Clock / trigger source options.\"\"\"\n LOCAL = 0 #: Use a local-only source.\n MASTER = 1 #: Use a local source and set it as master.\n SLAVE = 2 #: Use a master source from another MCC 172.\n\nclass mcc172(Hat): # pylint: disable=invalid-name, too-many-public-methods\n \"\"\"\n The class for an MCC 172 board.\n\n Args:\n address (int): board address, must be 0-7.\n\n Raises:\n HatError: the board did not respond or was of an incorrect type\n \"\"\"\n\n _AIN_NUM_CHANNELS = 2 # Number of analog channels\n\n _STATUS_HW_OVERRUN = 0x0001\n _STATUS_BUFFER_OVERRUN = 0x0002\n _STATUS_TRIGGERED = 0x0004\n _STATUS_RUNNING = 0x0008\n\n _MAX_SAMPLE_RATE = 51200.0\n\n _dev_info_type = namedtuple(\n 'MCC172DeviceInfo', [\n 'NUM_AI_CHANNELS', 'AI_MIN_CODE', 'AI_MAX_CODE',\n 'AI_MIN_VOLTAGE', 'AI_MAX_VOLTAGE', 'AI_MIN_RANGE',\n 'AI_MAX_RANGE'])\n\n _dev_info = _dev_info_type(\n NUM_AI_CHANNELS=2,\n AI_MIN_CODE=-8388608,\n AI_MAX_CODE=8388607,\n AI_MIN_VOLTAGE=-5.0,\n AI_MAX_VOLTAGE=(5.0 - (10.0/16777216)),\n AI_MIN_RANGE=-5.0,\n AI_MAX_RANGE=+5.0)\n\n def __init__(self, address=0): # pylint: disable=too-many-statements\n \"\"\"\n Initialize the class.\n \"\"\"\n # call base class initializer\n Hat.__init__(self, address)\n\n # set up library argtypes and restypes\n self._lib.mcc172_open.argtypes = [c_ubyte]\n self._lib.mcc172_open.restype = c_int\n\n self._lib.mcc172_close.argtypes = [c_ubyte]\n self._lib.mcc172_close.restype = c_int\n\n self._lib.mcc172_blink_led.argtypes = [c_ubyte, c_ubyte]\n self._lib.mcc172_blink_led.restype = c_int\n\n self._lib.mcc172_firmware_version.argtypes = [\n c_ubyte, POINTER(c_ushort)]\n self._lib.mcc172_firmware_version.restype = c_int\n\n self._lib.mcc172_serial.argtypes = [c_ubyte, c_char_p]\n self._lib.mcc172_serial.restype = c_int\n\n self._lib.mcc172_calibration_date.argtypes = [c_ubyte, c_char_p]\n self._lib.mcc172_calibration_date.restype = c_int\n\n self._lib.mcc172_calibration_coefficient_read.argtypes = [\n c_ubyte, c_ubyte, POINTER(c_double), POINTER(c_double)]\n self._lib.mcc172_calibration_coefficient_read.restype = c_int\n\n self._lib.mcc172_calibration_coefficient_write.argtypes = [\n c_ubyte, c_ubyte, c_double, c_double]\n self._lib.mcc172_calibration_coefficient_write.restype = c_int\n\n self._lib.mcc172_iepe_config_read.argtypes = [\n c_ubyte, c_ubyte, POINTER(c_ubyte)]\n self._lib.mcc172_iepe_config_read.restype = c_int\n\n self._lib.mcc172_iepe_config_write.argtypes = [\n c_ubyte, c_ubyte, c_ubyte]\n self._lib.mcc172_iepe_config_write.restype = c_int\n\n self._lib.mcc172_a_in_sensitivity_read.argtypes = [\n c_ubyte, c_ubyte, POINTER(c_double)]\n self._lib.mcc172_a_in_sensitivity_read.restype = c_int\n\n self._lib.mcc172_a_in_sensitivity_write.argtypes = [\n c_ubyte, c_ubyte, c_double]\n self._lib.mcc172_a_in_sensitivity_write.restype = c_int\n\n self._lib.mcc172_a_in_clock_config_read.argtypes = [\n c_ubyte, POINTER(c_ubyte), POINTER(c_double),\n POINTER(c_ubyte)]\n self._lib.mcc172_a_in_clock_config_read.restype = c_int\n\n self._lib.mcc172_a_in_clock_config_write.argtypes = [\n c_ubyte, c_ubyte, c_double]\n 
self._lib.mcc172_a_in_clock_config_write.restype = c_int\n\n self._lib.mcc172_trigger_config.argtypes = [c_ubyte, c_ubyte, c_ubyte]\n self._lib.mcc172_trigger_config.restype = c_int\n\n self._lib.mcc172_a_in_scan_start.argtypes = [\n c_ubyte, c_ubyte, c_ulong, c_ulong]\n self._lib.mcc172_a_in_scan_start.restype = c_int\n\n self._lib.mcc172_a_in_scan_status.argtypes = [\n c_ubyte, POINTER(c_ushort), POINTER(c_ulong)]\n self._lib.mcc172_a_in_scan_status.restype = c_int\n\n self._lib.mcc172_a_in_scan_buffer_size.argtypes = [\n c_ubyte, POINTER(c_ulong)]\n self._lib.mcc172_a_in_scan_buffer_size.restype = c_int\n\n self._lib.mcc172_a_in_scan_read.restype = c_int\n\n self._lib.mcc172_a_in_scan_stop.argtypes = [c_ubyte]\n self._lib.mcc172_a_in_scan_stop.restype = c_int\n\n self._lib.mcc172_a_in_scan_cleanup.argtypes = [c_ubyte]\n self._lib.mcc172_a_in_scan_cleanup.restype = c_int\n\n self._lib.mcc172_a_in_scan_channel_count.argtypes = [c_ubyte]\n self._lib.mcc172_a_in_scan_channel_count.restype = c_ubyte\n\n self._lib.mcc172_test_signals_read.argtypes = [\n c_ubyte, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(c_ubyte)]\n self._lib.mcc172_test_signals_read.restype = c_int\n\n self._lib.mcc172_test_signals_write.argtypes = [\n c_ubyte, c_ubyte, c_ubyte, c_ubyte]\n self._lib.mcc172_test_signals_write.restype = c_int\n\n result = self._lib.mcc172_open(self._address)\n\n if result == self._RESULT_SUCCESS:\n self._initialized = True\n elif result == self._RESULT_INVALID_DEVICE:\n raise HatError(self._address, \"Invalid board type.\")\n else:\n raise HatError(self._address, \"Board not responding.\")\n\n return\n\n def __del__(self):\n if self._initialized:\n self._lib.mcc172_a_in_scan_cleanup(self._address)\n self._lib.mcc172_close(self._address)\n return\n\n @staticmethod\n def info():\n \"\"\"\n Return constant information about this type of device.\n\n Returns:\n namedtuple: a namedtuple containing the following field names\n\n * **NUM_AI_CHANNELS** (int): The number of analog input channels\n (2.)\n * **AI_MIN_CODE** (int): The minimum ADC code (-8388608.)\n * **AI_MAX_CODE** (int): The maximum ADC code (8388607.)\n * **AI_MIN_VOLTAGE** (float): The voltage corresponding to the\n minimum ADC code (-5.0.)\n * **AI_MAX_VOLTAGE** (float): The voltage corresponding to the\n maximum ADC code (+5.0 - 1 LSB)\n * **AI_MIN_RANGE** (float): The minimum voltage of the input range\n (-5.0.)\n * **AI_MAX_RANGE** (float): The maximum voltage of the input range\n (+5.0.)\n \"\"\"\n return mcc172._dev_info\n\n def firmware_version(self):\n \"\"\"\n Read the board firmware and bootloader versions.\n\n Returns:\n namedtuple: a namedtuple containing the following field names\n\n * **version** (string): The firmware version, i.e \"1.03\".\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n version = c_ushort()\n if (self._lib.mcc172_firmware_version(self._address, byref(version))\n != self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n version_str = \"{0:X}.{1:02X}\".format(\n version.value >> 8, version.value & 0x00FF)\n version_info = namedtuple(\n 'MCC172VersionInfo', ['version'])\n return version_info(version=version_str)\n\n def serial(self):\n \"\"\"\n Read the serial number.\n\n Returns:\n string: The serial number.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not 
self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n # create string to hold the result\n my_buffer = create_string_buffer(9)\n if (self._lib.mcc172_serial(self._address, my_buffer)\n != self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n my_serial = my_buffer.value.decode('ascii')\n return my_serial\n\n def blink_led(self, count):\n \"\"\"\n Blink the MCC 172 LED.\n\n Setting count to 0 will cause the LED to blink continuously until\n blink_led() is called again with a non-zero count.\n\n Args:\n count (int): The number of times to blink (max 255).\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n if (self._lib.mcc172_blink_led(self._address, count)\n != self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n return\n\n def calibration_date(self):\n \"\"\"\n Read the calibration date.\n\n Returns:\n string: The calibration date in the format \"YYYY-MM-DD\".\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n # create string to hold the result\n my_buffer = create_string_buffer(11)\n if (self._lib.mcc172_calibration_date(self._address, my_buffer)\n != self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n my_date = my_buffer.value.decode('ascii')\n return my_date\n\n def calibration_coefficient_read(self, channel):\n \"\"\"\n Read the calibration coefficients for a single channel.\n\n The coefficients are applied in the library as: ::\n\n calibrated_ADC_code = (raw_ADC_code - offset) * slope\n\n Returns:\n namedtuple: a namedtuple containing the following field names\n\n * **slope** (float): The slope.\n * **offset** (float): The offset.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n slope = c_double()\n offset = c_double()\n if (self._lib.mcc172_calibration_coefficient_read(\n self._address, channel, byref(slope), byref(offset))\n != self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n cal_info = namedtuple('MCC172CalInfo', ['slope', 'offset'])\n return cal_info(\n slope=slope.value,\n offset=offset.value)\n\n def calibration_coefficient_write(self, channel, slope, offset):\n \"\"\"\n Temporarily write the calibration coefficients for a single channel.\n\n The user can apply their own calibration coefficients by writing to\n these values. The values will reset to the factory values from the\n EEPROM whenever the class is initialized. 
This function will fail and\n raise a HatError exception if a scan is active when it is called.\n\n The coefficients are applied in the library as: ::\n\n calibrated_ADC_code = (raw_ADC_code - offset) * slope\n\n Args:\n slope (float): The new slope value.\n offset (float): The new offset value.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n if (self._lib.mcc172_calibration_coefficient_write(\n self._address, channel, slope, offset)\n != self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n return\n\n def iepe_config_write(self, channel, mode):\n \"\"\"\n Configure a channel for an IEPE sensor.\n\n This method turns on / off the IEPE power supply for the specified\n channel. The power-on default is IEPE power off.\n\n Args:\n channel (int): The channel, 0 or 1.\n mode (int): The IEPE mode for the channel, 0 = IEPE off,\n 1 = IEPE on.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n if (self._lib.mcc172_iepe_config_write(self._address, channel, mode) !=\n self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n return\n\n def iepe_config_read(self, channel):\n \"\"\"\n Read the IEPE configuration for a channel.\n\n This method returns the state of the IEPE power supply for the specified\n channel\n\n Args:\n channel (int): The channel, 0 or 1.\n\n Returns\n int: The IEPE mode for the channel, 0 = IEPE off, 1 = IEPE on.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n mode = c_ubyte()\n if (self._lib.mcc172_iepe_config_read(\n self._address, channel, byref(mode)) != self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n return mode.value\n\n def a_in_sensitivity_write(self, channel, value):\n \"\"\"\n Write the MCC 172 analog input sensitivity scaling factor for a single\n channel.\n\n This applies a scaling factor to the analog input data so it returns\n values that are meaningful for the connected sensor.\n\n The sensitivity is specified in mV / mechanical unit. The default value\n when opening the library is 1000, resulting in no scaling of the input\n voltage. Changing this value will not change the values reported by\n :py:func:`info` since it is simply sensor scaling applied to the data\n before returning it.\n\n Examples:\n\n * A seismic sensor with a sensitivity of 10 V/g. Set the sensitivity to\n 10,000 and the returned data will be in units of g.\n * A vibration sensor with a sensitivity of 100 mV/g. 
Set the\n sensitivity to 100 and the returned data will be in units of g.\n\n Args:\n channel (int): The channel, 0 or 1.\n value (float): The sensitivity for the specified channel.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n result = self._lib.mcc172_a_in_sensitivity_write(\n self._address, channel, value)\n if result == self._RESULT_BUSY:\n raise HatError(\n self._address, \"Cannot change the sensitivity \"\n \"while a scan is active.\")\n elif result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response.\")\n return\n\n def a_in_sensitivity_read(self, channel):\n \"\"\"\n Read the MCC 172 analog input sensitivity scaling factor for a single\n channel.\n\n The sensitivity is returned in mV / mechanical unit. The default value\n when opening the library is 1000, resulting in no scaling of the input\n voltage.\n\n Args:\n channel (int): The channel, 0 or 1.\n\n Returns\n float: The sensitivity factor for the channel.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n value = c_double()\n if (self._lib.mcc172_a_in_sensitivity_read(\n self._address, channel, byref(value)) != self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n return value.value\n\n def a_in_clock_config_write(\n self, clock_source, sample_rate_per_channel):\n \"\"\"\n Configure the ADC sampling clock.\n\n This method will configure the ADC sampling clock. The default\n configuration after opening the device is local mode, 51.2 KHz sampling\n rate. The clock source must be one of:\n\n * :py:const:`SourceType.LOCAL`: the clock is generated on this MCC 172\n and not shared with any other devices.\n * :py:const:`SourceType.MASTER`: the clock is generated on this MCC 172\n and shared over the Raspberry Pi header with other MCC 172s. All other\n MCC 172s must be configured for local or slave clock.\n * :py:const:`SourceType.SLAVE`: no clock is generated on this MCC 172,\n it receives its clock from the Raspberry Pi header. Another MCC 172\n must be configured for master clock.\n\n The ADCs will be synchronized so they sample the inputs at the same\n time. This requires 128 clock cycles before the first sample is\n available. When using a master - slave clock configuration there are\n additional considerations:\n\n * There should be only one master device; otherwise, you will be\n connecting multiple outputs together and could damage a device.\n * Configure the clock on the slave device(s) first, master last. The\n synchronization will occur when the master clock is configured,\n causing the ADCs on all the devices to be in sync.\n * If you change the clock configuration on one device after configuring\n the master, then the data will no longer be in sync. The devices\n cannot detect this and will still report that they are synchronized.\n Always write the clock configuration to all devices when modifying the\n configuration.\n * Slave devices must have a master clock source or scans will never\n complete.\n * A trigger must be used for the data streams from all devices to start\n on the same sample.\n\n The MCC 172 can generate a sampling clock equal to 51.2 KHz divided by\n an integer between 1 and 256. The sample_rate_per_channel will be\n internally converted to the nearest valid rate. 
The actual rate can be\n read back using :py:func:`a_in_clock_config_read`. When used in slave\n clock configuration, the device will measure the frequency of the\n incoming master clock after the synchronization period is complete.\n Calling :py:func:`a_in_clock_config_read` after this will return the\n measured sample rate.\n\n Args:\n clock_source (:py:class:`SourceType`): The ADC clock source.\n sample_rate_per_channel (float): The requested sampling rate in\n samples per second per channel.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n result = self._lib.mcc172_a_in_clock_config_write(\n self._address, clock_source, sample_rate_per_channel)\n if result == self._RESULT_BUSY:\n raise HatError(\n self._address, \"Cannot change the clock \"\n \"configuration while a scan is active.\")\n elif result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response.\")\n return\n\n def a_in_clock_config_read(self):\n \"\"\"\n Read the sampling clock configuration.\n\n This method will return the sample clock configuration and rate. If the\n clock is configured for local or master source, then the rate will be\n the internally adjusted rate set by the user. If the clock is\n configured for slave source, then the rate will be measured from the\n master clock after the synchronization period has ended. The\n synchronization status is also returned.\n\n The clock source will be one of:\n\n * :py:const:`SourceType.LOCAL`: the clock is generated on this MCC 172\n and not shared with any other devices.\n * :py:const:`SourceType.MASTER`: the clock is generated on this MCC 172\n and shared over the Raspberry Pi header with other MCC 172s.\n * :py:const:`SourceType.SLAVE`: no clock is generated on this MCC 172,\n it receives its clock from the Raspberry Pi header.\n\n The sampling rate will not be valid in slave mode if synced is False.\n The device will not detect a loss of the master clock when in slave\n mode; it only monitors the clock when a sync is initiated.\n\n Returns\n namedtuple: a namedtuple containing the following field names:\n\n * **clock_source** (:py:class:`SourceType`): The ADC clock source.\n * **sample_rate_per_channel** (float): The sample rate in\n samples per second per channel.\n * **synchronized** (bool): True if the ADCs are synchronized, False\n if a synchronization is in progress.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n clock_source = c_ubyte()\n sample_rate_per_channel = c_double()\n synced = c_ubyte()\n result = self._lib.mcc172_a_in_clock_config_read(\n self._address, byref(clock_source),\n byref(sample_rate_per_channel), byref(synced))\n\n if result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response.\")\n\n clock_config = namedtuple(\n 'MCC172ClockConfig',\n ['clock_source', 'sample_rate_per_channel',\n 'synchronized'])\n return clock_config(\n clock_source=clock_source.value,\n sample_rate_per_channel=sample_rate_per_channel.value,\n synchronized=synced.value != 0)\n\n def trigger_config(self, trigger_source, trigger_mode):\n \"\"\"\n Configure the digital trigger.\n\n The analog input scan may be configured to start saving the acquired\n data when the digital trigger is in the desired state. 
A single device\n trigger may also be shared with multiple boards. This command sets the\n trigger source and mode.\n\n The trigger source must be one of:\n\n * :py:const:`SourceType.LOCAL`: the trigger terminal on this MCC 172 is\n used and not shared with any other devices.\n * :py:const:`SourceType.MASTER`: the trigger terminal on this MCC 172 is\n used and is shared as the master trigger for other MCC 172s.\n * :py:const:`SourceType.SLAVE`: the trigger terminal on this MCC 172 is\n not used, it receives its trigger from the master MCC 172.\n\n The trigger mode must be one of:\n\n * :py:const:`TriggerModes.RISING_EDGE`: Start saving data when the\n trigger transitions from low to high.\n * :py:const:`TriggerModes.FALLING_EDGE`: Start saving data when the\n trigger transitions from high to low.\n * :py:const:`TriggerModes.ACTIVE_HIGH`: Start saving data when the\n trigger is high.\n * :py:const:`TriggerModes.ACTIVE_LOW`: Start saving data when the\n trigger is low.\n\n Care must be taken when using master / slave triggering; the input\n trigger signal on the master will be passed through to the slave(s), but\n the mode is set independently on each device. For example, it is\n possible for the master to trigger on the rising edge of the signal and\n the slave to trigger on the falling edge.\n\n Args:\n trigger_source (:py:class:`SourceType`): The trigger source.\n trigger_mode (:py:class:`TriggerModes`): The trigger mode.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n result = self._lib.mcc172_trigger_config(\n self._address, trigger_source, trigger_mode)\n if result == self._RESULT_BUSY:\n raise HatError(\n self._address, \"Cannot write trigger configuration \"\n \"while a scan is active.\")\n elif result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response.\")\n return\n\n @staticmethod\n def a_in_scan_actual_rate(sample_rate_per_channel):\n \"\"\"\n Calculate the actual sample rate per channel for a requested sample\n rate.\n\n The scan clock is generated from a 51.2 KHz clock source divided by an\n integer between 1 and 256, so only discrete frequency steps can be\n achieved. This method will return the actual rate for a requested\n sample rate.\n\n This function does not perform any actions with a board, it simply\n calculates the rate.\n\n Args:\n sample_rate_per_channel (float): The desired per-channel rate of the\n internal sampling clock.\n\n Returns:\n float: the actual sample rate\n \"\"\"\n divisor = 51200.0 / sample_rate_per_channel + 0.5\n if divisor < 1.0:\n divisor = 1.0\n elif divisor > 256.0:\n divisor = 256.0\n\n sample_rate = 51200.0 / int(divisor)\n return sample_rate\n\n def a_in_scan_start(self, channel_mask, samples_per_channel, options):\n \"\"\"\n Start capturing analog input data.\n\n The scan runs as a separate thread from the user's code. This function\n will allocate a scan buffer and start the thread that reads data from\n the device into that buffer. The user reads the data from the scan\n buffer and the scan status using the :py:func:`a_in_scan_read` function.\n :py:func:`a_in_scan_stop` is used to stop a continuous scan, or to stop\n a finite scan before it completes. 
The user must call\n :py:func:`a_in_scan_cleanup` after the scan has finished and all desired\n data has been read; this frees all resources from the scan and allows\n additional scans to be performed.\n\n The scan cannot be started until the ADCs are synchronized, so this\n function will not return until that has completed. It is best to wait\n for sync using :py:func:`a_in_clock_config_read` before starting the\n scan.\n\n The scan state has defined terminology:\n\n * **Active**: :py:func:`a_in_scan_start` has been called and the device\n may be acquiring data or finished with the acquisition. The scan has\n not been cleaned up by calling :py:func:`a_in_scan_cleanup`, so\n another scan may not be started.\n * **Running**: The scan is active and the device is still acquiring\n data. Certain methods like :py:func:`a_in_clock_config_write` will\n return an error because the device is busy.\n\n The scan options that may be used are:\n\n * :py:const:`OptionFlags.DEFAULT`: Return scaled and calibrated data,\n do not use a trigger, and finite operation. Any other flags will\n override DEFAULT behavior.\n * :py:const:`OptionFlags.NOSCALEDATA`: Return ADC codes (values between\n -8,388,608 and 8,388,607) rather than voltage.\n * :py:const:`OptionFlags.NOCALIBRATEDATA`: Return data without the\n calibration factors applied.\n * :py:const:`OptionFlags.EXTTRIGGER`: Do not start saving data (after\n calling :py:func:`a_in_scan_start`) until the trigger condition is\n met. The trigger is configured with :py:func:`trigger_config`.\n * :py:const:`OptionFlags.CONTINUOUS`: Read analog data continuously\n until stopped by the user by calling :py:func:`a_in_scan_stop` and\n write data to a circular buffer. The data must be read before being\n overwritten to avoid a buffer overrun error. **samples_per_channel**\n is only used for buffer sizing.\n\n The :py:const:`OptionFlags.EXTCLOCK` option is not supported for this\n device and will raise a ValueError.\n\n The scan buffer size will be allocated as follows:\n\n **Finite mode:** Total number of samples in the scan.\n\n **Continuous mode:** Either **samples_per_channel** or the value in the\n table below, whichever is greater.\n\n ================= =========================\n Sample Rate Buffer Size (per channel)\n ================= =========================\n 200-1024 S/s 1 kS\n 1280-10.24 kS/s 10 kS\n 12.8 kS or higher 100 kS\n ================= =========================\n\n Specifying a very large value for samples_per_channel could use too much\n of the Raspberry Pi memory. If the memory allocation fails, the function\n will raise a HatError with this description. The allocation could\n succeed, but the lack of free memory could cause other problems in the\n Raspberry Pi. If you need to acquire a high number of samples then it is\n better to run the scan in continuous mode and stop it when you have\n acquired the desired amount of data. 
If a scan is active this method\n will raise a HatError.\n\n Args:\n channel_mask (int): A bit mask of the desired channels (0x01 -\n 0x03).\n samples_per_channel (int): The number of samples to acquire per\n channel (finite mode,) or or can be used to set a larger scan\n buffer size than the default value (continuous mode.)\n options (int): An ORed combination of :py:class:`OptionFlags` flags\n that control the scan.\n\n Raises:\n HatError: a scan is active; memory could not be allocated; the board\n is not initialized, does not respond, or responds incorrectly.\n ValueError: a scan argument is invalid.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n\n # Perform some argument checking\n if (channel_mask == 0) or (channel_mask > 3):\n raise ValueError(\"channel_mask must be 1 - 3\")\n\n num_channels = 0\n for index in range(2):\n bit_mask = 1 << index\n if (channel_mask & bit_mask) != 0:\n num_channels += 1\n\n result = self._lib.mcc172_a_in_scan_start(\n self._address, channel_mask, samples_per_channel, options)\n\n if result == self._RESULT_BAD_PARAMETER:\n raise ValueError(\"Invalid scan parameter.\")\n elif result == self._RESULT_BUSY:\n raise HatError(self._address, \"A scan is already active.\")\n elif result == self._RESULT_RESOURCE_UNAVAIL:\n raise HatError(self._address, \"Memory could not be allocated.\")\n elif result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response {}.\".format(\n result))\n return\n\n def a_in_scan_buffer_size(self):\n \"\"\"\n Read the internal scan data buffer size.\n\n An internal data buffer is allocated for the scan when\n :py:func:`a_in_scan_start` is called. This function returns the total\n size of that buffer in samples.\n\n Returns:\n int: the buffer size in samples\n\n Raises:\n HatError: the board is not initialized or no scan buffer is\n allocated (a scan is not active).\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n data_value = c_ulong()\n\n result = self._lib.mcc172_a_in_scan_buffer_size(\n self._address, byref(data_value))\n\n if result == self._RESULT_RESOURCE_UNAVAIL:\n raise HatError(self._address, \"No scan is active.\")\n elif result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response {}.\".format(\n result))\n return data_value.value\n\n def a_in_scan_status(self):\n \"\"\"\n Read scan status and number of available samples per channel.\n\n The analog input scan is started with :py:func:`a_in_scan_start` and\n runs in the background. 
This function reads the status of that\n background scan and the number of samples per channel available in\n the scan thread buffer.\n\n Returns:\n namedtuple: a namedtuple containing the following field names:\n\n * **running** (bool): True if the scan is running, False if it has\n stopped or completed.\n * **hardware_overrun** (bool): True if the hardware could not\n acquire and unload samples fast enough and data was lost.\n * **buffer_overrun** (bool): True if the background scan buffer was\n not read fast enough and data was lost.\n * **triggered** (bool): True if the trigger conditions have been met\n and data acquisition started.\n * **samples_available** (int): The number of samples per channel\n currently in the scan buffer.\n\n Raises:\n HatError: A scan is not active, the board is not initialized, does\n not respond, or responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n\n status = c_ushort(0)\n samples_available = c_ulong(0)\n\n result = self._lib.mcc172_a_in_scan_status(\n self._address, byref(status), byref(samples_available))\n\n if result == self._RESULT_RESOURCE_UNAVAIL:\n raise HatError(self._address, \"Scan not active.\")\n elif result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response {}.\".format(\n result))\n\n scan_status = namedtuple(\n 'MCC172ScanStatus',\n ['running', 'hardware_overrun', 'buffer_overrun', 'triggered',\n 'samples_available'])\n return scan_status(\n running=(status.value & self._STATUS_RUNNING) != 0,\n hardware_overrun=(status.value & self._STATUS_HW_OVERRUN) != 0,\n buffer_overrun=(status.value & self._STATUS_BUFFER_OVERRUN) != 0,\n triggered=(status.value & self._STATUS_TRIGGERED) != 0,\n samples_available=samples_available.value)\n\n def a_in_scan_read(self, samples_per_channel, timeout):\n # pylint: disable=too-many-locals\n \"\"\"\n Read scan status and data (as a list).\n\n The analog input scan is started with :py:func:`a_in_scan_start` and\n runs in the background. This function reads the status of that\n background scan and optionally reads sampled data from the scan buffer.\n\n Args:\n samples_per_channel (int): The number of samples per channel to read\n from the scan buffer. Specify a negative number to return all\n available samples immediately and ignore **timeout** or 0 to\n only read the scan status and return no data.\n timeout (float): The amount of time in seconds to wait for the\n samples to be read. 
Specify a negative number to wait\n indefinitely, or 0 to return immediately with the samples that\n are already in the scan buffer (up to **samples_per_channel**.)\n If the timeout is met and the specified number of samples have\n not been read, then the function will return all the available\n samples and the timeout status set.\n\n Returns:\n namedtuple: a namedtuple containing the following field names:\n\n * **running** (bool): True if the scan is running, False if it has\n stopped or completed.\n * **hardware_overrun** (bool): True if the hardware could not\n acquire and unload samples fast enough and data was lost.\n * **buffer_overrun** (bool): True if the background scan buffer was\n not read fast enough and data was lost.\n * **triggered** (bool): True if the trigger conditions have been met\n and data acquisition started.\n * **timeout** (bool): True if the timeout time expired before the\n specified number of samples were read.\n * **data** (list of float): The data that was read from the scan\n buffer.\n\n Raises:\n HatError: A scan is not active, the board is not initialized, does\n not respond, or responds incorrectly.\n ValueError: Incorrect argument.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n\n num_channels = self._lib.mcc172_a_in_scan_channel_count(self._address)\n\n self._lib.mcc172_a_in_scan_read.argtypes = [\n c_ubyte, POINTER(c_ushort), c_long, c_double, POINTER(c_double),\n c_ulong, POINTER(c_ulong)]\n\n samples_read_per_channel = c_ulong(0)\n samples_to_read = 0\n status = c_ushort(0)\n timed_out = False\n\n if samples_per_channel < 0:\n # read all available data\n\n # first, get the number of available samples\n samples_available = c_ulong(0)\n result = self._lib.mcc172_a_in_scan_status(\n self._address, byref(status), byref(samples_available))\n\n if result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response {}.\".format(\n result))\n\n # allocate a buffer large enough for all the data\n samples_to_read = samples_available.value\n buffer_size = samples_available.value * num_channels\n data_buffer = (c_double * buffer_size)()\n elif samples_per_channel == 0:\n # only read the status\n samples_to_read = 0\n buffer_size = 0\n data_buffer = None\n elif samples_per_channel > 0:\n # read the specified number of samples\n samples_to_read = samples_per_channel\n # create a C buffer for the read\n buffer_size = samples_per_channel * num_channels\n data_buffer = (c_double * buffer_size)()\n else:\n # invalid samples_per_channel\n raise ValueError(\"Invalid samples_per_channel {}.\".format(\n samples_per_channel))\n\n result = self._lib.mcc172_a_in_scan_read(\n self._address, byref(status), samples_to_read, timeout, data_buffer,\n buffer_size, byref(samples_read_per_channel))\n\n if result == self._RESULT_BAD_PARAMETER:\n raise ValueError(\"Invalid parameter.\")\n elif result == self._RESULT_RESOURCE_UNAVAIL:\n raise HatError(self._address, \"Scan not active.\")\n elif result == self._RESULT_TIMEOUT:\n timed_out = True\n elif result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response {}.\".format(\n result))\n\n total_read = samples_read_per_channel.value * num_channels\n\n # python 2 / 3 workaround for xrange\n if sys.version_info.major > 2:\n data_list = [data_buffer[i] for i in range(total_read)]\n else:\n data_list = [data_buffer[i] for i in xrange(total_read)]\n\n scan_status = namedtuple(\n 'MCC172ScanRead',\n ['running', 'hardware_overrun', 'buffer_overrun', 
'triggered',\n 'timeout', 'data'])\n return scan_status(\n running=(status.value & self._STATUS_RUNNING) != 0,\n hardware_overrun=(status.value & self._STATUS_HW_OVERRUN) != 0,\n buffer_overrun=(status.value & self._STATUS_BUFFER_OVERRUN) != 0,\n triggered=(status.value & self._STATUS_TRIGGERED) != 0,\n timeout=timed_out,\n data=data_list)\n\n def a_in_scan_read_numpy(self, samples_per_channel, timeout):\n # pylint: disable=too-many-locals\n \"\"\"\n Read scan status and data (as a NumPy array).\n\n This function is similar to :py:func:`a_in_scan_read` except that the\n *data* key in the returned namedtuple is a NumPy array of float64 values\n and may be used directly with NumPy functions.\n\n Args:\n samples_per_channel (int): The number of samples per channel to read\n from the scan buffer. Specify a negative number to read all\n available samples or 0 to only read the scan status and return\n no data.\n timeout (float): The amount of time in seconds to wait for the\n samples to be read. Specify a negative number to wait\n indefinitely, or 0 to return immediately with the samples that\n are already in the scan buffer. If the timeout is met and the\n specified number of samples have not been read, then the\n function will return with the amount that has been read and the\n timeout status set.\n\n Returns:\n namedtuple: a namedtuple containing the following field names:\n\n * **running** (bool): True if the scan is running, False if it has\n stopped or completed.\n * **hardware_overrun** (bool): True if the hardware could not\n acquire and unload samples fast enough and data was lost.\n * **buffer_overrun** (bool): True if the background scan buffer was\n not read fast enough and data was lost.\n * **triggered** (bool): True if the trigger conditions have been met\n and data acquisition started.\n * **timeout** (bool): True if the timeout time expired before the\n specified number of samples were read.\n * **data** (NumPy array of float64): The data that was read from the\n scan buffer.\n\n Raises:\n HatError: A scan is not active, the board is not initialized, does\n not respond, or responds incorrectly.\n ValueError: Incorrect argument.\n \"\"\"\n try:\n import numpy\n from numpy.ctypeslib import ndpointer\n except ImportError:\n raise\n\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n\n self._lib.mcc172_a_in_scan_read.argtypes = [\n c_ubyte, POINTER(c_ushort), c_long, c_double,\n ndpointer(c_double, flags=\"C_CONTIGUOUS\"), c_ulong,\n POINTER(c_ulong)]\n\n num_channels = self._lib.mcc172_a_in_scan_channel_count(self._address)\n samples_read_per_channel = c_ulong()\n status = c_ushort()\n timed_out = False\n samples_to_read = 0\n\n if samples_per_channel < 0:\n # read all available data\n\n # first, get the number of available samples\n samples_available = c_ulong(0)\n result = self._lib.mcc172_a_in_scan_status(\n self._address, byref(status), byref(samples_available))\n\n if result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response {}.\".format(\n result))\n\n # allocate a buffer large enough for all the data\n samples_to_read = samples_available.value\n buffer_size = samples_available.value * num_channels\n data_buffer = numpy.empty(buffer_size, dtype=numpy.float64)\n elif samples_per_channel == 0:\n # only read the status\n samples_to_read = 0\n buffer_size = 0\n data_buffer = None\n elif samples_per_channel > 0:\n # read the specified number of samples\n samples_to_read = samples_per_channel\n # create a C buffer for the 
read\n buffer_size = samples_per_channel * num_channels\n data_buffer = numpy.empty(buffer_size, dtype=numpy.float64)\n else:\n # invalid samples_per_channel\n raise ValueError(\"Invalid samples_per_channel {}.\".format(\n samples_per_channel))\n\n result = self._lib.mcc172_a_in_scan_read(\n self._address, byref(status), samples_to_read, timeout, data_buffer,\n buffer_size, byref(samples_read_per_channel))\n\n if result == self._RESULT_BAD_PARAMETER:\n raise ValueError(\"Invalid parameter.\")\n elif result == self._RESULT_RESOURCE_UNAVAIL:\n raise HatError(self._address, \"Scan not active.\")\n elif result == self._RESULT_TIMEOUT:\n timed_out = True\n elif result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response {}.\".format(\n result))\n\n total_read = samples_read_per_channel.value * num_channels\n\n if total_read < buffer_size:\n data_buffer = numpy.resize(data_buffer, (total_read,))\n\n scan_status = namedtuple(\n 'MCC172ScanRead',\n ['running', 'hardware_overrun', 'buffer_overrun', 'triggered',\n 'timeout', 'data'])\n return scan_status(\n running=(status.value & self._STATUS_RUNNING) != 0,\n hardware_overrun=(status.value & self._STATUS_HW_OVERRUN) != 0,\n buffer_overrun=(status.value & self._STATUS_BUFFER_OVERRUN) != 0,\n triggered=(status.value & self._STATUS_TRIGGERED) != 0,\n timeout=timed_out,\n data=data_buffer)\n\n def a_in_scan_channel_count(self):\n \"\"\"\n Read the number of channels in the current analog input scan.\n\n Returns:\n int: the number of channels (0 if no scan is active, 1-2 otherwise)\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n\n num_channels = self._lib.mcc172_a_in_scan_channel_count(self._address)\n return num_channels\n\n def a_in_scan_stop(self):\n \"\"\"\n Stops an analog input scan.\n\n The device stops acquiring data immediately. The scan data that has been\n read into the scan buffer is available until\n :py:func:`a_in_scan_cleanup` is called.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n\n if (self._lib.mcc172_a_in_scan_stop(self._address)\n != self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n\n return\n\n def a_in_scan_cleanup(self):\n \"\"\"\n Free analog input scan resources after the scan is complete.\n\n This will free the scan buffer and other resources used by the\n background scan and make it possible to start another scan with\n :py:func:`a_in_scan_start`.\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n\n if (self._lib.mcc172_a_in_scan_cleanup(self._address)\n != self._RESULT_SUCCESS):\n raise HatError(self._address, \"Incorrect response.\")\n\n return\n\n def test_signals_read(self):\n \"\"\"\n Read the state of shared signals for testing.\n\n This function reads the state of the ADC clock, sync, and trigger\n signals. Use it in conjunction with :py:func:`a_in_clock_config_write`\n and :py:func:`trigger_config` to put the signals into slave mode then\n set values on the signals using the Raspberry Pi GPIO pins. 
This method\n will return the values present on those signals.\n\n Returns:\n namedtuple: a namedtuple containing the following field names:\n\n * **clock** (int): The current value of the clock signal (0 or 1).\n * **sync** (int): The current value of the sync signal (0 or 1).\n * **trigger** (int): The current value of the trigger signal\n (0 or 1).\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n\n clock = c_ubyte()\n sync = c_ubyte()\n trigger = c_ubyte()\n result = self._lib.mcc172_test_signals_read(\n self._address, byref(clock), byref(sync), byref(trigger))\n if result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response.\")\n\n test_status = namedtuple(\n 'MCC172TestRead',\n ['clock', 'sync', 'trigger'])\n return test_status(\n clock=clock.value,\n sync=sync.value,\n trigger=trigger.value)\n\n def test_signals_write(self, mode, clock, sync):\n \"\"\"\n Write values to shared signals for testing.\n\n This function puts the shared signals into test mode and sets them to\n the specified state. The signal levels can then be read on the Raspberry\n Pi GPIO pins to confirm values. Return the device to normal mode when\n testing is complete.\n\n ADC conversions will not occur while in test mode. The ADCs require\n synchronization after exiting test mode, so use\n :py:func:`a_in_clock_config_write` to perform synchronization.\n\n Args:\n mode (int): Set to 1 for test mode or 0 for normal mode.\n clock (int): The value to write to the clock signal in test mode\n (0 or 1).\n sync (int): The value to write to the sync signal in test mode\n (0 or 1).\n\n Raises:\n HatError: the board is not initialized, does not respond, or\n responds incorrectly.\n \"\"\"\n if not self._initialized:\n raise HatError(self._address, \"Not initialized.\")\n\n result = self._lib.mcc172_test_signals_write(\n self._address, mode, clock, sync)\n if result != self._RESULT_SUCCESS:\n raise HatError(self._address, \"Incorrect response.\")\n return\n"
] |
[
[
"numpy.ctypeslib.ndpointer",
"numpy.resize",
"numpy.empty"
]
] |
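A hedged usage sketch for the scan-read API above. This is a minimal sketch assuming the methods belong to the daqhats `mcc172` HAT class suggested by the `MCC172ScanRead` namedtuple; the import path, board address, and the omitted scan set-up calls are assumptions not shown in the file itself.

from daqhats import mcc172  # assumed import path

hat = mcc172(0)  # hypothetical: HAT board at address 0
# Clock/channel configuration and a_in_scan_start(...) belong here; their
# signatures are not shown above, so they are left out of this sketch.

# -1 reads everything currently buffered; timeout 0 returns immediately.
result = hat.a_in_scan_read(samples_per_channel=-1, timeout=0)
if result.hardware_overrun or result.buffer_overrun:
    print('Overrun detected: samples were lost.')
print('Read {} interleaved samples.'.format(len(result.data)))

hat.a_in_scan_stop()     # stop acquiring; buffered data remains readable
hat.a_in_scan_cleanup()  # free scan resources before the next scan start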
guanhuaw/BLIPSrecon
|
[
"b46667861f036eeddc138d97430c6fd7154f6654"
] |
[
"Supervised learning/test.py"
] |
[
"import os\nfrom options.test_options import TestOptions\nfrom data import CreateDataLoader\nfrom models import create_model\nfrom util.visualizer import save_images\nfrom util import html\nimport ntpath\nimport numpy as np\n\nif __name__ == '__main__':\n opt = TestOptions().parse()\n opt.nThreads = 1 # test code only supports nThreads = 1\n opt.batchSize = 1 # test code only supports batchSize = 1\n opt.serial_batches = True # no shuffle\n opt.no_flip = True # no flip\n opt.display_id = -1 # no visdom display\n data_loader = CreateDataLoader(opt)\n dataset = data_loader.load_data()\n model = create_model(opt)\n model.setup(opt)\n # create website\n web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))\n webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))\n # test\n\n img_dir = os.path.join(web_dir, 'images')\n log_name = os.path.join(img_dir, 'loss_log.txt')\n PSNR = []\n SSIM = []\n HFEN = []\n print(len(dataset))\n for i, data in enumerate(dataset):\n\n if i >= opt.how_many:\n break\n model.set_input(data)\n model.test()\n visuals = model.get_current_visuals()\n losses = model.get_current_losses()\n img_path = model.get_image_paths()\n if i % 5 == 0:\n print('processing (%04d)-th image... %s' % (i, img_path))\n save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)\n\n short_path = ntpath.basename(img_path[0])\n name = os.path.splitext(short_path)[0]\n message = name\n for k, v in losses.items():\n if k == 'PSNR':\n PSNR = np.append(PSNR,v)\n if k == 'SSIM':\n SSIM = np.append(SSIM,v)\n if k == 'HFEN':\n HFEN = np.append(HFEN,v)\n message += '%s: %.5f ' % (k, v)\n print(message)\n with open(log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message)\n\n webpage.save()\n with open(log_name, \"a\") as log_file:\n log_file.write('averagepsnr%s\\n' % (np.sum(PSNR) / i))\n log_file.write('averageSSIM%s\\n' % (np.sum(SSIM) / i))\n log_file.write('averageHFEN%s\\n' % (np.sum(HFEN) / i))\n psnr_name = os.path.join(img_dir, 'PSNR.npy')\n ssim_name = os.path.join(img_dir, 'SSIM.npy')\n HFEN_name = os.path.join(img_dir, 'HFEN.npy')\n np.save(psnr_name, PSNR)\n np.save(ssim_name, SSIM)\n np.save(HFEN_name, HFEN)\n print('psnr%f'%(np.sum(PSNR) / i))\n print('ssim%f'%(np.sum(SSIM) / i))\n print('HFEN%f' % (np.sum(HFEN) / i))\n"
] |
[
[
"numpy.append",
"numpy.sum",
"numpy.save"
]
] |
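One behavior worth noting in the script above: when the data loader is exhausted before `opt.how_many`, the final `i` from `enumerate` is `len(dataset) - 1`, so dividing the metric sums by `i` overstates the averages. A corrected averaging sketch with hypothetical scores:

import numpy as np

PSNR = np.array([31.2, 30.8, 32.1])  # hypothetical per-image scores
# Divide by the number of images, not the last loop index.
print('average psnr %f' % (np.sum(PSNR) / len(PSNR)))  # or simply PSNR.mean()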
yuj09161/group
|
[
"69ff6e1d988a979968d267c83a901a0567828fd9"
] |
[
"load.py"
] |
[
"import matplotlib.pyplot as plt,json\n\nwith open('export.json','w') as file:\n json.dump(res,file)\n\nfor k in range(len(res)):\n x=[]\n y=[]\n for j in range(len(res[k])):\n x.append(res[k][j][0])\n y.append(res[k][j][1])\n plt.scatter(x,y)\nplt.show()"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter"
]
] |
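For context, a hypothetical companion snippet that writes `export.json` in the nested list-of-point-pairs shape the plotting script above consumes:

import json

# Two hypothetical series of (x, y) points.
res = [[[0, 0], [1, 1], [2, 4]],
       [[0, 2], [1, 3], [2, 5]]]
with open('export.json', 'w') as file:
    json.dump(res, file)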
buridiaditya/gym
|
[
"99d4555e1862cb0bc88971fa6bc3402aa295b859"
] |
[
"gym/envs/madras/gym_madras.py"
] |
[
"\"\"\"\nGym Madras Env Wrapper.\n\nThis is an OpenAI gym environment wrapper for the MADRaS simulator. For more information on the OpenAI Gym interface, please refer to: https://gym.openai.com\n\nBuilt on top of gym_torcs https://github.com/ugo-nama-kun/gym_torcs/blob/master/gym_torcs.py\n\nThe following enhancements were made for Multi-agent synchronization using exception handling:\n- All the agents connect to the same TORCS engine through UDP ports\n- If an agent fails to connect to the TORCS engine, it keeps on trying in a loop until successful\n- Restart the episode for all the agents when any one of the learning agents terminates its episode\n\n\"\"\"\n\nimport math\nfrom copy import deepcopy\nimport numpy as np\nfrom . import snakeoil3_gym as snakeoil3\nfrom .gym_torcs import TorcsEnv\nfrom .controllers.pid import PID\nimport os\nimport subprocess\nimport signal\nimport time\nfrom mpi4py import MPI\nimport socket\n\nclass MadrasEnv(TorcsEnv):\n \"\"\"Definition of the Gym Madras Env.\"\"\"\n def __init__(self, vision=False, throttle=True,\n gear_change=False, port=60934, pid_assist=False,\n CLIENT_MAX_STEPS=np.inf,visualise=True):\n \"\"\"Init Method.\"\"\"\n self.pid_assist = pid_assist\n if self.pid_assist:\n self.action_dim = 2 # LanePos, Velocity\n else:\n self.action_dim = 3 # Accel, Steer, Brake\n TorcsEnv.__init__(self, vision=False, throttle=True, gear_change=False)\n self.state_dim = 29 # No. of sensors input\n self.env_name = 'Madras_Env'\n self.port = port\n self.visualise = visualise\n self.CLIENT_MAX_STEPS = CLIENT_MAX_STEPS\n self.client_type = 0 # Snakeoil client type\n self.initial_reset = True\n self.early_stop = True\n if self.pid_assist:\n self.PID_latency = 10\n else:\n self.PID_latency = 1\n self.accel_PID = PID(np.array([10.5, 0.05, 2.8])) # accel PID\n self.steer_PID = PID(np.array([5.1, 0.001, 0.000001])) # steer PID\n\n self.prev_lane = 0\n self.prev_angle = 0\n self.prev_vel = 0\n self.prev_dist = 0\n self.ob = None\n self.track_len = 7014.6\n self.torcs_proc = None\n self.start_torcs_process()\n \n def get_free_udp_port(self):\n udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n udp.bind(('', 0))\n addr, port = udp.getsockname()\n udp.close()\n return port\n\n def start_torcs_process(self):\n if self.torcs_proc is not None:\n os.killpg(os.getpgid(self.torcs_proc.pid), signal.SIGKILL)\n time.sleep(0.5)\n self.torcs_proc = None\n\n self.port = self.get_free_udp_port()\n window_title = str(self.port)\n command = None\n rank = MPI.COMM_WORLD.Get_rank()\n\n \n\n if self.visualise:\n command = 'export TORCS_PORT={} && vglrun torcs -title {}'.format(window_title, self.port)\n else:\n command = 'export TORCS_PORT={} && vglrun torcs -r ~/.torcs/config/raceman/quickrace.xml -title {}'.format(window_title, self.port)\n if self.vision is True:\n command += ' -vision'\n self.torcs_proc = subprocess.Popen([command], shell=True, preexec_fn=os.setsid)\n time.sleep(0.5)\n #if self.visualise:\n # os.system('sh autostart.sh {}'.format(window_title))\n time.sleep(0.5)\n\n \n def reset(self, prev_step_info=None):\n \"\"\"Reset Method. 
To be called at the end of each episode\"\"\"\n if self.initial_reset:\n while self.ob is None:\n try:\n self.client = snakeoil3.Client(p=self.port,\n vision=self.vision)\n # Open new UDP in vtorcs\n self.client.MAX_STEPS = self.CLIENT_MAX_STEPS\n self.client.get_servers_input(step=0)\n # Get the initial input from torcs\n raw_ob = self.client.S.d\n # Get the current full-observation\n self.ob = self.make_observation(raw_ob)\n except:\n pass\n self.initial_reset = False\n\n else:\n try:\n if 'termination_cause' in list(prev_step_info.keys()) and\\\n prev_step_info['termination_cause'] == 'hardReset':\n self.ob, self.client =\\\n TorcsEnv.reset(self, client=self.client, relaunch=True)\n else:\n self.ob, self.client =\\\n TorcsEnv.reset(self, client=self.client, relaunch=True)\n\n except Exception as e:\n self.ob = None\n while self.ob is None:\n try:\n self.client = snakeoil3.Client(p=self.port,\n vision=self.vision)\n # Open new UDP in vtorcs\n self.client.MAX_STEPS = self.CLIENT_MAX_STEPS\n self.client.get_servers_input(step=0)\n # Get the initial input from torcs\n raw_ob = self.client.S.d\n # Get the current full-observation from torcs\n self.ob = self.make_observation(raw_ob)\n except:\n pass\n\n self.distance_traversed = 0\n s_t = np.hstack((self.ob.angle, self.ob.track, self.ob.trackPos,\n self.ob.speedX, self.ob.speedY, self.ob.speedZ,\n self.ob.wheelSpinVel / 100.0, self.ob.rpm))\n\n return s_t\n\n def step(self, desire):\n \"\"\"Step method to be called at each time step.\"\"\"\n r_t = 0\n\n for PID_step in range(self.PID_latency):\n # Implement the desired trackpos and velocity using PID\n if self.pid_assist:\n self.accel_PID.update_error((desire[1] - self.prev_vel))\n self.steer_PID.update_error((-(self.prev_lane - desire[0]) / 10 +\n self.prev_angle))\n if self.accel_PID.output() < 0.0:\n brake = 1\n else:\n brake = 0\n a_t = np.asarray([self.steer_PID.output(),\n self.accel_PID.output(), brake])\n else:\n a_t = desire\n try:\n self.ob, r, done, info = TorcsEnv.step(self, PID_step,\n self.client, a_t,\n self.early_stop)\n except Exception as e:\n print((\"Exception caught at port \" + str(e)))\n self.ob = None\n while self.ob is None:\n try:\n self.client = snakeoil3.Client(p=self.port,\n vision=self.vision)\n # Open new UDP in vtorcs\n self.client.MAX_STEPS = self.CLIENT_MAX_STEPS\n self.client.get_servers_input(0)\n # Get the initial input from torcs\n raw_ob = self.client.S.d\n # Get the current full-observation from torcs\n self.ob = self.make_observation(raw_ob)\n except:\n pass\n continue\n self.prev_vel = self.ob.speedX\n self.prev_angle = self.ob.angle\n self.prev_lane = self.ob.trackPos\n if (math.isnan(r)):\n r = 0.0\n r_t += r # accumulate rewards over all the time steps\n\n self.distance_traversed = self.client.S.d['distRaced']\n r_t += (self.distance_traversed - self.prev_dist) /\\\n self.track_len\n self.prev_dist = deepcopy(self.distance_traversed)\n if self.distance_traversed >= self.track_len:\n done = True\n if done:\n break\n\n s_t1 = np.hstack((self.ob.angle, self.ob.track, self.ob.trackPos,\n self.ob.speedX, self.ob.speedY, self.ob.speedZ,\n self.ob.wheelSpinVel / 100.0, self.ob.rpm))\n\n return s_t1, r_t, done, info\n"
] |
[
[
"numpy.hstack",
"numpy.array"
]
] |
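A minimal interaction sketch for `MadrasEnv`, assuming a working TORCS installation. With `pid_assist=False` the action has three components; the comment in `__init__` lists them as accel/steer/brake while the PID branch orders them steer/accel/brake, so the ordering below is an assumption.

import numpy as np

env = MadrasEnv(pid_assist=False, visualise=False)
state = env.reset()                     # 29-dimensional sensor vector
for _ in range(100):
    action = np.array([0.0, 0.3, 0.0])  # assumed order: steer, accel, brake
    state, reward, done, info = env.step(action)
    if done:
        state = env.reset()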
Hekstra-Lab/microutil
|
[
"ab3b7b51754bf90ef35d6eea1c7b35cece638f0e",
"ab3b7b51754bf90ef35d6eea1c7b35cece638f0e"
] |
[
"microutil/leica/leica.py",
"microutil/loading.py"
] |
[
"# flake8: noqa E741\nimport glob\nimport re\nimport xml.etree.ElementTree as ET\n\nimport dask.array as da\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tifffile as tiff\nimport xarray as xr\nfrom cycler import cycler\n\n__all__ = [\n \"delay_to_wns\",\n \"get_standard_metadata\",\n \"get_ldm_metadata\",\n \"get_coords\",\n \"load_leica_frames\",\n \"load_srs_timelapse_dataset\",\n \"gogogo_dimension_data\",\n \"stczyx\",\n \"ldm_stczyx\",\n \"ldm_stcrzyx\",\n \"ldm_to_time\",\n \"viridis_cycler\",\n]\n\nMETA_COLS = [\n \"timestamp\",\n \"x_origin\",\n \"y_origin\",\n \"z_origin\",\n \"t_origin\",\n \"x_length\",\n \"y_length\",\n \"z_length\",\n \"t_length\",\n \"x_elements\",\n \"y_elements\",\n \"z_elements\",\n \"t_elements\",\n \"stage_x\",\n \"stage_y\",\n \"stage_z\",\n \"channels\",\n]\n\n\ndef delay_to_wns(delay, wns_per_mm=228.17640641870852, ref_d=26.27009266, ref_wn=2905):\n \"\"\"\n Convert delay stage positions into Raman wavenumbers give a reference point\n and a conversion factor. Default was based on the spectrum of DMSO.\n\n Parameters\n ----------\n delay : np.array or float\n Aarray containing delay stage positions.\n wns_per_mm : float, default 228.176\n Wavenumbers per millimeter of delay stage travel.\n ref_d : float, default 26.27\n Reference delay stage position that corresponds to wavenumber ref_wn\n ref_wn : float, default 2905\n Reference wavenumber that corresponds to delay stage positiond ref_d\n\n Returns\n -------\n wavenumbers : np.array or float\n Raman wavenumbers corresponding to input delay positions\n\n \"\"\"\n return wns_per_mm * (delay - ref_d) + ref_wn\n\n\ndef get_standard_metadata(data_dir, meta_tag=\"_Properties.xml\"):\n \"\"\"\n Get metadata from and for leica single frame tiffs collected in standard mode.\n\n Parameters\n ----------\n data_dir : str\n Directory containing (meta) data files.\n meta_tag : str, default \"_Properties.xml\"\n Ending for metadata files. The pattern data_dir + \"*\" + meta_tag\n will get globbed for.\n\n Returns\n -------\n metadata : pd.DataFrame\n DataFrame containing metadata from each file found in data_dir.\n \"\"\"\n\n metadata = pd.DataFrame(sorted(glob.glob(data_dir + '*' + meta_tag)), columns=[\"filename\"])\n metadata[\"acq_name\"] = metadata.filename.apply(\n lambda x: re.split(r\"_Properties.xml\", x.split('/')[-1])[0]\n )\n\n metadata[META_COLS] = None\n metadata = metadata.apply(gogogo_dimension_data, axis=1)\n metadata['fov'] = metadata.apply(\n lambda x: int(re.split(r\"Pos(\\d+)\", x.filename)[1]) - 1, axis=1\n )\n return metadata\n\n\ndef ldm_meta_split(x):\n \"\"\"\n Split out relevant numbers for LDM metadata filenames.\n\n Parameters\n ----------\n x : filename\n Metadate file name.\n\n Returns\n -------\n columns : pd.Series\n Series containing the FOV, mode (srs or fluo), and image number from the LDM series.\n \"\"\"\n name = x.acq_name\n fov, mode, ldm_idx = re.split(r\"(\\d+)\", name)[1:4]\n fov = int(fov) - 1\n mode = mode.strip(\"_\")\n ldm_idx = int(ldm_idx)\n return pd.Series([fov, mode, ldm_idx], index=['fov', 'mode', 'ldm_idx'])\n\n\ndef get_ldm_metadata(data_dir, meta_tag=\"_Properties.xml\"):\n \"\"\"\n Get metadata from and for leica single frame tiffs collected in live data mode.\n Assumes LDM jobs are titled according to Pos{fov#}_{mode}.\n\n Parameters\n ----------\n data_dir : str\n Directory containing (meta) data files.\n meta_tag : str, default \"_Properties.xml\"\n Ending for metadata files. 
The pattern data_dir + \"*\" + meta_tag\n will get globbed for.\n\n Returns\n -------\n metadata : pd.DataFrame\n DataFrame containing metadata from each file found in data_dir.\n \"\"\"\n metadata = pd.DataFrame(sorted(glob.glob(data_dir + \"*\" + meta_tag)), columns=[\"filename\"])\n metadata[\"acq_name\"] = metadata.filename.str.split(\"/\").apply(\n lambda x: x[-1].replace(meta_tag, \"\")\n )\n metadata = metadata.join(metadata.apply(ldm_meta_split, axis=1, result_type='expand'))\n metadata = metadata.apply(gogogo_dimension_data, axis=1, result_type='expand')\n return metadata\n\n\ndef gogogo_dimension_data(entry):\n \"\"\"\n Parse data describing the length of each of the dimensions (TCZYX)\n in the file from a given row of dataframe.\n\n Parameters\n ----------\n entry : pd.DataFrame\n Row of a pandas dataframe\n\n Returns\n -------\n entry : pd.Series (?)\n Should be called by dataframe.apply so this will update\n columns of the calling dataframe.\n \"\"\"\n parsed = ET.parse(entry.filename)\n for x in parsed.iter(\"TimeStampList\"):\n d, t, ms = x.attrib.values()\n entry['timestamp'] = pd.to_datetime(d + \" \" + t) + pd.to_timedelta(int(ms), unit=\"ms\")\n for d in parsed.iter(\"DimensionDescription\"):\n a = d.attrib\n entry[f'{a[\"DimID\"].lower()}_origin'] = float(re.sub(\"[^0-9.]\", \"\", a[\"Origin\"]))\n if a[\"DimID\"] == \"T\":\n try:\n h, m, s = (float(x) for x in re.split(r\"(\\d+)h(\\d+)m([0-9.]+)\", a[\"Length\"])[1:-1])\n entry[f'{a[\"DimID\"].lower()}_length'] = s + 60 * (m + 60 * h)\n except:\n entry[f'{a[\"DimID\"].lower()}_length'] = None\n\n else:\n entry[f'{a[\"DimID\"].lower()}_length'] = float(re.sub(\"[^0-9.]\", \"\", a[\"Length\"]))\n entry[f'{a[\"DimID\"].lower()}_elements'] = int(re.sub(\"[^0-9.]\", \"\", a[\"NumberOfElements\"]))\n for d in parsed.iter(\"FilterSettingRecord\"):\n a = d.attrib\n if a[\"ClassName\"] == \"CXYZStage\" and \"DM6000 Stage Pos\" in a[\"Description\"]:\n entry[f\"stage_{a['Attribute'].strip('Pos').lower()}\"] = float(a[\"Variant\"])\n for f in parsed.iter(\"FrameCount\"):\n entry[\"channels\"] = int(f.text.split()[1].strip(\"()\"))\n return entry\n\n\ndef get_coords(meta_df, dims='STCZYX', others=None):\n \"\"\"\n Generate xarray coordinates (STCZYX) from leica metadata stored\n in a pandas DataFrame by get_standard_metadata.\n\n Parameters\n ----------\n meta_df : pandas.DataFrame\n Metadata dataframe to get coordinates from.\n dims : str or list of str, default \"STCZYX\"\n Dimension names to which coordinates are assigned.\n others : dict or None, default None\n Other coordinates for the dataset. 
Will be combined\n with coordinates retrieved from meta_df.\n\n Returns\n -------\n coords : dict\n Dictionary mapping dimension names to coordinates.\n \"\"\"\n coords = {}\n if 'S' in dims:\n coords['S'] = np.arange(meta_df.fov.nunique())\n for x in dims:\n if x != 'S':\n length = meta_df[x.lower() + \"_length\"].iloc[0]\n elements = meta_df[x.lower() + \"_elements\"].iloc[0]\n if ~np.isnan(length) and ~np.isnan(elements):\n coords[x] = np.linspace(0, length, elements)\n if others is not None:\n coords = {**coords, **others}\n return coords\n\n\ndef stczyx(x):\n \"\"\"\n Parse multiposition, time lapse, z stack filenames into dimension indices.\n \"\"\"\n l = re.split(r\"(\\d+)\", x.filename.split(\"/\")[-1])[1:-1:2]\n l.pop(1)\n s = pd.Series(l, index=list(\"STZC\")).astype(int)\n s[0] -= 1\n return s\n\n\ndef ldm_stczyx(x):\n \"\"\"\n Parse multi position, time lapse, z stack filenames from LDM acquisitions\n into dimension indices.\n \"\"\"\n l = re.split(r\"(\\d+)\", x.filename.split(\"/\")[-1])[1:-1:2]\n s = pd.Series(l, index=list(\"STZC\")).astype(int)\n s[0] -= 1\n return s\n\n\ndef ldm_stcrzyx(x):\n \"\"\"\n Parse multi position, time lapse, z stack, SRS filenames from LDM acquisitions\n into dimension indices.\n \"\"\"\n l = re.split(r\"(\\d+)\", x.filename.split('/')[-1])[1:-1:2]\n s = pd.Series(l, index=list(\"STRZC\")).astype(int)\n s[0] -= 1\n return s\n\n\ndef ldm_to_time(inds):\n \"\"\"\n Relabel LDM indices to time indices.\n \"\"\"\n mapper = pd.Series(dtype='uint16')\n for i, s in inds.groupby('S'):\n mapper = mapper.append(pd.Series(data=np.arange(s['T'].nunique()), index=s['T'].unique()))\n # return mapper\n inds['T'] = mapper[inds['T'].values].values\n\n\ndef load_leica_frames(df, idx_mapper, coords=None, chunkby_dims='CZ'):\n \"\"\"\n Lazily load single image leica tiffs into an xarray.DataArray.\n\n Parameters\n ----------\n df : pandas.DataFrame\n Data frame containing data file names in a column called \"filename\".\n idx_mapper : callable or pandas.DataFrame\n Means to map data files to the correct dimension index. If\n callable will be used by df.apply. If dataframe, will be joined\n to df directly.\n coords : dict or None, default None\n Coordinates for the dataarray.\n chunkby_dims : str, default \"CZ\"\n Dimensions along which to chunk the dask array. XY will automatically\n be chunked together.\n\n Returns\n -------\n x_data : xarry.DataArray\n Dask backed data array containing leica images. 
Will have STCZYX dims.\n \"\"\"\n if callable(idx_mapper):\n df = df.join(df.apply(idx_mapper, axis=1, result_type='expand'))\n elif isinstance(idx_mapper, pd.DataFrame):\n df = df.join(idx_mapper)\n else:\n raise TypeError(\n \"Must provide a callable to map names to indices or a pandas dataframe containing the indices\"\n )\n\n # ordered_cols = [df.columns[0]]+list('STCZ')\n # df = df[ordered_cols]\n group_dims = [x for x in df.columns[1:] if x not in chunkby_dims]\n\n # if you end early there might not be the same number of frames in each pos\n # cutoff at the worst case scenario so things can be rectangular\n cutoffs = df.groupby('S').nunique().min().drop('filename')\n df = df.loc[(df.loc[:, ~df.columns.isin(['S', 'filename'])] < cutoffs).all('columns')]\n chunks = np.zeros(df[group_dims].nunique().values, dtype='object')\n\n for idx, val in df.groupby(group_dims):\n darr = da.from_zarr(tiff.imread(val.filename.tolist(), aszarr=True)).rechunk(-1)\n # shape = tuple(cutoffs[x] for x in chunkby_dims) + darr.shape[-2:]\n shape = tuple(x for i, x in cutoffs.iteritems() if i in chunkby_dims) + darr.shape[-2:]\n # print(idx, shape)\n darr = darr.reshape(shape)\n chunks[idx] = darr\n\n chunks = np.expand_dims(chunks, tuple(range(-1, -len(chunkby_dims) - 3, -1)))\n\n d_data = da.block(chunks.tolist())\n x_data = xr.DataArray(\n d_data,\n dims=group_dims + [x for x in df.columns if x in chunkby_dims] + ['Y', 'X'],\n )\n if coords is not None:\n x_data = x_data.assign_coords(coords)\n x_data = x_data.transpose('S', 'T', 'C', ..., 'Z', 'Y', 'X')\n return x_data\n\n\ndef load_srs_timelapse_dataset(data_dir):\n \"\"\"\n Read files from data_dir into a dask backed xarray Dataset. Assumes that\n files are named according to `Pos{S}_{mode}{ldm_idx}_t{R}_z{Z}_ch{C}.tif`\n\n Parameters\n ----------\n data_dir : str\n Path to directory containing image and metadata files.\n\n Returns\n -------\n data : xarray.Dataset\n Dataset containing fluorescnence and srs data.\n \"\"\"\n # glob the files\n srs_files = pd.DataFrame({\"filename\": sorted(glob.glob(data_dir + \"*srs*z*.tif\"))})\n fluo_files = pd.DataFrame({\"filename\": sorted(glob.glob(data_dir + \"*fluo*z*.tif\"))})\n\n # parse filenames -> indices\n srs_inds = srs_files.apply(ldm_stcrzyx, axis=1, result_type='expand')\n ldm_to_time(srs_inds)\n fluo_inds = fluo_files.apply(ldm_stczyx, axis=1, result_type='expand')\n ldm_to_time(fluo_inds)\n\n # parse metadata -> coords\n metadata = get_ldm_metadata(data_dir + \"/Pos*\")\n f_coords = get_coords(\n metadata.loc[metadata['mode'] == 'fluo'],\n 'SZYX', # {'C': ['GFP', 'mCherry', 'BF']}\n )\n s_coords = get_coords(metadata.loc[metadata['mode'] == 'srs'], 'SZYX', {'C': ['BF', 'SRS']})\n\n # load the images\n srs_data = load_leica_frames(srs_files, srs_inds, coords=s_coords)\n fluo_data = load_leica_frames(fluo_files, fluo_inds, coords=f_coords)\n\n # combine into dataset and return\n return xr.Dataset({'srs': srs_data, 'fluo': fluo_data}) # .astype(srs_data.dtype)\n\n\ndef viridis_cycler(N):\n return cycler(color=plt.cm.viridis(np.linspace(0.1, 0.9, N)))\n",
"import glob\nimport json\nimport os\nimport re\n\nimport dask.array as da\nimport numpy as np\nimport pandas as pd\nimport tifffile\nimport xarray as xr\n\n__all__ = [\n \"micromanager_metadata_to_coords\",\n \"load_image_sequence\",\n \"load_mm_frames\",\n]\n\n\ndef micromanager_metadata_to_coords(summary, n_times=None, z_centered=True):\n \"\"\"\n Given the 'Summary' dict from micromanager, parse the information\n into coords for a corresponding DataArray.\n\n Parameters\n ----------\n summary : dict\n Micromanager metadata dictionary.\n n_times : int\n Number of actual time points in dataset. If None, read the 'Frames'\n attribute from metadata. This will cause problems if the experiment\n stopped short of the desired number of timepoints.\n z_centered : bool, default True\n Rescale the z coordinate to be relative to the center slice.\n Will only work with an odd number of slices.\n\n Returns\n -------\n coords : dict\n\n \"\"\"\n # load axis order but extract the positions from the list\n # also reverse it because that's what we need ¯\\_(ツ)_/¯\n # call `list` on it so that we aren't modifying the original metadata\n ax_order = list(summary[\"AxisOrder\"])\n ax_order.remove(\"position\")\n ax_order = ax_order[::-1]\n\n coords = {}\n\n channel_names = summary[\"ChNames\"]\n coords['C'] = channel_names\n\n z_step = summary[\"z-step_um\"]\n n_slices = summary[\"Slices\"]\n z = np.linspace(0, n_slices * z_step, n_slices)\n\n if z_centered:\n if len(z) % 2 == 0:\n raise ValueError(\n f\"There are an even number of z points ({len(z)}) so z_centered cannot be True\"\n )\n z -= z[int(len(z) / 2 - 0.5)]\n\n coords['Z'] = z\n\n if n_times is None:\n n_times = summary[\"Frames\"]\n\n # Nominal timepoints in ms\n times = np.linspace(0, summary[\"Interval_ms\"] * n_times, n_times)\n coords['T'] = times\n\n return coords\n\n\ndef load_image_sequence(filenames, z_centered=True, pattern=None, coords=None):\n \"\"\"\n Load an image sequence from micromanager .ome.tif files.\n Loads as zarr into dask into xarray\n\n Parameters\n ----------\n filenames : str\n The path/regex that identifies the file names.\n z_centered : bool, default: True\n Whether to offset\n pattern : str or None, default: None\n Regex to match the sequence. If None\n this will default to a Regex that matches: Pos[position number]\n coords : None or dict\n Dictionary containing coordinates for the final DataArray. Must have\n 'S', 'T', 'C', 'Z', 'Y, and 'X' as keys. 
If None\n attempt to read the relevant information from micromanager metadata.\n\n Returns\n -------\n xarray.DataArray\n The tiffs loaded as an Xarray with coords filled in.\n \"\"\"\n if pattern is None:\n pattern = r\"\"\"(?ix)\n _?(?:(Pos)(\\d{1,4}))\n \"\"\"\n\n # load the first file to grab the metadata\n t = tifffile.TiffSequence(filenames, pattern=pattern)\n arr = da.from_zarr(t.aszarr())\n n_times = arr.shape[1]\n\n if coords is None:\n meta = tifffile.TiffFile(t.files[0]).micromanager_metadata\n coords = micromanager_metadata_to_coords(\n meta['Summary'], n_times=n_times, z_centered=z_centered\n )\n\n arr = xr.DataArray(\n arr,\n dims=(\"S\", \"T\", \"C\", \"Z\", \"Y\", \"X\"),\n coords=coords,\n attrs={\"Summary\": meta[\"Summary\"], \"Comment\": meta[\"Comments\"][\"Summary\"]},\n )\n return arr\n\n\ndef load_mm_frames(\n data_dir: str,\n position_folder_regex: str = \"Pos[0-9]+\",\n filename_regex=r\".+\\.tif{1,2}\",\n chunkby_dims=['C', 'Z'],\n z_centered=True,\n coords=None,\n):\n \"\"\"\n Lazily read micromanager generated single frame tiff files.\n\n Parameters\n ----------\n data_dir : str or None\n Path to directory containing Pos{i} subdirectories\n position_folder_regex : str, default: \"Pos[0-9]+\"\n A regular expression to match the folder names specifying positions.\n E.g. to match Pos0, Pos1, and OtherName0, OtherName1 pass\n \"(Pos|OtherName)[0-9]+\"\n filename_regex : str or None\n Regular expression to identify which tiff files to read. If None then\n all .tif[f] files will be read.\n chunkby_dims : list of str default ['C','Z']\n Dimensions to chunk resulting dask array by. X and Y dimensions\n will always be in a single chunk. Can contain any of S, T, C, or Z.\n z_centered : bool default true\n Whether or not to use Z coordinates relative to the center slice.\n coords : None or dict\n Dictionary containing coordinates for the final DataArray. Must have\n 'S', 'T', 'C', 'Z', 'Y, and 'X' as keys. 
If None\n attempt to read the relevant information from micromanager metadata.\n\n Returns\n -------\n arr : xr.DataArray\n Unevaluated dask array containing all the files from the directory.\n Shape will vary but dimensions will always be (S, T, C, Z, Y, X)\n \"\"\"\n\n pos_reg = re.compile(f\"{data_dir.rstrip('/')}/\" + position_folder_regex)\n fname_reg = re.compile(filename_regex)\n\n position_dirs = sorted(\n f.path for f in os.scandir(data_dir) if f.is_dir() and pos_reg.match(f.path)\n )\n\n def dir_to_df(dir):\n fseries = pd.Series(sorted(filter(fname_reg.match, glob.glob(dir.rstrip(\"/\") + \"/*\"))))\n df = pd.DataFrame({'filename': fseries})\n df[['C', 'S', 'T', 'Z']] = df.apply(\n lambda x: re.split(r'img_channel(\\d+)_position(\\d+)_time(\\d+)_z(\\d+).tif', x.filename)[\n 1:-1\n ],\n axis=1,\n result_type='expand',\n )\n return df\n\n if len(position_dirs) > 0:\n for i, pos in enumerate(position_dirs):\n fseries = pd.Series(sorted(filter(fname_reg.match, glob.glob(pos + \"/*\"))))\n df = pd.DataFrame({'filename': fseries})\n df[['C', 'S', 'T', 'Z']] = df.apply(\n lambda x: re.split(\n r'img_channel(\\d+)_position(\\d+)_time(\\d+)_z(\\d+).tif', x.filename\n )[1:-1],\n axis=1,\n result_type='expand',\n )\n if i == 0:\n all_files = df\n else:\n all_files = all_files.append(df)\n\n else:\n all_files = dir_to_df(data_dir)\n\n if len(df) == 0:\n raise ValueError(\"No files found\")\n\n all_files[['C', 'T', 'S', 'Z']] = all_files[['C', 'T', 'S', 'Z']].astype(int)\n\n # if you end early there might not be the same number of frames in each pos\n # cutoff at the worst case scenario so things can be rectangular\n cutoffs = all_files.groupby('S').nunique().min().drop('filename')\n\n use_files = all_files.loc[\n all_files.apply(lambda x: (x[['C', 'T', 'Z']] < cutoffs).all(), axis=1)\n ]\n\n group_dims = [x for x in ['S', 'T', 'C', 'Z'] if x not in chunkby_dims]\n\n chunks = np.zeros(use_files[group_dims].nunique().values, dtype='object')\n\n for idx, val in use_files.groupby(group_dims):\n darr = da.from_zarr(tifffile.imread(val.filename.tolist(), aszarr=True)).rechunk(-1)\n shape = tuple(cutoffs[x] for x in chunkby_dims) + darr.shape[-2:]\n darr = darr.reshape(shape)\n chunks[idx] = darr\n\n chunks = np.expand_dims(chunks, tuple(range(-1, -len(chunkby_dims) - 3, -1)))\n\n da.block(chunks.tolist())\n\n x_data = xr.DataArray(da.block(chunks.tolist()), dims=group_dims + chunkby_dims + ['Y', 'X'])\n\n if coords is None:\n with open(position_dirs[0] + \"/metadata.txt\") as f:\n meta = json.load(f)\n\n coords = micromanager_metadata_to_coords(\n meta['Summary'], n_times=x_data['T'].values.shape[0], z_centered=z_centered\n )\n\n try:\n x_data = x_data.assign_coords(coords)\n except ValueError as e:\n # can happen if you ignore one of the channels - then the metadata\n # won't match up with actual shape of the array\n print(f'Unable to assign coords due to this error:\\n{e}')\n\n return x_data.transpose('S', 'T', 'C', 'Z', 'Y', 'X')\n"
] |
[
[
"numpy.isnan",
"pandas.to_datetime",
"pandas.Series",
"numpy.linspace"
],
[
"numpy.linspace",
"pandas.DataFrame"
]
] |
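A hedged usage sketch for the micromanager loader above; the data directory is hypothetical and the import path follows the file location:

from microutil.loading import load_mm_frames

arr = load_mm_frames('/data/experiment/', chunkby_dims=['C', 'Z'])
print(arr.dims)                            # ('S', 'T', 'C', 'Z', 'Y', 'X')
frame = arr.isel(S=0, T=0).compute()       # realize one position/timepoint
center = arr.sel(Z=0.0, method='nearest')  # center slice when z_centered=True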
fpthink/V2B
|
[
"87561d5cd00ebf31326e8364167a787681ded367"
] |
[
"nuscenes-devkit-master/python-sdk/nuscenes/utils/kitti.py"
] |
[
"# nuScenes dev-kit.\n# Code written by Alex Lang and Holger Caesar, 2019.\n\nimport os\nfrom os import path as osp\nfrom typing import List, Tuple, Any, Union\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib.axes import Axes\nfrom pyquaternion import Quaternion\nimport sys\nsys.path.append(\"..\")\nfrom nuscenes import NuScenesExplorer\nfrom utils.data_classes import Box, LidarPointCloud\nfrom utils.geometry_utils import box_in_image, BoxVisibility, view_points\n\n\nclass KittiDB:\n \"\"\"\n KITTI database that abstracts away interactions with KITTI files and handles all required transformations.\n This file exists as a utility class for `export_kitti.py`. It may not support more general use cases.\n\n NOTES about KITTI:\n - Setup is defined here: http://www.cvlibs.net/datasets/kitti/setup.php\n - Box annotations live in CamRect frame\n - KITTI lidar frame is 90 degrees rotated from nuScenes lidar frame\n - To export to KITTI format from nuScenes lidar requires:\n - Rotate to KITTI lidar\n - Transform lidar to camera\n - Transform camera to camera rectified\n - To transform from box annotations to nuScenes lidar requires:\n - Inverse of camera rectification\n - Inverse transform of lidar to camera\n - Rotate to nuScenes lidar\n - KITTI 2D boxes cannot always be obtained from the 3D box. The size of a 3D box was fixed for a tracklet\n so it can be large for walking pedestrians that stop moving. Those loose 2D boxes were then corrected\n using Mechanical Turk.\n\n NOTES about KittiDB:\n - The samples tokens are expected to have the format of SPLIT_INT where split is a data folder\n {train, val, test} while INT is the integer label of the sample within that data folder.\n - The KITTI dataset should be downloaded from http://www.cvlibs.net/datasets/kitti/.\n - We use the MV3D splits, not the official KITTI splits (which doesn't have any val).\n \"\"\"\n\n def __init__(self,\n root: str = '/data/sets/kitti',\n splits: Tuple[str, ...] = ('train',)):\n \"\"\"\n :param root: Base folder for all KITTI data.\n :param splits: Which splits to load.\n \"\"\"\n self.root = root\n self.tables = ('calib', 'image_2', 'label_2', 'velodyne')\n self._kitti_fileext = {'calib': 'txt', 'image_2': 'png', 'label_2': 'txt', 'velodyne': 'bin'}\n\n # Grab all the expected tokens.\n self._kitti_tokens = {}\n for split in splits:\n split_dir = osp.join(self.root, split, 'image_2')\n _tokens = os.listdir(split_dir)\n _tokens = [t.replace('.png', '') for t in _tokens]\n _tokens.sort()\n self._kitti_tokens[split] = _tokens\n\n # Creating the tokens.\n self.tokens = []\n for split, tokens in self._kitti_tokens.items():\n self.tokens += ['{}_{}'.format(split, token) for token in tokens]\n\n # KITTI LIDAR has the x-axis pointing forward, but our LIDAR points to the right. So we need to apply a\n # 90 degree rotation around to yaw (z-axis) in order to align.\n # The quaternions will be used a lot of time. We store them as instance variables so that we don't have\n # to create a new one every single time.\n self.kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi / 2)\n self.kitti_to_nu_lidar_inv = Quaternion(axis=(0, 0, 1), angle=np.pi / 2).inverse\n\n @staticmethod\n def standardize_sample_token(token: str) -> Tuple[str, str]:\n \"\"\"\n Convert sample token into standard KITTI folder and local filename format.\n :param token: KittiDB unique id.\n :return: folder (ex. train, val, test), filename (ex. 
000001)\n \"\"\"\n splits = token.split('_')\n folder = '_'.join(splits[:-1])\n filename = splits[-1]\n return folder, filename\n\n @staticmethod\n def parse_label_line(label_line) -> dict:\n \"\"\"\n Parses single line from label file into a dict. Boxes are in camera frame. See KITTI devkit for details and\n http://www.cvlibs.net/datasets/kitti/setup.php for visualizations of the setup.\n :param label_line: Single line from KittiDB label file.\n :return: Dictionary with all the line details.\n \"\"\"\n\n parts = label_line.split(' ')\n output = {\n 'name': parts[0].strip(),\n 'xyz_camera': (float(parts[11]), float(parts[12]), float(parts[13])),\n 'wlh': (float(parts[9]), float(parts[10]), float(parts[8])),\n 'yaw_camera': float(parts[14]),\n 'bbox_camera': (float(parts[4]), float(parts[5]), float(parts[6]), float(parts[7])),\n 'truncation': float(parts[1]),\n 'occlusion': float(parts[2]),\n 'alpha': float(parts[3])\n }\n\n # Add score if specified\n if len(parts) > 15:\n output['score'] = float(parts[15])\n else:\n output['score'] = np.nan\n\n return output\n\n @staticmethod\n def box_nuscenes_to_kitti(box: Box, velo_to_cam_rot: Quaternion,\n velo_to_cam_trans: np.ndarray,\n r0_rect: Quaternion,\n kitti_to_nu_lidar_inv: Quaternion = Quaternion(axis=(0, 0, 1), angle=np.pi / 2).inverse) \\\n -> Box:\n \"\"\"\n Transform from nuScenes lidar frame to KITTI reference frame.\n :param box: Instance in nuScenes lidar frame.\n :param velo_to_cam_rot: Quaternion to rotate from lidar to camera frame.\n :param velo_to_cam_trans: <np.float: 3>. Translate from lidar to camera frame.\n :param r0_rect: Quaternion to rectify camera frame.\n :param kitti_to_nu_lidar_inv: Quaternion to rotate nuScenes to KITTI LIDAR.\n :return: Box instance in KITTI reference frame.\n \"\"\"\n # Copy box to avoid side-effects.\n box = box.copy()\n\n # Rotate to KITTI lidar.\n box.rotate(kitti_to_nu_lidar_inv)\n\n # Transform to KITTI camera.\n box.rotate(velo_to_cam_rot)\n box.translate(velo_to_cam_trans)\n\n # Rotate to KITTI rectified camera.\n box.rotate(r0_rect)\n\n # KITTI defines the box center as the bottom center of the object.\n # We use the true center, so we need to adjust half height in y direction.\n box.translate(np.array([0, box.wlh[2] / 2, 0]))\n\n return box\n\n @staticmethod\n def project_kitti_box_to_image(box: Box, p_left: np.ndarray, imsize: Tuple[int, int]) \\\n -> Union[None, Tuple[int, int, int, int]]:\n \"\"\"\n Projects 3D box into KITTI image FOV.\n :param box: 3D box in KITTI reference frame.\n :param p_left: <np.float: 3, 4>. Projection matrix.\n :param imsize: (width, height). Image size.\n :return: (xmin, ymin, xmax, ymax). 
Bounding box in image plane or None if box is not in the image.\n \"\"\"\n\n # Create a new box.\n box = box.copy()\n\n # KITTI defines the box center as the bottom center of the object.\n # We use the true center, so we need to adjust half height in negative y direction.\n box.translate(np.array([0, -box.wlh[2] / 2, 0]))\n\n # Check that some corners are inside the image.\n corners = np.array([corner for corner in box.corners().T if corner[2] > 0]).T\n if len(corners) == 0:\n return None\n\n # Project corners that are in front of the camera to 2d to get bbox in pixel coords.\n imcorners = view_points(corners, p_left, normalize=True)[:2]\n bbox = (np.min(imcorners[0]), np.min(imcorners[1]), np.max(imcorners[0]), np.max(imcorners[1]))\n\n # Crop bbox to prevent it extending outside image.\n bbox_crop = tuple(max(0, b) for b in bbox)\n bbox_crop = (min(imsize[0], bbox_crop[0]),\n min(imsize[0], bbox_crop[1]),\n min(imsize[0], bbox_crop[2]),\n min(imsize[1], bbox_crop[3]))\n\n # Detect if a cropped box is empty.\n if bbox_crop[0] >= bbox_crop[2] or bbox_crop[1] >= bbox_crop[3]:\n return None\n\n return bbox_crop\n\n @staticmethod\n def get_filepath(token: str, table: str, root: str='/data/sets/kitti') -> str:\n \"\"\"\n For a token and table, get the filepath to the associated data.\n :param token: KittiDB unique id.\n :param table: Type of table, for example image or velodyne.\n :param root: Base folder for all KITTI data.\n :return: Full get_filepath to desired data.\n \"\"\"\n folder, filename = KittiDB.standardize_sample_token(token)\n kitti_fileext = {'calib': 'txt', 'image_2': 'png', 'label_2': 'txt', 'velodyne': 'bin'}\n\n ending = kitti_fileext[table]\n\n if token.startswith('test_') and table == 'label_2':\n filepath = None\n print('No cheating! The test set has no labels.')\n else:\n filepath = osp.join(root, folder, table, '{}.{}'.format(filename, ending))\n\n return filepath\n\n @staticmethod\n def get_transforms(token: str, root: str='/data/sets/kitti') -> dict:\n \"\"\"\n Returns transforms for the input token.\n :param token: KittiDB unique id.\n :param root: Base folder for all KITTI data.\n :return: {\n 'velo_to_cam': {'R': <np.float: 3, 3>, 'T': <np.float: 3, 1>}. Lidar to camera transformation matrix.\n 'r0_rect': <np.float: 3, 3>. Rectification matrix.\n 'p_left': <np.float: 3, 4>. Projection matrix.\n 'p_combined': <np.float: 4, 4>. Combined rectification and projection matrix.\n }. Returns the transformation matrices. 
For details refer to the KITTI devkit.\n \"\"\"\n calib_filename = KittiDB.get_filepath(token, 'calib', root=root)\n\n lines = [line.rstrip() for line in open(calib_filename)]\n velo_to_cam = np.array(lines[5].strip().split(' ')[1:], dtype=np.float32)\n velo_to_cam.resize((3, 4))\n\n r0_rect = np.array(lines[4].strip().split(' ')[1:], dtype=np.float32)\n r0_rect.resize((3, 3))\n p_left = np.array(lines[2].strip().split(' ')[1:], dtype=np.float32)\n p_left.resize((3, 4))\n\n # Merge rectification and projection into one matrix.\n p_combined = np.eye(4)\n p_combined[:3, :3] = r0_rect\n p_combined = np.dot(p_left, p_combined)\n return {\n 'velo_to_cam': {\n 'R': velo_to_cam[:, :3],\n 'T': velo_to_cam[:, 3]\n },\n 'r0_rect': r0_rect,\n 'p_left': p_left,\n 'p_combined': p_combined,\n }\n\n @staticmethod\n def get_pointcloud(token: str, root: str = '/data/sets/kitti') -> LidarPointCloud:\n \"\"\"\n Load up the pointcloud for a sample.\n :param token: KittiDB unique id.\n :param root: Base folder for all KITTI data.\n :return: LidarPointCloud for the sample in the KITTI Lidar frame.\n \"\"\"\n pc_filename = KittiDB.get_filepath(token, 'velodyne', root=root)\n\n # The lidar PC is stored in the KITTI LIDAR coord system.\n pc = LidarPointCloud(np.fromfile(pc_filename, dtype=np.float32).reshape(-1, 4).T)\n\n return pc\n\n def get_boxes(self,\n token: str,\n filter_classes: List[str] = None,\n max_dist: float = None) -> List[Box]:\n \"\"\"\n Load up all the boxes associated with a sample.\n Boxes are in nuScenes lidar frame.\n :param token: KittiDB unique id.\n :param filter_classes: List of Kitti classes to use or None to use all.\n :param max_dist: Maximum distance in m to still draw a box.\n :return: Boxes in nuScenes lidar reference frame.\n \"\"\"\n # Get transforms for this sample\n transforms = self.get_transforms(token, root=self.root)\n\n boxes = []\n if token.startswith('test_'):\n # No boxes to return for the test set.\n return boxes\n\n with open(KittiDB.get_filepath(token, 'label_2', root=self.root), 'r') as f:\n for line in f:\n # Parse this line into box information.\n parsed_line = self.parse_label_line(line)\n\n if parsed_line['name'] in {'DontCare', 'Misc'}:\n continue\n\n center = parsed_line['xyz_camera']\n wlh = parsed_line['wlh']\n yaw_camera = parsed_line['yaw_camera']\n name = parsed_line['name']\n score = parsed_line['score']\n\n # Optional: Filter classes.\n if filter_classes is not None and name not in filter_classes:\n continue\n\n # The Box class coord system is oriented the same way as as KITTI LIDAR: x forward, y left, z up.\n # For orientation confer: http://www.cvlibs.net/datasets/kitti/setup.php.\n\n # 1: Create box in Box coordinate system with center at origin.\n # The second quaternion in yaw_box transforms the coordinate frame from the object frame\n # to KITTI camera frame. The equivalent cannot be naively done afterwards, as it's a rotation\n # around the local object coordinate frame, rather than the camera frame.\n quat_box = Quaternion(axis=(0, 1, 0), angle=yaw_camera) * Quaternion(axis=(1, 0, 0), angle=np.pi/2)\n box = Box([0.0, 0.0, 0.0], wlh, quat_box, name=name)\n\n # 2: Translate: KITTI defines the box center as the bottom center of the vehicle. We use true center,\n # so we need to add half height in negative y direction, (since y points downwards), to adjust. The\n # center is already given in camera coord system.\n box.translate(center + np.array([0, -wlh[2] / 2, 0]))\n\n # 3: Transform to KITTI LIDAR coord system. 
First transform from rectified camera to camera, then\n # camera to KITTI lidar.\n box.rotate(Quaternion(matrix=transforms['r0_rect']).inverse)\n box.translate(-transforms['velo_to_cam']['T'])\n box.rotate(Quaternion(matrix=transforms['velo_to_cam']['R']).inverse)\n\n # 4: Transform to nuScenes LIDAR coord system.\n box.rotate(self.kitti_to_nu_lidar)\n\n # Set score or NaN.\n box.score = score\n\n # Set dummy velocity.\n box.velocity = np.array((0.0, 0.0, 0.0))\n\n # Optional: Filter by max_dist\n if max_dist is not None:\n dist = np.sqrt(np.sum(box.center[:2] ** 2))\n if dist > max_dist:\n continue\n\n boxes.append(box)\n\n return boxes\n\n def get_boxes_2d(self,\n token: str,\n filter_classes: List[str] = None) -> Tuple[\n List[Tuple[float, float, float, float]],\n List[str]\n ]:\n \"\"\"\n Get the 2d boxes associated with a sample.\n :return: A list of boxes in KITTI format (xmin, ymin, xmax, ymax) and a list of the class names.\n \"\"\"\n boxes = []\n names = []\n with open(KittiDB.get_filepath(token, 'label_2', root=self.root), 'r') as f:\n for line in f:\n # Parse this line into box information.\n parsed_line = self.parse_label_line(line)\n\n if parsed_line['name'] in {'DontCare', 'Misc'}:\n continue\n\n bbox_2d = parsed_line['bbox_camera']\n name = parsed_line['name']\n\n # Optional: Filter classes.\n if filter_classes is not None and name not in filter_classes:\n continue\n\n boxes.append(bbox_2d)\n names.append(name)\n return boxes, names\n\n\n @staticmethod\n def box_to_string(frame:str,\n track_id,\n name: str,\n box: Box,\n bbox_2d: Tuple[float, float, float, float] = (-1.0, -1.0, -1.0, -1.0),\n truncation: float = -1.0,\n occlusion: int = -1,\n alpha: float = -10.0,\n is_key_frame: bool =False,\n num_lidar_pts: bool =0) -> str:\n \"\"\"\n Convert box in KITTI image frame to official label string fromat.\n :param name: KITTI name of the box.\n :param box: Box class in KITTI image frame.\n :param bbox_2d: Optional, 2D bounding box obtained by projected Box into image (xmin, ymin, xmax, ymax).\n Otherwise set to KITTI default.\n :param truncation: Optional truncation, otherwise set to KITTI default.\n :param occlusion: Optional occlusion, otherwise set to KITTI default.\n :param alpha: Optional alpha, otherwise set to KITTI default.\n :return: KITTI string representation of box.\n \"\"\"\n # Convert quaternion to yaw angle.\n v = np.dot(box.rotation_matrix, np.array([1, 0, 0]))\n yaw = -np.arctan2(v[2], v[0])\n\n # Prepare output.\n frame= '{:d} '.format(frame)\n track_id ='{:d} '.format(track_id)\n name += ' '\n trunc = '{:d} '.format(int(truncation))\n occ = '{:d} '.format(occlusion)\n a = '{:.6f} '.format(alpha)\n bb = '{:.6f} {:.6f} {:.6f} {:.6f} '.format(bbox_2d[0], bbox_2d[1], bbox_2d[2], bbox_2d[3])\n hwl = '{:.6f} {:.6f} {:.6f} '.format(box.wlh[2], box.wlh[0], box.wlh[1]) # height, width, length.\n xyz = '{:.6f} {:.6f} {:.6f} '.format(box.center[0], box.center[1], box.center[2]) # x, y, z.\n y = '{:.6f}'.format(yaw) # Yaw angle.\n s = ' {:.6f}'.format(box.score) # Classification score.\n num_lidar_pts=' {:d}'.format(num_lidar_pts)\n output = frame + track_id + name + trunc + occ + a + bb + hwl + xyz + y\n if ~np.isnan(box.score):\n output += s\n output +=num_lidar_pts\n if is_key_frame:\n output += ' 1'\n else:\n output += ' 0'\n return output\n\n def project_pts_to_image(self, pointcloud: LidarPointCloud, token: str) -> np.ndarray:\n \"\"\"\n Project lidar points into image.\n :param pointcloud: The LidarPointCloud in nuScenes lidar frame.\n :param token: Unique KITTI 
token.\n :return: <np.float: N, 3.> X, Y are points in image pixel coordinates. Z is depth in image.\n \"\"\"\n\n # Copy and convert pointcloud.\n pc_image = LidarPointCloud(points=pointcloud.points.copy())\n pc_image.rotate(self.kitti_to_nu_lidar_inv) # Rotate to KITTI lidar.\n\n # Transform pointcloud to camera frame.\n transforms = self.get_transforms(token, root=self.root)\n pc_image.rotate(transforms['velo_to_cam']['R'])\n pc_image.translate(transforms['velo_to_cam']['T'])\n\n # Project to image.\n depth = pc_image.points[2, :]\n points_fov = view_points(pc_image.points[:3, :], transforms['p_combined'], normalize=True)\n points_fov[2, :] = depth\n\n return points_fov\n\n def render_sample_data(self,\n token: str,\n sensor_modality: str = 'lidar',\n with_anns: bool = True,\n axes_limit: float = 30,\n ax: Axes = None,\n view_3d: np.ndarray = np.eye(4),\n color_func: Any = None,\n augment_previous: bool = False,\n box_linewidth: int = 2,\n filter_classes: List[str] = None,\n max_dist: float = None,\n out_path: str = None,\n render_2d: bool = False) -> None:\n \"\"\"\n Render sample data onto axis. Visualizes lidar in nuScenes lidar frame and camera in camera frame.\n :param token: KITTI token.\n :param sensor_modality: The modality to visualize, e.g. lidar or camera.\n :param with_anns: Whether to draw annotations.\n :param axes_limit: Axes limit for lidar data (measured in meters).\n :param ax: Axes onto which to render.\n :param view_3d: 4x4 view matrix for 3d views.\n :param color_func: Optional function that defines the render color given the class name.\n :param augment_previous: Whether to augment an existing plot (does not redraw pointcloud/image).\n :param box_linewidth: Width of the box lines.\n :param filter_classes: Optionally filter the classes to render.\n :param max_dist: Maximum distance in m to still draw a box.\n :param out_path: Optional path to save the rendered figure to disk.\n :param render_2d: Whether to render 2d boxes (only works for camera data).\n \"\"\"\n # Default settings.\n if color_func is None:\n color_func = NuScenesExplorer.get_color\n\n boxes = self.get_boxes(token, filter_classes=filter_classes, max_dist=max_dist) # In nuScenes lidar frame.\n\n if sensor_modality == 'lidar':\n # Load pointcloud.\n pc = self.get_pointcloud(token, self.root) # In KITTI lidar frame.\n pc.rotate(self.kitti_to_nu_lidar.rotation_matrix) # In nuScenes lidar frame.\n # Alternative options:\n # depth = pc.points[1, :]\n # height = pc.points[2, :]\n intensity = pc.points[3, :]\n\n # Project points to view.\n points = view_points(pc.points[:3, :], view_3d, normalize=False)\n coloring = intensity\n\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(9, 9))\n\n if not augment_previous:\n ax.scatter(points[0, :], points[1, :], c=coloring, s=1)\n ax.set_xlim(-axes_limit, axes_limit)\n ax.set_ylim(-axes_limit, axes_limit)\n\n if with_anns:\n for box in boxes:\n color = np.array(color_func(box.name)) / 255\n box.render(ax, view=view_3d, colors=(color, color, 'k'), linewidth=box_linewidth)\n\n elif sensor_modality == 'camera':\n im_path = KittiDB.get_filepath(token, 'image_2', root=self.root)\n im = Image.open(im_path)\n\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(9, 16))\n\n if not augment_previous:\n ax.imshow(im)\n ax.set_xlim(0, im.size[0])\n ax.set_ylim(im.size[1], 0)\n\n if with_anns:\n if render_2d:\n # Use KITTI's 2d boxes.\n boxes_2d, names = self.get_boxes_2d(token, filter_classes=filter_classes)\n for box, name in zip(boxes_2d, names):\n color = 
np.array(color_func(name)) / 255\n ax.plot([box[0], box[0]], [box[1], box[3]], color=color, linewidth=box_linewidth)\n ax.plot([box[2], box[2]], [box[1], box[3]], color=color, linewidth=box_linewidth)\n ax.plot([box[0], box[2]], [box[1], box[1]], color=color, linewidth=box_linewidth)\n ax.plot([box[0], box[2]], [box[3], box[3]], color=color, linewidth=box_linewidth)\n else:\n # Project 3d boxes to 2d.\n transforms = self.get_transforms(token, self.root)\n for box in boxes:\n # Undo the transformations in get_boxes() to get back to the camera frame.\n box.rotate(self.kitti_to_nu_lidar_inv) # In KITTI lidar frame.\n box.rotate(Quaternion(matrix=transforms['velo_to_cam']['R']))\n box.translate(transforms['velo_to_cam']['T']) # In KITTI camera frame, un-rectified.\n box.rotate(Quaternion(matrix=transforms['r0_rect'])) # In KITTI camera frame, rectified.\n\n # Filter boxes outside the image (relevant when visualizing nuScenes data in KITTI format).\n if not box_in_image(box, transforms['p_left'][:3, :3], im.size, vis_level=BoxVisibility.ANY):\n continue\n\n # Render.\n color = np.array(color_func(box.name)) / 255\n box.render(ax, view=transforms['p_left'][:3, :3], normalize=True, colors=(color, color, 'k'),\n linewidth=box_linewidth)\n else:\n raise ValueError(\"Unrecognized modality {}.\".format(sensor_modality))\n\n ax.axis('off')\n ax.set_title(token)\n ax.set_aspect('equal')\n\n # Render to disk.\n plt.tight_layout()\n if out_path is not None:\n plt.savefig(out_path)\n"
] |
[
[
"numpy.dot",
"matplotlib.pyplot.tight_layout",
"numpy.fromfile",
"numpy.min",
"numpy.isnan",
"numpy.eye",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.arctan2",
"numpy.max",
"numpy.array",
"numpy.sum"
]
] |
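A hedged usage sketch for `KittiDB`; the root path and output file are hypothetical, and the split layout follows the MV3D-style folders described in the class notes:

from nuscenes.utils.kitti import KittiDB  # import path per the file location above

kitti = KittiDB(root='/data/sets/kitti', splits=('train',))
token = kitti.tokens[0]                   # tokens look like 'train_000000'
boxes = kitti.get_boxes(token, filter_classes=['Car'], max_dist=50.0)
pc = kitti.get_pointcloud(token)          # LidarPointCloud in KITTI lidar frame
kitti.render_sample_data(token, sensor_modality='lidar', out_path='render.png')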
Litchichu/Deniable-Steganography
|
[
"e39dc71b049b0051e6e16eee9a6eea55a526459b"
] |
[
"main.py"
] |
[
"import os\nimport pprint\nimport argparse\nimport torch\nimport pickle\nimport utils\nimport logging\nimport sys\n\nfrom options import *\nfrom model.hidden import Hidden\nfrom noise_layers.noiser import Noiser\nfrom noise_argparser import NoiseArgParser\n\nfrom train import train\nimport torchsnooper\n\n\ndef main():\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n parent_parser = argparse.ArgumentParser(description='Training of HiDDeN nets')\n subparsers = parent_parser.add_subparsers(dest='command', help='Sub-parser for commands')\n new_run_parser = subparsers.add_parser('new', help='starts a new run')\n new_run_parser.add_argument('--data-dir', '-d', required=True, type=str,\n help='The directory where the data is stored.')\n new_run_parser.add_argument('--batch-size', '-b', required=True, type=int, help='The batch size.')\n new_run_parser.add_argument('--epochs', '-e', default=300, type=int, help='Number of epochs to run the simulation.')\n new_run_parser.add_argument('--name', required=True, type=str, help='The name of the experiment.')\n\n new_run_parser.add_argument('--size', '-s', default=128, type=int,\n help='The size of the images (images are square so this is height and width).')\n new_run_parser.add_argument('--message', '-m', default=30, type=int, help='The length in bits of the watermark.')\n new_run_parser.add_argument('--continue-from-folder', '-c', default='', type=str,\n help='The folder from where to continue a previous run. Leave blank if you are starting a new experiment.')\n # parser.add_argument('--tensorboard', dest='tensorboard', action='store_true',\n # help='If specified, use adds a Tensorboard log. On by default')\n new_run_parser.add_argument('--tensorboard', action='store_true',\n help='Use to switch on Tensorboard logging.')\n new_run_parser.add_argument('--enable-fp16', dest='enable_fp16', action='store_true',\n help='Enable mixed-precision training.')\n\n new_run_parser.add_argument('--noise', nargs='*', action=NoiseArgParser,\n help=\"Noise layers configuration. Use quotes when specifying configuration, e.g. 'cropout((0.55, 0.6), (0.55, 0.6))'\")\n\n new_run_parser.set_defaults(tensorboard=False)\n new_run_parser.set_defaults(enable_fp16=False)\n\n continue_parser = subparsers.add_parser('continue', help='Continue a previous run')\n continue_parser.add_argument('--folder', '-f', required=True, type=str,\n help='Continue from the last checkpoint in this folder.')\n continue_parser.add_argument('--data-dir', '-d', required=False, type=str,\n help='The directory where the data is stored. Specify a value only if you want to override the previous value.')\n continue_parser.add_argument('--epochs', '-e', required=False, type=int,\n help='Number of epochs to run the simulation. 
Specify a value only if you want to override the previous value.')\n # continue_parser.add_argument('--tensorboard', action='store_true',\n # help='Override the previous setting regarding tensorboard logging.')\n\n args = parent_parser.parse_args()\n checkpoint = None\n loaded_checkpoint_file_name = None\n\n if args.command == 'continue':\n this_run_folder = args.folder\n options_file = os.path.join(this_run_folder, 'options-and-config.pickle')\n train_options, hidden_config, noise_config = utils.load_options(options_file)\n checkpoint, loaded_checkpoint_file_name = utils.load_last_checkpoint(os.path.join(this_run_folder, 'checkpoints'))\n train_options.start_epoch = checkpoint['epoch'] + 1\n if args.data_dir is not None:\n train_options.train_folder = os.path.join(args.data_dir, 'train')\n train_options.validation_folder = os.path.join(args.data_dir, 'val')\n if args.epochs is not None:\n if train_options.start_epoch < args.epochs:\n train_options.number_of_epochs = args.epochs\n else:\n print(f'Command-line specifies of number of epochs = {args.epochs}, but folder={args.folder} '\n f'already contains checkpoint for epoch = {train_options.start_epoch}.')\n exit(1)\n\n else:\n assert args.command == 'new'\n start_epoch = 1\n train_options = TrainingOptions(\n batch_size=args.batch_size,\n number_of_epochs=args.epochs,\n train_folder=os.path.join(args.data_dir, 'train'),\n validation_folder=os.path.join(args.data_dir, 'val'),\n runs_folder=os.path.join('.', 'runs'),\n start_epoch=start_epoch,\n experiment_name=args.name)\n\n noise_config = args.noise if args.noise is not None else []\n hidden_config = HiDDenConfiguration(H=args.size, W=args.size,\n message_length=args.message,\n encoder_blocks=4, encoder_channels=64,\n decoder_blocks=7, decoder_channels=64,\n use_discriminator=True,\n use_vgg=False,\n discriminator_blocks=3, discriminator_channels=64,\n decoder_loss=1,\n encoder_loss=0.7,\n adversarial_loss=0.001,\n # adversarial_loss=1e-3,\n enable_fp16=args.enable_fp16\n )\n\n this_run_folder = utils.create_folder_for_run(train_options.runs_folder, args.name)\n with open(os.path.join(this_run_folder, 'options-and-config.pickle'), 'wb+') as f:\n pickle.dump(train_options, f)\n pickle.dump(noise_config, f)\n pickle.dump(hidden_config, f)\n\n\n logging.basicConfig(level=logging.INFO,\n format='%(message)s',\n handlers=[\n logging.FileHandler(os.path.join(this_run_folder, f'{train_options.experiment_name}.log')),\n logging.StreamHandler(sys.stdout)\n ])\n if (args.command == 'new' and args.tensorboard) or \\\n (args.command == 'continue' and os.path.isdir(os.path.join(this_run_folder, 'tb-logs'))):\n logging.info('Tensorboard is enabled. 
Creating logger.')\n from tensorboard_logger import TensorBoardLogger\n tb_logger = TensorBoardLogger(os.path.join(this_run_folder, 'tb-logs'))\n else:\n tb_logger = None\n\n noiser = Noiser(noise_config, device)\n model = Hidden(hidden_config, device, noiser, tb_logger)\n\n if args.command == 'continue':\n # if we are continuing, we have to load the model params\n assert checkpoint is not None\n logging.info(f'Loading checkpoint from file {loaded_checkpoint_file_name}')\n utils.model_from_checkpoint(model, checkpoint)\n\n logging.info('HiDDeN model: {}\\n'.format(model.to_stirng()))\n logging.info('Model Configuration:\\n')\n logging.info(pprint.pformat(vars(hidden_config)))\n logging.info('\\nNoise configuration:\\n')\n logging.info(pprint.pformat(str(noise_config)))\n logging.info('\\nTraining train_options:\\n')\n logging.info(pprint.pformat(vars(train_options)))\n\n train(model, device, hidden_config, train_options, this_run_folder, tb_logger)\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.device",
"torch.cuda.is_available"
]
] |
rostoker/sagemaker-battlesnake-ai
|
[
"789036399cfdc034d8b79c00feec134834476a9b"
] |
[
"source/BattlesnakeGym/battlesnake_gym/snake.py"
] |
[
"# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# or in the \"license\" file accompanying this file. This file is distributed \n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either \n# express or implied. See the License for the specific language governing \n# permissions and limitations under the License.\n\nimport numpy as np\n\nfrom .utils import get_random_coordinates\n\nclass Snake:\n '''\n The Snake class mimics the behaviour of snakes in Battlesnake.io based on \n https://docs.battlesnake.com/rules\n \n Parameters:\n -----------\n starting_position: (int, int)\n The initial position of the snake\n\n map_size: (int, int)\n The size of the map\n\n '''\n\n UP = 0\n DOWN = 1\n LEFT = 2\n RIGHT = 3\n\n FULL_HEALTH = 100\n \n def __init__(self, starting_position, map_size):\n self.health = self.FULL_HEALTH\n self.locations = [] # Head of the snake is element n and the end is element 0\n self.locations.append(starting_position)\n self.facing_direction = None\n self._is_alive = True\n self.ate_food = False\n self.map_size = map_size\n self.colour = list(np.random.choice(range(256), size=3))\n self._number_of_initial_body_stacking = 2 # At the start of the game, snakes of size 3 are stacked.\n # self._number_of_initial_body_stacking == 2 to account for the initial body\n\n @classmethod\n def make_from_list(cls, locations, health, map_size):\n '''\n Class method to make a snake from a list of coordinates.\n Parameters:\n ----------\n locations: [(int, int)]\n An ordered list of coordinates of the body (y, x)\n health: int\n The health of the snake\n map_size: (int, int)\n '''\n tmp_locations = []\n for i, j in locations[::-1]: # head is element n\n tmp_locations.append(np.array([i, j])) \n\n if len(tmp_locations) == 0:\n head = None\n else:\n head = tmp_locations[-1]\n cls = Snake(head, map_size)\n cls.locations = tmp_locations\n cls.health = health\n if len(tmp_locations) == 0:\n cls.kill_snake()\n\n if len(cls.locations) > 1:\n # Calculate the facing direction with the head and the next location\n snake_head = cls.locations[-1]\n snake_2nd_body = cls.locations[-2]\n difference = (snake_head[0] - snake_2nd_body[0], snake_head[1] - snake_2nd_body[1])\n if difference[0] == -1 and difference[1] == 0:\n cls.facing_direction = Snake.UP\n elif difference[0] == 1 and difference[1] == 0:\n cls.facing_direction = Snake.DOWN\n elif difference[0] == 0 and difference[1] == -1:\n cls.facing_direction = Snake.LEFT\n elif difference[0] == 0 and difference[1] == 1:\n cls.facing_direction = Snake.RIGHT\n return cls\n \n def move(self, direction):\n '''\n Moves the snakes in the direction stated\n\n If the direction \n \n Parameters:\n -----------\n direction: int, options: [Snake.UP, Snake.DOWN, Snake.LEFT or Snake.RIGHT]\n Move the start towards the directions\n\n Returns:\n -------\n is_forbidden: Boolean\n Whether the move was a forbidden one: moving backward in its own body\n '''\n is_forbidden = False\n if not self._is_alive:\n return is_forbidden\n \n if self.facing_direction == None:\n self.facing_direction == direction\n\n if self.is_facing_opposite_of_direction(direction) and len(self.locations) > 0:\n direction = self.facing_direction\n is_forbidden = True\n\n head = self.get_head()\n new_head = 
self._translate_coordinate_in_direction(head, direction)\n\n # If the snake is within the first 3 turns of being alive, do no remove the end\n if self._number_of_initial_body_stacking > 0:\n self._number_of_initial_body_stacking -= 1\n # If the snake ate food, do not remove the end\n elif self.ate_food:\n self.ate_food = False\n else:\n self.locations = self.locations[1:] # remove the end\n self.locations.append(new_head)\n self.facing_direction = direction\n return is_forbidden\n \n def is_facing_opposite_of_direction(self, direction):\n '''\n Function to indicate if the indended direction is in the opposite of \n the direction in which is snake is travelling\n\n Parameters:\n -----------\n direction: int, options: [Snake.UP, Snake.DOWN, Snake.LEFT or Snake.RIGHT]\n Direction intended for the snake to travel\n '''\n if self.facing_direction == self.UP and direction == self.DOWN:\n return True\n if self.facing_direction == self.DOWN and direction == self.UP:\n return True\n if self.facing_direction == self.RIGHT and direction == self.LEFT:\n return True\n if self.facing_direction == self.LEFT and direction == self.RIGHT:\n return True\n return False\n\n def get_previous_snake_head(self):\n '''\n Returns the location of head in the previous time step\n \n Move 1 space in the opposite direction of self.facing direction\n '''\n head = self.get_head()\n previous_head = np.copy(head)\n \n if self.facing_direction == Snake.UP:\n previous_head[0] += 1\n elif self.facing_direction == Snake.DOWN:\n previous_head[0] -= 1\n elif self.facing_direction == Snake.RIGHT:\n previous_head[1] -= 1\n elif self.facing_direction == Snake.LEFT:\n previous_head[1] += 1\n return previous_head\n\n def get_head(self):\n return self.locations[-1]\n\n def get_tail(self):\n return self.locations[0]\n\n def get_body(self):\n return self.locations[:-1]\n\n def _translate_coordinate_in_direction(self, origin, direction):\n '''\n Helper function to translate a coordinate to a direction\n \n Parameters:\n -----------\n origin: (int, int)\n Coordinate to be moved\n\n Direction: int, options: [Snake.UP, Snake.DOWN, Snake.LEFT or Snake.RIGHT\n Direction to be moved\n\n Returns:\n -------\n coordinate: (int, int)\n Translated coordinate\n '''\n new_coordinate = np.copy(origin)\n if direction == self.UP:\n new_coordinate[0] = new_coordinate[0] - 1\n elif direction == self.DOWN:\n new_coordinate[0] = new_coordinate[0] + 1\n elif direction == self.LEFT:\n new_coordinate[1] = new_coordinate[1] - 1\n elif direction == self.RIGHT:\n new_coordinate[1] = new_coordinate[1] + 1\n \n return new_coordinate\n\n def can_snake_move_in_direction(self, direction):\n '''\n Helper function to check if it's possible to move in a certain direction\n Checks for:\n - If the snake is moving in the opposite direction as the way it's travelling\n (could be expanded if necessary)\n \n Parameters:\n -----------\n origin: (int, int)\n Coordinate to be moved\n\n Direction: int, options: [Snake.UP, Snake.DOWN, Snake.LEFT or Snake.RIGHT\n Direction to be moved\n\n Returns:\n -------\n coordinate: (int, int)\n Translated coordinate\n\n '''\n if self._is_facing_opposite_of_direction(direction):\n return False\n return True\n\n def is_head_outside_map(self):\n '''\n Returns a boolean indicating if the snake head is outside the map\n '''\n i_head, j_head = self.get_head()\n if 0 <= j_head < self.map_size[1]:\n if 0 <= i_head < self.map_size[0]:\n return False\n return True\n\n def get_snake_map(self, return_type=\"Binary\"):\n '''\n Return an image including the 
positions of the snakes\n\n Parameter:\n ----------\n return_type: string\n if Binary, a binary image is returned\n if Colour, an image based on the snake's colour is returned\n if Numbered, an image with 1 as the head, 2, 3, 4 as the body\n is returned\n\n Returns:\n --------\n map_image, np.array(self.map_size)\n image of the position of this snake\n ''' \n if return_type == \"Colour\":\n map_image = np.zeros((self.map_size[0], self.map_size[1], 3))\n else:\n map_image = np.zeros((self.map_size[0], self.map_size[1]))\n\n if not self._is_alive or self.is_head_outside_map():\n # To check if the snake is dead or not\n return map_image\n\n for i, location in enumerate(self.locations):\n if return_type == \"Colour\":\n map_image[location[0], location[1], :] = self.colour\n elif return_type == \"Binary\":\n map_image[location[0], location[1]] = 1\n elif return_type == \"Numbered\":\n map_image[location[0], location[1]] = i+1\n\n # Color the head differently\n if return_type == \"Colour\":\n map_image[self.get_head()[0], self.get_head()[1], :] *= 0.5\n elif return_type == \"Binary\":\n map_image[self.get_head()[0], self.get_head()[1]] = 5\n\n return map_image\n\n def kill_snake(self):\n '''\n Set snake to be dead\n '''\n self._is_alive = False\n self.locations = []\n\n def is_alive(self):\n '''\n Get if the snake is alive\n '''\n return self._is_alive\n\n def get_size(self):\n '''\n Get the snake size\n '''\n return len(self.locations)\n\n def set_ate_food(self):\n '''\n Actions taken when the snake eaten food\n '''\n self.ate_food = True\n self.health = self.FULL_HEALTH\n\nclass Snakes:\n '''\n The Snakes class managers n number of snakes\n \n Parameters\n ----------\n map_size: (int, int)\n number_of_snakes: int\n \n snake_spawn_locations: [(int, int)] optional\n Parameter to force snakes to spawn in certain positions. 
Used for testing\n '''\n def __init__(self, map_size, number_of_snakes, snake_spawn_locations=[]):\n self.map_size = map_size\n self.number_of_snakes = number_of_snakes\n self.snakes = self._initialise_snakes(number_of_snakes, snake_spawn_locations)\n\n def _initialise_snakes(self, number_of_snakes, snake_spawn_locations):\n snakes = []\n\n if len(snake_spawn_locations) == 0:\n starting_positions = get_random_coordinates(self.map_size, number_of_snakes)\n else:\n error_message = \"the number of coordinates in snake_spawn_locations must match the number of snakes\"\n assert len(snake_spawn_locations) == self.number_of_snakes, error_message\n starting_positions = snake_spawn_locations\n\n for i in range(number_of_snakes):\n snakes.append(Snake(starting_position=starting_positions[i], map_size=self.map_size))\n\n return snakes\n\n @classmethod\n def make_from_dict(cls, map_size, snake_dicts):\n '''\n Class method to create the Snakes class from a dictionary of snakes\n\n Parameters\n ----------\n map_size: (int, int)\n snake_dicts: [{}]\n A list of snake_dict.\n dictionary are in the form of the battlesnake engine\n '''\n number_of_snakes = len(snake_dicts)\n cls = Snakes(map_size, number_of_snakes)\n cls.snakes = []\n \n for snake_dict in snake_dicts:\n locations = []\n\n for loc in snake_dict[\"body\"]:\n locations.append((loc[\"y\"], loc[\"x\"]))\n \n health = snake_dict[\"health\"]\n snake = Snake.make_from_list(locations, health, map_size)\n cls.snakes.append(snake)\n return cls\n\n def get_snake_51_map(self, excluded_snakes=[]):\n '''\n Function to generate a 51 map of the locations of any snake\n\n Parameters:\n ----------\n excluded_snakes: [Snake]\n Snakes to not be included in the binary map. \n Used to check if there are collisions between snakes\n \n Returns:\n --------\n map_image: np.array(map_sizep[0], map_size[1], 1)\n If any snake is on coordinate i, j, map_image[i, j] will be 1\n '''\n sum_map = np.sum(self.get_snake_depth_51_map(excluded_snakes=excluded_snakes), 2) \n return sum_map\n \n def get_snake_numbered_map(self, excluded_snakes=[]):\n '''\n Function to generate a numbered map of the locations of any snake\n 1 will be the head, 2, 3 etc will be the body\n\n Parameters:\n ----------\n excluded_snakes: [Snake]\n Snakes to not be included in the binary map. \n Used to check if there are collisions between snakes\n \n Returns:\n --------\n map_image: np.array(map_sizep[0], map_size[1], 1)\n If any snake is on coordinate i, j, map_image[i, j] will be 1\n '''\n return np.sum(self.get_snake_depth_numbered_map(\n excluded_snakes=excluded_snakes), 2)\n\n def get_snake_depth_numbered_map(self, excluded_snakes=[]):\n '''\n Function to generate a numbered map of the locations of any snake\n 1 will be the head, 2, 3 etc will be the body\n\n Parameters:\n ----------\n excluded_snakes: [Snake]\n Snakes to not be included in the binary map. 
\n Used to check if there are collisions between snakes\n\n Returns:\n --------\n map_image: np.array(map_sizep[0], map_size[1], number_of_snakes)\n The depth of the map_image corresponds to each snakes\n For each snake, 1 indicates the head and 2, 3, 4 etc indicates\n the body that the snake is present in that location and 0\n indicates that the snake is not present in that location\n '''\n map_image = np.zeros((self.map_size[0], self.map_size[1],\n len(self.snakes)),\n dtype=np.uint8)\n for i, snake in enumerate(self.snakes):\n if snake not in excluded_snakes:\n map_image[:, :, i] = snake.get_snake_map(return_type=\"Numbered\")\n return map_image\n\n\n def get_snake_depth_51_map(self, excluded_snakes=[]):\n '''\n Function to generate a 51 map of the locations of the snakes\n\n Parameters:\n ----------\n excluded_snakes: [Snake]\n Snakes to not be included in the binary map. \n Used to check if there are collisions between snakes\n\n Returns:\n --------\n map_image: np.array(map_sizep[0], map_size[1], number_of_snakes)\n The depth of the map_image corresponds to each snakes\n For each snake, 2 indicates the head and 1 indicates the body\n that the snake is present in that location and 0\n indicates that the snake is not present in that location\n '''\n map_image = np.zeros((self.map_size[0], self.map_size[1],\n len(self.snakes)),\n dtype=np.uint8)\n for i, snake in enumerate(self.snakes):\n if snake not in excluded_snakes:\n map_image[:, :, i] = snake.get_snake_map(return_type=\"Binary\")\n\n return map_image\n\n def get_snake_colour_map(self):\n '''\n Function to generate a colour map of the locations of the snakes\n\n Returns:\n --------\n map_image: np.array(map_size[0], map_size[1], 3)\n The positions of the snakes are indicated by the colour of each snake\n '''\n map_image = np.zeros((self.map_size[0], self.map_size[1], 3))\n for snake in self.snakes:\n map_image += snake.get_snake_map(return_type=\"Colour\")\n return map_image\n\n def get_snake_colours(self):\n '''\n The colours of each snake are provided\n '''\n snake_colours = []\n for snake in self.snakes:\n snake_colours.append(snake.colour)\n return snake_colours\n\n def move_snakes(self, action):\n '''\n Move the snakes based on action\n\n Parameters:\n ----------\n action: np.array(number_of_snakes)\n Array of integers containing an action for each number of snake. \n The integers range from 0 to 3 corresponding to up, down, left, and right \n respectively\n '''\n for i in range(len(action)):\n direction = action[i]\n self.snakes[i].move(direction)\n\n def get_snakes(self):\n '''\n Returns the list of snakes\n '''\n return self.snakes\n"
] |
[
[
"numpy.copy",
"numpy.array",
"numpy.zeros"
]
] |
PoCFrance/Pool2019
|
[
"b043a2a06886d72d51d829942a657145020afcf7"
] |
[
"ai/exercices/day01/blood_model.py"
] |
[
"import csv\nimport numpy as np\nimport random\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n\nPARAM_MIN_INIT = -1.0\nPARAM_MAX_INIT = 1.0\nLEARNING_RATE = 0.001\nMU = 0.57 # Limit of loss (when the loss become lower than MU, we can stop the training)\n\n# Normalize numpy array\ndef normalize(x):\n return (x - x.mean() / x.std())\n\n# Unormalize numpy array\ndef unormalize(x, mean, std):\n return (x * x.std() + x.mean())\n\n# Load the blood dataset and return normalized data\ndef load_blood_dataset(filename=\"Blood.csv\"):\n x = []\n y = []\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n x.append(float(row['Age']))\n y.append(float(row['Systolic Blood Pressure']))\n\n # Transform x and y to np.array\n x = np.array(x)\n y = np.array(y)\n return normalize(x), normalize(y), x.mean(), x.std(), y.mean(), y.std()\n\n# Apply the linear formula: f(x) = ax + b\ndef do_inference(a, b, x):\n return a*x+b\n\n# Plot values on screen\ndef plot_values(a, b, x, y, mean_x, std_x, mean_y, std_y):\n plt.xlabel(\"Age\") # Set abscissa name\n plt.ylabel(\"Systolic Blood Pressure\") # Set ordinate name\n\n unormalized_x = unormalize(x, mean_x, std_x) # Unormalize entries\n predictions = do_inference(a, b, x) # Get model predictions\n plt.plot(unormalized_x, unormalize(y, mean_y, std_y), \"bo\") # Plot dataset as blue points\n plt.plot(unormalized_x, unormalize(predictions, mean_y, std_y), \"r\") # Plot model predictions as red line\n plt.show()\n\n# Calculate the Mean Squared Error (MSE) of a linear model for a given dataset\ndef calculate_loss(a, b, x, y):\n preds = do_inference(a, b, x)\n\n diff = pow((preds - y), 2)\n\n return diff.mean()\n\n# Apply descent gradient on parameters a and b with a given learning rate (alpha)\ndef update_parameters(a, b, x, y, alpha):\n\n return a, b\n\ndef main():\n # Load the blood dataset and return x (entries vector) and y (expected outputs vector)\n x, y, mean_x, std_x, mean_y, std_y = load_blood_dataset()\n\n # Initialize a and b randomly\n a = random.uniform(PARAM_MIN_INIT, PARAM_MAX_INIT)\n b = random.uniform(PARAM_MIN_INIT, PARAM_MAX_INIT)\n\n epoch = 0\n\n # Execute gradient descent\n loss = calculate_loss(a, b, x, y)\n while loss >= MU:\n print(\"Epoch %d: MSE = %f\" % (epoch, loss))\n a, b = update_parameters(a, b, x, y, LEARNING_RATE)\n epoch += 1\n loss = calculate_loss(a, b, x, y)\n\n # Plot values on screen\n plot_values(a, b, x, y, mean_x, std_x, mean_y, std_y)\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
aister2020/KDDCUP_2020_MultimodalitiesRecall_3st_Place
|
[
"508c263e72184e28ad6c5eadf637095761e5e035"
] |
[
"code/v1/src/run.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport datetime\nimport os\n\nimport utils.flag_setup as flag_setup\nimport utils.json_reader as json_reader\nimport model.estimator_builder as estimator_builder\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef local_run(model_json):\n epoch_steps = int(flag_setup.FLAGS.epoch_samples/model_json['model']['batch_size'])+1\n print('epoch_steps',epoch_steps)\n run_config = tf.estimator.RunConfig(\n model_dir=os.path.join(model_json[\"export\"][\"model_dir\"], flag_setup.FLAGS.run_id),\n # save_checkpoints_steps=epoch_steps,\n save_checkpoints_secs=model_json[\"export\"][\"checkpoint_secs\"],\n save_summary_steps=model_json[\"export\"][\"summary_steps\"],\n keep_checkpoint_max=model_json[\"export\"][\"max_checkpoints\"],\n )\n\n estimator, train_spec, eval_spec, data_loader = estimator_builder.create_estimator_and_specs(run_config, model_json)\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n # 保存savedModel用来做serving\n estimator.export_savedmodel(\n export_dir_base=os.path.join(model_json[\"export\"][\"savedmodel_dir\"], flag_setup.FLAGS.run_id),\n serving_input_receiver_fn=data_loader.serving_example_input_receiver_fn\n )\n\n\ndef local_predict_export(model_json):\n run_config = tf.estimator.RunConfig(\n model_dir=os.path.join(model_json[\"export\"][\"model_dir\"], flag_setup.FLAGS.warm_start_id),\n # save_checkpoints_steps=epoch_steps,\n save_checkpoints_secs=model_json[\"export\"][\"checkpoint_secs\"],\n save_summary_steps=model_json[\"export\"][\"summary_steps\"],\n keep_checkpoint_max=model_json[\"export\"][\"max_checkpoints\"],\n )\n\n estimator, data_loader = estimator_builder.create_estimator_predict(run_config, model_json)\n\n # 保存savedModel用来做serving\n estimator.export_savedmodel(\n export_dir_base=os.path.join(model_json[\"export\"][\"savedmodel_dir\"], flag_setup.FLAGS.run_id),\n serving_input_receiver_fn=data_loader.serving_example_input_receiver_fn\n )\n\n\ndef afo_run(model_json):\n epoch_steps = int(flag_setup.FLAGS.epoch_samples/model_json['model']['batch_size'])+1\n\n tf.disable_chief_training(shut_ratio=0.8, slow_worker_delay_ratio=1.2)\n run_config = tf.estimator.RunConfig(\n model_dir=os.path.join(model_json[\"export\"][\"model_dir\"], flag_setup.FLAGS.run_id),\n save_checkpoints_steps=epoch_steps,\n save_summary_steps=model_json[\"export\"][\"summary_steps\"],\n keep_checkpoint_max=model_json[\"export\"][\"max_checkpoints\"])\n\n estimator, train_spec, eval_spec, data_loader = estimator_builder.create_estimator_and_specs(run_config, model_json)\n\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n tf.logging.warn(flag_setup.FLAGS.job_name + \" finished training at \" + str(datetime.datetime.now().time()))\n\n if flag_setup.FLAGS.job_name == \"chief\":\n # 保存savedModel用来做serving\n estimator.export_savedmodel(\n export_dir_base=os.path.join(os.path.join(model_json[\"export\"][\"savedmodel_dir\"], flag_setup.FLAGS.run_id), 'epoch'),\n serving_input_receiver_fn=data_loader.serving_example_input_receiver_fn\n )\n\n\ndef main(unused_argv):\n # 加载模型配置\n if flag_setup.FLAGS.model_conf:\n\n model_conf = json_reader.load_json(flag_setup.FLAGS.model_conf)\n \n if flag_setup.FLAGS.epochs is not None and flag_setup.FLAGS.epochs!=\"\" and flag_setup.FLAGS.epochs > 0:\n model_conf['model']['epoch'] = flag_setup.FLAGS.epochs\n \n print('epochs',model_conf['model']['epoch'])\n \n if flag_setup.FLAGS.script_mode == \"local\":\n local_run(model_conf)\n\n if flag_setup.FLAGS.script_mode == 
\"afo\":\n afo_run(model_conf)\n \n if flag_setup.FLAGS.script_mode == \"local_predict_export\":\n local_predict_export(model_conf)\n else:\n tf.logging.info('can not load model_conf file %s' % flag_setup.FLAGS.model_conf)\n\n\nif __name__ == \"__main__\":\n flag_setup.flag_setup()\n tf.app.run()\n"
] |
[
[
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.estimator.train_and_evaluate",
"tensorflow.disable_chief_training",
"tensorflow.app.run"
]
] |
coltekin/emoji2018
|
[
"a6795e248ada9a3cebdb4987574038e4e5affa93"
] |
[
"tune-rnn.py"
] |
[
"#!/usr/bin/env python3\nfrom emoji_data import load\nfrom features import doc_to_numseq\nimport random\nimport numpy as np\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom collections import Counter\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils.np_utils import to_categorical\nfrom keras.layers import Input\nfrom keras.layers import Embedding\nfrom keras.layers import GRU\nfrom keras.layers import LSTM\nfrom keras.layers.merge import concatenate\nfrom keras.layers import Dense\nfrom keras.models import Model\nfrom keras.callbacks import EarlyStopping\nfrom keras.layers import SpatialDropout1D\nimport numpy as np\nfrom keras.callbacks import Callback\nfrom sklearn.metrics import precision_recall_fscore_support, accuracy_score\nfrom logging import debug, info, basicConfig\nbasicConfig(level='INFO', format='%(asctime)s %(message)s')\n\n\nfrom argparse import ArgumentParser\nap = ArgumentParser()\nap.add_argument(\"-i\", \"--input\", dest=\"input_prefix\")\nopt = ap.parse_args()\n\nclass Options:\n __slots__ = (\n 'c_cutoff',\n 'c_maxlen',\n 'c_embdim',\n 'c_embdrop',\n 'c_featdim',\n 'w_cutoff',\n 'c_featdrop',\n 'w_maxlen',\n 'w_embdim',\n 'w_embdrop',\n 'w_featdim',\n 'w_featdrop',\n 'rnn')\n def __init__(self,\n c_cutoff = 5,\n c_maxlen = None,\n c_embdim = 64,\n c_embdrop = 0.2,\n c_featdim = 64,\n w_cutoff = 5,\n c_featdrop = 0.2,\n w_maxlen = None,\n w_embdim = 64,\n w_embdrop = 0.2,\n w_featdim = 64,\n w_featdrop = 0.2,\n rnn = 'GRU'):\n self.c_cutoff = c_cutoff\n self.c_maxlen = c_maxlen\n self.c_embdim = c_embdim\n self.c_embdrop = c_embdrop\n self.c_featdim = c_featdim\n self.w_cutoff = w_cutoff\n self.c_featdrop = c_featdrop\n self.w_maxlen = w_maxlen\n self.w_embdim = w_embdim\n self.w_embdrop = w_embdrop\n self.w_featdim = w_featdim \n self.w_featdrop = w_featdrop \n self.rnn = rnn \n\n @classmethod\n def sample(cls):\n o = Options(\n c_cutoff = random.choice((1, 4)),\n c_maxlen = None,\n c_embdim = random.choice((32, 64)),\n c_embdrop = random.choice((0.1, 0.2, 0.5)),\n c_featdim = random.choice((32, 64, 128)),\n w_cutoff = random.choice((1, 4)),\n c_featdrop = random.choice((64, 128, 256)),\n w_maxlen = None,\n w_embdim = random.choice((32, 64, 128)),\n w_embdrop = random.choice((0.1, 0.2, 0.5)),\n w_featdim = random.choice((64, 128, 256)),\n w_featdrop = random.choice((0.1, 0.2, 0.5)),\n rnn = random.choice(('GRU', 'LSTM')))\n \n return o, hash(str(o))\n def __str__(self):\n str = \"\"\n for attr in self.__slots__:\n str += '{}={}, '.format(attr, getattr(self, attr))\n return str[:-2]\n\ndata = load(opt.input_prefix)\nnepoch = 40\n\nssp = StratifiedShuffleSplit(n_splits=1, test_size=0.2)\nssp.get_n_splits(data.docs, data.labels)\ntrn_idx, dev_idx = list(ssp.split(data.docs, data.labels))[0]\n\ntrn_labels = to_categorical(np.array(data.labels)[trn_idx])\ndev_labels = to_categorical(np.array(data.labels)[dev_idx])\n\nsearch_iter = 1000\nsearch_done = set()\nfor _ in range(search_iter):\n o, h = Options.sample()\n if h in search_done: continue\n search_done.add(h)\n\n if not o.c_maxlen:\n o.c_maxlen = np.max(data.len_char)\n c_vocab = Counter({k:v for k,v in data.chars.items() if v > o.c_cutoff})\n c_trn, _ = doc_to_numseq(np.array(data.docs)[trn_idx], vocab=c_vocab,\n pad=o.c_maxlen)\n c_dev, _ = doc_to_numseq(np.array(data.docs)[dev_idx], vocab=c_vocab,\n pad=o.c_maxlen)\n\n if not o.w_maxlen:\n o.w_maxlen = np.max(data.len_word)\n w_vocab = Counter({k:v for k,v in data.words.items() if v > o.w_cutoff})\n w_trn, _ = 
doc_to_numseq(np.array(data.docs)[trn_idx], vocab=w_vocab,\n tokenizer=\"word\", pad=o.w_maxlen)\n w_dev, _ = doc_to_numseq(np.array(data.docs)[dev_idx], vocab=w_vocab,\n tokenizer=\"word\", pad=o.w_maxlen)\n\n c_inp = Input(shape=(o.c_maxlen, ), name='char_input')\n w_inp = Input(shape=(o.w_maxlen, ), name='word_input')\n\n c_emb = Embedding(len(c_vocab) + 4, o.c_embdim, mask_zero=True,\n name='char_embedding')(c_inp)\n c_emb = SpatialDropout1D(o.c_embdrop)(c_emb)\n w_emb = Embedding(len(w_vocab) + 4, o.w_embdim, mask_zero=True,\n name='word_embedding')(w_inp)\n w_emb = SpatialDropout1D(o.w_embdrop)(w_emb)\n\n if o.rnn == 'LSTM':\n rnn = LSTM\n else:\n rnn = GRU\n\n c_fw = rnn(o.c_featdim, dropout=o.c_featdrop, name='char_fw_rnn')(c_emb)\n c_bw = rnn(o.c_featdim, dropout=o.c_featdrop, go_backwards=True,\n name='char_bw_rnn')(c_emb)\n c_feat = concatenate([c_fw, c_bw])\n\n w_fw = rnn(o.w_featdim, dropout=o.w_featdrop, name='word_fw_rnn')(w_emb)\n w_bw = rnn(o.w_featdim, dropout=o.w_featdrop, go_backwards=True,\n name='word_bw_rnn')(w_emb)\n w_feat = concatenate([w_fw, w_bw])\n\n h = concatenate([c_feat, w_feat])\n\n emo = Dense(trn_labels.shape[1], activation='softmax', name='emoji')(h)\n\n m = Model(inputs=[c_inp, w_inp], outputs=[emo])\n m.summary()\n\n m.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n class Metrics(Callback):\n def on_train_begin(self, logs={}):\n self.val_f1 = []\n self.val_precision = []\n self.val_recall = []\n self.val_accuracy = []\n def on_epoch_end(self, batch, logs={}):\n predict = np.argmax(self.model.predict(self.validation_data[:2]), axis=1)\n targ = np.argmax(self.validation_data[2], axis=1)\n prec, rec, f1, _ = precision_recall_fscore_support(targ, predict, average='macro')\n self.val_f1.append(f1)\n self.val_precision.append(prec)\n self.val_recall.append(rec)\n self.val_accuracy.append(accuracy_score(targ, predict))\n def __str__(self):\n return 'prfa: {} {} {} {}'.format(\n self.val_precision, self.val_recall, self.val_f1,\n self.val_accuracy)\n\n\n prf_scores = Metrics()\n\n early_stop = EarlyStopping(monitor='val_loss', patience=4)\n\n info(\"Fitting: {}\".format(str(o)))\n m.fit(x={'char_input': c_trn, 'word_input': w_trn},\n y=trn_labels,\n validation_data=({'char_input': c_dev, 'word_input': w_dev}, dev_labels),\n epochs=nepoch, callbacks=[prf_scores, early_stop], verbose=0)\n info(\"Results: {}\".format(str(prf_scores)))\n"
] |
[
[
"numpy.max",
"sklearn.metrics.precision_recall_fscore_support",
"numpy.argmax",
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.array",
"sklearn.metrics.accuracy_score"
]
] |
inspire-group/PatchGuard
|
[
"5ca61b8a3d3814d72ee64d5587d02147fc216478"
] |
[
"nets/bagnet.py"
] |
[
"#################################################################################################################\n# Adapted from https://github.com/wielandbrendel/bag-of-local-features-models/blob/master/bagnets/pytorchnet.py #\n# Mainly changed the model forward() function #\n#################################################################################################################\n\n\nimport torch.nn as nn\nimport math\nimport random\nimport torch\nfrom collections import OrderedDict\nfrom torch.utils import model_zoo\nimport numpy as np \nimport os \ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n__all__ = ['bagnet9', 'bagnet17', 'bagnet33']\n\nmodel_urls = {\n 'bagnet9': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet8-34f4ccd2.pth.tar',\n 'bagnet17': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet16-105524de.pth.tar',\n 'bagnet33': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet32-2ddd53ed.pth.tar',\n }\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, kernel_size=1):\n super(Bottleneck, self).__init__()\n # #print('Creating bottleneck with kernel size {} and stride {} with padding {}'.format(kernel_size, stride, (kernel_size - 1) // 2))\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size, stride=stride,\n padding=0, bias=False) # changed padding from (kernel_size - 1) // 2\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n \n\n def forward(self, x, **kwargs):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n \n if self.downsample is not None:\n residual = self.downsample(x)\n \n if residual.size(-1) != out.size(-1):\n diff = residual.size(-1) - out.size(-1)\n residual = residual[:,:,:-diff,:-diff]\n \n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass BagNet(nn.Module):\n\n def __init__(self, block, layers, strides=[1, 2, 2, 2], kernel3=[0, 0, 0, 0], num_classes=1000,clip_range=None,aggregation='mean'):\n self.inplanes = 64\n super(BagNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=1, stride=1, padding=0,\n bias=False)\n self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=0.001)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], kernel3=kernel3[0], prefix='layer1')\n self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], kernel3=kernel3[1], prefix='layer2')\n self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], kernel3=kernel3[2], prefix='layer3')\n self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], kernel3=kernel3[3], prefix='layer4')\n self.avgpool = nn.AvgPool2d(1, stride=1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n self.block = block\n\n self.clip_range = clip_range\n 
self.aggregation = aggregation\n \n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1, kernel3=0, prefix=''):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n \n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n kernel = 1 if kernel3 == 0 else 3\n \n layers.append(block(self.inplanes, planes, stride, downsample, kernel_size=kernel))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n kernel = 1 if kernel3 <= i else 3\n \n layers.append(block(self.inplanes, planes, kernel_size=kernel))\n \n return nn.Sequential(*layers)\n\n def forward(self, x,y=None):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = x.permute(0,2,3,1)\n\n x = self.fc(x)\n if self.clip_range is not None:\n x = torch.clamp(x,self.clip_range[0],self.clip_range[1]) \n if self.aggregation == 'mean':\n x = torch.mean(x,dim=(1,2))\n elif self.aggregation == 'median':\n x = x.view([x.size()[0],-1,10])\n x = torch.median(x,dim=1)\n return x.values\n elif self.aggregation =='cbn':#clipped BagNet\n x = torch.tanh(x*0.05-1)\n x = torch.mean(x,dim=(1,2))\n elif self.aggregation == 'adv':# provable adversarial training\n window_size = 6 # the size of window to be masked during the training \n B,W,H,C = x.size()\n x = torch.clamp(x,0,torch.tensor(float('inf'))) #clip\n tmp = x[torch.arange(B),:,:,y] #the feature map for the true class\n tmp = tmp.unfold(1,window_size,1).unfold(2,window_size,1) #unfold\n tmp = tmp.reshape([B,-1,window_size,window_size]) # [B,num_window,window_size,window_size]\n tmp = torch.sum(tmp,axis=(-2,-1)) # [B,num_window] true class evidence in every window\n tmp = torch.max(tmp,axis=-1).values # [B] max window class evidence\n x = torch.sum(x,dim=(1,2)) # \n x[torch.arange(B),y]-=tmp # substract the max true window class evidence\n x/=(W*H)\n elif self.aggregation == 'none':\n pass\n \n return x\n\ndef bagnet33(pretrained=False, strides=[2, 2, 2, 1], **kwargs):\n \"\"\"Constructs a Bagnet-33 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = BagNet(Bottleneck, [3, 4, 6, 3], strides=strides, kernel3=[1,1,1,1], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['bagnet33']))\n return model\n\ndef bagnet17(pretrained=False, strides=[2, 2, 2, 1], **kwargs):\n \"\"\"Constructs a Bagnet-17 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = BagNet(Bottleneck, [3, 4, 6, 3], strides=strides, kernel3=[1,1,1,0], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['bagnet17']))\n return model\n\ndef bagnet9(pretrained=False, strides=[2, 2, 2, 1], **kwargs):\n \"\"\"Constructs a Bagnet-9 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = BagNet(Bottleneck, [3, 4, 6, 3], strides=strides, kernel3=[1,1,0,0], **kwargs)\n #model = BagNet(Bottleneck, [2,2,2,2], strides=strides, kernel3=[1,1,0,0], **kwargs)\n if pretrained:\n 
model.load_state_dict(model_zoo.load_url(model_urls['bagnet9']))\n return model\n"
] |
[
[
"torch.nn.Sequential",
"torch.clamp",
"torch.mean",
"torch.max",
"torch.median",
"torch.nn.Conv2d",
"torch.sum",
"torch.arange",
"torch.tanh",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
]
] |
jireh-father/KoBERT
|
[
"13b10589979d9a17137edcbd34c5dcee7eb552f7"
] |
[
"train_extractive_summary.py"
] |
[
"import os\nimport argparse\nimport random\nimport numpy as np\nimport torch\nfrom kobert.pytorch_kobert import get_pytorch_kobert_model\nimport jsonlines\nfrom torch.utils import data\nfrom gluonnlp.data import SentencepieceTokenizer\nfrom kobert.utils import get_tokenizer\nfrom torch import nn\nfrom torch.nn.utils.rnn import pad_sequence\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\nimport datetime\nimport time\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import f1_score, accuracy_score\nimport matplotlib.pyplot as plt\nimport itertools\nfrom model import ExtractiveModel\n\n\ndef freeze_params(model):\n \"\"\"Set requires_grad=False for each of model.parameters()\"\"\"\n for par in model.parameters():\n par.requires_grad = False\n\n\ndef non_freeze_params(model):\n \"\"\"Set requires_grad=False for each of model.parameters()\"\"\"\n for par in model.parameters():\n par.requires_grad = False\n\n\ndef init_optimizer(optimizer_name, model, lr, wd, lr_restart_step=1, lr_decay_gamma=0.9,\n scheduler=\"step\", nesterov=False, num_epochs=None, steps_per_epoch=None):\n if optimizer_name == \"sgd\":\n optimizer_ft = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=wd, nesterov=nesterov)\n elif optimizer_name == \"adam\":\n optimizer_ft = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)\n elif optimizer_name == \"adamp\":\n from adamp import AdamP\n optimizer_ft = AdamP(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=wd) # 1e-2)\n elif optimizer_name == \"sgdp\":\n from adamp import SGDP\n optimizer_ft = SGDP(model.parameters(), lr=lr, weight_decay=wd, momentum=0.9, nesterov=nesterov)\n # else:\n # opt_attr = getattr(toptim, optimizer_name)\n # if opt_attr:\n # optimizer_ft = opt_attr(model.parameters())\n # else:\n # raise Exception(\"unknown optimizer name\", optimizer_name)\n\n if scheduler == \"cosine\":\n exp_lr_scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer_ft, lr_restart_step)\n use_lr_schedule_steps = True\n elif scheduler == \"cycle\":\n exp_lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer_ft, max_lr=lr, steps_per_epoch=steps_per_epoch,\n epochs=num_epochs, pct_start=0.1)\n use_lr_schedule_steps = False\n elif scheduler == \"step\":\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=lr_restart_step, gamma=lr_decay_gamma)\n use_lr_schedule_steps = False\n\n return optimizer_ft, exp_lr_scheduler, use_lr_schedule_steps\n\n\ndef reduce_loss(loss, reduction='mean'):\n return loss.mean() if reduction == 'mean' else loss.sum() if reduction == 'sum' else loss\n\n\ndef plot_to_image(figure):\n \"\"\"Converts the matplotlib plot specified by 'figure' to a PNG image and\n returns it. 
The supplied figure is closed and inaccessible after this call.\"\"\"\n figure.canvas.draw()\n return np.array(figure.canvas.renderer._renderer)\n\n\ndef plot_confusion_matrix(cm, class_names):\n \"\"\"\n Returns a matplotlib figure containing the plotted confusion matrix.\n\n Args:\n cm (array, shape = [n, n]): a confusion matrix of integer classes\n class_names (array, shape = [n]): String names of the integer classes\n \"\"\"\n figure = plt.figure(figsize=(8, 8))\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n\n # Normalize the confusion matrix.\n cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n # Use white text if squares are dark; otherwise black.\n threshold = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n color = \"white\" if cm[i, j] > threshold else \"black\"\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return figure\n\n\ndef linear_combination(x, y, epsilon):\n return epsilon * x + (1 - epsilon) * y\n\n\ndef log_confusion_matrix(writer, epoch, cm, class_names=None):\n # Log the confusion matrix as an image summary.\n figure = plot_confusion_matrix(cm, class_names=class_names)\n cm_image = plot_to_image(figure)\n\n writer.add_image('confusion_matrix', cm_image, epoch, dataformats='HWC')\n\n\nclass LabelSmoothingCrossEntropy(nn.Module):\n def __init__(self, epsilon=0.1, reduction='mean'):\n super().__init__()\n self.epsilon = epsilon\n self.reduction = reduction\n\n def forward(self, preds, target):\n n = preds.size()[-1]\n log_preds = F.log_softmax(preds, dim=-1)\n loss = reduce_loss(-log_preds.sum(dim=-1), self.reduction)\n nll = F.nll_loss(log_preds, target, reduction=self.reduction)\n return linear_combination(loss / n, nll, self.epsilon)\n\n\nclass SentenceDataset(data.Dataset):\n \"\"\"__init__ and __len__ functions are the same as in TorchvisionDataset\"\"\"\n\n def __init__(self, samples, vocab, media_map, word_dropout_prob=0.0, max_word_dropout_ratio=0.0, max_token_cnt=300):\n self.tokenizer = SentencepieceTokenizer(get_tokenizer())\n self.vocab = vocab\n\n self.samples = samples\n self.targets = [s[1] for s in samples]\n self.media_map = media_map\n self.word_dropout_prob = word_dropout_prob\n self.max_word_dropout_ratio = max_word_dropout_ratio\n self.max_token_cnt = max_token_cnt\n # self.classes = classes\n # self.class_to_idx = class_to_idx\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (sample, target) where target is class_index of the target class.\n \"\"\"\n\n sentence, target, pos_idx, media = self.samples[index]\n media = self.media_map[media]\n tokens = self.tokenizer(sentence)\n token_ids = self.vocab.to_indices(tokens)\n if random.random() < self.word_dropout_prob:\n dropout_cnt = round(self.max_word_dropout_ratio * len(token_ids))\n for i in range(dropout_cnt):\n dropout_idx = random.randint(0, len(token_ids) - 1)\n del token_ids[dropout_idx]\n\n if len(token_ids) > self.max_token_cnt:\n token_ids = token_ids[:self.max_token_cnt]\n\n return torch.tensor(token_ids, dtype=torch.long), target, pos_idx, media\n\n def __len__(self):\n return len(self.samples)\n\n\ndef pad_collate(batch):\n token_ids_batch, target_batch, pos_idx_batch, 
media_batch = zip(*batch)\n token_ids_batch = pad_sequence(token_ids_batch, batch_first=True, padding_value=0)\n\n return token_ids_batch, torch.tensor(target_batch, dtype=torch.long), \\\n torch.tensor(pos_idx_batch, dtype=torch.long), torch.tensor(media_batch, dtype=torch.long),\n\n\ndef save_model(model, model_path):\n if hasattr(model, 'module'):\n model = model.module\n print(\"save model\", model_path)\n torch.save(model.state_dict(), model_path)\n\n\ndef train(args):\n # 문장 최대 갯수 100개\n\n # import unicodedata\n # unicodedata.normalize('NFKC', '한국어로는 안되?ㅋ')\n\n samples_dict = {}\n medias = set()\n with jsonlines.open(args.train_file) as f:\n for line in f.iter():\n media = line['media']\n medias.add(media)\n extractive = line['extractive']\n for i, sentence in enumerate(line['article_original']):\n if i in extractive:\n if args.use_multi_class:\n label = extractive.index(i)\n else:\n label = 0\n else:\n if args.use_multi_class:\n label = 3\n else:\n label = 1\n if label not in samples_dict:\n samples_dict[label] = []\n samples_dict[label].append([sentence.replace('\\n', '').strip(), label, i, media])\n\n medias = list(medias)\n medias.sort()\n media_map = {m: i for i, m in enumerate(medias)}\n print(\"medias\", media_map)\n\n os.makedirs(os.path.join(args.work_dir, \"saved_models\"), exist_ok=True)\n\n train_samples = []\n val_samples = []\n class_cnt = []\n num_classes = 4 if args.use_multi_class else 2\n for label in range(num_classes):\n random.shuffle(samples_dict[label])\n val_cnt = round(len(samples_dict[label]) * args.val_ratio)\n val_samples += samples_dict[label][:val_cnt]\n tmp_train_samples = samples_dict[label][val_cnt:]\n class_cnt.append(len(tmp_train_samples))\n if args.use_all_train:\n train_samples += samples_dict[label]\n elif args.train_val_data:\n train_samples += val_samples\n else:\n train_samples += tmp_train_samples\n\n print('class_cnt', class_cnt)\n\n random.shuffle(train_samples)\n train_targets = [t[1] for t in train_samples]\n print(\"total samples\", len(train_samples) + len(val_samples))\n print(\"train samples\", len(train_samples))\n print(\"val samples\", len(val_samples))\n\n bert_model, vocab = get_pytorch_kobert_model()\n if args.freeze_bert:\n freeze_params(bert_model.embeddings)\n freeze_params(bert_model.encoder)\n freeze_params(bert_model.pooler)\n else:\n non_freeze_params(bert_model.embeddings)\n non_freeze_params(bert_model.encoder)\n non_freeze_params(bert_model.pooler)\n train_dataset = SentenceDataset(train_samples, vocab, media_map, word_dropout_prob=args.word_dropout_prob,\n max_word_dropout_ratio=args.max_word_dropout_ratio,\n max_token_cnt=args.max_token_cnt)\n val_dataset = SentenceDataset(val_samples, vocab, media_map, max_token_cnt=args.max_token_cnt)\n\n weights = 1. 
/ torch.tensor(class_cnt, dtype=torch.float)\n print('weights', weights)\n samples_weights = weights[train_targets]\n sampler = torch.utils.data.sampler.WeightedRandomSampler(samples_weights, len(train_samples))\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size,\n num_workers=args.num_workers, pin_memory=args.train_pin_memory,\n collate_fn=pad_collate,\n sampler=sampler\n )\n\n val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.val_batch_size, num_workers=args.num_workers,\n shuffle=False, pin_memory=args.val_pin_memory, collate_fn=pad_collate)\n\n model = ExtractiveModel(bert_model, 100, 11, 768,\n use_bert_sum_words=args.use_bert_sum_words,\n use_pos=args.use_pos,\n use_media=args.use_media, num_classes=num_classes, simple_model=args.simple_model,\n dim_feedforward=args.dim_feedforward,\n dropout=args.dropout)\n\n if args.checkpoint_path is not None and os.path.isfile(args.checkpoint_path):\n state_dict = torch.load(args.checkpoint_path)\n model.load_state_dict(state_dict)\n\n if torch.cuda.device_count() > 1 and args.data_parallel:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = nn.DataParallel(model)\n device = \"cuda\"\n model.to(device)\n\n steps_per_epoch = len(train_samples) // args.train_batch_size\n if len(train_samples) % args.train_batch_size > 0:\n steps_per_epoch += 1\n optimizer, scheduler, use_lr_schedule_steps = init_optimizer(args.optimizer, model,\n args.lr, args.weight_decay,\n args.lr_restart_step,\n args.lr_decay_gamma,\n args.scheduler,\n nesterov=args.nesterov,\n num_epochs=args.num_epochs,\n steps_per_epoch=steps_per_epoch)\n\n if args.weighted_loss > 0:\n weights = [args.weighted_loss, 1.]\n class_weights = torch.FloatTensor(weights).to(device)\n criterion = torch.nn.CrossEntropyLoss(weight=class_weights)\n elif args.label_smoothing > 0:\n criterion = LabelSmoothingCrossEntropy(epsilon=args.label_smoothing)\n else:\n criterion = torch.nn.CrossEntropyLoss()\n criterion = criterion.to(device)\n\n os.makedirs(args.work_dir, exist_ok=True)\n train_writer = SummaryWriter(os.path.join(args.work_dir, 'train'))\n val_writer = SummaryWriter(os.path.join(args.work_dir, 'val'))\n\n print('train_loader', len(train_loader))\n best_f1 = 0.\n best_acc = 0.\n for epoch in range(args.num_epochs):\n\n if args.train:\n print(\"Epoch %d/%d, LR: %f\" % (epoch, args.num_epochs, np.array(scheduler.get_lr()).mean()))\n epoch_start_time = time.time()\n model.train()\n epoch_labels = []\n epoch_preds = []\n epoch_loss = 0.\n for step, (token_ids_batch, labels, pos_idx_batch, media_batch) in enumerate(train_loader):\n batch_start_time = time.time()\n epoch_labels += list(labels.numpy())\n labels = labels.to(device)\n token_ids_batch = token_ids_batch.to(device)\n pos_idx_batch = pos_idx_batch.to(device)\n media_batch = media_batch.to(device)\n\n if use_lr_schedule_steps:\n scheduler.step(epoch - 1 + step / len(train_loader))\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.autograd.set_detect_anomaly(True):\n with torch.set_grad_enabled(True):\n outputs = model(token_ids_batch, pos_idx_batch, media_batch)\n loss = criterion(outputs, labels)\n\n _, preds = torch.max(outputs, 1)\n epoch_preds += list(preds.cpu().numpy())\n\n # backward + optimize only if in training phase\n loss.backward()\n optimizer.step()\n epoch_loss += loss.item() * token_ids_batch.size(0)\n\n batch_elapsed_time = time.time() - batch_start_time\n if step >= 0 and (step 
+ 1) % args.log_step_interval == 0:\n current_datetime = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')\n\n f1 = f1_score(labels.cpu().numpy(), preds.cpu().numpy(), average='macro')\n acc = accuracy_score(labels.cpu().numpy(), preds.cpu().numpy())\n\n train_writer.add_scalar('Loss', loss.item(), step + len(train_loader) * epoch)\n train_writer.add_scalar('Acc', acc, step + len(train_loader) * epoch)\n train_writer.add_scalar('F1', f1, step + len(train_loader) * epoch)\n train_writer.add_scalar('LR', np.array(scheduler.get_lr()).mean(),\n step + len(train_loader) * epoch)\n\n print(\"[train-epoch:%02d/%02d,step:%d/%d,%s] batch_elapsed: %f\" %\n (epoch, args.num_epochs, step, len(train_loader), current_datetime, batch_elapsed_time))\n print(\"loss: %f, acc: %f, f1: %f, lr: %f\" % (\n loss.item(), acc, f1, np.array(scheduler.get_lr()).mean()))\n\n if not use_lr_schedule_steps:\n scheduler.step()\n\n epoch_loss = epoch_loss / len(train_loader.dataset)\n epoch_elapsed_time = time.time() - epoch_start_time\n current_datetime = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')\n epoch_f1 = f1_score(epoch_labels, epoch_preds, average='macro')\n epoch_acc = accuracy_score(epoch_labels, epoch_preds)\n\n print(\n \"[result:train-epoch:%02d/%02d,%s] epoch_elapsed: %s, loss: %f, acc: %f, f1: %f, lr: %f\" % (\n epoch, args.num_epochs, current_datetime, epoch_elapsed_time, epoch_loss, epoch_acc, epoch_f1,\n scheduler.get_lr()[0]))\n train_writer.add_scalar('Loss/epoch', epoch_loss, epoch)\n train_writer.add_scalar('Acc/epoch', epoch_acc, epoch)\n train_writer.add_scalar('F1/epoch', epoch_f1, epoch)\n save_model(model, os.path.join(args.work_dir, \"saved_models\", \"epoch_%d.pth\" % epoch))\n\n if args.val:\n model.eval() # Set model to evaluate mode\n epoch_start_time = time.time()\n epoch_preds = []\n epoch_labels = []\n epoch_loss = 0.\n\n for step, (token_ids_batch, labels, pos_idx_batch, media_batch) in enumerate(val_loader):\n batch_start_time = time.time()\n epoch_labels += list(labels.numpy())\n labels = labels.to(device)\n token_ids_batch = token_ids_batch.to(device)\n pos_idx_batch = pos_idx_batch.to(device)\n media_batch = media_batch.to(device)\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(False):\n start = time.time()\n outputs = model(token_ids_batch, pos_idx_batch, media_batch)\n # print(\"batch speed\", time.time() - start)\n _, preds = torch.max(outputs, 1)\n epoch_preds += list(preds.cpu().numpy())\n loss = criterion(outputs, labels)\n\n # statistics\n epoch_loss += loss.item() * token_ids_batch.size(0)\n batch_elapsed_time = time.time() - batch_start_time\n\n if step >= 0 and (step + 1) % args.log_step_interval == 0:\n current_datetime = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')\n\n f1 = f1_score(labels.cpu().numpy(), preds.cpu().numpy(), average='macro')\n acc = accuracy_score(labels.cpu().numpy(), preds.cpu().numpy())\n\n print(\"[val-epoch:%d, step:%d/%d,%s] batch_elapsed: %f\" %\n (epoch, step, len(val_loader), current_datetime, batch_elapsed_time))\n print(\"loss: %f, acc: %f, f1: %f\" % (loss.item(), acc, f1))\n\n epoch_loss = epoch_loss / len(val_loader.dataset)\n\n current_datetime = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')\n\n epoch_acc = accuracy_score(epoch_labels, epoch_preds)\n epoch_f1 = f1_score(epoch_labels, epoch_preds, average='macro')\n epoch_elapsed = time.time() - epoch_start_time\n print(\n \"[result_val-epoch:%d,%s] epoch_elapsed: %s, loss: %f, acc: %f, f1: %f\" % (\n epoch, current_datetime, 
epoch_elapsed, epoch_loss, epoch_acc, epoch_f1))\n\n cls_report = classification_report(epoch_labels, epoch_preds) # , target_names=classes)\n print(cls_report)\n epoch_cm = confusion_matrix(epoch_labels, epoch_preds)\n np_epoch_labels = np.unique(np.array(epoch_labels))\n np_epoch_labels.sort()\n log_confusion_matrix(val_writer, epoch, epoch_cm, np_epoch_labels)\n print(\"confusion matrix\")\n print(epoch_cm)\n # np.save(os.path.join(log_dir, \"confusion_matrix_%s_epoch_%d.npy\" % (val_name, epoch)), epoch_cm)\n epoch_cm = epoch_cm.astype('float') / epoch_cm.sum(axis=1)[:, np.newaxis]\n epoch_cm = epoch_cm.diagonal()\n print(\"each accuracies\")\n print(epoch_cm)\n\n val_writer.add_scalar('Loss/epoch', epoch_loss, epoch)\n val_writer.add_scalar('Acc/epoch', epoch_acc, epoch)\n val_writer.add_scalar('F1/epoch', epoch_f1, epoch)\n if epoch_f1 > best_f1 or (epoch_f1 >= best_f1 and epoch_acc > best_acc):\n best_f1 = epoch_f1\n best_acc = epoch_acc\n save_model(model, os.path.join(args.work_dir, \"saved_models\", \"best_model.pth\"))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-l', '--work_dir', type=str, default='./log')\n parser.add_argument('-c', '--checkpoint_path', type=str, default=None, required=False, help='checkpoint path')\n parser.add_argument('--train_file', default='/media/irelin/data_disk/dataset/dacon_summury/abstractive/train.jsonl',\n type=str)\n parser.add_argument('--val_ratio', type=float, default=0.1)\n parser.add_argument('-z', '--optimizer', type=str, default='adam') # adam')\n parser.add_argument('--scheduler', type=str, default='cosine') # cosine, step')\n parser.add_argument('--lr_restart_step', type=int, default=1)\n parser.add_argument('-e', '--num_epochs', type=int, default=100)\n parser.add_argument('--log_step_interval', type=int, default=100)\n\n parser.add_argument('--train_batch_size', type=int, default=32)\n parser.add_argument('--val_batch_size', type=int, default=64)\n parser.add_argument('-w', '--num_workers', type=int, default=8)\n\n parser.add_argument('--max_token_cnt', type=int, default=300)\n\n parser.add_argument('--use_bert_sum_words', action='store_true', default=False)\n parser.add_argument('--use_media', action='store_true', default=False)\n parser.add_argument('--use_pos', action='store_true', default=False)\n parser.add_argument('--simple_model', action='store_true', default=False)\n parser.add_argument('--dim_feedforward', type=int, default=1024)\n parser.add_argument('--dropout', type=float, default=0.1)\n\n parser.add_argument('--word_dropout_prob', type=float, default=0.0)\n parser.add_argument('--max_word_dropout_ratio', type=float, default=0.0)\n parser.add_argument('--lr', type=float, default=0.001)\n parser.add_argument('--lr_decay_gamma', type=float, default=0.9)\n parser.add_argument('-d', '--weight_decay', type=float, default=1e-5)\n parser.add_argument('--label_smoothing', type=float, default=0.0)\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('-t', '--train', default=False, action=\"store_true\")\n parser.add_argument('-v', '--val', default=False, action=\"store_true\")\n\n parser.add_argument('--use_multi_class', default=False, action=\"store_true\")\n\n parser.add_argument('--use_all_train', default=False, action=\"store_true\")\n parser.add_argument('--train_val_data', default=False, action=\"store_true\")\n parser.add_argument('--data_parallel', default=False, action=\"store_true\")\n\n parser.add_argument('--train_pin_memory', default=False, 
action=\"store_true\")\n parser.add_argument('--val_pin_memory', default=False, action=\"store_true\")\n parser.add_argument('--use_benchmark', default=False, action=\"store_true\")\n parser.add_argument('--nesterov', default=False, action=\"store_true\")\n parser.add_argument('--freeze_bert', default=False, action=\"store_true\")\n\n args = parser.parse_args()\n\n for arg in vars(args):\n print(arg, getattr(args, arg))\n\n if args.seed:\n np.random.seed(args.seed)\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(args.seed)\n\n if args.use_benchmark:\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = True\n\n train(args)\n"
] |
[
[
"matplotlib.pyplot.imshow",
"torch.max",
"torch.nn.functional.nll_loss",
"torch.load",
"torch.autograd.set_detect_anomaly",
"torch.nn.utils.rnn.pad_sequence",
"torch.utils.data.DataLoader",
"sklearn.metrics.confusion_matrix",
"torch.set_grad_enabled",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"sklearn.metrics.f1_score",
"sklearn.metrics.classification_report",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.tight_layout",
"torch.tensor",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"torch.optim.lr_scheduler.OneCycleLR",
"torch.optim.lr_scheduler.StepLR",
"matplotlib.pyplot.title",
"torch.nn.DataParallel",
"torch.cuda.device_count",
"numpy.array",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
"torch.nn.functional.log_softmax",
"numpy.random.seed",
"torch.manual_seed",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"sklearn.metrics.accuracy_score"
]
] |
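A minimal sketch of the per-class accuracy step at the end of the validation loop above: row-normalize the confusion matrix, then take its diagonal. The labels and predictions here are made up for illustration.

```python
import numpy as np
from sklearn.metrics import confusion_matrix

# Illustrative labels/predictions (not from the script above)
y_true = [0, 0, 1, 1, 2, 2, 2]
y_pred = [0, 1, 1, 1, 2, 0, 2]

cm = confusion_matrix(y_true, y_pred)
# Row-normalize so each row sums to 1; the diagonal is then per-class accuracy
per_class = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print(per_class.diagonal())  # [0.5, 1.0, 0.666...]
```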
silviomori/udacity-deep-reinforcement-learning-nanodegree
|
[
"4b217ad39d119361fad84e1eaae6396432de341a"
] |
[
"dqn/exercise/model.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass QNetwork(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed, hidden_nodes=64):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n \"\"\"\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n \n \"*** YOUR CODE HERE ***\"\n self.fc1 = nn.Linear(state_size, hidden_nodes)\n self.fc2 = nn.Linear(hidden_nodes, hidden_nodes)\n self.fc3 = nn.Linear(hidden_nodes, action_size)\n \n def forward(self, state):\n \"\"\"Build a network that maps state -> action values.\"\"\"\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return self.fc3(x)"
] |
[
[
"torch.nn.Linear",
"torch.manual_seed"
]
] |
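A hypothetical usage sketch for the QNetwork above: a batch of states in, action-value estimates out. The import path and the state/action sizes are assumptions, not part of the repo.

```python
import torch
from model import QNetwork  # assumed import path for dqn/exercise/model.py

net = QNetwork(state_size=8, action_size=4, seed=0)  # sizes are illustrative
states = torch.rand(32, 8)   # a batch of 32 eight-dimensional states
q_values = net(states)       # action-value estimates, shape (32, 4)
print(q_values.shape)
```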
mnfienen/sfrmaker
|
[
"7e66d67d6cb0ad84fbb9994402f0baaf5b3fcd01"
] |
[
"sfrmaker/test/test_version.py"
] |
[
"\"\"\"\ncheck for consistancy in package version\n\"\"\"\nimport os\nfrom packaging import version\nimport pandas as pd\nimport sfrmaker\nimport pytest\n\n\ndef get_readme_version(project_root_path):\n readme_file = os.path.join(project_root_path, 'Readme.md')\n with open(readme_file) as src:\n for line in src:\n if '# version' in line.lower():\n version_info = version.parse(line.strip().split()[2])\n break\n return version_info\n\n\ndef get_changelog_version(project_root_path):\n file = os.path.join(project_root_path, 'docs/source/release-history.rst')\n with open(file) as src:\n for line in src:\n if line.strip().lower().startswith('version'):\n _, version_info, date = line.strip().split()\n version_info = version.parse(version_info)\n date = date.strip('()')\n try:\n pd.Timestamp(date)\n break\n except:\n continue\n return version_info\n\n\n#@pytest.mark.skipif(os.environ.get('GITHUB_ACTIONS') == 'true',\n# reason=(\"Clone depth of 1 doesn't allow last \"\n# \"release number to be detected.\"))\ndef test_version(project_root_path):\n version_info = version.parse(sfrmaker.__version__)\n readme_version = get_readme_version(project_root_path)\n changelog_version = get_changelog_version(project_root_path)\n assert version_info.base_version == changelog_version.base_version\n assert readme_version.major == version_info.major\n assert readme_version.minor == version_info.minor\n"
] |
[
[
"pandas.Timestamp"
]
] |
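A small sketch of the comparison logic in test_version above, using packaging.version on two made-up version strings.

```python
from packaging import version

pkg = version.parse("0.9.2")     # stands in for sfrmaker.__version__
readme = version.parse("0.9.1")  # stands in for the version parsed from Readme.md

# The test above requires matching major/minor components
assert pkg.major == readme.major and pkg.minor == readme.minor
print(pkg.base_version)  # '0.9.2'
```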
vishalbelsare/SLAPP3
|
[
"da187b771831aaaabaee16a26ad341db2e968104"
] |
[
"6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/$$slapp$$/graphicControl.py"
] |
[
"import sys\nimport os\nimport commonVar as common\n\ncommon.graphicStatus = \"\"\n\n\ndef checkRunningIn():\n try:\n __IPYTHON__\n return True\n except NameError:\n return False\n\n\ndef graphicControl():\n # IPython/Python\n IPython = checkRunningIn()\n\n # running in Python (not in IPython)\n if not IPython:\n\n # at http://matplotlib.org/users/shell.html\n # we read \"the python IDLE IDE is a Tkinter gui app that does not\n # support pylab interactive mode, regardless of backend\"\n\n if 'idlelib' in sys.modules:\n print((\n 'Running in IDLE, please start SLAPP using a terminal with\\n' +\n 'python runShell.py\\nexecuted being in SLAPP main folder'))\n os.sys.exit(1)\n\n elif 'spyderlib' in sys.modules:\n print((\n 'Running in Spyder, please start SLAPP using a terminal with\\n' +\n 'python runShell.py\\nexecuted being in SLAPP main folder'))\n os.sys.exit(1)\n\n else:\n common.graphicStatus = \"PythonViaTerminal\"\n print('SLAPP started from a terminal')\n\n # running in IPython\n if IPython:\n from IPython import get_ipython\n import matplotlib\n\n try:\n import tkinter # this is only a control, if tkinter is missing, the\n # use(\"TkAgg\") method will rise an error\n #matplotlib.use(\"TkAgg\") #commented for compatibility with matplotlib 3.3.2 outside Mac system\n except BaseException:\n print(\"Warning, missing tkinter: graphics will not work with\")\n print(\"Jupyter in terminal or QtConsole\")\n\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n\n # check if running in a plain terminal with IPython starting with the\n # 'ipython' command line OR in a jupyter notebook\n\n # IN 'ipython' command line in a terminal, but the case of using it via\n # Spyder, which is behaving as a Jupyter QtConsole (see below)\n if \"IPython.terminal.interactiveshell.TerminalInteractiveShell\" \\\n in str(get_ipython()):\n\n common.graphicStatus = \"PythonViaTerminal\"\n print('SLAPP started from a terminal')\n\n # IN a jupyter notebook OR in a Jupyter QtConsole (same effects)\n if \"zmqshell.ZMQInteractiveShell\" \\\n in str(get_ipython()):\n\n # running in IPython with magic '%matplotlib|%pylab' already set,\n # modified to '%matplotlib inline'\n if 'InlineBackendConfig' in get_ipython().config and \\\n \"backend_inline\" not in mpl.get_backend():\n print(\"SLAPP running with magic '%matplotlib|%pylab' already set\")\n\n get_ipython().magic(\"%matplotlib inline\")\n print(\"'%matplotlib inline' magic command NOW SET\")\n common.graphicStatus = \"%matplotlib inline\"\n\n # running in IPython with magic '%matplotlib inline|%pylab' already\n # set\n elif 'InlineBackendConfig' in get_ipython().config and \\\n \"backend_inline\" in mpl.get_backend():\n print(\n \"running with magic '%matplotlib inline|%pylab inline' already set\")\n common.graphicStatus = \"%matplotlib inline\"\n\n # running in IPython without any magic matplotlib already set\n else:\n print(\"SLAPP starting without any matplotlib magic command\")\n\n get_ipython().magic(\"%matplotlib inline\")\n\n print(\"'%matplotlib inline' magic command NOW SET\")\n common.graphicStatus = \"%matplotlib inline\"\n\n # size of the pictures within an IPython notebook\n width = 12\n height = 8\n try:\n width = common.width\n except BaseException:\n pass\n try:\n height = common.height\n except BaseException:\n pass\n # in inches, but ... 
on paper\n plt.rcParams['figure.figsize'] = width, height\n # and on the screen the effect is\n # related to the screen\n # and printer pixel density\n # suggested ratio 3/2\n\n\nif __name__ == \"__main__\":\n graphicControl()\n"
] |
[
[
"matplotlib.get_backend"
]
] |
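A standalone sketch of the shell-detection trick graphicControl relies on: the string form of get_ipython() distinguishes a terminal IPython from a Jupyter/QtConsole kernel.

```python
try:
    from IPython import get_ipython
    shell = str(get_ipython())
    if "TerminalInteractiveShell" in shell:
        print("running in terminal IPython")
    elif "ZMQInteractiveShell" in shell:
        print("running in a Jupyter notebook or QtConsole")
    else:
        print("plain Python")  # get_ipython() returns None outside IPython
except ImportError:
    print("IPython not installed")
```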
trungnt13/sisua
|
[
"27c01002aaac5b33947946a483bfb6678273bc83"
] |
[
"sisua/analysis/sc_metrics.py"
] |
[
"from __future__ import absolute_import, division, print_function\n\nfrom abc import ABCMeta, abstractmethod\nfrom collections import defaultdict\nfrom numbers import Number\nfrom typing import List, Union\n\nimport numpy as np\nimport tensorflow as tf\nfrom six import add_metaclass\nfrom tensorflow.python.keras.callbacks import Callback\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.distributions import Distribution\n\nfrom odin.bay.distributions import ZeroInflated\nfrom odin.utils import catch_warnings_ignore\nfrom sisua.analysis.imputation_benchmarks import (correlation_scores,\n imputation_mean_score,\n imputation_score,\n imputation_std_score)\nfrom sisua.analysis.latent_benchmarks import clustering_scores\nfrom sisua.data import SingleCellOMIC\nfrom sisua.models import SingleCellModel\nfrom sisua.models.base import _to_sco\n\n__all__ = [\n 'SingleCellMetric', 'NegativeLogLikelihood', 'ImputationError',\n 'CorrelationScores', 'ClusteringScores'\n]\n\n\ndef _preprocess_output_distribution(y_pred):\n r\"\"\" In case of zero inflated distribution, extract the underlying count\n distribution \"\"\"\n if isinstance(y_pred, tfd.Independent) and \\\n isinstance(y_pred.distribution, ZeroInflated):\n y_pred = tfd.Independent(\n y_pred.distribution.count_distribution,\n reinterpreted_batch_ndims=y_pred.reinterpreted_batch_ndims)\n return y_pred\n\n\ndef _to_binary(protein):\n labels = protein.X\n if 'X_prob' in protein.obsm:\n labels = protein.obsm['X_prob']\n elif 'X_bin' in protein.obsm:\n labels = protein.obsm['X_bin']\n if labels.ndim == 2:\n labels = np.argmax(labels, axis=1)\n elif labels.ndim > 2:\n raise RuntimeError(\"protein labels has %d dimensions, no support\" %\n labels.ndim)\n return labels\n\n\n_CORRUPTED_INPUTS = {}\n\n\n# ===========================================================================\n# Base class\n# ===========================================================================\n@add_metaclass(ABCMeta)\nclass SingleCellMetric(Callback):\n r\"\"\" Single cell metrics for evaluating the imputation and latent space\n during training\n\n Parameters\n ----------\n inputs : {`SingleCellOMIC`, `numpy.ndarray`}\n extras : None\n extras object (e.g. 
protein) used for calculating the metric\n sample_shape : `int` (default=`1`)\n number of MCMC samples for prediction\n batch_size : `int` (default=`64`)\n freq : `int` (default=`3`)\n frequency of evaluating the metric, some metrics are very computational\n intensive and could slow down the training progress significantly\n \"\"\"\n\n def __init__(self,\n inputs: Union[SingleCellOMIC, List[SingleCellOMIC], np.\n ndarray, List[np.ndarray], None] = None,\n extras=None,\n sample_shape=1,\n batch_size=64,\n freq=3,\n name=None,\n **kwargs):\n super(SingleCellMetric, self).__init__(**kwargs)\n self.sample_shape = sample_shape\n self.batch_size = batch_size\n self.inputs = inputs\n self.extras = extras\n self.freq = int(freq)\n self._name = name\n # store the last epoch that the metric was calculated\n self._last_epoch = 0\n assert self.freq > 0\n\n @property\n def name(self):\n return self.__class__.__name__.lower() if self._name is None else self._name\n\n def set_model(self, model: SingleCellModel):\n assert isinstance(\n model, SingleCellModel), \"This callback only support SingleCellModel\"\n self.model = model\n return self\n\n @abstractmethod\n def call(self, y_true: List[SingleCellOMIC], y_crpt: List[SingleCellOMIC],\n y_pred: List[Distribution], latents: List[Distribution], extras):\n raise NotImplementedError\n\n def __call__(self, inputs=None, sample_shape=None):\n if inputs is None:\n inputs = self.inputs\n if sample_shape is None:\n sample_shape = self.sample_shape\n model = self.model\n\n if not isinstance(inputs, (tuple, list)):\n inputs = [inputs]\n inputs = _to_sco(inputs, model.omic_outputs)\n if model.corruption_rate is not None:\n corruption_text = str(model.corruption_dist) + str(model.corruption_rate)\n inputs_corrupt = [\n (data.corrupt(corruption_rate=model.corruption_rate,\n corruption_dist=model.corruption_dist,\n inplace=False) \\\n if str(id(data)) + corruption_text not in _CORRUPTED_INPUTS else\n _CORRUPTED_INPUTS[str(id(data)) + corruption_text]) \\\n if idx == 0 else data\n for idx, data in enumerate(inputs)\n ]\n _CORRUPTED_INPUTS[str(id(inputs[0])) +\n corruption_text] = inputs_corrupt[0]\n else:\n inputs_corrupt = inputs\n\n outputs, latents = model.predict(inputs_corrupt,\n sample_shape=self.sample_shape,\n batch_size=self.batch_size,\n verbose=0,\n apply_corruption=False)\n if not isinstance(outputs, (tuple, list)):\n outputs = [outputs]\n if not isinstance(latents, (tuple, list)):\n latents = [latents]\n\n metrics = self.call(y_true=inputs,\n y_pred=outputs,\n y_crpt=inputs_corrupt,\n latents=latents,\n extras=self.extras)\n if metrics is None:\n metrics = {}\n elif tf.is_tensor(metrics) or \\\n isinstance(metrics, np.ndarray) or \\\n isinstance(metrics, Number):\n metrics = {self.name: metrics}\n assert isinstance(metrics, dict), \\\n \"Return metrics must be a dictionary mapping metric name to scalar value\"\n metrics = {\n i: j.numpy() if tf.is_tensor(j) else j for i, j in metrics.items()\n }\n return metrics\n\n def on_epoch_end(self, epoch, logs=None):\n \"\"\"Called at the end of an epoch.\n\n Subclasses should override for any actions to run. This function should only\n be called during TRAIN mode.\n\n Arguments:\n epoch: integer, index of epoch.\n logs: dict, metric results for this training epoch, and for the\n validation epoch if validation is performed. 
Validation result keys\n are prefixed with `val_`.\n \"\"\"\n if epoch % self.freq == 0 and logs is not None:\n self._last_epoch = epoch\n # calculating the metric\n try:\n metrics = self()\n except Exception as e:\n print(\"Error:\", e)\n metrics = {}\n # update the log\n for key, val in metrics.items():\n logs[key] = val\n logs[key + '_epoch'] = epoch\n\n def on_train_end(self, logs=None):\n if self.model.epochs != self._last_epoch:\n self._last_epoch = self.model.epochs\n # calculating the metric\n try:\n metrics = self()\n except Exception as e:\n print(\"Error:\", e)\n metrics = {}\n # update the log\n history = self.model.history.history\n for key, val in metrics.items():\n if key in history:\n history[key].append(val)\n history[key + '_epoch'].append(self._last_epoch)\n\n\n# ===========================================================================\n# Losses\n# ===========================================================================\nclass NegativeLogLikelihood(SingleCellMetric):\n \"\"\" Log likelihood metric\n\n Parameters\n ----------\n inputs : {`SingleCellOMIC`, `numpy.ndarray`}\n extras : None\n extras object (e.g. protein) used for calculating the metric\n sample_shape : `int` (default=`1`)\n number of MCMC samples for prediction\n batch_size : `int` (default=`64`)\n freq : `int` (default=`3`)\n frequency of evaluating the metric, some metrics are very computational\n intensive and could slow down the training progress significantly\n\n Returns\n -------\n dict:\n 'nllk%d' for each tuple of input and output\n \"\"\"\n\n def call(self, y_true: List[SingleCellOMIC], y_crpt: List[SingleCellOMIC],\n y_pred: List[Distribution], latents: List[Distribution], extras):\n nllk = {}\n for idx, (t, p) in enumerate(zip(y_true, y_pred)):\n nllk['nllk%d' % idx] = -tf.reduce_mean(p.log_prob(t.X))\n return nllk\n\n\nclass ImputationError(SingleCellMetric):\n \"\"\" Imputation error\n\n Parameters\n ----------\n inputs : {`SingleCellOMIC`, `numpy.ndarray`}\n extras : None\n extras object (e.g. 
protein) used for calculating the metric\n sample_shape : `int` (default=`1`)\n number of MCMC samples for prediction\n batch_size : `int` (default=`64`)\n freq : `int` (default=`3`)\n frequency of evaluating the metric, some metrics are very computational\n intensive and could slow down the training progress significantly\n\n Return\n ------\n dict :\n 'imp_med'\n 'imp_mean'\n \"\"\"\n\n def call(self, y_true: List[SingleCellOMIC], y_crpt: List[SingleCellOMIC],\n y_pred: List[Distribution], latents: List[Distribution], extras):\n # only care about the first data input\n y_true = y_true[0]\n y_crpt = y_crpt[0]\n y_pred = y_pred[0]\n\n y_pred = _preprocess_output_distribution(y_pred)\n y_pred = y_pred.mean()\n if y_pred.shape.ndims == 3:\n y_pred = tf.reduce_mean(y_pred, axis=0)\n return {\n 'imp_med':\n imputation_score(original=y_true.X, imputed=y_pred),\n 'imp_mean':\n imputation_mean_score(original=y_true.X,\n corrupted=y_crpt.X,\n imputed=y_pred)\n }\n\n\nclass CorrelationScores(SingleCellMetric):\n \"\"\" (1 - correlation_coefficients) to represent the loss\n\n Parameters\n ----------\n inputs : {`SingleCellOMIC`, `numpy.ndarray`}\n extras : {`SingleCellOMIC`, `numpy.ndarray`}\n the protein array\n sample_shape : `int` (default=`1`)\n number of MCMC samples for prediction\n batch_size : `int` (default=`64`)\n freq : `int` (default=`3`)\n frequency of evaluating the metric, some metrics are very computational\n intensive and could slow down the training progress significantly\n\n Returns\n -------\n dict :\n 'pearson_mean': np.mean(pearson),\n 'spearman_mean': np.mean(spearman),\n 'pearson_med': np.median(pearson),\n 'spearman_med': np.median(spearman),\n\n Example\n -------\n >>> CorrelationScores(extras=y_train, freq=1)\n\n \"\"\"\n\n def call(self, y_true: List[SingleCellOMIC], y_crpt: List[SingleCellOMIC],\n y_pred: List[Distribution], latents: List[Distribution], extras):\n y_true = y_true[0]\n y_crpt = y_crpt[0]\n y_pred = y_pred[0]\n assert isinstance(extras, SingleCellOMIC), \\\n \"protein data must be provided as extras in form of SingleCellOMIC\"\n protein = extras[y_true.indices]\n y_true.assert_matching_cells(protein)\n\n y_pred = _preprocess_output_distribution(y_pred)\n y_pred = y_pred.mean()\n if y_pred.shape.ndims == 3:\n y_pred = tf.reduce_mean(y_pred, axis=0)\n\n scores = correlation_scores(X=y_pred,\n y=protein.X,\n gene_name=y_true.var['geneid'],\n protein_name=protein.var['protid'],\n return_series=False)\n if len(scores) == 0:\n return {}\n spearman = []\n pearson = []\n for _, (s, p) in scores.items():\n spearman.append(-s)\n pearson.append(-p)\n return {\n 'pearson_mean': np.mean(pearson),\n 'spearman_mean': np.mean(spearman),\n 'pearson_med': np.median(pearson),\n 'spearman_med': np.median(spearman),\n }\n\n\nclass ClusteringScores(SingleCellMetric):\n \"\"\"\n Parameters\n ----------\n inputs : {`SingleCellOMIC`, `numpy.ndarray`}\n extras : {`SingleCellOMIC`, `numpy.ndarray`}\n the protein array\n sample_shape : `int` (default=`1`)\n number of MCMC samples for prediction\n batch_size : `int` (default=`64`)\n freq : `int` (default=`3`)\n frequency of evaluating the metric, some metrics are very computational\n intensive and could slow down the training progress significantly\n\n Returns\n -------\n dict :\n silhouette_score (higher is better, best is 1, worst is -1)\n adjusted_rand_score (higher is better)\n normalized_mutual_info_score (higher is better)\n unsupervised_clustering_accuracy (higher is better)\n\n Example\n -------\n >>> 
ClusteringScores(extras=y_train, freq=1)\n \"\"\"\n\n def call(self, y_true: List[SingleCellOMIC], y_crpt: List[SingleCellOMIC],\n y_pred: List[Distribution], latents: List[Distribution], extras):\n y_true = y_true[0]\n y_crpt = y_crpt[0]\n y_pred = y_pred[0]\n assert isinstance(extras, SingleCellOMIC), \\\n \"protein data must be provided as extras in form of SingleCellOMIC\"\n protein = extras[y_true.indices]\n y_true.assert_matching_cells(protein)\n labels = _to_binary(protein)\n\n scores = {}\n scores_avg = defaultdict(list)\n # support multiple latents also\n for idx, z in enumerate(latents):\n for key, val in clustering_scores(latent=z.mean().numpy(),\n labels=labels,\n n_labels=protein.var.shape[0]).items():\n # since all score higher is better, we want them as loss value\n val = -val\n scores['%s_%d' % (key, idx)] = val\n scores_avg[key].append(val)\n # average scores\n scores.update({i: np.mean(j) for i, j in scores_avg.items()})\n return scores\n"
] |
[
[
"tensorflow.is_tensor",
"tensorflow.reduce_mean",
"numpy.median",
"numpy.argmax",
"numpy.mean"
]
] |
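A stripped-down sketch of the scheduling pattern in SingleCellMetric.on_epoch_end above: a Keras callback that only computes its metric every `freq` epochs and writes the result plus the epoch index into `logs`. The class name and the metric value are placeholders.

```python
import tensorflow as tf

class EveryFreqEpochs(tf.keras.callbacks.Callback):  # hypothetical name
    def __init__(self, freq=3):
        super().__init__()
        self.freq = freq

    def on_epoch_end(self, epoch, logs=None):
        if epoch % self.freq == 0 and logs is not None:
            logs["my_metric"] = 0.0          # placeholder for a real score
            logs["my_metric_epoch"] = epoch  # same bookkeeping as above
```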
gs512/inferelator
|
[
"391223bd8d07476db72c4c7b1cd5fb5bf7494b9c"
] |
[
"inferelator/tests/test_crossvalidation_wrapper.py"
] |
[
"import unittest\nimport pandas as pd\nimport types\nimport numpy as np\nimport tempfile\nimport os\n\nfrom inferelator import crossvalidation_workflow\nfrom inferelator.workflow import WorkflowBase\n\nfake_metadata = pd.DataFrame({\"CONST\": [\"A\"] * 1000,\n \"VAR\": [\"A\"] * 100 + [\"B\"] * 200 + [\"C\"] * 1 + [\"D\"] * 99 + [\"E\"] * 500 + [\"F\"] * 100})\n\nTEMP_DIR = tempfile.gettempdir()\nTEMP_DIR_1 = os.path.join(TEMP_DIR, \"test1\")\n\n\nclass FakeResult(object):\n\n score=1\n name=\"NAME\"\n\n\nclass FakeWorkflow(WorkflowBase):\n\n seed = 10\n\n def __init__(self):\n self.meta_data = fake_metadata.copy()\n\n def run(self):\n return FakeResult()\n\n def get_data(self):\n return \"GotData\"\n\n\ndef fake_class_method(slf):\n pass\n\n\nclass FakeWriter(object):\n\n csv_lil = None\n\n def __init__(self, *args, **kwargs):\n pass\n\n def writerow(self, line, **kwargs):\n if self.csv_lil is None:\n self.csv_lil = []\n self.csv_lil.append(line)\n\n def close(self):\n pass\n\n\nclass TestCV(unittest.TestCase):\n\n def setUp(self):\n wkf = FakeWorkflow()\n wkf.output_dir = TEMP_DIR\n self.cv = crossvalidation_workflow.CrossValidationManager(wkf)\n self.cv._csv_writer_object = FakeWriter\n self.cv._open_csv_handle = types.MethodType(fake_class_method, self.cv)\n self.cv._create_output_path = types.MethodType(fake_class_method, self.cv)\n\n\nclass TestCVSetup(TestCV):\n\n def test_dropin_set(self):\n self.assertIsNone(self.cv.dropin_column)\n\n self.cv.add_grouping_dropin(\"VAR\", group_size=100, seed=50)\n\n self.assertEqual(self.cv.dropin_column, \"VAR\")\n self.assertEqual(self.cv.dropin_max_size, 100)\n self.assertEqual(self.cv.dropin_seed, 50)\n\n def test_dropout_set(self):\n self.assertIsNone(self.cv.dropout_column)\n\n self.cv.add_grouping_dropout(\"VAR\", group_size=100, seed=50)\n\n self.assertEqual(self.cv.dropout_column, \"VAR\")\n self.assertEqual(self.cv.dropout_max_size, 100)\n self.assertEqual(self.cv.dropout_seed, 50)\n\n def test_size_sample_set(self):\n self.assertIsNone(self.cv.size_sample_vector)\n\n self.cv.add_size_subsampling([0.1, 0.2, 1], stratified_column_name=\"VAR\", seed=50)\n\n self.assertListEqual(self.cv.size_sample_vector, [0.1, 0.2, 1])\n self.assertEqual(self.cv.size_sample_stratified_column, \"VAR\")\n self.assertEqual(self.cv.size_sample_seed, 50)\n\n def test_add_grid_param(self):\n self.assertIsNone(self.cv.grid_params)\n self.assertIsNone(self.cv.grid_param_values)\n\n self.cv.add_gridsearch_parameter(\"seed\", [1, 2, 3])\n self.cv.add_gridsearch_parameter(\"test\", [3, 4, 5])\n\n self.assertListEqual(self.cv.grid_params, [\"seed\", \"test\"])\n self.assertListEqual(self.cv.grid_param_values['seed'], [1, 2, 3])\n self.assertListEqual(self.cv.grid_param_values['test'], [3, 4, 5])\n\n def test_load_initial(self):\n\n self.assertEqual(self.cv.workflow.get_data(), \"GotData\")\n self.cv._initial_data_load()\n self.assertIsNone(self.cv.workflow.get_data())\n\n def test_get_copy(self):\n\n copied_work = self.cv._get_workflow_copy()\n copied_work.seed = 50\n\n self.assertEqual(self.cv.workflow.seed, 10)\n self.assertEqual(copied_work.seed, 50)\n\n def test_csv(self):\n self.cv.add_gridsearch_parameter(\"seed\", [1, 2, 3])\n self.cv.add_gridsearch_parameter(\"test\", [3, 4, 5])\n\n self.assertIsNone(self.cv._csv_header)\n self.cv._create_writer()\n self.assertListEqual(self.cv._csv_header, [\"seed\", \"test\", \"Test\", \"Value\", \"Num_Obs\", \"aupr\"])\n\n def test_validate_params(self):\n self.cv.add_gridsearch_parameter(\"seed\", [1, 2, 3])\n 
self.cv._check_grid_search_params_exist()\n\n self.cv.add_gridsearch_parameter(\"test\", [3, 4, 5])\n with self.assertRaises(ValueError):\n self.cv._check_grid_search_params_exist()\n\n def test_validate_meta_cols(self):\n self.cv.dropin_column = \"VAR\"\n self.cv.dropout_column = \"CONST\"\n\n self.cv._check_metadata()\n\n self.cv.size_sample_stratified_column = \"NOTREAL\"\n with self.assertRaises(ValueError):\n self.cv._check_metadata()\n\n\nclass TestCVProperties(TestCV):\n\n def test_output_dir_cv(self):\n self.assertEqual(TEMP_DIR, self.cv.output_dir)\n self.cv.append_to_path('output_dir', 'test1')\n self.assertEqual(TEMP_DIR_1, self.cv.output_dir)\n\n def test_set_output_dir_cv(self):\n self.cv.output_dir = TEMP_DIR_1\n self.assertEqual(TEMP_DIR_1, self.cv.workflow.output_dir)\n\n def test_input_dir_cv(self):\n self.cv.workflow.input_dir = TEMP_DIR\n self.assertEqual(TEMP_DIR, self.cv.input_dir)\n\n def test_set_input_dir_cv(self):\n self.cv.input_dir = TEMP_DIR_1\n self.assertEqual(TEMP_DIR_1, self.cv.workflow.input_dir)\n\n def test_harmonize(self):\n self.cv.workflow.output_dir = None\n self.cv.workflow.input_dir = None\n\n self.cv._baseline_input_dir = TEMP_DIR\n self.cv._baseline_output_dir = TEMP_DIR_1\n\n self.cv._harmonize_paths()\n\n self.assertEqual(TEMP_DIR_1, self.cv.workflow.output_dir)\n self.assertEqual(TEMP_DIR, self.cv.workflow.input_dir)\n\n\nclass TestCVSampleIndexing(TestCV):\n\n def test_group_index_masker(self):\n self.assertEqual(crossvalidation_workflow.group_index(fake_metadata, \"CONST\", \"A\").sum(), 1000)\n self.assertEqual(crossvalidation_workflow.group_index(fake_metadata, \"CONST\", \"B\").sum(), 0)\n self.assertEqual(crossvalidation_workflow.group_index(fake_metadata, \"CONST\", \"A\", max_size=100).sum(), 100)\n self.assertEqual(crossvalidation_workflow.group_index(fake_metadata, \"CONST\", \"A\", size_ratio=0.5).sum(), 500)\n\n rgen = np.random.RandomState(10)\n idx_1 = crossvalidation_workflow.group_index(fake_metadata, \"CONST\", \"A\", size_ratio=0.1, rgen=rgen)\n idx_2 = crossvalidation_workflow.group_index(fake_metadata, \"CONST\", \"A\", size_ratio=0.1, rgen=rgen)\n\n self.assertEqual(idx_1.sum(), 100)\n self.assertEqual(idx_2.sum(), 100)\n self.assertEqual((idx_1 & idx_2).sum(), 8)\n\n def test_grid_search(self):\n self.cv.add_gridsearch_parameter(\"seed\", [1, 2, 3])\n self.cv._create_writer()\n self.cv._grid_search()\n\n self.assertEqual(len(self.cv._csv_writer.csv_lil), 4)\n\n def test_group_dropout_no_limit(self):\n\n def test_grid_search(slf, test=None, value=None, mask_function=None):\n self.assertEqual(test, \"dropout\")\n self.assertTrue(value in slf.workflow.meta_data[slf.dropout_column].unique())\n self.assertListEqual((slf.workflow.meta_data[slf.dropout_column] != value).tolist(),\n mask_function().tolist())\n\n self.cv._grid_search = types.MethodType(test_grid_search, self.cv)\n\n self.cv.dropout_column = \"VAR\"\n self.cv.dropout_max_size = None\n self.cv.dropout_seed = 50\n\n self.cv._dropout_cv()\n\n def test_group_dropout_limit(self):\n\n def test_grid_search(slf, test=None, value=None, mask_function=None):\n self.assertEqual(test, \"dropout\")\n uniques = slf.workflow.meta_data[slf.dropout_column].unique()\n\n mask = mask_function()\n unique_counts = slf.workflow.meta_data[slf.dropout_column].value_counts()\n unique_counts[unique_counts > slf.dropout_max_size] = slf.dropout_max_size\n\n if value == \"all\":\n self.assertEqual(unique_counts.sum(), mask.sum())\n else:\n self.assertTrue(value in uniques)\n unique_counts[value] 
= 0\n self.assertEqual(unique_counts.sum(), mask.sum())\n self.assertEqual(sum((self.cv.workflow.meta_data[self.cv.dropout_column] == value)[mask]), 0)\n\n self.cv._grid_search = types.MethodType(test_grid_search, self.cv)\n\n self.cv.dropout_column = \"VAR\"\n self.cv.dropout_max_size = 50\n self.cv.dropout_seed = 50\n\n self.cv._dropout_cv()\n\n def test_group_dropin_no_limit(self):\n\n def test_grid_search(slf, test=None, value=None, mask_function=None):\n self.assertEqual(test, \"dropin\")\n self.assertTrue(value in slf.workflow.meta_data[slf.dropin_column].unique())\n self.assertListEqual((slf.workflow.meta_data[slf.dropin_column] == value).tolist(),\n mask_function().tolist())\n\n self.cv._grid_search = types.MethodType(test_grid_search, self.cv)\n\n self.cv.dropin_column = \"VAR\"\n self.cv.dropin_max_size = None\n self.cv.dropin_seed = 50\n\n self.cv._dropin_cv()\n\n def test_group_dropin_limit(self):\n\n def test_grid_search(slf, test=None, value=None, mask_function=None):\n self.assertEqual(test, \"dropin\")\n\n mask = mask_function()\n\n if value == \"all\":\n self.assertEqual(mask.sum(), slf.dropin_max_size)\n else:\n self.assertTrue(value in slf.workflow.meta_data[slf.dropin_column].unique())\n\n self.assertEqual(min((slf.workflow.meta_data[slf.dropin_column] == value).sum(),\n slf.dropin_max_size),\n mask.sum())\n\n self.cv._grid_search = types.MethodType(test_grid_search, self.cv)\n\n self.cv.dropin_column = \"VAR\"\n self.cv.dropin_max_size = 25\n self.cv.dropin_seed = 50\n\n self.cv._dropin_cv()\n\n def test_size_sampling_no_strat(self):\n\n def test_grid_search(slf, test=None, value=None, mask_function=None):\n self.assertEqual(test, \"size\")\n self.assertTrue(value == \"0.5\")\n\n self.assertEqual(max(int(slf.workflow.meta_data.shape[0] * float(value)), 1),\n mask_function().sum())\n\n self.cv._grid_search = types.MethodType(test_grid_search, self.cv)\n\n self.cv.size_sample_vector = [0.5]\n self.cv.size_sample_seed = 50\n\n self.cv._size_cv()\n\n def test_size_sampling_strat(self):\n\n def test_grid_search(slf, test=None, value=None, mask_function=None):\n self.assertEqual(test, \"size\")\n self.assertTrue(value == \"0.5\")\n\n mask = mask_function()\n for g in slf.workflow.meta_data[slf.size_sample_stratified_column].unique():\n is_group = slf.workflow.meta_data[slf.size_sample_stratified_column] == g\n self.assertEqual(max(int(is_group.sum() * float(value)), 1),\n mask[is_group].sum())\n\n self.cv._grid_search = types.MethodType(test_grid_search, self.cv)\n\n self.cv.size_sample_vector = [0.5]\n self.cv.size_sample_seed = 50\n self.cv.size_sample_stratified_column = \"VAR\"\n\n self.cv._size_cv()\n"
] |
[
[
"numpy.random.RandomState",
"pandas.DataFrame"
]
] |
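The implementation of crossvalidation_workflow.group_index is not shown here, but the assertions in TestCVSampleIndexing pin down its contract. The following is a hypothetical reimplementation satisfying those assertions: mask rows by group value, then cap the mask by max_size or size_ratio via random subsampling.

```python
import numpy as np
import pandas as pd

def group_index(meta, col, value, max_size=None, size_ratio=None, rgen=None):
    """Hypothetical stand-in matching the assertions in the tests above."""
    mask = (meta[col] == value).values
    n = mask.sum()
    target = None
    if size_ratio is not None:
        target = int(n * size_ratio)
    if max_size is not None:
        target = min(n, max_size)
    if target is not None and target < n:
        rgen = rgen if rgen is not None else np.random.RandomState()
        keep = rgen.choice(np.where(mask)[0], size=target, replace=False)
        mask = np.zeros(len(mask), dtype=bool)
        mask[keep] = True
    return mask

meta = pd.DataFrame({"CONST": ["A"] * 1000})
print(group_index(meta, "CONST", "A").sum())                  # 1000
print(group_index(meta, "CONST", "A", max_size=100).sum())    # 100
print(group_index(meta, "CONST", "A", size_ratio=0.5).sum())  # 500
```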
buzzCraft/RobTek-prosjekt
|
[
"12edd1d83a621a10d74421e51cf8dbf861ee9abe"
] |
[
"pyueye_utils.py"
] |
[
"#!/usr/bin/env python\n\n#------------------------------------------------------------------------------\n# PyuEye example - utilities modul\n#\n# Copyright (c) 2017 by IDS Imaging Development Systems GmbH.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#------------------------------------------------------------------------------\n\nfrom pyueye import ueye\nfrom threading import Thread\nfrom ctypes import byref\n\ndef get_bits_per_pixel(color_mode):\n \"\"\"\n returns the number of bits per pixel for the given color mode\n raises exception if color mode is not is not in dict\n \"\"\"\n \n return {\n ueye.IS_CM_SENSOR_RAW8: 8,\n ueye.IS_CM_SENSOR_RAW10: 16,\n ueye.IS_CM_SENSOR_RAW12: 16,\n ueye.IS_CM_SENSOR_RAW16: 16,\n ueye.IS_CM_MONO8: 8,\n ueye.IS_CM_RGB8_PACKED: 24,\n ueye.IS_CM_BGR8_PACKED: 24,\n ueye.IS_CM_RGBA8_PACKED: 32,\n ueye.IS_CM_BGRA8_PACKED: 32,\n ueye.IS_CM_BGR10_PACKED: 32,\n ueye.IS_CM_RGB10_PACKED: 32,\n ueye.IS_CM_BGRA12_UNPACKED: 64,\n ueye.IS_CM_BGR12_UNPACKED: 48,\n ueye.IS_CM_BGRY8_PACKED: 32,\n ueye.IS_CM_BGR565_PACKED: 16,\n ueye.IS_CM_BGR5_PACKED: 16,\n ueye.IS_CM_UYVY_PACKED: 16,\n ueye.IS_CM_UYVY_MONO_PACKED: 16,\n ueye.IS_CM_UYVY_BAYER_PACKED: 16,\n ueye.IS_CM_CBYCRY_PACKED: 16, \n } [color_mode]\n\n\nclass uEyeException(Exception):\n def __init__(self, error_code):\n self.error_code = error_code\n def __str__(self):\n return \"Err: \" + str(self.error_code)\n\n\ndef check(ret):\n if ret != ueye.IS_SUCCESS:\n raise uEyeException(ret)\n\n\nclass ImageBuffer:\n def __init__(self):\n self.mem_ptr = ueye.c_mem_p()\n self.mem_id = ueye.int()\n\n\nclass MemoryInfo:\n def __init__(self, h_cam, img_buff):\n self.x = ueye.int()\n self.y = ueye.int()\n self.bits = ueye.int()\n self.pitch = ueye.int()\n self.img_buff = img_buff\n\n rect_aoi = ueye.IS_RECT()\n check(ueye.is_AOI(h_cam,\n ueye.IS_AOI_IMAGE_GET_AOI, rect_aoi, ueye.sizeof(rect_aoi)))\n self.width = rect_aoi.s32Width.value\n self.height = rect_aoi.s32Height.value\n \n check(ueye.is_InquireImageMem(h_cam,\n self.img_buff.mem_ptr,\n self.img_buff.mem_id, self.x, 
self.y, self.bits, self.pitch))\n\n\nclass ImageData:\n def __init__(self, h_cam, img_buff):\n self.h_cam = h_cam\n self.img_buff = img_buff\n self.mem_info = MemoryInfo(h_cam, img_buff)\n self.color_mode = ueye.is_SetColorMode(h_cam, ueye.IS_GET_COLOR_MODE)\n self.bits_per_pixel = get_bits_per_pixel(self.color_mode)\n self.array = ueye.get_data(self.img_buff.mem_ptr,\n self.mem_info.width,\n self.mem_info.height,\n self.mem_info.bits,\n self.mem_info.pitch,\n True)\n\n def as_1d_image(self): \n channels = int((7 + self.bits_per_pixel) / 8)\n import numpy\n if channels > 1:\n return numpy.reshape(self.array, (self.mem_info.height, self.mem_info.width, channels))\n else:\n return numpy.reshape(self.array, (self.mem_info.height, self.mem_info.width))\n\n\n def unlock(self):\n check(ueye.is_UnlockSeqBuf(self.h_cam, self.img_buff.mem_id, self.img_buff.mem_ptr))\n\nclass Rect:\n def __init__(self, x=0, y=0, width=0, height=0):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n\n\n\nclass FrameThread(Thread):\n def __init__(self, cam, views=None, copy=True):\n super(FrameThread, self).__init__()\n self.timeout = 1000\n self.cam = cam\n self.running = True\n self.views = views\n self.copy = copy\n\n def run(self):\n while self.running:\n img_buffer = ImageBuffer()\n ret = ueye.is_WaitForNextImage(self.cam.handle(),\n self.timeout,\n img_buffer.mem_ptr,\n img_buffer.mem_id)\n if ret == ueye.IS_SUCCESS:\n self.notify(ImageData(self.cam.handle(), img_buffer))\n\n #break\n\n def notify(self, image_data):\n if self.views:\n if type(self.views) is not list:\n self.views = [self.views]\n for view in self.views:\n view.handle(image_data)\n\n def stop(self):\n self.cam.stop_video()\n self.running = False\n"
] |
[
[
"numpy.reshape"
]
] |
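A hardware-free sketch of ImageData.as_1d_image above: derive the channel count from bits-per-pixel with the same round-up rule, then reshape a flat buffer. The zero-filled buffer stands in for a real uEye frame.

```python
import numpy as np

bits_per_pixel = 24                       # e.g. ueye.IS_CM_BGR8_PACKED
channels = int((7 + bits_per_pixel) / 8)  # round up to whole bytes -> 3
height, width = 4, 6
flat = np.zeros(height * width * channels, dtype=np.uint8)  # fake frame

if channels > 1:
    img = np.reshape(flat, (height, width, channels))
else:
    img = np.reshape(flat, (height, width))
print(img.shape)  # (4, 6, 3)
```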
sh8/kornia
|
[
"b340559dea9c2e01a01fb6062511a23cd175fbde"
] |
[
"test/geometry/test_ransac.py"
] |
[
"import random\n\nimport pytest\nimport torch\nfrom torch.autograd import gradcheck\n\nimport kornia\nimport kornia.testing as utils\nfrom kornia.geometry import RANSAC\nfrom kornia.testing import assert_close\n\n\nclass TestRANSACHomography:\n def test_smoke(self, device, dtype):\n points1 = torch.rand(4, 2, device=device, dtype=dtype)\n points2 = torch.rand(4, 2, device=device, dtype=dtype)\n ransac = RANSAC('homography').to(device=device, dtype=dtype)\n H, inliers = ransac(points1, points2)\n assert H.shape == (3, 3)\n\n @pytest.mark.xfail(reason=\"might slightly and randomly imprecise due to RANSAC randomness\")\n def test_dirty_points(self, device, dtype):\n # generate input data\n\n H = torch.eye(3, dtype=dtype, device=device)\n H[:2] = H[:2] + 0.1 * torch.rand_like(H[:2])\n H[2:, :2] = H[2:, :2] + 0.001 * torch.rand_like(H[2:, :2])\n\n points_src = 100.0 * torch.rand(1, 20, 2, device=device, dtype=dtype)\n points_dst = kornia.geometry.transform_points(H[None], points_src)\n\n # making last point an outlier\n points_dst[:, -1, :] += 800\n ransac = RANSAC('homography', inl_th=0.5, max_iter=20).to(device=device, dtype=dtype)\n # compute transform from source to target\n dst_homo_src, inliers = ransac(points_src[0], points_dst[0])\n\n assert_close(\n kornia.geometry.transform_points(dst_homo_src[None], points_src[:, :-1]),\n points_dst[:, :-1],\n rtol=1e-3,\n atol=1e-3)\n\n @pytest.mark.jit\n @pytest.mark.skip(reason=\"find_homography_dlt is using try/except block\")\n def test_jit(self, device, dtype):\n points1 = torch.rand(4, 2, device=device, dtype=dtype)\n points2 = torch.rand(4, 2, device=device, dtype=dtype)\n model = RANSAC('homography').to(device=device, dtype=dtype)\n model_jit = torch.jit.script(RANSAC('homography').to(device=device,\n dtype=dtype))\n assert_close(model(points1, points2)[0],\n model_jit(points1, points2)[0],\n rtol=1e-4,\n atol=1e-4)\n\n\nclass TestRANSACFundamental:\n def test_smoke(self, device, dtype):\n points1 = torch.rand(8, 2, device=device, dtype=dtype)\n points2 = torch.rand(8, 2, device=device, dtype=dtype)\n ransac = RANSAC('fundamental').to(device=device, dtype=dtype)\n Fm, inliers = ransac(points1, points2)\n assert Fm.shape == (3, 3)\n\n def test_dirty_points(self, device, dtype):\n points1 = torch.tensor(\n [\n [\n [0.8569, 0.5982],\n [0.0059, 0.9649],\n [0.1968, 0.8846],\n [0.6084, 0.3467],\n [0.9633, 0.5274],\n [0.8941, 0.8939],\n [0.0863, 0.5133],\n [0.2645, 0.8882]\n ]\n ],\n device=device,\n dtype=dtype,\n )\n\n points2 = torch.tensor(\n [\n [\n [0.0928, 0.3013],\n [0.0989, 0.9649],\n [0.0341, 0.4827],\n [0.8294, 0.4469],\n [0.2230, 0.2998],\n [0.1722, 0.8182],\n [0.5264, 0.8869],\n [0.8908, 0.1233]\n ]\n ],\n device=device,\n dtype=dtype,\n )\n # generated with OpenCV using above points\n # import cv2\n # Fm_expected, _ = cv2.findFundamentalMat(\n # points1.detach().numpy().reshape(-1, 1, 2),\n # points2.detach().numpy().reshape(-1, 1, 2), cv2.FM_8POINT)\n\n Fm_expected = torch.tensor(\n [\n [\n [0.2019, 0.6860, -0.6610],\n [0.5520, 0.8154, -0.8044],\n [-0.5002, -1.0254, 1.]\n ]\n ],\n device=device,\n dtype=dtype,\n )\n ransac = RANSAC('fundamental',\n max_iter=1,\n inl_th=1.0).to(device=device, dtype=dtype)\n F_mat, inliers = ransac(points1[0], points2[0])\n assert_close(F_mat, Fm_expected[0], rtol=1e-3, atol=1e-3)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n points1 = torch.rand(8, 2, device=device, dtype=dtype)\n points2 = torch.rand(8, 2, device=device, dtype=dtype)\n model = RANSAC('fundamental').to(device=device, 
dtype=dtype)\n model_jit = torch.jit.script(RANSAC('fundamental').to(device=device,\n dtype=dtype))\n assert_close(model(points1, points2)[0],\n model_jit(points1, points2)[0],\n rtol=1e-3,\n atol=1e-3)\n\n @pytest.mark.skip(reason=\"RANSAC is random algorithm, so Jacobian is not defined\")\n def test_gradcheck(self, device):\n points1 = torch.rand(8, 2, device=device, dtype=torch.float64, requires_grad=True)\n points2 = torch.rand(8, 2, device=device, dtype=torch.float64)\n model = RANSAC('fundamental').to(device=device, dtype=torch.float64)\n\n def gradfun(p1, p2):\n return model(p1, p2)[0]\n assert gradcheck(gradfun, (points1, points2), raise_exception=True)\n"
] |
[
[
"torch.rand_like",
"torch.eye",
"torch.tensor",
"torch.rand",
"torch.autograd.gradcheck"
]
] |
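A minimal usage sketch mirroring test_smoke above, assuming a kornia version where kornia.geometry.RANSAC exists; the random correspondences are illustrative.

```python
import torch
from kornia.geometry import RANSAC

points1 = torch.rand(4, 2)  # minimal correspondence set for a homography
points2 = torch.rand(4, 2)
ransac = RANSAC('homography')
H, inliers = ransac(points1, points2)
print(H.shape)  # torch.Size([3, 3])
```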
amwufiv/spark
|
[
"b50d4507f52315d5f6d75c617e845248a1c828a9"
] |
[
"python/pyspark/tests/test_serializers.py"
] |
[
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport math\nimport sys\nimport unittest\n\nfrom pyspark import serializers\nfrom pyspark.serializers import (\n CloudPickleSerializer,\n CompressedSerializer,\n AutoBatchedSerializer,\n BatchedSerializer,\n AutoSerializer,\n NoOpSerializer,\n PairDeserializer,\n FlattenedValuesSerializer,\n CartesianDeserializer,\n CPickleSerializer,\n UTF8Deserializer,\n MarshalSerializer,\n)\nfrom pyspark.testing.utils import (\n PySparkTestCase,\n read_int,\n write_int,\n ByteArrayOutput,\n have_numpy,\n have_scipy,\n)\n\n\nclass SerializationTestCase(unittest.TestCase):\n def test_namedtuple(self):\n from collections import namedtuple\n from pyspark.cloudpickle import dumps, loads\n\n P = namedtuple(\"P\", \"x y\")\n p1 = P(1, 3)\n p2 = loads(dumps(p1, 2))\n self.assertEqual(p1, p2)\n\n P2 = loads(dumps(P))\n p3 = P2(1, 3)\n self.assertEqual(p1, p3)\n\n def test_itemgetter(self):\n from operator import itemgetter\n\n ser = CloudPickleSerializer()\n d = range(10)\n getter = itemgetter(1)\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n\n getter = itemgetter(0, 3)\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n\n def test_function_module_name(self):\n ser = CloudPickleSerializer()\n func = lambda x: x\n func2 = ser.loads(ser.dumps(func))\n self.assertEqual(func.__module__, func2.__module__)\n\n def test_attrgetter(self):\n from operator import attrgetter\n\n ser = CloudPickleSerializer()\n\n class C:\n def __getattr__(self, item):\n return item\n\n d = C()\n getter = attrgetter(\"a\")\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n getter = attrgetter(\"a\", \"b\")\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n\n d.e = C()\n getter = attrgetter(\"e.a\")\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n getter = attrgetter(\"e.a\", \"e.b\")\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n\n # Regression test for SPARK-3415\n def test_pickling_file_handles(self):\n # to be corrected with SPARK-11160\n try:\n import xmlrunner # type: ignore[import] # noqa: F401\n except ImportError:\n ser = CloudPickleSerializer()\n out1 = sys.stderr\n out2 = ser.loads(ser.dumps(out1))\n self.assertEqual(out1, out2)\n\n def test_func_globals(self):\n class Unpicklable:\n def __reduce__(self):\n raise RuntimeError(\"not picklable\")\n\n global exit\n exit = Unpicklable()\n\n ser = CloudPickleSerializer()\n self.assertRaises(Exception, lambda: ser.dumps(exit))\n\n def foo():\n sys.exit(0)\n\n self.assertTrue(\"exit\" in foo.__code__.co_names)\n ser.dumps(foo)\n\n def test_compressed_serializer(self):\n ser = 
CompressedSerializer(CPickleSerializer())\n from io import BytesIO as StringIO\n\n io = StringIO()\n ser.dump_stream([\"abc\", \"123\", range(5)], io)\n io.seek(0)\n self.assertEqual([\"abc\", \"123\", range(5)], list(ser.load_stream(io)))\n ser.dump_stream(range(1000), io)\n io.seek(0)\n self.assertEqual([\"abc\", \"123\", range(5)] + list(range(1000)), list(ser.load_stream(io)))\n io.close()\n\n def test_hash_serializer(self):\n hash(NoOpSerializer())\n hash(UTF8Deserializer())\n hash(CPickleSerializer())\n hash(MarshalSerializer())\n hash(AutoSerializer())\n hash(BatchedSerializer(CPickleSerializer()))\n hash(AutoBatchedSerializer(MarshalSerializer()))\n hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))\n hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))\n hash(CompressedSerializer(CPickleSerializer()))\n hash(FlattenedValuesSerializer(CPickleSerializer()))\n\n\[email protected](not have_scipy, \"SciPy not installed\")\nclass SciPyTests(PySparkTestCase):\n\n \"\"\"General PySpark tests that depend on scipy\"\"\"\n\n def test_serialize(self):\n from scipy.special import gammaln\n\n x = range(1, 5)\n expected = list(map(gammaln, x))\n observed = self.sc.parallelize(x).map(gammaln).collect()\n self.assertEqual(expected, observed)\n\n\[email protected](not have_numpy, \"NumPy not installed\")\nclass NumPyTests(PySparkTestCase):\n\n \"\"\"General PySpark tests that depend on numpy\"\"\"\n\n def test_statcounter_array(self):\n import numpy as np\n\n x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])\n s = x.stats()\n self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())\n self.assertSequenceEqual([1.0, 1.0], s.min().tolist())\n self.assertSequenceEqual([3.0, 3.0], s.max().tolist())\n self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())\n\n stats_dict = s.asDict()\n self.assertEqual(3, stats_dict[\"count\"])\n self.assertSequenceEqual([2.0, 2.0], stats_dict[\"mean\"].tolist())\n self.assertSequenceEqual([1.0, 1.0], stats_dict[\"min\"].tolist())\n self.assertSequenceEqual([3.0, 3.0], stats_dict[\"max\"].tolist())\n self.assertSequenceEqual([6.0, 6.0], stats_dict[\"sum\"].tolist())\n self.assertSequenceEqual([1.0, 1.0], stats_dict[\"stdev\"].tolist())\n self.assertSequenceEqual([1.0, 1.0], stats_dict[\"variance\"].tolist())\n\n stats_sample_dict = s.asDict(sample=True)\n self.assertEqual(3, stats_dict[\"count\"])\n self.assertSequenceEqual([2.0, 2.0], stats_sample_dict[\"mean\"].tolist())\n self.assertSequenceEqual([1.0, 1.0], stats_sample_dict[\"min\"].tolist())\n self.assertSequenceEqual([3.0, 3.0], stats_sample_dict[\"max\"].tolist())\n self.assertSequenceEqual([6.0, 6.0], stats_sample_dict[\"sum\"].tolist())\n self.assertSequenceEqual(\n [0.816496580927726, 0.816496580927726], stats_sample_dict[\"stdev\"].tolist()\n )\n self.assertSequenceEqual(\n [0.6666666666666666, 0.6666666666666666], stats_sample_dict[\"variance\"].tolist()\n )\n\n\nclass SerializersTest(unittest.TestCase):\n def test_chunked_stream(self):\n original_bytes = bytearray(range(100))\n for data_length in [1, 10, 100]:\n for buffer_length in [1, 2, 3, 5, 20, 99, 100, 101, 500]:\n dest = ByteArrayOutput()\n stream_out = serializers.ChunkedStream(dest, buffer_length)\n stream_out.write(original_bytes[:data_length])\n stream_out.close()\n num_chunks = int(math.ceil(float(data_length) / buffer_length))\n # length for each chunk, and a final -1 at the very end\n exp_size = (num_chunks + 1) * 4 + data_length\n self.assertEqual(len(dest.buffer), 
exp_size)\n dest_pos = 0\n data_pos = 0\n for chunk_idx in range(num_chunks):\n chunk_length = read_int(dest.buffer[dest_pos : (dest_pos + 4)])\n if chunk_idx == num_chunks - 1:\n exp_length = data_length % buffer_length\n if exp_length == 0:\n exp_length = buffer_length\n else:\n exp_length = buffer_length\n self.assertEqual(chunk_length, exp_length)\n dest_pos += 4\n dest_chunk = dest.buffer[dest_pos : dest_pos + chunk_length]\n orig_chunk = original_bytes[data_pos : data_pos + chunk_length]\n self.assertEqual(dest_chunk, orig_chunk)\n dest_pos += chunk_length\n data_pos += chunk_length\n # ends with a -1\n self.assertEqual(dest.buffer[-4:], write_int(-1))\n\n\nif __name__ == \"__main__\":\n from pyspark.tests.test_serializers import * # noqa: F401\n\n try:\n import xmlrunner # type: ignore[import]\n\n testRunner = xmlrunner.XMLTestRunner(output=\"target/test-reports\", verbosity=2)\n except ImportError:\n testRunner = None\n unittest.main(testRunner=testRunner, verbosity=2)\n"
] |
[
[
"numpy.array"
]
] |
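The size arithmetic asserted in test_chunked_stream above, pulled out as a tiny helper: each chunk carries a 4-byte length prefix and the stream ends with a 4-byte -1 terminator.

```python
import math

def expected_stream_size(data_length, buffer_length):
    num_chunks = int(math.ceil(float(data_length) / buffer_length))
    # one 4-byte length word per chunk, plus the trailing -1 word
    return (num_chunks + 1) * 4 + data_length

print(expected_stream_size(100, 20))  # 5 chunks -> 24 bytes of framing + 100 = 124
```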
huhji/License_plate_recognition_CRNN_korean
|
[
"1f8447f085a0571fead7abdc120db167de048741"
] |
[
"Image_Generator.py"
] |
[
"import cv2\nimport os, random\nimport numpy as np\nfrom parameter import letters\n\n# # Input data generator\ndef labels_to_text(labels): # letters의 index -> text (string)\n return ''.join(list(map(lambda x: letters[int(x)], labels)))\n\ndef text_to_labels(text): # text를 letters 배열에서의 인덱스 값으로 변환\n return list(map(lambda x: letters.index(x), text))\n\n\nclass TextImageGenerator:\n def __init__(self, img_dirpath, img_w, img_h,\n batch_size, downsample_factor, max_text_len=9):\n self.img_h = img_h\n self.img_w = img_w\n self.batch_size = batch_size\n self.max_text_len = max_text_len\n self.downsample_factor = downsample_factor\n self.img_dirpath = img_dirpath # image dir path\n self.img_dir = os.listdir(self.img_dirpath) # images list\n self.n = len(self.img_dir) # number of images\n self.indexes = list(range(self.n))\n self.cur_index = 0\n self.imgs = np.zeros((self.n, self.img_h, self.img_w))\n self.texts = []\n\n ## samples의 이미지 목록들을 opencv로 읽어 저장하기, texts에는 label 저장\n def build_data(self):\n print(self.n, \" Image Loading start...\")\n for i, img_file in enumerate(self.img_dir):\n img = cv2.imread(self.img_dirpath + img_file, cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (self.img_w, self.img_h))\n img = img.astype(np.float32)\n img = (img / 255.0) * 2.0 - 1.0\n\n self.imgs[i, :, :] = img\n self.texts.append(img_file[0:-4])\n print(len(self.texts) == self.n)\n print(self.n, \" Image Loading finish...\")\n\n def next_sample(self): ## index max -> 0 으로 만들기\n self.cur_index += 1\n if self.cur_index >= self.n:\n self.cur_index = 0\n random.shuffle(self.indexes)\n return self.imgs[self.indexes[self.cur_index]], self.texts[self.indexes[self.cur_index]]\n\n def next_batch(self): ## batch size만큼 가져오기\n while True:\n X_data = np.ones([self.batch_size, self.img_w, self.img_h, 1]) # (bs, 128, 64, 1)\n Y_data = np.ones([self.batch_size, self.max_text_len]) # (bs, 9)\n input_length = np.ones((self.batch_size, 1)) * (self.img_w // self.downsample_factor - 2) # (bs, 1)\n label_length = np.zeros((self.batch_size, 1)) # (bs, 1)\n\n for i in range(self.batch_size):\n img, text = self.next_sample()\n img = img.T\n img = np.expand_dims(img, -1)\n X_data[i] = img\n Y_data[i] = text_to_labels(text)\n label_length[i] = len(text)\n\n # dict 형태로 복사\n inputs = {\n 'the_input': X_data, # (bs, 128, 64, 1)\n 'the_labels': Y_data, # (bs, 8)\n 'input_length': input_length, # (bs, 1) -> 모든 원소 value = 30\n 'label_length': label_length # (bs, 1) -> 모든 원소 value = 8\n }\n outputs = {'ctc': np.zeros([self.batch_size])} # (bs, 1) -> 모든 원소 0\n yield (inputs, outputs)\n"
] |
[
[
"numpy.expand_dims",
"numpy.zeros",
"numpy.ones"
]
] |
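A self-contained sketch of the text_to_labels / labels_to_text pair above, with a made-up `letters` alphabet standing in for the one imported from parameter.py.

```python
letters = "0123456789가나다"  # stand-in; the real alphabet comes from parameter.py

def text_to_labels(text):
    return [letters.index(ch) for ch in text]

def labels_to_text(labels):
    return "".join(letters[int(i)] for i in labels)

labels = text_to_labels("12가34")
print(labels)                  # [1, 2, 10, 3, 4]
print(labels_to_text(labels))  # '12가34'
```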
omnicomdatahousecl/omnicom_libraries
|
[
"d97447b01d845a12462d5313be31f1debd180770"
] |
[
"pycomscore/pycomscore.py"
] |
[
"\"\"\"\r\nThis is a module that allows you to connect to the Comscore library \r\ndeveloped by Annalect and obtain synthesized information.\r\n\"\"\"\r\n\r\n__author__ = \"Carlos Trujillo, Data analytics Manager\"\r\n__email__ = \"[email protected]\"\r\n__status__ = \"planning\"\r\n\r\n\r\nfrom sqlalchemy import create_engine\r\nfrom pandas import read_sql, DataFrame\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom collections import Counter\r\n\r\nclass comscore_omnicom_database:\r\n \"\"\"Class to generate a connection to the OMG comscore database.\r\n\r\n Example:\r\n >>> comscore = comscore_omnicom_database(user = 'user_name', password = 'password_value')\r\n >>> dataframe_time = comscore.domain_by_time(country = 'cl')\r\n >>> dataframe_time.head(5)\r\n\r\n \"\"\"\r\n \r\n def __init__(self, user, password, endpoint = None):\r\n \"\"\"Login into our database.\r\n\r\n Note:\r\n All users and passwords must have been provided by the annalect team. \r\n Additionally, they must be connected to the Annalect VPN to be able to access, otherwise, you will get a connection error.\r\n\r\n Args:\r\n user (str): User name that will connect to the database. Delivered by the annalect team.\r\n password (str): User password that will connect to the database. Delivered by the annalect team.\r\n endpoint (:obj:`str`, optional): Database endpoint, by default the redshift database comes but you can choose another one. \r\n This step is optional\r\n\r\n \"\"\"\r\n\r\n self.user = user\r\n self.password = password\r\n \r\n if isinstance(endpoint, type(None)):\r\n \r\n self.engine_str = 'postgres://' + str(self.user) + ':' + str(self.password) + '@dsdk-v0p1-annalect.clf6bikxcquu.us-east-1.redshift.amazonaws.com:5439/dsdk'\r\n \r\n else:\r\n self.engine_str = 'postgres://' + str(self.user) + ':' + str(self.password) + str(endpoint)\r\n \r\n self.engine = create_engine(self.engine_str)\r\n self.connection = self.engine.connect()\r\n \r\n def domain_by_time(self, country = None, start_date = None, end_date = None, event_like = False, domain = None, saved = False):\r\n \"\"\"Function to generate a query to the database and obtain the total number of visits and reach in a period of time.\r\n\r\n Note:\r\n To use this method, you must have previously logged in.\r\n\r\n Args:\r\n country (str, required): Select the country code on which you want to create a query. \r\n Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']\r\n\r\n start_date (str, optional): Select from which date you want to see your data.\r\n The default is 2019-01-01\r\n\r\n end_date (str, optional): Select the maximum date to consult.\r\n The default value is 2019-06-01\r\n\r\n event_like (str, optional): An additional WHERE statement that adds the \"LIKE\" in SQL. All the words you consider will be taken.\r\n if you use the event_like operator your query will look like the following.\r\n Example: SELECT * FROM table WHERE domain = somedomain.com AND event_detail LIKE '% event_like_value %' \r\n\r\n domain (str, required): The specified domain you want to know about. 
This value is required.\r\n\r\n saved (bool, optional): If saved is true then you must indicate the path where you want to save \r\n the file and it will be saved on your local machine.\r\n This is optional.\r\n\r\n Returns:\r\n DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.\r\n \r\n Raises:\r\n TypeError: If any of the arguments is sent with the wrong data type, a TypeError will be obtained, \r\n also if some mandatory value is not delivered to the function.\r\n\r\n \"\"\"\r\n \r\n query = \"\"\"\r\n SELECT calendar_date as date, domain, event,\r\n count(distinct guid) as total_reach,\r\n count(guid) as total_visits, sum(time_spent) as total_time, total_time / total_visits as avg_time_per_visit\r\n \r\n FROM spectrum_comscore.clickstream_{country_in}\r\n where calendar_date between '{start_date_in}' and '{end_date_in}'\r\n and domain = '{domain_in}'\r\n {statement_like}\r\n group by calendar_date, domain, event\r\n \"\"\"\r\n \r\n if isinstance(country, type(None)):\r\n error = 'Define your country'\r\n raise TypeError(error)\r\n \r\n if isinstance(start_date, type(None)):\r\n start_date = '2019-01-01'\r\n print('The preset date was defined, which starts from January 1, 2019')\r\n\r\n if isinstance(end_date, type(None)):\r\n end_date = '2019-06-01'\r\n print('The preset date was defined, which starts from June 1, 2019')\r\n\r\n if isinstance(domain, type(None)):\r\n error = 'Define your specific domain'\r\n raise TypeError(error)\r\n\r\n if not isinstance(event_like, type(False)):\r\n if not isinstance(event_like, str):\r\n error = 'Verify your ilike, you use other type value different to string'\r\n raise TypeError(error)\r\n else:\r\n ilike = \"and event_detail like '%%{}%%'\".format(event_like)\r\n else:\r\n ilike = ''\r\n print('you dont define ilike statement')\r\n \r\n setence = query.format(start_date_in = start_date, country_in = country, end_date_in = end_date, domain_in = domain, statement_like = ilike)\r\n print(setence + '\\n')\r\n dataframe = read_sql(setence, con = self.connection)\r\n\r\n if saved:\r\n path_file = input('define your personal path to save the file: ')\r\n dataframe.to_csv(path_file + '{}_{}_{}.csv'.format(domain.replace('.', '-'), start_date, end_date))\r\n \r\n return dataframe\r\n\r\n def custom_query_statement(self, query):\r\n \"\"\"A function that allows you to create a custom query on the comscore database.\r\n\r\n Note:\r\n To use this method, you must have previously logged in.\r\n\r\n Args:\r\n query (str, required): Full statement of the query.\r\n\r\n Returns:\r\n DataFrame: If the process works correctly, you will get a dataframe with all the required data.\r\n\r\n \"\"\"\r\n return read_sql(query, con = self.connection)\r\n\r\n def demographics_by_web(self, country = None, start_date = None, end_date = None, \r\n event_like = False, domain = None, saved = False, age_group = False,\r\n ages_between = None, gender_between = None):\r\n \"\"\"A function that displays all demographic values for a specific domain.\r\n\r\n Note:\r\n To use this method, you must have previously logged in.\r\n\r\n Args:\r\n country (str, required): Select the country code on which you want to create a query. 
\r\n Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']\r\n\r\n start_date (str, optional): Select from which date you want to see your data.\r\n The default is 2019-01-01\r\n\r\n end_date (str, optional): Select the maximum date to consult.\r\n The default value is 2019-06-01\r\n\r\n event_like (str, optional): An additional WHERE statement that adds the \"LIKE\" in SQL. All the words you consider will be taken.\r\n if you use the event_like operator your query will look like the following.\r\n Example: SELECT * FROM table WHERE domain = somedomain.com AND event_detail SIMILAR TO '% event_like_value %' \r\n\r\n domain (str, required): The specified domain you want to know about. This value is required.\r\n\r\n saved (bool, optional): If saved is true then you must indicate the path where you want to save \r\n the file and it will be saved on your local machine.\r\n This is optional.\r\n \r\n age_group (bool, optional): A selection of true or false, if it is false it will not be grouped by age range, \r\n if it is true it will be grouped by age range.\r\n\r\n ages_between (tuple, optional): A tuple value, which must have two integer values (min age, max age).\r\n\r\n gender_between (tuple, optional): A tuple that contains the strings of all the genres that you want to examine.\r\n example: ('Male', 'Female', 'other')\r\n\r\n Returns:\r\n DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.\r\n \r\n Raises:\r\n TypeError: If any of the arguments is sent with the wrong data type, a TypeError will be obtained, \r\n also if some mandatory value is not delivered to the function.\r\n\r\n \"\"\"\r\n\r\n query = \"\"\"\r\n SELECT main_base.domain, main_base.device, user_base.gender, \r\n \r\n {age_group_statement},\r\n \r\n \r\n user_base.children_present,\r\n count(distinct main_base.guid) as reach, count(main_base.guid) as visits, count(main_base.machine_id) as number_of_devices,\r\n sum(main_base.time_spent) as total_time, total_time / visits as avg_time_per_visit\r\n \r\n FROM spectrum_comscore.clickstream_{country_in} main_base\r\n \r\n LEFT JOIN\r\n (select person_id, gender, age, children_present\r\n from spectrum_comscore.person_demographics_{country_in}\r\n where date >= '{start_date_in}'\r\n group by person_id, gender, age, children_present) user_base\r\n \r\n ON main_base.guid = user_base.person_id\r\n \r\n where calendar_date between '{start_date_in}' and '{end_date_in}'\r\n and domain = '{domain_in}'\r\n {statement_like}\r\n {statement_ages}\r\n \r\n group by 1,2,3,4,5\r\n \"\"\"\r\n \r\n if isinstance(country, type(None)):\r\n error = 'Define your country'\r\n raise TypeError(error)\r\n\r\n if isinstance(start_date, type(None)):\r\n start_date = '2019-01-01'\r\n print('The preset date was defined, which starts from January 1, 2019')\r\n \r\n if isinstance(end_date, type(None)):\r\n end_date = '2019-06-01'\r\n print('The preset date was defined, which starts from June 1, 2019')\r\n \r\n if isinstance(domain, type(None)):\r\n error = 'Define your specific domain'\r\n raise TypeError(error)\r\n\r\n if isinstance(ages_between, type(None)):\r\n ages_state = ''\r\n else:\r\n if not isinstance(ages_between, tuple):\r\n error = 'Verify your ages, you use other type value different to tuple'\r\n raise TypeError(error)\r\n else:\r\n ages_state = 'and user_base.age between ' + str(ages_between[0]) + ' and ' + str(ages_between[1])\r\n\r\n if not isinstance(event_like, type(False)):\r\n if not 
isinstance(event_like, str):\r\n error = 'Verify your ilike, you use other type value different to string'\r\n raise TypeError(error)\r\n else:\r\n ilike = \"and event_detail SIMILAR TO '%%{}%%'\".format(event_like)\r\n else:\r\n ilike = ''\r\n print('you dont define ilike statement')\r\n\r\n if age_group:\r\n age_group_value = \"\"\"\r\n CASE\r\n WHEN user_base.age > 13 AND user_base.age <= 17 THEN '13-17'\r\n WHEN user_base.age > 17 AND user_base.age <= 34 THEN '18-34'\r\n WHEN user_base.age > 34 AND user_base.age <= 54 THEN '35-54'\r\n WHEN user_base.age > 54 THEN '55+'\r\n ELSE\r\n 'Undetermined'\r\n END\r\n AS age_group\r\n \"\"\"\r\n else:\r\n age_group_value = \"\"\" user_base.age \"\"\"\r\n\r\n setence = query.format(start_date_in = start_date, end_date_in = end_date, domain_in = domain, statement_like = ilike, \r\n country_in = country, age_group_statement = age_group_value,\r\n statement_ages = ages_state)\r\n print(setence + '\\n')\r\n dataframe = read_sql(setence, con = self.connection)\r\n dataframe.gender = dataframe.gender.astype(str).replace('nan', 'Undetermined')\r\n dataframe.children_present = dataframe.children_present.astype(str).replace('nan', 'Undetermined')\r\n\r\n if isinstance(gender_between, type(None)):\r\n pass\r\n else:\r\n if not isinstance(gender_between, tuple):\r\n error = 'Gender is not a tuple'\r\n raise TypeError(error)\r\n \r\n else:\r\n dataframe = dataframe.query('gender in {}'.format(gender_between))\r\n\r\n if saved:\r\n path_file = input('define your personal path to save the file: ')\r\n dataframe.to_csv(path_file + 'demographics_{}_{}_{}.csv'.format(domain.replace('.', '-'), start_date, end_date))\r\n \r\n return dataframe\r\n \r\n def correlation_between_domains(self, country = None, start_date = None, end_date = None, url_site = None,\r\n domain_name_like = False, reach_greater_than = 8, corr_greater = 0.75, saved = False):\r\n\r\n \"\"\"Correlation between a specific domain and others.\r\n\r\n Note:\r\n To use this method, you must have previously logged in.\r\n\r\n Args:\r\n country (str, required): Select the country code on which you want to create a query. \r\n Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']\r\n\r\n start_date (str, optional): Select from which date you want to see your data.\r\n The default is 2019-01-01\r\n\r\n end_date (str, optional): Select the maximum date to consult.\r\n The default value is 2019-06-01\r\n\r\n url_site (str, optional): \r\n\r\n domain_name_like (str, required): An additional WHERE statement that adds the \"LIKE\" in SQL. All the words you consider will be taken.\r\n if you use the event_like operator your query will look like the following.\r\n Example: SELECT * FROM table WHERE domain = somedomain.com AND event_detail SIMILAR TO '% event_like_value %' \r\n\r\n reach_greater_than (int, optional): This indicator by default has a value of eight and represents \r\n that all the pages that are studied for the correlation must have a scope greater than 8 people.\r\n You can adjust this number, using this argument.\r\n \r\n corr_greater (float, optional): It is a filter to show only those pages that have a \r\n correlation with the main domain greater than 0.75, which is the default value. 
\r\n You can adjust this number, using this argument.\r\n\r\n saved (bool, optional): If saved is true then you must indicate the path where you want to save \r\n the file and it will be saved on your local machine.\r\n This is optional.\r\n\r\n Returns:\r\n DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.\r\n \r\n Raises:\r\n TypeError: If any of the arguments is sent with the wrong data type, a TypeError will be obtained, \r\n also if some mandatory value is not delivered to the function.\r\n\r\n \"\"\"\r\n\r\n query = \"\"\"\r\n select calendar_date as date, domain, count(distinct guid) as reach, count(guid) as visits \r\n from spectrum_comscore.clickstream_{country_in}\r\n where domain = '{url_site_in}' \r\n {domain_like_statement}\r\n and calendar_date between '{start_date_in}' and '{end_date_in}'\r\n group by date, domain\r\n having reach >= {reach_greater_than_in}\r\n \r\n UNION ALL\r\n \r\n select calendar_date as date, domain, count(distinct guid) as reach, count(guid) as visits \r\n from spectrum_comscore.clickstream_{country_in}\r\n where guid in (select guid from spectrum_comscore.clickstream_{country_in}\r\n where domain = '{url_site_in}' \r\n {domain_like_statement}\r\n group by guid)\r\n and calendar_date between '{start_date_in}' and '{end_date_in}'\r\n and domain not in ('facebook.com', 'netflix.com', 'google.com', 'gmail.com', 'twitter.com', 'google.cl', 'instagram.com', 'youtube.com')\r\n group by date, domain\r\n having count(distinct guid) >= {reach_greater_than_in}\r\n \r\n \"\"\"\r\n \r\n if isinstance(country, type(None)):\r\n error = 'Define your country'\r\n raise TypeError(error)\r\n\r\n if not isinstance(domain_name_like, type(False)):\r\n if not isinstance(domain_name_like, str):\r\n error = 'Verify your domain like, you use other type value different to string'\r\n raise TypeError(error)\r\n else:\r\n domainlike = \"or event_detail SIMILAR TO '%%{}%%' \".format(domain_name_like)\r\n else:\r\n domainlike = ''\r\n print('you dont define domain like statement')\r\n\r\n if isinstance(url_site, type(None)):\r\n error = 'Define the url of the site'\r\n raise TypeError(error)\r\n \r\n if isinstance(start_date, type(None)):\r\n start_date = '2019-01-01'\r\n print('The preset date was defined, which starts from January 1, 2019')\r\n \r\n if isinstance(end_date, type(None)):\r\n end_date = '2019-05-01'\r\n print('The preset date was defined, which end from May 1, 2019')\r\n\r\n sentence = query.format(country_in = country, start_date_in = start_date, end_date_in = end_date,\r\n url_site_in = url_site, domain_like_statement = domainlike,\r\n reach_greater_than_in = reach_greater_than)\r\n\r\n print(sentence + '\\n')\r\n\r\n dataframe = read_sql(sentence,\r\n con = self.connection)\r\n \r\n dataframe = dataframe.drop_duplicates().pivot(index='date', columns='domain', values='reach')\r\n dataframe_corr = dataframe.corr(method='pearson')\r\n\r\n if domain_name_like == False:\r\n dataframe_uniq_matrix = dataframe_corr[[url_site]]\r\n final_corr = dataframe_uniq_matrix[dataframe_uniq_matrix[url_site] >= corr_greater]\r\n else:\r\n dataframe_uniq_matrix = dataframe_corr[dataframe_corr.index.str.contains(domain_name_like)]\r\n\r\n mask = ~(dataframe_uniq_matrix.mask(np.eye(len(dataframe_uniq_matrix), dtype=bool)).abs() > corr_greater).any()\r\n\r\n final_corr = dataframe_uniq_matrix.loc[mask, mask]\r\n\r\n if saved:\r\n path_file = input('define your personal path to save the file: ')\r\n final_corr.to_csv(path_file + 
'corr_{}_{}_{}.csv'.format(url_site.replace('.', '-'), start_date, end_date))\r\n \r\n return dataframe, dataframe_corr, dataframe_uniq_matrix, final_corr\r\n \r\n def overlaps_between_pages(self, country = None, start_date = None, end_date = None, \r\n domain = None, competitors = None, like_domain = None,\r\n like_competitors = None, saved = False):\r\n \"\"\"A function to obtain the interdomain overexposure.\r\n\r\n Note:\r\n To use this method, you must have previously logged in.\r\n\r\n Args:\r\n country (str, required): Select the country code on which you want to create a query. \r\n Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']\r\n\r\n start_date (str, optional): Select from which date you want to see your data.\r\n The default is 2019-01-01\r\n\r\n end_date (str, optional): Select the maximum date to consult.\r\n The default value is 2019-06-01\r\n\r\n domain (str, optional): The main domain on which we want to overlap\r\n\r\n competitors (tuple, required): All competitors stored in a tuple, where the inner values are strings\r\n\r\n like_domain (str, optional): An additional WHERE statement that adds the \"LIKE\" in SQL. All the words you consider will be taken.\r\n if you use the event_like operator your query will look like the following.\r\n Example: SELECT * FROM table WHERE domain = somedomain.com AND event_detail SIMILAR TO '% event_like_value %' \r\n \r\n like_competitors (str, optional): An additional WHERE statement that adds the \"LIKE\" in SQL. All the words you consider will be taken.\r\n if you use the event_like operator your query will look like the following.\r\n Example: SELECT * FROM table WHERE domain = somedomain.com AND event_detail SIMILAR TO '% event_like_value %' \r\n\r\n saved (bool, optional): If saved is true then you must indicate the path where you want to save \r\n the file and it will be saved on your local machine.\r\n This is optional.\r\n\r\n Returns:\r\n DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.\r\n \r\n Raises:\r\n TypeError: If any of the arguments is sent with the wrong data type, a TypeError will be obtained, \r\n also if some mandatory value is not delivered to the function.\r\n\r\n \"\"\"\r\n\r\n testing = \"\"\"\r\n select guid, domain\r\n from spectrum_comscore.clickstream_{country_in}\r\n where domain = '{domain_in}'\r\n {main_domain_statement_like}\r\n and calendar_date between '{start_date_in}' and '{end_date_in}'\r\n group by guid, domain\r\n \r\n UNION ALL\r\n \r\n select guid, domain\r\n from spectrum_comscore.clickstream_{country_in}\r\n where domain in {competidors_in}\r\n {competidors_statement_like}\r\n and calendar_date between '{start_date_in}' and '{end_date_in}'\r\n group by guid, domain\r\n \"\"\"\r\n \r\n if isinstance(country, type(None)):\r\n error = 'Define your country'\r\n raise TypeError(error)\r\n \r\n if isinstance(domain, type(None)):\r\n error = 'Define your domain'\r\n raise TypeError(error)\r\n \r\n if isinstance(competitors, type(None)):\r\n error = 'Define your competitors'\r\n raise TypeError(error)\r\n else:\r\n if not isinstance(competitors, tuple):\r\n error = 'Competitors must be entered in parentheses'\r\n raise TypeError(error)\r\n else:\r\n pass\r\n \r\n if isinstance(start_date, type(None)):\r\n start_date = '2019-01-01'\r\n print('The preset date was defined, which starts from January 1, 2019')\r\n \r\n if isinstance(end_date, type(None)):\r\n end_date = '2019-06-01'\r\n 
print('The preset date was defined, which starts from January 1, 2019')\r\n \r\n if isinstance(like_domain, type(None)):\r\n domainlike = ''\r\n else:\r\n if not isinstance(like_domain, str):\r\n error = 'domain like is not a string'\r\n raise TypeError(error)\r\n else:\r\n domainlike = \"and event_detail SIMILAR TO '%%{}%%'\".format(like_domain)\r\n \r\n if isinstance(like_competitors, type(None)):\r\n competitorslike = ''\r\n else:\r\n if not isinstance(like_competitors, str):\r\n error = 'domain like is not a string'\r\n raise TypeError(error)\r\n else:\r\n competitorslike = \"and event_detail SIMILAR TO '%%{}%%'\".format(like_competitors)\r\n \r\n sentence = testing.format(country_in = country, start_date_in = start_date, end_date_in = end_date, \r\n domain_in = domain, competidors_in = competitors,\r\n main_domain_statement_like = domainlike, \r\n competidors_statement_like = competitorslike)\r\n print(sentence, '\\n')\r\n\r\n tests = read_sql(sentence,\r\n con = self.connection)\r\n \r\n #create unique list of names\r\n uniqueNames = tests.domain.unique()\r\n print(uniqueNames)\r\n \r\n #create a data frame dictionary to store your data frames\r\n DataFrameDict = {elem : pd.DataFrame for elem in uniqueNames}\r\n \r\n my_list = []\r\n \r\n for key in DataFrameDict.keys():\r\n DataFrameDict[key] = tests[:][tests.domain == key].reset_index(drop = True)\r\n my_list.append(list(tests[:][tests.domain == key].reset_index(drop = True).guid))\r\n \r\n frame = pd.DataFrame()\r\n \r\n for index in range(len(my_list)):\r\n lista_final = [list(filter(lambda x: x in my_list[index], sublist)) for sublist in my_list]\r\n \r\n mt = [len(x) / len(my_list[index]) for x in lista_final]\r\n frame = pd.concat([frame, DataFrame(mt)], axis = 1)\r\n \r\n frame.columns = list(uniqueNames)\r\n frame.index = list(uniqueNames)\r\n\r\n if saved:\r\n path_file = input('define your personal path to save the file: ')\r\n frame.to_csv(path_file + 'overlap_{}_{}_{}.csv'.format(domain.replace('.', '-'), start_date, end_date))\r\n\r\n return DataFrameDict, frame\r\n \r\n def bayesian_inference_over_sites(self, country = None, domain = None, \r\n time_spent = None, start_date = None, end_date = None, saved = False):\r\n \"\"\"A function that uses the Bayes theorem to calculate which sites are most likely to be visited by a user who visits our site.\r\n\r\n Note:\r\n To use this method, you must have previously logged in.\r\n\r\n Args:\r\n country (str, required): Select the country code on which you want to create a query. 
\r\n Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']\r\n\r\n start_date (str, optional): Select from which date you want to see your data.\r\n The default is 2019-01-01\r\n\r\n end_date (str, optional): Select the maximum date to consult.\r\n The default value is 2019-06-01\r\n\r\n domain (str, optional): The main domain on which we want to overlap\r\n\r\n time_spent (int, required): The minimum amount of time a user has to spend on the site and the competitor's site to be examined.\r\n The default value is 300.\r\n\r\n Returns:\r\n DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.\r\n \r\n Raises:\r\n TypeError: If any of the arguments is sent with the wrong data type, a TypeError will be obtained, \r\n also if some mandatory value is not delivered to the function.\r\n\r\n \"\"\"\r\n query = \"\"\"\r\n\r\n WITH my_table_3 as (\r\n select domain\r\n from spectrum_comscore.clickstream_{country_in}\r\n where guid in (select guid from spectrum_comscore.clickstream_{country_in} \r\n where domain = '{domain_in}' \r\n and calendar_date between '{start_date_in}' and '{end_date_in}'\r\n and time_spent >= {time_spent_in}\r\n group by guid)\r\n and time_spent >= {time_spent_in}\r\n group by domain\r\n ),\r\n \r\n my_table_4 as (\r\n select guid\r\n from spectrum_comscore.clickstream_{country_in}\r\n where guid in (select guid from spectrum_comscore.clickstream_{country_in} \r\n where domain = '{domain_in}' \r\n and calendar_date between '{start_date_in}' and '{end_date_in}'\r\n and time_spent >= {time_spent_in}\r\n group by guid)\r\n group by guid\r\n \r\n )\r\n \r\n SELECT domain, 'visitors' as type, count(distinct guid) as reach \r\n from spectrum_comscore.clickstream_{country_in}\r\n where domain in (select domain from my_table_3) and guid in (select guid from my_table_4)\r\n and domain not in ('facebook.com', 'netflix.com', 'google.com', 'gmail.com', 'twitter.com', 'google.cl', 'instagram.com', 'youtube.com', 'bing.com', 'whatsapp.com', 'msn.com', 'live.com', '{domain_in}')\r\n group by 1,2\r\n \r\n UNION ALL\r\n \r\n SELECT domain, 'outsiders' as type, count(distinct guid) as reach \r\n from spectrum_comscore.clickstream_{country_in}\r\n where domain in (select domain from my_table_3) and guid not in (select guid from my_table_4)\r\n and domain not in ('facebook.com', 'netflix.com', 'google.com', 'gmail.com', 'twitter.com', 'google.cl', 'instagram.com', 'youtube.com', 'bing.com', 'whatsapp.com', 'msn.com', 'live.com')\r\n group by 1,2\r\n\r\n \"\"\"\r\n \r\n if isinstance(country, type(None)):\r\n error = 'Define your country'\r\n raise TypeError(error)\r\n \r\n if isinstance(domain, type(None)):\r\n error = 'Define your domain'\r\n raise TypeError(error)\r\n \r\n if isinstance(time_spent, type(None)):\r\n time_spent = 300\r\n print('The preset time_spent was defined, which 300 seconds')\r\n \r\n if isinstance(start_date, type(None)):\r\n start_date = '2019-01-01'\r\n print('The preset date was defined, which starts from January 1, 2019')\r\n \r\n if isinstance(end_date, type(None)):\r\n end_date = '2019-06-01'\r\n print('The preset date was defined, which ends from June 1, 2019')\r\n \r\n sentence = query.format(country_in = country, start_date_in = start_date, end_date_in = end_date,\r\n domain_in = domain, time_spent_in = time_spent)\r\n print(sentence, '\\n')\r\n\r\n table3 = read_sql(sentence, con = self.connection)\r\n \r\n pivot_table_3 = pd.pivot_table(table3, values = 'reach', 
index = 'type', columns = 'domain')\r\n \r\n dataframe_probs_a = pd.DataFrame(columns=['domain', 'p(a)', 'p(x | a)'])\r\n \r\n totals = pivot_table_3.iloc[0].sum() + pivot_table_3.iloc[1].sum()\r\n \r\n for indexs in range(pivot_table_3.shape[1]):\r\n dataframe_probs_a.loc[indexs] = [str(pivot_table_3.iloc[:,indexs].name), \r\n pivot_table_3.iloc[:,indexs].sum() / totals, \r\n pivot_table_3.iloc[:,indexs].visitors / pivot_table_3.iloc[:,indexs].sum()]\r\n \r\n dataframe_probs_a['p(a)*p(x | a)'] = dataframe_probs_a['p(a)'] * dataframe_probs_a['p(x | a)']\r\n dataframe_probs_a['bayes'] = dataframe_probs_a['p(a)*p(x | a)'] / dataframe_probs_a['p(a)*p(x | a)'].sum()\r\n dataframe_probs_a['bayes %'] = dataframe_probs_a['bayes'] * 100\r\n dataframe_probs_a.sort_values('bayes %', ascending = False,inplace=True)\r\n dataframe_probs_a.reset_index(drop = True, inplace = True)\r\n \r\n short_frame = dataframe_probs_a[dataframe_probs_a['bayes %'] > 0.4].reset_index(drop = True)\r\n\r\n if saved:\r\n path_file = input('define your personal path to save the file: ')\r\n short_frame.to_csv(path_file + 'bayesianinference_sites_prob_{}_{}_{}.csv'.format(domain.replace('.', '-'), start_date, end_date))\r\n \r\n return dataframe_probs_a, short_frame\r\n\r\n def bayesian_site_predictor(self, country = None, \r\n domain = None, time_spent = None, \r\n start_date = None, end_date = None, saved = False):\r\n \"\"\"A function to know which is the next most likely site to visit after a user visits our site.\r\n\r\n Note:\r\n To use this method, you must have previously logged in.\r\n\r\n Args:\r\n country (str, required): Select the country code on which you want to create a query. \r\n Possible options are: ['us','ar', 'au','br', 'ca', 'cl', 'co', 'hk', 'id', 'in', 'mx', 'my', 'nz', 'sg', 'tw']\r\n\r\n start_date (str, optional): Select from which date you want to see your data.\r\n The default is 2019-01-01\r\n\r\n end_date (str, optional): Select the maximum date to consult.\r\n The default value is 2019-06-01\r\n\r\n domain (str, optional): The main domain on which we want to overlap\r\n\r\n time_spent (int, required): The minimum amount of time a user has to spend on the site and the competitor's site to be examined.\r\n The default value is 300.\r\n\r\n Returns:\r\n DataFrame: If the process occurs correctly, you will get a dataframe with all the required data.\r\n \r\n Raises:\r\n TypeError: If any of the arguments is sent with the wrong data type, a TypeError will be obtained, \r\n also if some mandatory value is not delivered to the function.\r\n\r\n \"\"\"\r\n\r\n query_1 = \"\"\"\r\n \r\n SELECT a.guid, concat(concat(b.domain,'_'),a.domain) AS trans_domain, b.positive_interval_time, b.timestamp_date_min, \r\n min(a.visit_date_event) as min_visit\r\n \r\n \r\n FROM\r\n \r\n (select guid, domain, CONVERT(datetime,dateadd(s, event_time,'2000-01-01')) at time zone 'utc' at time zone 'clst' visit_date_event\r\n from spectrum_comscore.clickstream_{country_in}\r\n where guid in (select guid from spectrum_comscore.clickstream_{country_in} \r\n where domain = '{domain_in}' and calendar_date between '{start_date_in}' and '{end_date_in}' and time_spent >= {time_spent_in}\r\n group by guid) and time_spent >= 30\r\n group by guid, domain, event_time) a\r\n \r\n LEFT JOIN\r\n \r\n (select guid, domain, min(timestamp_date) as timestamp_date_min,\r\n timestamp_date_min + INTERVAL '1 hour' as positive_interval_time\r\n from (\r\n select guid, domain,\r\n CONVERT(datetime, dateadd(s, event_time,'2000-01-01')) at time 
zone 'utc' at time zone 'clst' timestamp_date\r\n from spectrum_comscore.clickstream_{country_in}\r\n where domain = '{domain_in}' and calendar_date between '{start_date_in}' and '{end_date_in}' and time_spent >= {time_spent_in}\r\n ) group by guid, domain) b\r\n \r\n ON a.guid = b.guid\r\n \r\n where a.visit_date_event <= b.positive_interval_time and a.visit_date_event > b.timestamp_date_min\r\n \r\n group by 1,2,3,4\r\n \r\n having trans_domain not in ('{domain_in}_{domain_in}')\r\n \r\n \"\"\"\r\n \r\n if isinstance(country, type(None)):\r\n error = 'Define your country'\r\n raise TypeError(error)\r\n \r\n if isinstance(domain, type(None)):\r\n error = 'Define your domain'\r\n raise TypeError(error)\r\n \r\n if isinstance(time_spent, type(None)):\r\n time_spent = 300\r\n print('The preset time_spent was defined, which 300 seconds')\r\n \r\n if isinstance(start_date, type(None)):\r\n start_date = '2019-01-01'\r\n print('The preset date was defined, which starts from January 1, 2019')\r\n \r\n if isinstance(end_date, type(None)):\r\n end_date = '2019-06-01'\r\n print('The preset date was defined, which ends from June 1, 2019')\r\n \r\n sentence1 = query_1.format(country_in = country, start_date_in = start_date, \r\n end_date_in = end_date,\r\n domain_in = domain, time_spent_in = time_spent)\r\n\r\n\r\n print(sentence1, '\\n')\r\n\r\n tests = read_sql(sentence1, con = self.connection)\r\n \r\n new = tests.sort_values(by = ['guid', 'min_visit'])\r\n new.drop_duplicates(subset = 'guid', keep = 'first', inplace = True)\r\n \r\n new['list_domains'] = new.trans_domain.str.split('_')\r\n \r\n list_of_domains = list(new['list_domains'])\r\n \r\n list_a = map(tuple, list_of_domains) #must convert to tuple because list is an unhashable type\r\n \r\n final_count = Counter(list_a)\r\n \r\n dataframe_of_visits = pd.DataFrame.from_dict(final_count, orient = 'index')\r\n \r\n dataframe_of_visits['other_index'] = dataframe_of_visits.index\r\n dataframe_of_visits.reset_index(inplace = True, drop = True)\r\n \r\n dataframe_of_visits['list'] = dataframe_of_visits.other_index.apply(list)\r\n dataframe_of_visits['list_str'] = dataframe_of_visits['list'].apply(','.join)\r\n dataframe_of_visits[['orig', 'desti']] = [sub.split(\",\") for sub in dataframe_of_visits.list_str]\r\n \r\n dataframe_of_visits['type'] = 'from_me_to_destiny' \r\n \r\n dataframe_of_visits.columns = ['reach', 'indexs', 'list_index', 'list_index_str', 'orig', 'domain', 'type']\r\n final_a_frame = dataframe_of_visits[['domain', 'type', 'reach']]\r\n \r\n second_query = \"\"\"\r\n \r\n SELECT a.guid, b.domain as orig, a.domain as desti, b.positive_interval_time, b.timestamp_date_min, min(a.visit_date_event) as min_visit\r\n \r\n \r\n FROM\r\n \r\n (select guid, domain, CONVERT(datetime,dateadd(s, event_time,'2000-01-01')) at time zone 'utc' at time zone 'clst' visit_date_event\r\n from spectrum_comscore.clickstream_{country_in}\r\n where guid in (select guid from spectrum_comscore.clickstream_{country_in} \r\n where domain in {domain_in} and calendar_date between '{start_date_in}' and '{end_date_in}' and time_spent >= {time_spent_in}\r\n group by guid) and time_spent >= 30\r\n group by guid, domain, event_time) a\r\n \r\n LEFT JOIN\r\n \r\n (select guid, domain, min(timestamp_date) as timestamp_date_min,\r\n timestamp_date_min + INTERVAL '1 hour' as positive_interval_time\r\n from (\r\n select guid, domain,\r\n CONVERT(datetime, dateadd(s, event_time,'2000-01-01')) at time zone 'utc' at time zone 'clst' timestamp_date\r\n from 
spectrum_comscore.clickstream_{country_in}\r\n where domain in {domain_in} and calendar_date between '{start_date_in}' and '{end_date_in}' and time_spent >= {time_spent_in}\r\n ) group by guid, domain) b\r\n \r\n ON a.guid = b.guid\r\n \r\n where a.visit_date_event <= b.positive_interval_time and a.visit_date_event > b.timestamp_date_min\r\n and a.domain != b.domain\r\n \r\n group by 1,2,3,4,5\r\n \r\n \"\"\"\r\n \r\n optilist = list(dataframe_of_visits.domain)\r\n strings = ','.join(optilist)\r\n my_tuple = tuple(strings.split(','))\r\n\r\n sentence2 = second_query.format(country_in = country, start_date_in = start_date, \r\n domain_in = my_tuple, time_spent_in = time_spent,\r\n end_date_in = end_date)\r\n\r\n print(sentence2, '\\n')\r\n\r\n second_frame_test = read_sql(sentence2, con = self.connection)\r\n\r\n to_my_domain = second_frame_test[second_frame_test.desti == domain]\r\n \r\n new = to_my_domain.sort_values(by = ['guid', 'min_visit']).reset_index(drop = True)\r\n new.drop_duplicates(subset = 'guid', keep = 'first', inplace = True)\r\n new['trans_domain'] = new.orig + '_' + new.desti\r\n \r\n new['list_domains'] = new.trans_domain.str.split('_')\r\n \r\n list_of_domains = list(new['list_domains'])\r\n \r\n list_a = map(tuple, list_of_domains) #must convert to tuple because list is an unhashable type\r\n \r\n final_count = Counter(list_a)\r\n \r\n dataframe_of_visits_to_me = pd.DataFrame.from_dict(final_count, orient = 'index')\r\n \r\n dataframe_of_visits_to_me['other_index'] = dataframe_of_visits_to_me.index\r\n dataframe_of_visits_to_me.reset_index(inplace = True, drop = True)\r\n \r\n dataframe_of_visits_to_me['list'] = dataframe_of_visits_to_me.other_index.apply(list)\r\n dataframe_of_visits_to_me['list_str'] = dataframe_of_visits_to_me['list'].apply(','.join)\r\n dataframe_of_visits_to_me[['orig', 'desti']] = [sub.split(\",\") for sub in dataframe_of_visits_to_me.list_str]\r\n \r\n dataframe_of_visits_to_me['type'] = 'from_domains_to_me'\r\n \r\n dataframe_of_visits_to_me.columns = ['reach', 'indexs', 'list_index', 'list_index_str', 'domain', 'desti', 'type']\r\n final_b_frame = dataframe_of_visits_to_me[['domain', 'type', 'reach']]\r\n\r\n to_others_domain = second_frame_test[second_frame_test.desti != domain]\r\n \r\n new = to_others_domain.sort_values(by = ['guid', 'min_visit']).reset_index(drop = True)\r\n new.drop_duplicates(subset = 'guid', keep = 'first', inplace = True)\r\n new['trans_domain'] = new.orig + '_' + new.desti\r\n \r\n new['list_domains'] = new.trans_domain.str.split('_')\r\n \r\n list_of_domains = list(new['list_domains'])\r\n \r\n list_a = map(tuple, list_of_domains) #must convert to tuple because list is an unhashable type\r\n \r\n final_count = Counter(list_a)\r\n \r\n dataframe_of_visits_to_others = pd.DataFrame.from_dict(final_count, orient = 'index')\r\n \r\n dataframe_of_visits_to_others['other_index'] = dataframe_of_visits_to_others.index\r\n dataframe_of_visits_to_others.reset_index(inplace = True, drop = True)\r\n \r\n dataframe_of_visits_to_others['list'] = dataframe_of_visits_to_others.other_index.apply(list)\r\n dataframe_of_visits_to_others['list_str'] = dataframe_of_visits_to_others['list'].apply(','.join)\r\n dataframe_of_visits_to_others[['orig', 'desti']] = [sub.split(\",\") for sub in dataframe_of_visits_to_others.list_str]\r\n \r\n dataframe_of_visits_to_others['type'] = 'from_domains_to_others'\r\n \r\n dataframe_of_visits_to_others.columns = ['reach', 'indexs', 'list_index', 'list_index_str', 'orig', 'domain', 'type']\r\n 
final_c_frame = dataframe_of_visits_to_others[['domain', 'type', 'reach']]\r\n\r\n final_frame = pd.concat([final_a_frame, final_b_frame, final_c_frame])\r\n \r\n pivot_table_3 = pd.pivot_table(final_frame, values = 'reach', index = 'type', columns = 'domain').fillna(0)\r\n \r\n dataframe_probs_a = pd.DataFrame(columns=['domain', 'p(a)', 'split'])\r\n\r\n totals = pivot_table_3.iloc[0].sum() + pivot_table_3.iloc[1].sum() + pivot_table_3.iloc[2].sum()\r\n \r\n for indexs in range(pivot_table_3.shape[1]):\r\n dataframe_probs_a.loc[indexs] = [str(pivot_table_3.iloc[:,indexs].name), \r\n pivot_table_3.iloc[:,indexs].sum() / totals, \r\n pivot_table_3.iloc[:,indexs].from_me_to_destiny / pivot_table_3.iloc[:,indexs].sum()]\r\n \r\n dataframe_probs_a['p(X)'] = dataframe_probs_a['p(a)'] * dataframe_probs_a['split']\r\n dataframe_probs_a['bayes'] = (dataframe_probs_a['p(a)'] * dataframe_probs_a['split']) / dataframe_probs_a['p(X)'].sum()\r\n dataframe_probs_a['bayes %'] = dataframe_probs_a['bayes'] * 100\r\n \r\n dataframe_probs_a.sort_values('bayes %', inplace=True, ascending = False,)\r\n dataframe_probs_a.reset_index(drop = True, inplace = True)\r\n \r\n short_frame_a = dataframe_probs_a[dataframe_probs_a['bayes %'] > 0.4].reset_index(drop = True)\r\n \r\n if saved:\r\n path_file = input('define your personal path to save the file: ')\r\n short_frame_a.to_csv(path_file + 'bayesianinference_sites_prob_{}_{}_{}.csv'.format(domain.replace('.', '-'), start_date, end_date))\r\n\r\n return short_frame_a, dataframe_probs_a"
] |
[
[
"pandas.concat",
"pandas.DataFrame",
"pandas.DataFrame.from_dict",
"pandas.read_sql",
"pandas.pivot_table"
]
] |
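The `bayesian_inference_over_sites` method in the comscore module above boils down to a single Bayes update over the visitors/outsiders pivot table it builds from the query. Below is a minimal standalone sketch of that arithmetic (not part of the recorded module); the two domains and their reach counts are invented toy values.

import pandas as pd

# Reach per domain, split by whether the user also visited our own site
# ('visitors') or not ('outsiders') -- the same shape as pivot_table_3 in the module.
pivot = pd.DataFrame(
    {'siteA.com': [80, 20], 'siteB.com': [10, 90]},
    index=['visitors', 'outsiders'],
)

totals = pivot.values.sum()                              # all reach combined
p_a = pivot.sum(axis=0) / totals                         # p(a): traffic share per domain
p_x_given_a = pivot.loc['visitors'] / pivot.sum(axis=0)  # p(x | a): visitor share per domain
joint = p_a * p_x_given_a                                # p(a) * p(x | a)
bayes_pct = 100 * joint / joint.sum()                    # the module's 'bayes %' column

print(bayes_pct.sort_values(ascending=False))
# siteA.com ends up near 89%: most of its reach comes from people who also visit our site.

Thresholding this series at 0.4, as the module does when it builds `short_frame`, simply keeps the domains with non-negligible posterior mass.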
LIIR-KULeuven/CLDR_CLNER_models
|
[
"5fe47a988b88a36d0ccf4484aff5ab70c59f39d6"
] |
[
"tSNE_analysis/tSNE_RE_space_named_entities.py"
] |
[
"import seaborn as sns\nimport random\nimport os\nimport h5py\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nimport configparser\n\nparser = configparser.ConfigParser()\nparser.read(\"./../configs/tSNE_analysis.conf\")\n\nSPLIT_NUM = int(parser.get(\"config\", \"split_num\"))\n\nclass tSNE_analysis:\n def __init__(self, split_num):\n self.split_num = split_num\n self.embeddings, self.labels, self.en1, self.en2 = self.read_data()\n self.embeddings = self.embeddings.tolist()\n self.labels = self.labels.tolist()\n self.indexes_1, self.correct_tokens = self.get_indexes_1()\n self.indexes_0_hard = self.get_hard_indexes_0()\n\n def read_data(self):\n # Read the file\n f = h5py.File('dataset/test_set_split_' + str(self.split_num) + '.hdf5', 'r')\n\n embeddings = f['embeddings']\n labels = f['labels']\n en1 = f['entity_1']\n en2 = f['entity_2']\n\n embeddings_ready = embeddings[:]\n labels_ready = labels[:]\n\n en1_ready = [str(el).strip('[]').strip('\\'') for el in en1[:].astype(str)]\n en2_ready = [str(el).strip('[]').strip('\\'') for el in en2[:].astype(str)]\n\n return embeddings_ready, labels_ready, en1_ready, en2_ready\n\n def get_indexes_1(self):\n indexes_1 = []\n indexes_0 = []\n correct_tokens = []\n for i, v in enumerate(self.labels):\n if v == 1:\n indexes_1.append(i)\n if self.en1[i] not in correct_tokens:\n correct_tokens.append(self.en1[i])\n if self.en2[i] not in correct_tokens:\n correct_tokens.append(self.en2[i])\n else:\n indexes_0.append(i)\n\n return indexes_1, correct_tokens\n\n def get_hard_indexes_0(self):\n indexes_0_hard = []\n for i, v in enumerate(self.labels):\n if v == 0:\n if self.en1[i] in self.correct_tokens or self.en2[i] in self.correct_tokens:\n indexes_0_hard.append(i)\n\n return indexes_0_hard\n\n def get_sampled_indexes(self, num_hard):\n sampled_indexes = random.sample(self.indexes_0_hard, num_hard * len(self.indexes_1))\n sampled_indexes.extend(self.indexes_1)\n\n return sampled_indexes\n\n def get_NE_tags_token_repr(self):\n tokens_rep = []\n ne_tags = []\n for i in self.indexes_1:\n if self.embeddings[i][:768] not in tokens_rep:\n tokens_rep.append(self.embeddings[i][:768])\n ne_tags.append('Drug')\n if self.embeddings[i][768:] not in tokens_rep:\n tokens_rep.append(self.embeddings[i][768:])\n ne_tags.append('AE')\n\n return tokens_rep, ne_tags\n\n def create_plots(self):\n for n in [1, 5, 10, 20]:\n sampled_indexes = self.get_sampled_indexes(n)\n\n tokens_rep, ne_tags = self.get_NE_tags_token_repr()\n tokens_rep_rest = []\n ne_tags_rest = []\n for i in sampled_indexes:\n if self.embeddings[i][:768] not in tokens_rep:\n tokens_rep_rest.append(self.embeddings[i][:768])\n ne_tags_rest.append('Outside')\n if self.embeddings[i][768:] not in tokens_rep:\n tokens_rep_rest.append(self.embeddings[i][768:])\n ne_tags_rest.append('Outside')\n\n tokens_rep_all = tokens_rep + tokens_rep_rest\n ne_tags_all = ne_tags + ne_tags_rest\n\n X_embedded = TSNE(n_components=2).fit_transform(tokens_rep_all)\n\n sns.set(rc={'figure.figsize': (11.7, 8.27)})\n\n palette = sns.color_palette(\"bright\", 3)\n\n sns.scatterplot(X_embedded[:, 0], X_embedded[:, 1], hue=ne_tags_all, legend='full', palette=palette)\n\n if not os.path.exists('plots/split_' + str(self.split_num) + '/RE/'):\n os.makedirs('plots/split_' + str(self.split_num) + '/RE/')\n plt.savefig('plots/split_' + str(self.split_num) + '/RE/'+ 'hard_' + str(n * len(self.indexes_1)) + '_NE.png')\n\n plt.close()\n\nif __name__ == '__main__':\n tSNE_obj = tSNE_analysis(SPLIT_NUM)\n tSNE_obj.create_plots()"
] |
[
[
"sklearn.manifold.TSNE",
"matplotlib.pyplot.close"
]
] |
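The `get_hard_indexes_0` step in the t-SNE script above carries the key idea: a label-0 pair only counts as a "hard" negative when one of its entities also occurs in some label-1 pair. A small self-contained sketch of that selection follows; the entity strings are invented toy values standing in for the HDF5 columns the script reads.

# Toy relation-extraction pairs; labels/en1/en2 mirror the columns read from the HDF5 file.
labels = [1, 0, 0, 1, 0]
en1 = ['aspirin', 'aspirin', 'fever', 'ibuprofen', 'cough']
en2 = ['headache', 'nausea', 'rash', 'nausea', 'rash']

# Entities that participate in at least one positive (label == 1) pair.
correct_tokens = {e for i, v in enumerate(labels) if v == 1 for e in (en1[i], en2[i])}

# A negative is 'hard' when it shares an entity with some positive pair.
indexes_0_hard = [
    i for i, v in enumerate(labels)
    if v == 0 and (en1[i] in correct_tokens or en2[i] in correct_tokens)
]
print(indexes_0_hard)  # -> [1]: ('aspirin', 'nausea') reuses entities from positive pairs

Sampling only these hard negatives, at 1x/5x/10x/20x the positive count as `create_plots` does, keeps the t-SNE projection focused on the pairs a model could plausibly confuse.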
yueri/Cook
|
[
"12162ef040c28662e4f3fb259d3d81bda3b59860"
] |
[
"integration/tests/cook/util.py"
] |
[
"import functools\nimport importlib\nimport itertools\nimport json\nimport logging\nimport os\nimport os.path\nimport subprocess\nimport time\nimport unittest\nimport uuid\nfrom datetime import datetime\nfrom urllib.parse import urlencode, urlparse\n\nimport numpy\nimport requests\nfrom retrying import retry\n\nfrom tests.cook import mesos\n\nlogger = logging.getLogger(__name__)\nsession = importlib.import_module(os.getenv('COOK_SESSION_MODULE', 'requests')).Session()\nsession.headers['User-Agent'] = f\"Cook-Scheduler-Integration-Tests ({session.headers['User-Agent']})\"\n\n# default time limit for each individual integration test\n# if a test takes more than 10 minutes, it's probably broken\nDEFAULT_TEST_TIMEOUT_SECS = int(os.getenv('COOK_TEST_DEFAULT_TEST_TIMEOUT_SECS', 600))\n\n# default time limit used by most wait_* utility functions\n# 2 minutes should be more than sufficient on most cases\nDEFAULT_TIMEOUT_MS = int(os.getenv('COOK_TEST_DEFAULT_TIMEOUT_MS', 120000))\n\n# default wait interval (i.e. time between attempts) used by most wait_* utility functions\nDEFAULT_WAIT_INTERVAL_MS = int(os.getenv('COOK_TEST_DEFAULT_WAIT_INTERVAL_MS', 1000))\n\n# Name of our custom HTTP header for user impersonation\nIMPERSONATION_HEADER = 'X-Cook-Impersonate'\n\n# Reason used by tests that should be skipped on clusters with ephemeral hosts\nEPHEMERAL_HOSTS_SKIP_REASON = 'If the cluster under test has ephemeral hosts, then it is generally ' \\\n 'a bad idea to use HOSTNAME EQUALS constraints, because it can cause ' \\\n 'the process responsible for launching hosts to launch hosts that ' \\\n 'never get used'\n\n\ndef continuous_integration():\n \"\"\"Returns true if the CONTINUOUS_INTEGRATION environment variable is set, as done by Travis-CI.\"\"\"\n return to_bool(os.getenv('CONTINUOUS_INTEGRATION'))\n\n\ndef has_docker_service():\n \"\"\"Returns true if docker services appear to be available to the testing environment.\"\"\"\n return os.path.exists('/var/run/docker.sock')\n\n\n# Default user for multi-user test runs\n_default_user_name = 'root'\n_default_admin_name = 'root'\n_default_impersonator_name = 'poser'\n\n\ndef _get_default_user_name():\n return os.getenv('USER', _default_user_name)\n\n\[email protected]_cache()\ndef _test_user_ids():\n \"\"\"\n Get the numeric user suffixes for this test worker.\n Returns the range 0 to 1 million if COOK_MAX_TEST_USERS is not set.\n If this is a distributed run with a limited number of users,\n e.g., 10 per worker, then this function returns range(0, 10) for worker 0,\n or range(20, 30) for worker 2.\n \"\"\"\n pytest_worker = os.getenv('PYTEST_XDIST_WORKER')\n max_test_users = int(os.getenv('COOK_MAX_TEST_USERS', 0))\n if pytest_worker and max_test_users:\n pytest_worker_id = int(pytest_worker[2:]) # e.g., \"gw4\" -> 4\n test_user_min_id = max_test_users * pytest_worker_id\n test_user_max_id = test_user_min_id + max_test_users\n return range(test_user_min_id, test_user_max_id)\n else:\n return range(1000000)\n\n\ndef _test_user_names(test_name_prefix=None):\n \"\"\"\n Returns a generator of unique test user names, with form {PREFIX}{ID}.\n The COOK_TEST_USER_PREFIX environment variable is used by default;\n otherwise, the test_name_prefix value is used as the PREFIX.\n \"\"\"\n name_prefix = os.getenv('COOK_TEST_USER_PREFIX', test_name_prefix)\n return (f'{name_prefix}{i}' for i in _test_user_ids())\n\n\n# Shell command used to obtain Kerberos credentials for a given test user\n_kerberos_missing_cmd = 'echo \"MISSING COOK_KERBEROS_TEST_AUTH_CMD\" && 
exit 1'\n_kerberos_auth_cmd = os.getenv('COOK_KERBEROS_TEST_AUTH_CMD', _kerberos_missing_cmd)\n\n\nclass _AuthenticatedUser(object):\n \"\"\"\n Object representing a Cook user, which holds authentication details.\n User objects can be used with python's `with` blocks\n to conveniently set a user for a sequence of commands.\n \"\"\"\n\n def __init__(self, name, impersonatee=None):\n self.name = name\n self.impersonatee = impersonatee\n self.previous_impersonatee = None\n\n def impersonating(self, other_user):\n other_username = other_user.name if isinstance(other_user, _AuthenticatedUser) else other_user\n return type(self)(self.name, impersonatee=other_username)\n\n def __enter__(self):\n logger.debug(f'Switching to user {self.name}')\n if self.impersonatee:\n self.previous_impersonatee = session.headers.get(IMPERSONATION_HEADER)\n session.headers[IMPERSONATION_HEADER] = self.impersonatee\n\n def __exit__(self, ex_type, ex_val, ex_trace):\n logger.debug(f'Switching back from user {self.name}')\n if self.impersonatee:\n if self.previous_impersonatee:\n session.headers[IMPERSONATION_HEADER] = self.previous_impersonatee\n self.previous_impersonatee = None\n else:\n del session.headers[IMPERSONATION_HEADER]\n\n\nclass _BasicAuthUser(_AuthenticatedUser):\n \"\"\"\n Object representing a Cook user with HTTP Basic Auth credentials.\n \"\"\"\n\n def __init__(self, name, impersonatee=None):\n super().__init__(name, impersonatee)\n self.auth = (name, '')\n self.previous_auth = None\n\n def __enter__(self):\n global session\n super().__enter__()\n assert self.previous_auth is None\n self.previous_auth = session.auth\n session.auth = self.auth\n\n def __exit__(self, ex_type, ex_val, ex_trace):\n global session\n super().__exit__(ex_type, ex_val, ex_trace)\n assert self.previous_auth is not None\n session.auth = self.previous_auth\n self.previous_auth = None\n\n\nclass _KerberosUser(_AuthenticatedUser):\n \"\"\"\n Object representing a Cook user with Kerberos credentials.\n \"\"\"\n\n def __init__(self, name, impersonatee=None):\n super().__init__(name, impersonatee)\n self.auth = None\n self.auth_token = self._generate_kerberos_ticket_for_user(name)\n self.previous_token = None\n\n @functools.lru_cache()\n def _generate_kerberos_ticket_for_user(self, username):\n \"\"\"\n Get a Kerberos authentication ticket for the given user.\n Depends on COOK_KERBEROS_TEST_AUTH_CMD being set in the environment.\n \"\"\"\n subcommand = (_kerberos_auth_cmd\n .replace('{{COOK_USER}}', username)\n .replace('{{COOK_SCHEDULER_URL}}', retrieve_cook_url()))\n return subprocess.check_output(subcommand, shell=True).rstrip()\n\n def __enter__(self):\n global session\n super().__enter__()\n assert self.previous_token is None\n self.previous_token = session.headers.get('Authorization')\n session.headers['Authorization'] = self.auth_token\n\n def __exit__(self, ex_type, ex_val, ex_trace):\n global session\n super().__exit__(ex_type, ex_val, ex_trace)\n if self.previous_token is None:\n del session.headers['Authorization']\n else:\n session.headers['Authorization'] = self.previous_token\n self.previous_token = None\n\n\nclass UserFactory(object):\n \"\"\"\n Factory object used to create unique user names for a given test function.\n Usernames are composed of the test name and and increasing integer values.\n \"\"\"\n\n def __init__(self, test_handle):\n # Select the authentication scheme\n if http_basic_auth_enabled():\n self.user_class = _BasicAuthUser\n elif kerberos_enabled():\n self.user_class = _KerberosUser\n else:\n raise 
NotImplementedError(f'Unsupported user authentication scheme: {_cook_auth_scheme()}')\n # Set up generator for new user objects\n if test_handle:\n test_id = test_handle.id()\n test_base_name = test_id[test_id.rindex('.test_') + 6:].lower()\n self.__user_generator = _test_user_names(test_base_name)\n\n def new_user(self):\n \"\"\"Return a fresh user object.\"\"\"\n return self.user_class(next(self.__user_generator))\n\n def new_users(self, count=None):\n \"\"\"Return a sequence of `count` fresh user objects.\"\"\"\n return [self.user_class(x) for x in itertools.islice(self.__user_generator, 0, count)]\n\n @functools.lru_cache()\n def default(self):\n \"\"\"Return the default user\"\"\"\n return self.user_class(_get_default_user_name())\n\n @functools.lru_cache()\n def admin(self):\n \"\"\"Return the administrator user\"\"\"\n name = os.getenv('COOK_ADMIN_USER_NAME', _default_admin_name)\n return self.user_class(name)\n\n @functools.lru_cache()\n def impersonator(self):\n \"\"\"Return the impersonator user\"\"\"\n name = os.getenv('COOK_IMPERSONATOR_USER_NAME', _default_impersonator_name)\n return self.user_class(name)\n\n\ndef multi_cluster_tests_enabled():\n \"\"\"\n Returns true if the COOK_MULTI_CLUSTER environment variable is set,\n indicating that multiple cook scheduler instances are running.\n \"\"\"\n return os.getenv('COOK_MULTI_CLUSTER') is not None\n\n\[email protected]_cache()\ndef _cook_auth_scheme():\n \"\"\"Get the authentication scheme name from the cook scheduler info endpoint\"\"\"\n cook_url = retrieve_cook_url()\n _wait_for_cook(cook_url)\n cook_info = scheduler_info(cook_url)\n logger.info(f\"Cook's authentication scheme is {cook_info['authentication-scheme']}\")\n return cook_info['authentication-scheme']\n\n\ndef http_basic_auth_enabled():\n \"\"\"Returns true if Cook was configured to use the http-basic authentication scheme.\"\"\"\n return 'http-basic' == _cook_auth_scheme()\n\n\ndef kerberos_enabled():\n \"\"\"Returns true if Cook was configured to use the Kerberos authentication scheme.\"\"\"\n return 'kerberos' == _cook_auth_scheme()\n\n\ndef multi_user_tests_enabled():\n \"\"\"Returns true if Cook was configured to support multiple users.\"\"\"\n return http_basic_auth_enabled() or kerberos_enabled()\n\n\ndef get_in(dct, *keys):\n for key in keys:\n if not dct:\n return None\n try:\n dct = dct[key]\n except KeyError:\n return None\n return dct\n\n\ndef is_valid_uuid(uuid_to_test, version=4):\n \"\"\"\n Check if uuid_to_test is a valid UUID.\n Parameters\n ----------\n uuid_to_test : str\n version : {1, 2, 3, 4}\n Returns\n -------\n `True` if uuid_to_test is a valid UUID, otherwise `False`.\n Examples\n --------\n >>> is_valid_uuid('c9bf9e57-1685-4c89-bafb-ff5af830be8a')\n True\n >>> is_valid_uuid('c9bf9e58')\n False\n \"\"\"\n try:\n uuid_obj = uuid.UUID(uuid_to_test, version=version)\n except:\n return False\n\n return str(uuid_obj) == uuid_to_test\n\n\[email protected]_cache()\ndef retrieve_cook_url(varname='COOK_SCHEDULER_URL', value='http://localhost:12321'):\n cook_url = os.getenv(varname, value)\n logger.info('Using cook url %s' % cook_url)\n return cook_url\n\n\[email protected]_cache()\ndef retrieve_mesos_url(varname='MESOS_PORT', value='5050'):\n mesos_url = os.getenv('COOK_MESOS_LEADER_URL')\n if mesos_url is None:\n mesos_master_host = 'localhost'\n mesos_port = os.getenv(varname, value)\n if os.getenv('COOK_TEST_DERIVE_MESOS_HOST') is not None:\n cook_url = retrieve_cook_url()\n _wait_for_cook(cook_url)\n mesos_master = 
settings(cook_url).get('mesos-master')\n if not mesos_master:\n raise RuntimeError('Unable to derive Mesos host, mesos-master is not present in settings')\n\n mesos_master_parts = mesos_master.split(',')\n result = urlparse(mesos_master_parts[0])\n if not result.hostname:\n raise RuntimeError(f'Unable to derive Mesos host, hostname is not present in {result}')\n\n mesos_master_host = result.hostname\n\n logger.debug(f'Using mesos master host {mesos_master_host}')\n resp = session.get(f'http://{mesos_master_host}:{mesos_port}/redirect', allow_redirects=False)\n if resp.status_code != 307:\n raise RuntimeError(f'Unable to find mesos leader, redirect endpoint returned {resp.status_code}')\n mesos_url = f\"http:{resp.headers['Location']}\"\n logger.info(f'Using mesos url {mesos_url}')\n return mesos_url\n\n\ndef is_not_blank(in_string):\n \"\"\"Test if a string is not None NOR empty NOR blank.\"\"\"\n return bool(in_string and in_string.strip())\n\n\ndef get_job_executor_type(cook_url):\n \"\"\"Returns 'cook' or 'mesos' based on the default executor Cook is configured with.\"\"\"\n return 'cook' if is_not_blank(get_in(settings(cook_url), 'executor', 'command')) else 'mesos'\n\n\ndef is_connection_error(exception):\n return isinstance(exception, requests.exceptions.ConnectionError)\n\n\n@retry(retry_on_exception=is_connection_error, stop_max_delay=240000, wait_fixed=1000)\ndef _wait_for_cook(cook_url):\n logger.debug('Waiting for connection to cook...')\n # if connection is refused, an exception will be thrown\n session.get(cook_url)\n\n\ndef init_cook_session(*cook_urls):\n for cook_url in cook_urls:\n _wait_for_cook(cook_url)\n if http_basic_auth_enabled():\n session.auth = UserFactory(None).default().auth\n\n\ndef settings(cook_url):\n return session.get(f'{cook_url}/settings').json()\n\n\[email protected]_cache()\ndef scheduler_info(cook_url):\n resp = session.get(f'{cook_url}/info', auth=None)\n assert resp.status_code == 200\n return resp.json()\n\n\ndef docker_image():\n return os.getenv('COOK_TEST_DOCKER_IMAGE')\n\n\ndef minimal_job(**kwargs):\n job = {\n 'command': 'echo Default Test Command',\n 'cpus': float(os.getenv('COOK_DEFAULT_JOB_CPUS', 1.0)),\n 'max_retries': 1,\n 'mem': int(os.getenv('COOK_DEFAULT_JOB_MEM_MB', 256)),\n 'name': 'default_test_job',\n 'priority': 1,\n 'uuid': str(uuid.uuid4())\n }\n image = docker_image()\n if image:\n job['container'] = {\n 'type': 'docker',\n 'docker': {\n 'image': image,\n 'network': 'HOST',\n 'force-pull-image': False\n }\n }\n job.update(kwargs)\n return job\n\n\ndef minimal_jobs(job_count, **kwargs):\n \"\"\"Build a list of of multiple homogeneous job specifications\"\"\"\n return [minimal_job(**kwargs) for _ in range(job_count)]\n\n\ndef minimal_group(**kwargs):\n \"\"\"Build a minimal group spec\"\"\"\n return dict(uuid=str(uuid.uuid4()), **kwargs)\n\n\ndef submit_jobs(cook_url, job_specs, clones=1, pool=None, headers={}, **kwargs):\n \"\"\"\n Create and submit multiple jobs, either cloned from a single job spec,\n or specified individually in multiple job specs.\n Arguments can be manually passed to the scheduler post via kwargs.\n \"\"\"\n if isinstance(job_specs, dict):\n job_specs = [job_specs] * clones\n\n def full_spec(spec):\n if 'uuid' not in spec:\n return minimal_job(**spec)\n else:\n return spec\n\n jobs = [full_spec(j) for j in job_specs]\n request_body = {'jobs': jobs}\n if pool:\n request_body['pool'] = pool\n request_body.update(kwargs)\n logger.info(request_body)\n resp = session.post(f'{cook_url}/jobs', json=request_body, 
headers=headers)\n return [j['uuid'] for j in jobs], resp\n\n\ndef retry_jobs(cook_url, assert_response=True, use_deprecated_post=False, **kwargs):\n \"\"\"Retry one or more jobs and/or groups of jobs\"\"\"\n request_verb = session.post if use_deprecated_post else session.put\n response = request_verb(f'{cook_url}/retry', json=kwargs)\n if assert_response:\n response_info = {'code': response.status_code, 'msg': response.content}\n assert response.status_code in (200, 201), response_info\n retried_job_count = int(response.text)\n # response code 200 OK implies zero retried jobs\n assert response.status_code != 200 or retried_job_count == 0, response_info\n # response code 201 Created implies non-zero retried jobs\n assert response.status_code != 201 or retried_job_count > 0, response_info\n return response\n\n\ndef kill_jobs(cook_url, jobs, assert_response=True, expected_status_code=204):\n \"\"\"Kill one or more jobs\"\"\"\n chunksize = 100\n chunks = [jobs[i:i + chunksize] for i in range(0, len(jobs), chunksize)]\n response = []\n for chunk in chunks:\n params = {'job': [unpack_uuid(j) for j in chunk]}\n response = session.delete(f'{cook_url}/rawscheduler', params=params)\n if assert_response:\n assert expected_status_code == response.status_code, response.text\n return response\n\n\ndef kill_groups(cook_url, groups, assert_response=True, expected_status_code=204):\n \"\"\"Kill one or more groups of jobs\"\"\"\n params = {'uuid': [unpack_uuid(g) for g in groups]}\n response = session.delete(f'{cook_url}/group', params=params)\n if assert_response:\n assert expected_status_code == response.status_code, response.content\n return response\n\n\ndef submit_job(cook_url, pool=None, headers={}, **kwargs):\n \"\"\"Create and submit a single job\"\"\"\n uuids, resp = submit_jobs(cook_url, job_specs=[kwargs], pool=pool, headers=headers)\n return uuids[0], resp\n\n\ndef unpack_uuid(entity):\n \"\"\"Unpack the UUID string from a job spec, or no-op for UUID strings\"\"\"\n return entity['uuid'] if isinstance(entity, dict) else entity\n\n\ndef to_bool(v):\n \"\"\"Converts the given argument to a boolean value\"\"\"\n return v is True or str(v).lower() in ['true', '1']\n\n\ndef __get(cook_url, endpoint, assert_response=False, **kwargs):\n \"\"\"Makes a GET request to the given root URL and endpoint\"\"\"\n if 'partial' in kwargs:\n kwargs['partial'] = 'true' if to_bool(kwargs['partial']) else 'false'\n response = session.get(f'{cook_url}/{endpoint}', params=kwargs)\n if assert_response:\n response_info = {'code': response.status_code, 'msg': response.content}\n if 200 != response.status_code:\n logging.info(f'Got a non-200 response: {response_info}')\n assert 200 == response.status_code, response_info\n return response\n\n\ndef query_jobs_via_rawscheduler_endpoint(cook_url, assert_response=False, **kwargs):\n \"\"\"\n Queries cook for a set of jobs, by job and/or instance uuid. The kwargs\n passed to this function are sent straight through as query parameters on\n the request.\n If the job or instance values are dictionaries (e.g., job_specs),\n then they are automatically unpacked to get their UUIDs.\n \"\"\"\n for key in ('job', 'instance'):\n if key in kwargs:\n kwargs[key] = map(unpack_uuid, kwargs[key])\n\n return __get(cook_url, 'rawscheduler', assert_response, **kwargs)\n\n\ndef query_resource(cook_url, resource, assert_response=False, **kwargs):\n \"\"\"\n Queries cook for a set of entities by uuid. 
The kwargs\n passed to this function are sent straight through as\n query parameters on the request. If the uuid values are\n dictionaries (e.g., job_specs), then they are\n automatically unpacked to get their UUIDs.\n \"\"\"\n kwargs['uuid'] = [unpack_uuid(u) for u in kwargs['uuid']]\n return __get(cook_url, resource, assert_response, **kwargs)\n\n\ndef query_jobs(cook_url, assert_response=False, **kwargs):\n \"\"\"Queries cook for a set of jobs by job uuid\"\"\"\n return query_resource(cook_url, 'jobs', assert_response, **kwargs)\n\n\ndef query_instances(cook_url, assert_response=False, **kwargs):\n \"\"\"Queries cook for a set of job instances by instance uuid\"\"\"\n return query_resource(cook_url, 'instances', assert_response, **kwargs)\n\n\ndef query_groups(cook_url, **kwargs):\n \"\"\"\n Queries cook for a set of groups, by groups uuid. The kwargs\n passed to this function are sent straight through as query \n parameters on the request.\n \"\"\"\n return session.get('%s/group' % cook_url, params=kwargs)\n\n\ndef load_resource(cook_url, resource, uuid, assert_response=True):\n \"\"\"Loads an entity by UUID using GET /resource/UUID\"\"\"\n response = session.get(f'{cook_url}/{resource}/{uuid}')\n if assert_response:\n assert 200 == response.status_code, f'Expected 200, got {response.status_code} with body {response.text}'\n return response.json()\n\n\ndef load_job(cook_url, job_uuid, assert_response=True):\n \"\"\"Loads a job by UUID using GET /jobs/UUID\"\"\"\n return load_resource(cook_url, 'jobs', job_uuid, assert_response)\n\n\ndef load_instance(cook_url, instance_uuid, assert_response=True):\n \"\"\"Loads a job instance by UUID using GET /instances/UUID\"\"\"\n return load_resource(cook_url, 'instances', instance_uuid, assert_response)\n\n\ndef wait_until(query, predicate, max_wait_ms=DEFAULT_TIMEOUT_MS, wait_interval_ms=DEFAULT_WAIT_INTERVAL_MS):\n \"\"\"\n Block until the predicate is true for the result of the provided query.\n `query` is a thunk (nullary callable) that may be called multiple times.\n `predicate` is a unary callable that takes the result value of `query`\n and returns True if the condition is met, or False otherwise.\n See `wait_for_job` for an example of using this method.\n \"\"\"\n\n @retry(stop_max_delay=max_wait_ms, wait_fixed=wait_interval_ms)\n def wait_until_inner():\n response = query()\n if not predicate(response):\n error_msg = \"wait_until condition not yet met, retrying...\"\n logger.debug(error_msg)\n raise RuntimeError(error_msg)\n else:\n logger.info(\"wait_until condition satisfied\")\n return response\n\n try:\n return wait_until_inner()\n except:\n final_response = query()\n try:\n details = final_response.content\n except AttributeError:\n details = str(final_response)\n logger.info(f\"Timeout exceeded waiting for condition. 
Details: {details}\")\n raise\n\n\ndef all_instances_done(response, accepted_states=('success', 'failed')):\n \"\"\"\n Helper method used with the wait_until function.\n Checks a response from query_jobs to see if all jobs and instances have completed.\n \"\"\"\n for job in response.json():\n if job['state'] not in accepted_states:\n return False\n for inst in job['instances']:\n if inst['status'] not in accepted_states:\n logger.info(f\"Job {job['uuid']} instance {inst['task_id']} has unaccepted status {inst['status']}.\")\n return False\n return True\n\n\ndef all_instances_killed(response):\n \"\"\"\n Helper method used with the wait_until function.\n Checks a response from query_jobs to see if all jobs and instances have been killed.\n \"\"\"\n return all_instances_done(response, accepted_states=['failed'])\n\n\ndef group_some_job_started(group_response):\n \"\"\"\n Helper method used with the wait_until function.\n Checks a response from group_detail_query to see if any job in the group has started.\n \"\"\"\n group = group_response.json()[0]\n running_count = group['running']\n logger.info(f\"Currently {running_count} jobs running in group {group}\")\n return running_count > 0\n\n\ndef group_some_job_done(group_response):\n \"\"\"\n Helper method used with the wait_until function.\n Checks a response from group_detail_query to see if any job in the group has completed.\n \"\"\"\n group = group_response.json()[0]\n completed_count = group['completed']\n logger.info(f\"Currently {completed_count} jobs completed in group {group['uuid']}\")\n return completed_count > 0\n\n\ndef group_detail_query(cook_url, group_uuid, assert_response=True):\n \"\"\"Get a group with full status details, returning the response object.\"\"\"\n response = query_groups(cook_url, uuid=[group_uuid], detailed='true')\n if assert_response:\n assert 200 == response.status_code, response.content\n return response\n\n\ndef wait_for_job(cook_url, job_id, status, max_wait_ms=DEFAULT_TIMEOUT_MS):\n \"\"\"Wait for the given job's status to change to the specified value.\"\"\"\n return wait_for_jobs_in_statuses(cook_url, [job_id], [status], max_wait_ms)[0]\n\ndef wait_for_jobs(cook_url, job_ids, status, max_wait_ms=DEFAULT_TIMEOUT_MS):\n return wait_for_jobs_in_statuses(cook_url, job_ids, [status], max_wait_ms)\n\ndef wait_for_job_in_statuses(cook_url, job_id, statuses, max_wait_ms=DEFAULT_TIMEOUT_MS):\n \"\"\"Wait for the given job's status to change to one of the specified statuses.\"\"\"\n return wait_for_jobs_in_statuses(cook_url, [job_id], statuses, max_wait_ms)[0]\n\ndef wait_for_jobs_in_statuses(cook_url, job_ids, statuses, max_wait_ms=DEFAULT_TIMEOUT_MS):\n \"\"\"Wait for the given job's status to change to one of the specified statuses.\"\"\"\n def query():\n return query_jobs(cook_url, True, uuid=job_ids)\n\n def predicate(resp):\n jobs = resp.json()\n for job in jobs:\n logger.info(f\"Job {job['uuid']} has status {job['status']}, expecting {statuses}.\")\n return all([job['status'] in statuses for job in jobs])\n\n response = wait_until(query, predicate, max_wait_ms=max_wait_ms, wait_interval_ms=DEFAULT_WAIT_INTERVAL_MS * 2)\n return response.json()\n\ndef wait_for_exit_code(cook_url, job_id, max_wait_ms=DEFAULT_TIMEOUT_MS):\n \"\"\"\n Wait for the given job's exit_code field to appear.\n (Only supported by Cook Executor jobs.)\n Returns an up-to-date job description object on success,\n and raises an exception if the max_wait_ms wait time is exceeded.\n \"\"\"\n job_id = unpack_uuid(job_id)\n\n def query():\n 


def wait_for_exit_code(cook_url, job_id, max_wait_ms=DEFAULT_TIMEOUT_MS):
    """
    Wait for the given job's exit_code field to appear.
    (Only supported by Cook Executor jobs.)
    Returns an up-to-date job description object on success,
    and raises an exception if the max_wait_ms wait time is exceeded.
    """
    job_id = unpack_uuid(job_id)

    def query():
        return query_jobs(cook_url, True, uuid=[job_id]).json()[0]

    def predicate(job):
        if not job['instances']:
            logger.info(f"Job {job_id} has no instances.")
        else:
            for inst in job['instances']:
                if 'exit_code' not in inst:
                    logger.info(f"Job {job_id} instance {inst['task_id']} has no exit code.")
                else:
                    logger.info(f"Job {job_id} instance {inst['task_id']} has exit code {inst['exit_code']}.")
                    job['instance-with-exit-code'] = inst
                    return True

    job = wait_until(query, predicate, max_wait_ms=max_wait_ms)
    return job['instance-with-exit-code']


def wait_for_sandbox_directory(cook_url, job_id):
    """
    Wait for the given job's sandbox_directory field to appear.
    Returns an up-to-date job description object on success,
    and raises an exception if the max wait time is exceeded.
    """
    job_id = unpack_uuid(job_id)

    cook_settings = settings(cook_url)
    cache_ttl_ms = cook_settings['agent-query-cache']['ttl-ms']
    sync_interval_ms = cook_settings['sandbox-syncer']['sync-interval-ms']
    max_wait_ms = min(4 * max(cache_ttl_ms, sync_interval_ms), 4 * 60 * 1000)

    def query():
        response = query_jobs(cook_url, True, uuid=[job_id])
        return response.json()[0]

    def predicate(job):
        if not job['instances']:
            logger.info(f"Job {job_id} has no instances.")
        else:
            for inst in job['instances']:
                if 'sandbox_directory' not in inst:
                    logger.info(f"Job {job_id} instance {inst['task_id']} has no sandbox directory.")
                else:
                    logger.info(
                        f"Job {job_id} instance {inst['task_id']} has sandbox directory {inst['sandbox_directory']}.")
                    return True

    job = wait_until(query, predicate, max_wait_ms=max_wait_ms, wait_interval_ms=250)
    for inst in job['instances']:
        if 'sandbox_directory' in inst:
            return inst


def wait_for_end_time(cook_url, job_id, max_wait_ms=DEFAULT_TIMEOUT_MS):
    """
    Wait for the given job's end_time field to appear in instance 0.
    Returns an up-to-date job description object on success,
    and raises an exception if the max_wait_ms wait time is exceeded.
    """
    job_id = unpack_uuid(job_id)

    def query():
        return query_jobs(cook_url, True, uuid=[job_id])

    def predicate(resp):
        job = resp.json()[0]
        if not job['instances']:
            logger.info(f"Job {job_id} has no instances.")
        else:
            inst = job['instances'][0]
            if 'end_time' not in inst:
                logger.info(f"Job {job_id} instance {inst['task_id']} has no end time.")
            else:
                logger.info(f"Job {job_id} instance {inst['task_id']} has end_time {inst['end_time']}.")
                return True

    response = wait_until(query, predicate, max_wait_ms=max_wait_ms)
    return response.json()[0]


def wait_for_running_instance(cook_url, job_id, max_wait_ms=DEFAULT_TIMEOUT_MS):
    """Waits for the job with the given job_id to have a running instance."""
    job_id = unpack_uuid(job_id)

    def query():
        return query_jobs(cook_url, True, uuid=[job_id])

    def predicate(resp):
        job = resp.json()[0]
        if not job['instances']:
            logger.info(f"Job {job_id} has no instances.")
        else:
            for inst in job['instances']:
                status = inst['status']
                logger.info(f"Job {job_id} instance {inst['task_id']} has status {status}, expected running.")
                return status == 'running'

    response = wait_until(query, predicate, max_wait_ms=max_wait_ms)
    return response.json()[0]['instances'][0]


def get_mesos_state(mesos_url):
    """Queries state.json from the Mesos master."""
    return session.get('%s/state.json' % mesos_url).json()
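

# Illustrative sketch, not part of the original suite: the lifecycle helpers
# above are typically chained in tests: first wait for an instance to start
# running, then wait for it to finish and record an end_time.
def _example_observe_instance_lifecycle(cook_url, job_uuid):
    # block until the scheduler reports a running instance
    instance = wait_for_running_instance(cook_url, job_uuid)
    logger.info(f"Instance {instance['task_id']} is running")
    # then block until the job's first instance has an end_time
    job = wait_for_end_time(cook_url, job_uuid)
    return job['instances'][0]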
\"\"\"\n Wait for the output_url for the given job to be populated,\n retrying every 5 seconds for a maximum of 2 minutes.\n The retries are necessary because currently the Mesos\n agent sandbox directories are cached in Cook.\n \"\"\"\n\n def query():\n return load_job(cook_url, job_uuid, assert_response=False)\n\n def predicate(job):\n for instance in job['instances']:\n if 'output_url' in instance:\n return True\n else:\n logger.info(f\"Job {job['uuid']} instance {instance['task_id']} had no output_url\")\n\n job = wait_until(query, predicate)\n for instance in job['instances']:\n if 'output_url' in instance:\n return instance\n\n\ndef list_jobs(cook_url, **kwargs):\n \"\"\"Makes a request to the /list endpoint using the provided kwargs as the query params\"\"\"\n if 'start_ms' in kwargs:\n kwargs['start-ms'] = kwargs.pop('start_ms')\n if 'end_ms' in kwargs:\n kwargs['end-ms'] = kwargs.pop('end_ms')\n query_params = urlencode(kwargs)\n resp = session.get('%s/list?%s' % (cook_url, query_params))\n return resp\n\n\ndef jobs(cook_url, headers={}, **kwargs):\n \"\"\"Makes a request to the /jobs endpoint using the provided kwargs as the query params\"\"\"\n query_params = urlencode(kwargs, doseq=True)\n resp = session.get('%s/jobs?%s' % (cook_url, query_params), headers=headers)\n return resp\n\n\ndef contains_job_uuid(jobs, job_uuid):\n \"\"\"Returns true if jobs contains a job with the given uuid\"\"\"\n return any(job for job in jobs if job['uuid'] == job_uuid)\n\n\ndef get_executor(agent_state, executor_id, include_completed=False):\n \"\"\"Returns the executor with id executor_id from agent_state\"\"\"\n for framework in agent_state['frameworks']:\n for executor in framework['executors']:\n if executor['id'] == executor_id:\n return executor\n if include_completed:\n for framework in agent_state['frameworks'] + agent_state['completed_frameworks']:\n for executor in framework['completed_executors']:\n if executor['id'] == executor_id:\n return executor\n return None\n\n\ndef get_user(cook_url, job_uuid):\n \"\"\"Retrieves the job corresponding to the given job_uuid and returns the user\"\"\"\n return load_job(cook_url, job_uuid)['user']\n\n\ndef unscheduled_jobs(cook_url, *job_uuids, partial=None):\n \"\"\"Retrieves the unscheduled_jobs reasons for the given job_uuid\"\"\"\n query_params = [('job', u) for u in job_uuids]\n if partial is not None:\n query_params.append(('partial', partial))\n resp = session.get(f'{cook_url}/unscheduled_jobs?{urlencode(query_params)}')\n job_reasons = resp.json() if resp.status_code == 200 else []\n return job_reasons, resp\n\n\ndef wait_for_instance(cook_url, job_uuid, max_wait_ms=DEFAULT_TIMEOUT_MS, wait_interval_ms=1000, status=None):\n \"\"\"Waits for the job with the given job_uuid to have at least one instance, and returns the first instance uuid\"\"\"\n def instances_with_status(job):\n if status is None:\n return job['instances']\n else:\n return [i for i in job['instances'] if i['status'] == status]\n job = wait_until(lambda: load_job(cook_url, job_uuid), lambda j: len(instances_with_status(j)) >= 1,\n max_wait_ms=max_wait_ms, wait_interval_ms=wait_interval_ms)\n instance = job['instances'][0]\n instance['parent'] = job\n return instance\n\n\ndef sleep_for_publish_interval(cook_url):\n # allow enough time for progress and sandbox updates to be submitted\n cook_settings = settings(cook_url)\n progress_publish_interval_ms = get_in(cook_settings, 'progress', 'publish-interval-ms')\n wait_publish_interval_ms = min(3 * progress_publish_interval_ms, 


def sleep_for_publish_interval(cook_url):
    # allow enough time for progress and sandbox updates to be submitted
    cook_settings = settings(cook_url)
    progress_publish_interval_ms = get_in(cook_settings, 'progress', 'publish-interval-ms')
    wait_publish_interval_ms = min(3 * progress_publish_interval_ms, 20000)
    time.sleep(wait_publish_interval_ms / 1000.0)


def progress_line(cook_url, percent, message):
    r"""Simple text replacement of regex string using expected patterns of (\d+), (?: )? and (.*)."""
    cook_settings = settings(cook_url)
    regex_string = get_in(cook_settings, 'executor', 'default-progress-regex-string')
    if not regex_string:
        regex_string = r'progress:\s+([0-9]*\.?[0-9]+)($|\s+.*)'
    if r'([0-9]*\.?[0-9]+)' not in regex_string:
        raise Exception(rf'([0-9]*\.?[0-9]+) not present in {regex_string} regex string')
    if r'($|\s+.*)' not in regex_string:
        raise Exception(rf'($|\s+.*) not present in {regex_string} regex string')
    return (regex_string
            .replace(r'([0-9]*\.?[0-9]+)', str(percent))
            .replace(r'($|\s+.*)', str(f' {message}'))
            .replace(r'\s+', ' ')
            .replace('\\', ''))
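

# Illustrative sketch, not part of the original suite: progress_line renders a
# message that the executor's configured progress regex will match, so a test
# can embed it in a job command. The percent and message values are arbitrary.
def _example_progress_command(cook_url):
    # with the default regex string this yields "progress: 50 half way"
    line = progress_line(cook_url, 50, 'half way')
    return f'echo "{line}" && sleep 1'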


def group_submit_kill_retry(cook_url, retry_failed_jobs_only):
    """
    Helper method for integration tests on groups, following these steps:
    1) Creates a group of 10 jobs
    2) Waits for at least one job to start
    3) Kills all the jobs
    4) Retries the jobs
    5) Waits for at least one job to start (again)
    6) Finally kills all the jobs again (clean up)
    Returns the job info (json response) for the group's jobs after step 5.
    """
    group_spec = minimal_group()
    group_uuid = group_spec['uuid']
    job_spec = {'group': group_uuid, 'command': 'sleep 600'}
    try:
        jobs, resp = submit_jobs(cook_url, job_spec, 10, groups=[group_spec])
        assert resp.status_code == 201, resp

        def group_query():
            return group_detail_query(cook_url, group_uuid)

        # wait for some job to start
        wait_until(group_query, group_some_job_started)
        # kill all jobs in the group (and wait for the kill to complete)
        logger.info(f'Killing all jobs in group {group_uuid}.')
        kill_groups(cook_url, [group_uuid])

        def jobs_query():
            return query_jobs(cook_url, True, uuid=jobs)

        wait_until(jobs_query, all_instances_done)
        jobs = query_jobs(cook_url, assert_response=True, uuid=jobs).json()
        for job in jobs:
            logger.info(f'Job details: {json.dumps(job, sort_keys=True)}')
        # retry all jobs in the group
        retry_jobs(cook_url, retries=2, groups=[group_uuid], failed_only=retry_failed_jobs_only)
        # wait for some job to start
        wait_until(group_query, group_some_job_started)
        # return final job details to caller for assertion checks
        jobs = query_jobs(cook_url, assert_response=True, uuid=jobs).json()
        for job in jobs:
            for instance in job['instances']:
                mesos.dump_sandbox_files(session, instance, job)
        return jobs
    finally:
        # ensure that we don't leave a bunch of jobs running/waiting
        kill_groups(cook_url, [group_uuid])


def group_submit_retry(cook_url, command, predicate_statuses, retry_failed_jobs_only=True):
    """
    Helper method for integration tests on groups, following these steps:
    1) Creates a group of 5 jobs
    2) Waits for the job statuses to match those in predicate_statuses
    3) Retries the jobs
    4) Waits for the job statuses to match those in predicate_statuses (again)
    5) Finally kills all the jobs (clean up)
    Returns the job info (json response) for the group's jobs after step 4.
    """
    job_count = 5
    group_spec = minimal_group()
    group_uuid = group_spec['uuid']
    job_spec = {'group': group_uuid, 'max_retries': 1, 'command': command}

    def group_query():
        return group_detail_query(cook_url, group_uuid)

    def status_condition(response):
        group = response.json()[0]
        statuses_map = {x: group[x] for x in predicate_statuses}
        status_counts = statuses_map.values()
        # for running & waiting, we want at least one running (not all waiting)
        not_all_waiting = group['waiting'] != job_count
        logger.debug(f"Currently {statuses_map} jobs in group {group['uuid']}")
        if not_all_waiting and sum(status_counts) == job_count:
            return True
        else:
            logger.debug(f'Group details: {group}')
            jobs = query_jobs(cook_url, assert_response=True, uuid=group['jobs']).json()
            for job in jobs:
                logger.debug(f'Job details: {json.dumps(job, sort_keys=True)}')
            return False

    try:
        jobs, resp = submit_jobs(cook_url, job_spec, job_count, groups=[group_spec])
        assert resp.status_code == 201, resp
        # wait for the expected job statuses specified in predicate_statuses
        wait_until(group_query, status_condition)
        # retry all failed jobs in the group (if any)
        retry_jobs(cook_url, increment=1, failed_only=retry_failed_jobs_only, groups=[group_uuid])
        # wait again for the expected job statuses specified in predicate_statuses
        wait_until(group_query, status_condition)
        # return final job details to caller for assertion checks
        return query_jobs(cook_url, assert_response=True, uuid=jobs).json()
    finally:
        # ensure that we don't leave a bunch of jobs running/waiting
        kill_groups(cook_url, [group_uuid])


def user_current_usage(cook_url, headers={}, **kwargs):
    """
    Queries cook for a user's current resource usage
    based on their currently running jobs.
    """
    return session.get('%s/usage' % cook_url, params=kwargs, headers=headers)


def query_queue(cook_url, **kwargs):
    """Get current jobs via the queue endpoint (admin-only)"""
    return session.get(f'{cook_url}/queue', **kwargs)


def get_limit(cook_url, limit_type, user, pool=None, headers={}):
    """Get the resource limits ('share' or 'quota') for the given user."""
    params = {'user': user}
    if pool is not None:
        params['pool'] = pool
    return session.get(f'{cook_url}/{limit_type}', params=params, headers=headers)


def set_limit(cook_url, limit_type, user, mem=None, cpus=None, gpus=None, count=None, reason='testing', pool=None,
              headers={}):
    """
    Set resource limits for the given user.
    The limit_type parameter should be either 'share' or 'quota', specifying which type of limit is being set.
    Any subset of the mem, cpus, gpus and count (job-count) limits can be specified.
    """
    limits = {}
    body = {'user': user, limit_type: limits}
    if reason is not None:
        body['reason'] = reason
    if mem is not None:
        limits['mem'] = mem
    if cpus is not None:
        limits['cpus'] = cpus
    if gpus is not None:
        limits['gpus'] = gpus
    if count is not None:
        limits['count'] = count
    if pool is not None:
        body['pool'] = pool
    logger.debug(f'Setting {user} {limit_type} to {limits}: {body}')
    return session.post(f'{cook_url}/{limit_type}', json=body, headers=headers)


def reset_limit(cook_url, limit_type, user, reason='testing', pool=None, headers={}):
    """
    Resets resource limits for the given user to the default for the cluster.
    The limit_type parameter should be either 'share' or 'quota', specifying which type of limit is being reset.
    """
    params = {'user': user}
    if reason is not None:
        params['reason'] = reason
    if pool is not None:
        params['pool'] = pool
    return session.delete(f'{cook_url}/{limit_type}', params=params, headers=headers)
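

# Illustrative sketch, not part of the original suite: a quota round trip with
# the limit helpers above. The user name is hypothetical, and the reset runs in
# a finally block so a failed assertion cannot leak a modified limit.
def _example_quota_round_trip(cook_url, user='some-test-user'):
    try:
        resp = set_limit(cook_url, 'quota', user, cpus=2, mem=1024, count=5)
        assert resp.status_code in (200, 201), resp.content
        resp = get_limit(cook_url, 'quota', user)
        assert resp.status_code == 200, resp.content
        return resp.json()
    finally:
        reset_limit(cook_url, 'quota', user)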


def retrieve_progress_file_env(cook_url):
    """Retrieves the environment variable used by the cook executor to lookup the progress file."""
    cook_settings = settings(cook_url)
    default_value = 'EXECUTOR_PROGRESS_OUTPUT_FILE'
    return get_in(cook_settings, 'executor', 'environment', 'EXECUTOR_PROGRESS_OUTPUT_FILE_ENV') or default_value


def get_instance_stats(cook_url, **kwargs):
    """Gets instance stats using the provided kwargs as query params"""
    resp = session.get(f'{cook_url}/stats/instances', params=kwargs)
    return resp.json(), resp


def to_iso(time_millis):
    """Converts the given time since epoch in millis to an ISO 8601 string"""
    return datetime.utcfromtimestamp(time_millis / 1000).isoformat()


def percentile(a, q):
    """Returns the qth percentile of a"""
    return numpy.percentile(a, q, interpolation='higher')
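

# Worked examples (illustrative, not part of the original suite): percentile
# above uses numpy's 'higher' interpolation, so the result is always an element
# of the input rather than an interpolated value.
#
#     percentile([1, 10, 100, 1000], 50) == 100
#     to_iso(0) == '1970-01-01T00:00:00'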


def default_pool(cook_url):
    """Returns the configured default pool, or None if one is not configured"""
    cook_settings = settings(cook_url)
    default_pool = get_in(cook_settings, 'pools', 'default')
    return default_pool if default_pool != '' else None


def all_pools(cook_url):
    """Returns the list of all pools that exist"""
    resp = session.get(f'{cook_url}/pools')
    return resp.json(), resp


def active_pools(cook_url):
    """Returns the list of all active pools that exist"""
    pools, resp = all_pools(cook_url)
    return [p for p in pools if p['state'] == 'active'], resp


def has_ephemeral_hosts():
    """Returns True if the cluster under test has ephemeral hosts"""
    s = os.getenv('COOK_TEST_EPHEMERAL_HOSTS')
    if s is not None:
        return to_bool(s)
    else:
        # Default to assuming that the cluster
        # under test does not have ephemeral hosts
        return False


@functools.lru_cache()
def _cook_executor_config():
    """Get the cook executor config from the /settings endpoint"""
    cook_url = retrieve_cook_url()
    _wait_for_cook(cook_url)
    init_cook_session(cook_url)
    cook_executor_config = get_in(settings(cook_url), 'executor')
    logger.info(f"Cook's executor config is {cook_executor_config}")
    return cook_executor_config


def is_cook_executor_in_use():
    """Returns true if the cook executor is configured and COOK_TEST_DOCKER_IMAGE is not set"""
    is_cook_executor_configured = is_not_blank(get_in(_cook_executor_config(), 'command'))
    return is_cook_executor_configured and docker_image() is None


def slave_cpus(mesos_url, hostname):
    """Returns the cpus of the specified Mesos agent"""
    slaves = get_mesos_state(mesos_url)['slaves']
    # Here we need to use unreserved_resources because Mesos might only
    # send offers for the unreserved (role = "*") portions of the agents.
    slave_cpus = next(s['unreserved_resources']['cpus'] for s in slaves if s['hostname'] == hostname)
    return slave_cpus


def slave_pool(mesos_url, hostname):
    """Returns the pool of the specified Mesos agent, or None if the agent doesn't have the attribute"""
    slaves = get_mesos_state(mesos_url)['slaves']
    pool = next(s.get('attributes', {}).get('cook-pool', None) for s in slaves if s['hostname'] == hostname)
    return pool


def max_slave_cpus(mesos_url):
    """Returns the max cpus of all current Mesos agents"""
    slaves = get_mesos_state(mesos_url)['slaves']
    max_slave_cpus = max([s['resources']['cpus'] for s in slaves])
    return max_slave_cpus


def task_constraint_cpus(cook_url):
    """Returns the max cpus that can be submitted to the cluster"""
    task_constraint_cpus = settings(cook_url)['task-constraints']['cpus']
    return task_constraint_cpus


def max_cpus(mesos_url, cook_url):
    """Returns the maximum cpus we can submit that actually fit on a slave"""
    slave_cpus = max_slave_cpus(mesos_url)
    constraint_cpus = task_constraint_cpus(cook_url)
    max_cpus = min(slave_cpus, constraint_cpus)
    logging.debug(f'Max cpus we can submit that will get scheduled is {max_cpus}')
    return max_cpus


class CookTest(unittest.TestCase):
    def current_name(self):
        """Returns the name of the currently running test function"""
        test_id = self.id()
        return test_id.split('.')[-1]


def docker_tests_enabled():
    """Returns true if docker tests are enabled (i.e. a docker image is configured)"""
    return docker_image() is not None


@functools.lru_cache()
def is_preemption_enabled():
    """Returns true if task preemption is enabled on the cluster"""
    cook_url = retrieve_cook_url()
    init_cook_session(cook_url)
    _wait_for_cook(cook_url)
    max_preemption = settings(cook_url)['rebalancer'].get('max-preemption')
    return max_preemption is not None


def current_milli_time():
    """Returns the current epoch time in milliseconds"""
    return int(round(time.time() * 1000))


@functools.lru_cache()
def are_pools_enabled():
    """Returns true if there are at least 2 active pools on the cluster"""
    cook_url = retrieve_cook_url()
    init_cook_session(cook_url)
    _wait_for_cook(cook_url)
    return len(active_pools(cook_url)[0]) > 1


def hosts_to_consider(cook_url, mesos_url):
    """
    Returns the hosts in the default pool, or all hosts if the cluster is not using pools
    """
    state = get_mesos_state(mesos_url)
    slaves = state['slaves']
    pool = default_pool(cook_url)
    slaves = [s for s in slaves if s['attributes'].get('cook-pool', None) == pool] if pool else slaves
    num_to_log = min(len(slaves), 10)
    logging.info(f'First {num_to_log} hosts to consider: {json.dumps(slaves[:num_to_log], indent=2)}')
    return slaves


def num_hosts_to_consider(cook_url, mesos_url):
    """
    Returns the number of hosts in the default pool, or the
    total number of hosts if the cluster is not using pools
    """
    num_hosts = len(hosts_to_consider(cook_url, mesos_url))
    logging.info(f'There are {num_hosts} hosts to consider')
    return num_hosts


def should_expect_sandbox_directory(instance):
    """
    Returns true if we should expect the sandbox directory
    to get populated for the provided instance
    """
    expect_sandbox = not has_ephemeral_hosts() or instance['executor'] == 'cook'
    if expect_sandbox:
        logging.info('The sandbox directory is expected to get populated')
    else:
        logging.info('The sandbox directory is not expected to get populated')
    return expect_sandbox


def should_expect_sandbox_directory_for_job(job):
    """
    Returns true if we should expect the sandbox directory
    to get populated for the provided job
    """
    instance = job['instances'][0]
    return should_expect_sandbox_directory(instance)


def data_local_service_is_set():
    """Returns true if the DATA_LOCAL_SERVICE environment variable is set"""
    return os.getenv('DATA_LOCAL_SERVICE', None) is not None


def demo_plugin_is_configured(cook_url):
    """Returns true if the demo plugin's submission and launch factory-fns are both configured"""
    settings_dict = settings(cook_url)
    # Because we always create plugin configuration in config.clj, the first keys always exist.
    # The actual factory-fn keys are not set unless the user specifies them.
    if settings_dict['plugins']['job-submission-validator'].get('factory-fn') != "cook.demo-plugin/submission-factory":
        return False
    if settings_dict['plugins']['job-launch-filter'].get('factory-fn') != "cook.demo-plugin/launch-factory":
        return False
    return True
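

# Illustrative sketch, not part of the original suite: the feature predicates
# above are typically used to skip tests that the cluster cannot support. This
# helper is meant to be called from a CookTest method.
def _example_skip_unless_pools(test_case):
    # skip the calling test when fewer than two active pools exist
    if not are_pools_enabled():
        test_case.skipTest('requires at least two active pools')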


@functools.lru_cache()
def _fenzo_fitness_calculator():
    """Get the fenzo fitness calculator from the /settings endpoint"""
    cook_url = retrieve_cook_url()
    _wait_for_cook(cook_url)
    init_cook_session(cook_url)
    fitness_calculator = get_in(settings(cook_url), 'fenzo-fitness-calculator')
    logger.info(f"Cook's fitness calculator is {fitness_calculator}")
    return fitness_calculator


def using_data_local_fitness_calculator():
    """Returns true if the cluster is configured with the data-local fitness calculator"""
    return _fenzo_fitness_calculator() == 'cook.mesos.data-locality/make-data-local-fitness-calculator'


def get_agent_endpoint(master_state, agent_hostname):
    """Returns the state.json endpoint of the agent with the given hostname, or None if it cannot be found"""
    # find the agent record for the requested hostname, if any
    agent = next((agent for agent in master_state['slaves']
                  if agent['hostname'] == agent_hostname), None)
    if agent is None:
        logger.warning(f"Could not find agent for hostname {agent_hostname}")
        logger.warning(f"slaves: {master_state['slaves']}")
        return None
    else:
        # Get the host and port of the agent API.
        # Example pid: "slave(1)@172.17.0.7:5051"
        agent_hostport = agent['pid'].split('@')[1]
        agent_port = agent_hostport.split(':')[1]
        return f'http://{agent_hostname}:{agent_port}/state.json'


@functools.lru_cache()
def _supported_isolators():
    """Returns the set of isolators supported by the first Mesos agent"""
    mesos_url = retrieve_mesos_url()
    mesos_state = get_mesos_state(mesos_url)
    slave_endpoint = get_agent_endpoint(mesos_state, mesos_state['slaves'][0]['hostname'])
    slave_state = session.get(slave_endpoint).json()
    if 'flags' in slave_state:
        return set(slave_state['flags']['isolation'].split(','))
    else:
        logger.error(f'Unable to determine flags from slave state at {slave_endpoint}: {slave_state}')
        return set()


def supports_mesos_containerizer_images():
    """Returns true if the agents support image provisioning via the Mesos containerizer"""
    isolators = _supported_isolators()
    return 'filesystem/linux' in isolators and 'docker/runtime' in isolators
"
] |
[
[
"numpy.percentile"
]
] |