Columns:
  repo_name: string (length 6 to 130)
  hexsha: list
  file_path: list
  code: list
  apis: list
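The rows below follow the schema above: each row pairs a repository name with one commit hash, one file path, the full source text of that file, and the list of library calls detected in it. As a minimal, hedged sketch of how such rows could be consumed, assuming the dump has been exported as JSON Lines to a file named rows.jsonl with keys matching the column names (the file name and the JSONL layout are assumptions, not part of this dump):

import json

# Iterate over one assumed JSONL export of the rows shown below.
with open("rows.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        repo = row["repo_name"]      # e.g. "Ahren09/RecBole"
        sha = row["hexsha"][0]       # one commit hash per listed file
        path = row["file_path"][0]   # path of the source file inside the repo
        source = row["code"][0]      # full text of that file
        apis = row["apis"][0]        # library calls detected in the file
        print(repo, path, len(source), len(apis))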
Ahren09/RecBole
[ "f04084b8d2cffcb79eb9e4b21325f8f6c75c638e" ]
[ "recbole/model/general_recommender/recvae.py" ]
[ "# -*- coding: utf-8 -*-\n# @Time : 2021/2/28\n# @Author : Lanling Xu\n# @Email : [email protected]\n\nr\"\"\"\nRecVAE\n################################################\nReference:\n Shenbin, Ilya, et al. \"RecVAE: A new variational autoencoder for Top-N recommendations with implicit feedback.\" In WSDM 2020.\n\nReference code:\n https://github.com/ilya-shenbin/RecVAE\n\"\"\"\n\nimport numpy as np\nfrom copy import deepcopy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom recbole.model.abstract_recommender import GeneralRecommender\nfrom recbole.model.init import xavier_normal_initialization\nfrom recbole.utils import InputType\n\n\ndef swish(x):\n r\"\"\"Swish activation function:\n\n .. math::\n \\text{Swish}(x) = \\frac{x}{1 + \\exp(-x)}\n \"\"\"\n return x.mul(torch.sigmoid(x))\n\n\ndef log_norm_pdf(x, mu, logvar):\n return -0.5 * (logvar + np.log(2 * np.pi) + (x - mu).pow(2) / logvar.exp())\n\n\nclass CompositePrior(nn.Module):\n\n def __init__(self, hidden_dim, latent_dim, input_dim, mixture_weights):\n super(CompositePrior, self).__init__()\n\n self.mixture_weights = mixture_weights\n\n self.mu_prior = nn.Parameter(torch.Tensor(1, latent_dim), requires_grad=False)\n self.mu_prior.data.fill_(0)\n\n self.logvar_prior = nn.Parameter(torch.Tensor(1, latent_dim), requires_grad=False)\n self.logvar_prior.data.fill_(0)\n\n self.logvar_uniform_prior = nn.Parameter(torch.Tensor(1, latent_dim), requires_grad=False)\n self.logvar_uniform_prior.data.fill_(10)\n\n self.encoder_old = Encoder(hidden_dim, latent_dim, input_dim)\n self.encoder_old.requires_grad_(False)\n\n def forward(self, x, z):\n post_mu, post_logvar = self.encoder_old(x, 0)\n\n stnd_prior = log_norm_pdf(z, self.mu_prior, self.logvar_prior)\n post_prior = log_norm_pdf(z, post_mu, post_logvar)\n unif_prior = log_norm_pdf(z, self.mu_prior, self.logvar_uniform_prior)\n\n gaussians = [stnd_prior, post_prior, unif_prior]\n gaussians = [g.add(np.log(w)) for g, w in zip(gaussians, self.mixture_weights)]\n\n density_per_gaussian = torch.stack(gaussians, dim=-1)\n\n return torch.logsumexp(density_per_gaussian, dim=-1)\n\n\nclass Encoder(nn.Module):\n\n def __init__(self, hidden_dim, latent_dim, input_dim, eps=1e-1):\n super(Encoder, self).__init__()\n\n self.fc1 = nn.Linear(input_dim, hidden_dim)\n self.ln1 = nn.LayerNorm(hidden_dim, eps=eps)\n self.fc2 = nn.Linear(hidden_dim, hidden_dim)\n self.ln2 = nn.LayerNorm(hidden_dim, eps=eps)\n self.fc3 = nn.Linear(hidden_dim, hidden_dim)\n self.ln3 = nn.LayerNorm(hidden_dim, eps=eps)\n self.fc4 = nn.Linear(hidden_dim, hidden_dim)\n self.ln4 = nn.LayerNorm(hidden_dim, eps=eps)\n self.fc5 = nn.Linear(hidden_dim, hidden_dim)\n self.ln5 = nn.LayerNorm(hidden_dim, eps=eps)\n self.fc_mu = nn.Linear(hidden_dim, latent_dim)\n self.fc_logvar = nn.Linear(hidden_dim, latent_dim)\n\n def forward(self, x, dropout_prob):\n x = F.normalize(x)\n x = F.dropout(x, dropout_prob, training=self.training)\n\n h1 = self.ln1(swish(self.fc1(x)))\n h2 = self.ln2(swish(self.fc2(h1) + h1))\n h3 = self.ln3(swish(self.fc3(h2) + h1 + h2))\n h4 = self.ln4(swish(self.fc4(h3) + h1 + h2 + h3))\n h5 = self.ln5(swish(self.fc5(h4) + h1 + h2 + h3 + h4))\n return self.fc_mu(h5), self.fc_logvar(h5)\n\n\nclass RecVAE(GeneralRecommender):\n r\"\"\"Collaborative Denoising Auto-Encoder (RecVAE) is a recommendation model\n for top-N recommendation with implicit feedback.\n\n We implement the model following the original author\n \"\"\"\n input_type = InputType.PAIRWISE\n\n def __init__(self, config, 
dataset):\n super(RecVAE, self).__init__(config, dataset)\n\n self.hidden_dim = config[\"hidden_dimension\"]\n self.latent_dim = config['latent_dimension']\n self.dropout_prob = config['dropout_prob']\n self.beta = config['beta']\n self.mixture_weights = config['mixture_weights']\n self.gamma = config['gamma']\n\n self.history_item_id, self.history_item_value, _ = dataset.history_item_matrix()\n self.history_item_id = self.history_item_id.to(self.device)\n self.history_item_value = self.history_item_value.to(self.device)\n\n self.encoder = Encoder(self.hidden_dim, self.latent_dim, self.n_items)\n self.prior = CompositePrior(self.hidden_dim, self.latent_dim, self.n_items, self.mixture_weights)\n self.decoder = nn.Linear(self.latent_dim, self.n_items)\n\n # parameters initialization\n self.apply(xavier_normal_initialization)\n\n def get_rating_matrix(self, user):\n r\"\"\"Get a batch of user's feature with the user's id and history interaction matrix.\n\n Args:\n user (torch.LongTensor): The input tensor that contains user's id, shape: [batch_size, ]\n\n Returns:\n torch.FloatTensor: The user's feature of a batch of user, shape: [batch_size, n_items]\n \"\"\"\n # Following lines construct tensor of shape [B,n_items] using the tensor of shape [B,H]\n col_indices = self.history_item_id[user].flatten()\n row_indices = torch.arange(user.shape[0]).to(self.device) \\\n .repeat_interleave(self.history_item_id.shape[1], dim=0)\n rating_matrix = torch.zeros(1).to(self.device).repeat(user.shape[0], self.n_items)\n rating_matrix.index_put_((row_indices, col_indices), self.history_item_value[user].flatten())\n return rating_matrix\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = torch.exp(0.5 * logvar)\n epsilon = torch.zeros_like(std).normal_(mean=0, std=0.01)\n return mu + epsilon * std\n else:\n return mu\n\n def forward(self, rating_matrix, dropout_prob):\n mu, logvar = self.encoder(rating_matrix, dropout_prob=dropout_prob)\n z = self.reparameterize(mu, logvar)\n x_pred = self.decoder(z)\n return x_pred, mu, logvar, z\n\n def calculate_loss(self, interaction, encoder_flag):\n user = interaction[self.USER_ID]\n rating_matrix = self.get_rating_matrix(user)\n if encoder_flag:\n dropout_prob = self.dropout_prob\n else:\n dropout_prob = 0\n x_pred, mu, logvar, z = self.forward(rating_matrix, dropout_prob)\n\n if self.gamma:\n norm = rating_matrix.sum(dim=-1)\n kl_weight = self.gamma * norm\n else:\n kl_weight = self.beta\n\n mll = (F.log_softmax(x_pred, dim=-1) * rating_matrix).sum(dim=-1).mean()\n kld = (log_norm_pdf(z, mu, logvar) - self.prior(rating_matrix, z)).sum(dim=-1).mul(kl_weight).mean()\n negative_elbo = -(mll - kld)\n\n return negative_elbo\n\n def predict(self, interaction):\n user = interaction[self.USER_ID]\n item = interaction[self.ITEM_ID]\n\n rating_matrix = self.get_rating_matrix(user)\n\n scores, _, _, _ = self.forward(rating_matrix, self.dropout_prob)\n\n return scores[[torch.arange(len(item)).to(self.device), item]]\n\n def full_sort_predict(self, interaction):\n user = interaction[self.USER_ID]\n\n rating_matrix = self.get_rating_matrix(user)\n\n scores, _, _, _ = self.forward(rating_matrix, self.dropout_prob)\n\n return scores.view(-1)\n\n def update_prior(self):\n self.prior.encoder_old.load_state_dict(deepcopy(self.encoder.state_dict()))\n" ]
[ [ "torch.nn.functional.normalize", "torch.sigmoid", "numpy.log", "torch.Tensor", "torch.nn.functional.dropout", "torch.zeros", "torch.nn.functional.log_softmax", "torch.zeros_like", "torch.nn.LayerNorm", "torch.exp", "torch.nn.Linear", "torch.arange", "torch.stack", "torch.logsumexp" ] ]
davemc84/Cirq
[ "447b2c762cc2820dd28abb3bd2bc785d36bae39a" ]
[ "cirq/sim/simulator.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Abstract base classes for different types of simulators.\n\nSimulator types include:\n\n SimulatesSamples: mimics the interface of quantum hardware.\n\n SimulatesFinalState: allows access to the final state of the simulation.\n\n SimulatesIntermediateState: allows for access to the state of the simulation\n as the simulation iterates through the moments of a cirq.\n\"\"\"\n\nfrom typing import (\n Any, Dict, Hashable, Iterator, List, Tuple, Union, Optional)\n\nimport abc\nimport collections\n\nimport numpy as np\n\nfrom cirq import circuits, ops, protocols, schedules, study, value\nfrom cirq.sim import sampler\n\n\nclass SimulatesSamples(sampler.Sampler, metaclass=abc.ABCMeta):\n \"\"\"Simulator that mimics running on quantum hardware.\n\n Implementors of this interface should implement the _run method.\n \"\"\"\n\n def run_sweep(\n self,\n program: Union[circuits.Circuit, schedules.Schedule],\n params: study.Sweepable,\n repetitions: int = 1,\n ) -> List[study.TrialResult]:\n \"\"\"Runs the supplied Circuit or Schedule, mimicking quantum hardware.\n\n In contrast to run, this allows for sweeping over different parameter\n values.\n\n Args:\n program: The circuit or schedule to simulate.\n params: Parameters to run with the program.\n repetitions: The number of repetitions to simulate.\n\n Returns:\n TrialResult list for this run; one for each possible parameter\n resolver.\n \"\"\"\n circuit = (program if isinstance(program, circuits.Circuit)\n else program.to_circuit())\n param_resolvers = study.to_resolvers(params)\n\n trial_results = [] # type: List[study.TrialResult]\n for param_resolver in param_resolvers:\n measurements = self._run(circuit=circuit,\n param_resolver=param_resolver,\n repetitions=repetitions)\n trial_results.append(study.TrialResult(params=param_resolver,\n repetitions=repetitions,\n measurements=measurements))\n return trial_results\n\n @abc.abstractmethod\n def _run(\n self,\n circuit: circuits.Circuit,\n param_resolver: study.ParamResolver,\n repetitions: int\n ) -> Dict[str, np.ndarray]:\n \"\"\"Run a simulation, mimicking quantum hardware.\n\n Args:\n circuit: The circuit to simulate.\n param_resolver: Parameters to run with the program.\n repetitions: Number of times to repeat the run.\n\n Returns:\n A dictionary from measurement gate key to measurement\n results. 
Measurement results are stored in a 2-dimensional\n numpy array, the first dimension corresponding to the repetition\n and the second to the actual boolean measurement results (ordered\n by the qubits being measured.)\n \"\"\"\n raise NotImplementedError()\n\n def compute_samples_displays(\n self,\n program: Union[circuits.Circuit, schedules.Schedule],\n param_resolver: 'study.ParamResolverOrSimilarType' = None,\n ) -> study.ComputeDisplaysResult:\n \"\"\"Computes SamplesDisplays in the supplied Circuit or Schedule.\n\n Args:\n program: The circuit or schedule to simulate.\n param_resolver: Parameters to run with the program.\n\n Returns:\n ComputeDisplaysResult for the simulation.\n \"\"\"\n return self.compute_samples_displays_sweep(\n program,\n study.ParamResolver(param_resolver))[0]\n\n def compute_samples_displays_sweep(\n self,\n program: Union[circuits.Circuit, schedules.Schedule],\n params: Optional[study.Sweepable] = None\n ) -> List[study.ComputeDisplaysResult]:\n \"\"\"Computes SamplesDisplays in the supplied Circuit or Schedule.\n\n In contrast to `compute_displays`, this allows for sweeping\n over different parameter values.\n\n Args:\n program: The circuit or schedule to simulate.\n params: Parameters to run with the program.\n\n Returns:\n List of ComputeDisplaysResults for this run, one for each\n possible parameter resolver.\n \"\"\"\n circuit = (program if isinstance(program, circuits.Circuit)\n else program.to_circuit())\n param_resolvers = study.to_resolvers(params or study.ParamResolver({}))\n\n compute_displays_results = [] # type: List[study.ComputeDisplaysResult]\n for param_resolver in param_resolvers:\n display_values = {} # type: Dict[Hashable, Any]\n preceding_circuit = circuits.Circuit()\n for i, moment in enumerate(circuit):\n displays = (op for op in moment\n if isinstance(op, ops.SamplesDisplay))\n for display in displays:\n measurement_key = str(display.key)\n measurement_circuit = circuits.Circuit.from_ops(\n display.measurement_basis_change(),\n ops.measure(*display.qubits,\n key=measurement_key)\n )\n measurements = self._run(\n preceding_circuit + measurement_circuit,\n param_resolver,\n display.num_samples)\n display_values[display.key] = (\n display.value_derived_from_samples(\n measurements[measurement_key]))\n preceding_circuit.append(circuit[i])\n compute_displays_results.append(study.ComputeDisplaysResult(\n params=param_resolver,\n display_values=display_values))\n\n return compute_displays_results\n\n\nclass SimulatesFinalState(metaclass=abc.ABCMeta):\n \"\"\"Simulator that allows access to a quantum computer's final state.\n\n Implementors of this interface should implement the simulate_sweep\n method. This simulator only returns the state of the quantum system\n for the final step of a simulation. This simulator state may be a wave\n function, the density matrix, or another representation, depending on the\n implementation. 
For simulators that also allow stepping through\n a circuit see `SimulatesIntermediateState`.\n \"\"\"\n\n def simulate(\n self,\n program: Union[circuits.Circuit, schedules.Schedule],\n param_resolver: 'study.ParamResolverOrSimilarType' = None,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Any = None,\n ) -> 'SimulationTrialResult':\n \"\"\"Simulates the supplied Circuit or Schedule.\n\n This method returns a result which allows access to the entire\n wave function.\n\n Args:\n program: The circuit or schedule to simulate.\n param_resolver: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. See\n documentation of the implementing class for details.\n\n Returns:\n SimulationTrialResults for the simulation. Includes the final state.\n \"\"\"\n return self.simulate_sweep(\n program,\n study.ParamResolver(param_resolver),\n qubit_order,\n initial_state)[0]\n\n @abc.abstractmethod\n def simulate_sweep(\n self,\n program: Union[circuits.Circuit, schedules.Schedule],\n params: study.Sweepable,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Any = None,\n ) -> List['SimulationTrialResult']:\n \"\"\"Simulates the supplied Circuit or Schedule.\n\n This method returns a result which allows access to the entire\n wave function. In contrast to simulate, this allows for sweeping\n over different parameter values.\n\n Args:\n program: The circuit or schedule to simulate.\n params: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. See\n documentation of the implementing class for details.\n\n Returns:\n List of SimulationTrialResults for this run, one for each\n possible parameter resolver.\n \"\"\"\n raise NotImplementedError()\n\n\nclass SimulatesIntermediateState(SimulatesFinalState, metaclass=abc.ABCMeta):\n \"\"\"A SimulatesFinalState that simulates a circuit by moments.\n\n Whereas a general SimulatesFinalState may return the entire wave\n function at the end of a circuit, a SimulatesIntermediateState can\n simulate stepping through the moments of a circuit.\n\n Implementors of this interface should implement the _simulator_iterator\n method.\n \"\"\"\n\n def simulate_sweep(\n self,\n program: Union[circuits.Circuit, schedules.Schedule],\n params: study.Sweepable,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Any = None,\n ) -> List['SimulationTrialResult']:\n \"\"\"Simulates the supplied Circuit or Schedule.\n\n This method returns a result which allows access to the entire\n wave function. In contrast to simulate, this allows for sweeping\n over different parameter values.\n\n Args:\n program: The circuit or schedule to simulate.\n params: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. 
The form of\n this state depends on the simulation implementation. See\n documentation of the implementing class for details.\n\n Returns:\n List of SimulationTrialResults for this run, one for each\n possible parameter resolver.\n \"\"\"\n circuit = (program if isinstance(program, circuits.Circuit)\n else program.to_circuit())\n param_resolvers = study.to_resolvers(params)\n\n trial_results = []\n qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)\n for param_resolver in param_resolvers:\n all_step_results = self.simulate_moment_steps(circuit,\n param_resolver,\n qubit_order,\n initial_state)\n measurements = {} # type: Dict[str, np.ndarray]\n for step_result in all_step_results:\n for k, v in step_result.measurements.items():\n measurements[k] = np.array(v, dtype=bool)\n trial_results.append(\n self._create_simulator_trial_result(\n params=param_resolver,\n measurements=measurements,\n final_simulator_state=step_result.simulator_state()))\n return trial_results\n\n def simulate_moment_steps(\n self,\n circuit: circuits.Circuit,\n param_resolver: 'study.ParamResolverOrSimilarType' = None,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Any = None\n ) -> Iterator:\n \"\"\"Returns an iterator of StepResults for each moment simulated.\n\n If the circuit being simulated is empty, a single step result should\n be returned with the state being set to the initial state.\n\n Args:\n circuit: The Circuit to simulate.\n param_resolver: A ParamResolver for determining values of Symbols.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. See\n documentation of the implementing class for details.\n\n Returns:\n Iterator that steps through the simulation, simulating each\n moment and returning a StepResult for each moment.\n \"\"\"\n return self._simulator_iterator(\n circuit,\n study.ParamResolver(param_resolver),\n qubit_order,\n initial_state)\n\n @abc.abstractmethod\n def _simulator_iterator(\n self,\n circuit: circuits.Circuit,\n param_resolver: study.ParamResolver,\n qubit_order: ops.QubitOrderOrList,\n initial_state: Any,\n ) -> Iterator:\n \"\"\"Iterator over StepResult from Moments of a Circuit.\n\n Args:\n circuit: The circuit to simulate.\n param_resolver: A ParamResolver for determining values of\n Symbols.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. 
See\n documentation of the implementing class for details.\n\n Yields:\n StepResults from simulating a Moment of the Circuit.\n \"\"\"\n raise NotImplementedError()\n\n def _create_simulator_trial_result(self,\n params: study.ParamResolver,\n measurements: Dict[str, np.ndarray],\n final_simulator_state: Any) \\\n -> 'SimulationTrialResult':\n \"\"\"This method can be overridden to creation of a trial result.\n\n Args:\n params: The ParamResolver for this trial.\n measurements: The measurement results for this trial.\n final_simulator_state: The final state of the simulator for the\n StepResult.\n\n Returns:\n The SimulationTrialResult.\n \"\"\"\n return SimulationTrialResult(\n params=params,\n measurements=measurements,\n final_simulator_state=final_simulator_state)\n\n\n\nclass StepResult(metaclass=abc.ABCMeta):\n \"\"\"Results of a step of a SimulatesIntermediateState.\n\n Attributes:\n measurements: A dictionary from measurement gate key to measurement\n results, ordered by the qubits that the measurement operates on.\n \"\"\"\n\n def __init__(self,\n measurements: Optional[Dict[str, List[bool]]] = None) -> None:\n self.measurements = measurements or collections.defaultdict(list)\n\n @abc.abstractmethod\n def simulator_state(self) -> Any:\n \"\"\"Returns the simulator_state of the simulator after this step.\n\n The form of the simulator_state depends on the implementation of the\n simulation,see documentation for the implementing class for the form of\n details.\n \"\"\"\n\n @abc.abstractmethod\n def sample(self,\n qubits: List[ops.Qid],\n repetitions: int = 1) -> np.ndarray:\n \"\"\"Samples from the system at this point in the computation.\n\n Note that this does not collapse the wave function.\n\n Args:\n qubits: The qubits to be sampled in an order that influence the\n returned measurement results.\n repetitions: The number of samples to take.\n\n Returns:\n Measurement results with True corresponding to the ``|1⟩`` state.\n The outer list is for repetitions, and the inner corresponds to\n measurements ordered by the supplied qubits. These lists\n are wrapped as an numpy ndarray.\n \"\"\"\n raise NotImplementedError()\n\n def sample_measurement_ops(\n self,\n measurement_ops: List[ops.GateOperation],\n repetitions: int = 1) -> Dict[str, np.ndarray]:\n \"\"\"Samples from the system at this point in the computation.\n\n Note that this does not collapse the wave function.\n\n In contrast to `sample` which samples qubits, this takes a list of\n `cirq.GateOperation` instances whose gates are `cirq.MeasurementGate`\n instances and then returns a mapping from the key in the measurement\n gate to the resulting bit strings. Different measurement operations must\n not act on the same qubits.\n\n Args:\n measurement_ops: `GateOperation` instances whose gates are\n `MeasurementGate` instances to be sampled form.\n repetitions: The number of samples to take.\n\n Returns: A dictionary from measurement gate key to measurement\n results. 
Measurement results are stored in a 2-dimensional\n numpy array, the first dimension corresponding to the repetition\n and the second to the actual boolean measurement results (ordered\n by the qubits being measured.)\n\n Raises:\n ValueError: If the operation's gates are not `MeasurementGate`\n instances or a qubit is acted upon multiple times by different\n operations from `measurement_ops`.\n \"\"\"\n bounds = {} # type: Dict[str, Tuple]\n all_qubits = [] # type: List[ops.Qid]\n current_index = 0\n for op in measurement_ops:\n gate = op.gate\n if not isinstance(gate, ops.MeasurementGate):\n raise ValueError('{} was not a MeasurementGate'.format(gate))\n key = protocols.measurement_key(gate)\n if key in bounds:\n raise ValueError(\n 'Duplicate MeasurementGate with key {}'.format(key))\n bounds[key] = (current_index, current_index + len(op.qubits))\n all_qubits.extend(op.qubits)\n current_index += len(op.qubits)\n indexed_sample = self.sample(all_qubits, repetitions)\n return {k: np.array([x[s:e] for x in indexed_sample]) for k, (s, e) in\n bounds.items()}\n\n\[email protected]_equality(unhashable=True)\nclass SimulationTrialResult:\n \"\"\"Results of a simulation by a SimulatesFinalState.\n\n Unlike TrialResult these results contain the final simulator_state of the\n system. This simulator_state is dependent on the simulation implementation\n and may be, for example, the wave function of the system or the density\n matrix of the system.\n\n Attributes:\n params: A ParamResolver of settings used for this result.\n measurements: A dictionary from measurement gate key to measurement\n results. Measurement results are a numpy ndarray of actual boolean\n measurement results (ordered by the qubits acted on by the\n measurement gate.)\n final_simulator_state: The final simulator state of the system after the\n trial finishes.\n \"\"\"\n\n def __init__(self,\n params: study.ParamResolver,\n measurements: Dict[str, np.ndarray],\n final_simulator_state: Any) -> None:\n self.params = params\n self.measurements = measurements\n self.final_simulator_state = final_simulator_state\n\n def __repr__(self):\n return (\n 'cirq.SimulationTrialResult(params={!r}, '\n 'measurements={!r}, '\n 'final_simulator_state={!r})').format(\n self.params, self.measurements, self.final_simulator_state)\n\n def __str__(self):\n def bitstring(vals):\n return ''.join('1' if v else '0' for v in vals)\n\n results = sorted(\n [(key, bitstring(val)) for key, val in self.measurements.items()])\n if not results:\n return '(no measurements)'\n return ' '.join(\n ['{}={}'.format(key, val) for key, val in results])\n\n def _repr_pretty_(self, p: Any, cycle: bool) -> None:\n \"\"\"Text output in Jupyter.\"\"\"\n if cycle:\n # There should never be a cycle. This is just in case.\n p.text('SimulationTrialResult(...)')\n else:\n p.text(str(self))\n\n def _value_equality_values_(self):\n measurements = {k: v.tolist() for k, v in\n sorted(self.measurements.items())}\n return (self.params, measurements, self.final_simulator_state)\n" ]
[ [ "numpy.array" ] ]
ntellakula/fixed_matched_markets
[ "187827e614f398d414019a68ec093a39ca8fadfd" ]
[ "matched_markets_fixed/utils.py" ]
[ "# Copyright 2020 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Generic utility functions.\"\"\"\n\nimport random\nimport re\nfrom typing import List\n\nimport altair as alt\nfrom matched_markets_fixed.methodology import common_classes\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_numeric_dtype\n\nTimeWindow = common_classes.TimeWindow\n\n\ndef kwarg_subdict(prefix, **kwargs):\n \"\"\"Extract sub dict of `kwargs` prefixed by `prefix`, stripping `prefix`.\n E.g.\n kwarg_subdict('a', a_x=1, a_y=2, b_z=3) # returns {x:1, y:2}\n Args:\n prefix: a string specifying the prefix to search for in the kwarg names.\n **kwargs: any number of named arguments.\n Returns:\n A subset of the supplied kwargs in the form of a dictionary.\n \"\"\"\n\n # Define a regex to match the prefix.\n rgx = re.compile(r'%s(.*)' % prefix)\n\n # Extract the kwargs which match the regex.\n sub_kwargs = [k for k in kwargs.keys() if rgx.search(k)]\n\n # Return the matched kwargs, stripping off prefix.\n return {rgx.match(k).group(1): kwargs[k] for k in sub_kwargs}\n\n\ndef float_order(x):\n \"\"\"Calculates the order of magnitude of x.\"\"\"\n abs_x = np.abs(x)\n if abs_x > 0:\n return np.floor(np.log10(abs_x))\n else:\n return -np.inf\n\n\ndef randomize_strata(n_items, group_ids, seed=None):\n \"\"\"Perform stratified randomization.\n Maps a number of items into group ids by dividing the items into strata of\n length equal to the number of groups, then assigning the group ids randomly\n within each strata. If the number of items do not divide the number of groups\n equally, the remaining items are assigned to groups randomly (with equal\n probability of assignment in each group).\n Example: randomize_strata(12, [1, 2, 3]) yields a list of 12 group ids, each\n 1, 2, or 3. There are (3!)^4 = 6^4 = 1296 possible outcomes as there are 12 /\n 3 = 4 strata, and within each strata there are 3! = 6 different ways to assign\n the items into groups. 
Therefore, the first 3 items of the list (that is,\n positions 0, 1, and 2) can be mapped only to the 3!=6 different mappings:\n [1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2] or [3, 2, 1].\n Args:\n n_items: (int) Number of items to assign to groups.\n group_ids: A list of group ids that are typically integers or strings, but\n can be of any type.\n seed: (int) Random seed, applied to a local instance of class random.Random;\n if not specified, the global random instance is used instead.\n Returns:\n A list of length n_items, consisting of the group ids whose positions in the\n list correspond to the items.\n \"\"\"\n\n if seed is None:\n random_sampler = random.sample\n else:\n random_sampler = random.Random(seed).sample\n\n n_groups = len(group_ids)\n n_strata = n_items // n_groups\n groups = []\n for _ in range(n_strata):\n groups.extend(random_sampler(group_ids, n_groups))\n remaining = n_items - len(groups)\n if remaining > 0:\n groups.extend(random_sampler(group_ids, remaining))\n return groups\n\n\ndef brownian_bridge_bounds(n, sd_bound_multiplier):\n \"\"\"Obtain bounds of cumulative residuals from Brownian Bridge process.\n The bounds for variance are proportional to t * (n - t) / n for residuals\n t = 1 .. n (for residual n, the bound is zero as the sum of residuals is\n always zero). This function returns the bounds for the standard deviation.\n Args:\n n: (int >= 1) Length of the time series of the cumulative residuals\n following a Brownian Bridge process.\n sd_bound_multiplier: (numeric > 0) Multiplier for bounds on cumulative\n standardized residuals.\n Returns:\n A list of length n, of the Brownian Bridge process bounds in absolute\n values (if n == 1, returns [0]).\n \"\"\"\n if n < 1:\n raise ValueError('n must be >= 1')\n\n if sd_bound_multiplier <= 0:\n raise ValueError('sd_bound_multiplier must be > 0')\n\n n_range = np.arange(1, n + 1) # 1, ..., n.\n bounds = sd_bound_multiplier * np.sqrt(n_range * (1.0 - n_range / float(n)))\n return bounds.tolist()\n\n\ndef credible_interval(simulations, level):\n \"\"\"Construct the (1 - level, 0.5, level) central interval from simulations.\n Args:\n simulations: numeric arraylike. The simulations.\n level: float in (0, 1). 
The mass of the desired interval.\n Returns:\n An np.array representing the central credible interval at the given level.\n Raises:\n ValueError: if the requested level is too large (< 1/ len(sims)).\n \"\"\"\n alpha = (1 - level)/2.0\n nvals = len(simulations)\n if alpha < 1.0/nvals:\n raise ValueError('Too few values to provide requested quantiles.')\n sims_sort = np.sort(np.copy(simulations))\n frac = nvals * np.array([alpha, 0.5, 1.0 - alpha]) - 1.0\n low = np.floor(frac).astype(np.int64)\n return sims_sort[low] + (frac - low)*(sims_sort[low + 1] - sims_sort[low])\n\n\ndef find_days_to_exclude(\n dates_to_exclude: List[str]) -> List[TimeWindow]:\n \"\"\"Returns a list of time windows to exclude from a list of days and periods.\n Args:\n dates_to_exclude: a List of strings with format indicating a single day as\n '2020/01/01' (YYYY/MM/DD) or an entire time period as\n '2020/01/01 - 2020/02/01' (indicating start and end date of the time period)\n Returns:\n days_exclude: a List of TimeWindows obtained from the list in input.\n \"\"\"\n days_exclude = []\n for x in dates_to_exclude:\n tmp = x.split('-')\n if len(tmp) == 1:\n try:\n days_exclude.append(\n TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[0])))\n except ValueError:\n raise ValueError(f'Cannot convert the string {tmp[0]} to a valid date.')\n elif len(tmp) == 2:\n try:\n days_exclude.append(\n TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[1])))\n except ValueError:\n raise ValueError(\n f'Cannot convert the strings in {tmp} to a valid date.')\n else:\n raise ValueError(f'The input {tmp} cannot be interpreted as a single' +\n ' day or a time window')\n\n return days_exclude\n\n\ndef expand_time_windows(periods: List[TimeWindow]) -> List[pd.Timestamp]:\n \"\"\"Return a list of days to exclude from a list of TimeWindows.\n Args:\n periods: List of time windows (first day, last day).\n Returns:\n days_exclude: a List of obtained by expanding the list in input.\n \"\"\"\n days_exclude = []\n for window in periods:\n days_exclude += pd.date_range(window.first_day, window.last_day, freq='D')\n\n return list(set(days_exclude))\n\n\ndef human_readable_number(number: float) -> str:\n \"\"\"Print a large number in a readable format.\n Return a readable format for a number, e.g. 123 milions becomes 123M.\n Args:\n number: a float to be printed in human readable format.\n Returns:\n readable_number: a string containing the formatted number.\n \"\"\"\n number = float('{:.3g}'.format(number))\n magnitude = 0\n while abs(number) >= 1000 and magnitude < 4:\n magnitude += 1\n number /= 1000.0\n readable_number = '{}{}'.format('{:f}'.format(number).rstrip('0').rstrip('.'),\n ['', 'K', 'M', 'B', 'tn'][magnitude])\n return readable_number\n\n\ndef default_geo_assignment(geo_level_time_series: pd.DataFrame,\n geo_eligibility: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Set the default assignment eligibility for missing geos.\n Geos missing in the geo assignment table but present in the geo level time\n series are considered unconstrained. 
So, they can be assigned to either\n treatment, control, or excluded.\n Args:\n geo_level_time_series: table containing the response time series at geo\n level.\n geo_eligibility: table containing the possible assignments for some of the\n geos.\n Returns:\n a table containing the possible assignments for all geos in\n geo_level_time_series.\n \"\"\"\n\n if not is_numeric_dtype(geo_level_time_series['geo']):\n geo_level_time_series['geo'] = pd.to_numeric(geo_level_time_series['geo'])\n\n if not is_numeric_dtype(geo_eligibility['geo']):\n geo_eligibility['geo'] = pd.to_numeric(geo_eligibility['geo'])\n\n missing_geos = list(\n set(geo_level_time_series['geo']) - set(geo_eligibility['geo']))\n\n return geo_eligibility.append(\n pd.DataFrame({\n 'geo': missing_geos,\n 'control': 1,\n 'treatment': 1,\n 'exclude': 1\n })).sort_values(by='geo').reset_index(drop=True)\n\n\ndef plot_iroas_over_time(iroas_df: pd.DataFrame, experiment_dates: pd.DataFrame,\n cooldown_date: pd.DataFrame):\n \"\"\"Returns a chart of the iROAS estimate over time with confidence bands.\n This function provides a visualization of the evolution of the iROAS estimate\n over the duration of the experiment and cooldown, together with confidence\n bands.\n Args:\n iroas_df: a dataframe with columns: date, lower, mean, upper\n experiment_dates: dataframe with columns (date, color) which contains two\n dates for each period (start, end), and the column color is the label\n used in the chart to refer to the corresponding period, e.g. \"Experiment\n period\" or \"Pretes period\".\n cooldown_date: dataframe with column (date, color) with only one entry,\n where date indicates the last day in the cooldown period, and color is the\n label used in the plot legend, e.g. \"End of cooldown period\".\n Returns:\n iroas_chart: Chart containing the plot.\n \"\"\"\n iroas_base = alt.Chart(iroas_df).mark_line().encode(\n x=alt.X('date:T', axis=alt.Axis(title='', format=('%b %e'))))\n\n iroas_selection = alt.selection_single(\n fields=['date'],\n nearest=True,\n on='mouseover',\n empty='none',\n clear='mouseout')\n\n iroas_lines = iroas_base.mark_line().encode(\n y=alt.Y('mean:Q', axis=alt.Axis(title=' ', format='.3')))\n\n iroas_points = iroas_lines.mark_point().transform_filter(iroas_selection)\n\n iroas_rule1 = iroas_base.mark_rule().encode(\n tooltip=['date:T', 'mean:Q', 'lower:Q', 'upper:Q'])\n\n iroas_rule = iroas_rule1.encode(\n opacity=alt.condition(iroas_selection, alt.value(0.3), alt.value(\n 0))).add_selection(iroas_selection)\n\n iroas_ci_bands_rule = alt.Chart(iroas_df).mark_area(color='gray').encode(\n alt.X('date:T'), y='lower:Q', y2='upper:Q', opacity=alt.value(0.5))\n\n date_rule = alt.Chart(\n experiment_dates[experiment_dates['color'] ==\n 'Experiment period']).mark_rule(strokeWidth=2).encode(\n x='date:T',\n color=alt.Color(\n 'color',\n scale=alt.Scale(\n domain=[\n 'Experiment period',\n 'End of cooldown period',\n 'iROAS estimate'\n ],\n range=['black', 'black', '#1f77b4'])))\n cooldown_date_rule = alt.Chart(cooldown_date).mark_rule(\n strokeWidth=2, strokeDash=[5, 2], color='black').encode(\n x='date:T', color='color:N')\n # Compile chart\n iroas_chart = alt.layer(iroas_lines, iroas_rule, iroas_points, date_rule,\n cooldown_date_rule, iroas_ci_bands_rule)\n\n return iroas_chart\n" ]
[ [ "numpy.abs", "pandas.Timestamp", "numpy.arange", "pandas.api.types.is_numeric_dtype", "pandas.DataFrame", "numpy.copy", "numpy.log10", "numpy.floor", "pandas.date_range", "numpy.array", "pandas.to_numeric" ] ]
wavelets/bird
[ "ae0fe470a6517e34bfe8713fe389f0a2dd223afe" ]
[ "bird/_bird.py" ]
[ "# -*- coding: utf-8 -*-\n# Authors: Alexandre Gramfort <[email protected]>\n# Manuel Moussallam <[email protected]>\n#\n# Algorithm presented here are described in:\n# Blind Denoising with Random Greedy Pursuits.\n# Moussallam, M., Gramfort, A., Daudet, L., & Richard, G. (2014).\n# IEEE Signal Processing Letters, 21(11), 1341�1345\n#\n# License: BSD (3-clause)\n\nfrom math import sqrt\nimport multiprocessing\nfrom functools import partial\n\nimport numpy as np\nfrom scipy.special import erfinv\nfrom scipy import linalg\n\nfrom joblib import Parallel, delayed, Memory\nfrom mdct_tools import mdct_waveform, mdct, MDCT\n\n\ndef check_random_state(seed):\n \"\"\"Turn seed into a np.random.RandomState instance\n\n If seed is None, return the RandomState singleton used by np.random.\n If seed is an int, return a new RandomState instance seeded with seed.\n If seed is already a RandomState instance, return it.\n Otherwise raise ValueError.\n \"\"\"\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (int, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)\n\n\ndef _single_mp_run(x, Phi, bound, max_iter, verbose=False, pad=0,\n random_state=None, memory=Memory(None)):\n \"\"\" run of the RSSMP algorithm \"\"\"\n\n rng = check_random_state(random_state)\n pad = int(pad)\n x = np.concatenate((np.zeros(pad), x, np.zeros(pad)))\n\n n = x.size\n m = Phi.doth(x).size\n err_mse = []\n\n # Initialisation\n residual = np.concatenate((x.copy(), np.zeros(max(Phi.sizes) / 2)))\n\n s = np.zeros(m)\n x_est = np.zeros(n)\n # Main algorithm\n coeffs = np.zeros(m)\n it_number = 0\n current_lambda = 1\n err_mse.append(linalg.norm(residual))\n\n # Decomposition loop: stopping criteria is either SNR or iteration number\n while (current_lambda > bound) & (it_number < max_iter):\n\n # pick a shift at random : in each size\n rndshifts = []\n for scale_idx, size in enumerate(Phi.sizes):\n shift = rng.randint(low=0, high=size / 4)\n coeffs[scale_idx * n:(scale_idx + 1) * n] = mdct(\n residual[shift:shift + n], size).ravel()\n rndshifts.append(shift)\n\n # Select a new element\n idx = np.argmax(np.abs(coeffs))\n\n # Update coefficients\n s[idx] += coeffs[idx]\n\n # Only one method now : local update via a cached waveform\n # find scale and frequency bin of selected atom\n mdct_wf = memory.cache(mdct_waveform)\n\n scale_idx = idx // n\n size = Phi.sizes[scale_idx]\n F = n // (size // 2)\n frame = (idx - (scale_idx * n)) % F\n freq_bin = ((idx - (scale_idx * n))) // F\n pos = (frame * size / 2) - size / 4 + rndshifts[scale_idx]\n residual[pos:pos + size] -= coeffs[idx] * mdct_wf(size, freq_bin)\n\n # also add it to the reconstruction\n x_est[pos:pos + size] += coeffs[idx] * mdct_wf(size, freq_bin)\n\n # error computation (err_mse)\n err_mse.append(linalg.norm(residual))\n\n current_lambda = np.sqrt(1 - err_mse[-1] / err_mse[-2])\n if current_lambda <= bound:\n x_est[pos:pos + size] -= coeffs[idx] * mdct_wf(size, freq_bin)\n if verbose:\n print(\"Iteration %d : Current lambda of %1.4f\" % (\n it_number, current_lambda))\n it_number += 1\n\n return x_est, err_mse\n\n\ndef _single_multichannel_mp_run(X, Phi, bound, selection_rule, stop_crit,\n max_iter, verbose=False, pad=0,\n random_state=None, memory=Memory(None)):\n \"\"\" run of the structured variant of the RSSMP algorithm \"\"\"\n rng = check_random_state(random_state)\n\n # 
padding as v stak\n pad = int(pad)\n n_channels = X.shape[0]\n X = np.hstack((np.zeros((n_channels, pad)), X,\n np.zeros((n_channels, pad))))\n n_samples = X.shape[1]\n n_projs = Phi.doth(X).shape[1]\n err_mse = {}\n\n # Initialisation\n residual = np.hstack((X.copy(), np.zeros((n_channels,\n max(Phi.sizes) / 2))))\n\n s_rep = np.zeros((n_channels, n_projs))\n X_est = np.zeros((n_channels, n_samples))\n # Main algorithm\n coeffs = np.zeros((n_channels, n_projs))\n\n it_number = 0\n current_lambda = 1\n for c_idx in range(n_channels):\n err_mse[c_idx] = []\n err_mse[c_idx].append(linalg.norm(residual[c_idx, :]))\n\n # Decomposition loop: stopping criteria is either SNR or iteration number\n while (current_lambda > bound) & (it_number < max_iter):\n\n # pick a shift at random : in each size\n rndshifts = {}\n for c_idx in range(n_channels):\n rndshifts[c_idx] = []\n for s_idx, L in enumerate(Phi.sizes):\n shift = rng.randint(low=0, high=L / 4)\n for c_idx in range(n_channels):\n coeffs[c_idx, s_idx * n_samples:(s_idx + 1) * n_samples] = \\\n mdct(residual[c_idx, shift:shift + n_samples], L).ravel()\n rndshifts[c_idx].append(shift)\n\n # Multichannel mode : we combine projections\n combined = selection_rule(coeffs ** 2)\n\n # Select a new element\n idx = np.argmax(np.abs(combined))\n # find scale and frequency bin of selected atom\n s_idx = idx // n_samples\n L = Phi.sizes[s_idx]\n F = n_samples // (L // 2)\n frame = (idx - (s_idx * n_samples)) % F\n freq_bin = ((idx - (s_idx * n_samples))) // F\n\n mdct_wf = memory.cache(mdct_waveform)\n\n # Update coefficients and residual\n current_lambda_array = np.zeros(n_channels)\n for c_idx in range(n_channels):\n s_rep[c_idx, idx] += coeffs[c_idx, idx]\n\n # Only one method now : local update via a cached waveform\n pos = (frame * L / 2) - L / 4 + rndshifts[c_idx][s_idx]\n residual[c_idx, pos:pos + L] -= coeffs[c_idx, idx] * \\\n mdct_wf(L, freq_bin)\n\n # also add it to the reconstruction\n X_est[c_idx, pos:pos + L] += coeffs[c_idx, idx] * \\\n mdct_wf(L, freq_bin)\n\n # error computation (err_mse)\n err_mse[c_idx].append(linalg.norm(residual[c_idx, :]))\n\n current_lambda_array[c_idx] = np.sqrt(\n 1. 
- err_mse[c_idx][-1] / err_mse[c_idx][-2])\n\n current_lambda = stop_crit(current_lambda_array)\n\n if verbose:\n print(\"Iteration %d : Current lambda of %1.4f\" % (\n it_number, current_lambda))\n it_number += 1\n\n return X_est[:, pad: -pad], err_mse\n\n\ndef _pad(X):\n \"\"\" add zeroes on the border to make sure the signal length is a\n power of two \"\"\"\n p_above = int(np.floor(np.log2(X.shape[1])))\n M = 2 ** (p_above + 1) - X.shape[1]\n X = np.hstack((np.zeros((X.shape[0], M)), X))\n\n return X, M\n\n\ndef _denoise(seeds, x, dico, sup_bound, n_atoms, verbose=False, indep=True,\n stop_crit=None, selection_rule=None, pad=0,\n memory=Memory(None)):\n \"\"\" multiple rssmp runs with a smart stopping criterion using\n the convergence decay monitoring\n \"\"\"\n approx = []\n for seed in seeds:\n if verbose > 0:\n print(\"Run seed %d\" % seed)\n if indep:\n approx.append(_single_mp_run(x, dico, sup_bound, n_atoms,\n verbose=verbose, pad=pad,\n random_state=seed,\n memory=memory)[0])\n else:\n approx.append(_single_multichannel_mp_run(x, dico, sup_bound,\n selection_rule,\n stop_crit,\n n_atoms, verbose=verbose,\n pad=pad,\n random_state=seed,\n memory=memory)[0])\n return approx\n\n\ndef _bird_core(X, scales, n_runs, Lambda_W, max_iter=100,\n stop_crit=np.mean,\n selection_rule=np.sum,\n n_jobs=1, indep=True,\n random_state=None, memory=Memory(None), verbose=False):\n \"\"\"Automatically detect when noise zone has been reached and stop\n MP at this point\n\n Parameters\n ----------\n X : array, shape (n_channels, n_times)\n The numpy n_channels-vy-N array to be denoised where n_channels is\n number of sensors and N the dimension\n scales : list\n The list of MDCT scales that will be used to built the\n dictionary Phi\n n_runs : int\n the number of runs (n_runs in the paper)\n Lambda_W : float\n bound for lambda under which a run will be stopped\n max_iter : int\n Maximum number of iterations (serves as alternate stopping criterion)\n stop_crit : function\n controls the calculation of Lambda\n selection_rule : callable\n controls the way multiple channel projections are combined for atom\n selection only used if indep=False\n n_jobs : int\n number of jobs to run in parallel\n indep : bool\n True for BIRD (independent processing of each channel,\n False for S-BIRD (structured sparsity seeked)\n random_state : None | int | np.random.RandomState\n To specify the random generator state (seed).\n memory : instance of Memory\n The object to use to cache some computations. 
If cachedir is None, no\n caching is performed.\n verbose : bool\n verbose mode\n\n Returns\n -------\n X_denoise : array, shape (n_channels, n_times)\n denoised array of same shape as X\n \"\"\"\n Phi = MDCT(scales)\n pad = int(1.5 * max(scales))\n X_denoise = np.zeros_like(X)\n approx = []\n rng = check_random_state(random_state)\n seeds = rng.randint(4294967295, size=n_runs) # < max seed value\n\n if n_jobs <= 0:\n n_cores = multiprocessing.cpu_count()\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n\n if indep:\n # Independent treat of each channel (plain BIRD)\n for r, x in zip(X_denoise, X):\n this_approx = Parallel(n_jobs=n_jobs)(\n delayed(_denoise)(this_seeds, x, Phi, Lambda_W,\n max_iter, pad=pad, verbose=verbose,\n indep=True, memory=memory)\n for this_seeds in\n np.array_split(seeds, n_jobs))\n this_approx = sum(this_approx[1:], this_approx[0])\n r[:] = sum([a[pad:-pad] for a in this_approx])\n approx.append(this_approx)\n else:\n # data need to be processed jointly\n this_approx = Parallel(n_jobs=n_jobs)(\n delayed(_denoise)(this_seeds, X, Phi, Lambda_W,\n max_iter, pad=pad, verbose=verbose,\n selection_rule=selection_rule,\n indep=False, memory=memory,\n stop_crit=stop_crit)\n for this_seeds in\n np.array_split(seeds, n_jobs))\n\n # reconstruction by averaging\n for jidx in range(len(this_approx)):\n for ridx in range(len(this_approx[jidx])):\n X_denoise += this_approx[jidx][ridx]\n\n X_denoise /= float(n_runs)\n return X_denoise\n\n\ndef bird(X, scales, n_runs, p_above, max_iter=100, random_state=None,\n n_jobs=1, memory=Memory(None), verbose=False):\n \"\"\" The BIRD algorithm as described in the paper\n\n Parameters\n ----------\n X : array, shape (n_channels, n_times)\n The numpy n_channels-vy-N array to be X_denoised where n_channels\n is number of sensors and n_times the dimension\n scales : list\n The list of MDCT scales that will be used to built the\n dictionary Phi\n n_runs : int\n the number of runs (n_runs in the paper)\n p_above : float\n probability of appearance of the max above which the noise hypothesis\n is considered false\n max_iter : int\n The maximum number of iterations in one pursuit.\n random_state : None | int | np.random.RandomState\n To specify the random generator state (seed).\n max_iter : int\n The maximum number of iterations in one pursuit.\n n_jobs : int\n The number of jobs to run in parallel.\n memory : instance of Memory\n The object to use to cache some computations. If cachedir is None, no\n caching is performed.\n verbose : bool\n verbose mode\n\n Returns\n -------\n X_denoise : array, shape (n_channels, n_times)\n The X_denoised data.\n \"\"\"\n X, prepad = _pad(X)\n\n # Computing Lambda_W(Phi, p_above)\n N = float(X.shape[1])\n # size of the full shift-invariant dictionary\n M = np.sum(np.array(scales) / 2) * N\n sigma = sqrt((1.0 - (2.0 / np.pi)) / float(N))\n Lambda_W = sigma * sqrt(2.0) * erfinv((1.0 - p_above) ** (1.0 / float(M)))\n print(\"Starting BIRD with MDCT dictionary of %d Atoms. 
\"\n \"Lambda_W=%1.3f, n_runs=%d\" % (M, Lambda_W, n_runs))\n X_denoised = _bird_core(X, scales, n_runs, Lambda_W, verbose=verbose,\n max_iter=max_iter, indep=True, n_jobs=n_jobs,\n random_state=random_state, memory=memory)\n return X_denoised[:, prepad:]\n\n\n# the stopping criterion is determined by the p_active parameter\ndef stop_crit(lambda_array, lint):\n lambda_array.sort()\n return np.mean(lambda_array[-int(lint):])\n\n\ndef selection_rule(projections_matrix, lint):\n sorted_projs = np.sort(projections_matrix ** 2, axis=0)\n return np.mean(sorted_projs[-lint:, :], axis=0)\n\n\ndef s_bird(X, scales, n_runs, p_above, p_active=1, max_iter=100,\n random_state=None, n_jobs=1, memory=Memory(None), verbose=False):\n \"\"\" Multichannel version of BIRD (S-BIRD) seeking Structured Sparsity\n\n Parameters\n ----------\n X : array, shape (n_channels, n_times)\n The numpy n_channels-vy-n_samples array to be denoised where n_channels\n is the number of sensors and n_samples the dimension\n scales : list of int\n The list of MDCT scales that will be used to built the\n dictionary Phi\n n_runs : int\n the number of runs (n_runs in the paper)\n p_above : float\n probability of appearance of the max above which the noise hypothesis\n is considered false\n p_active : float\n proportion of active channels (l in the paper)\n max_iter : int\n The maximum number of iterations in one pursuit.\n random_state : None | int | np.random.RandomState\n To specify the random generator state (seed).\n n_jobs : int\n The number of jobs to run in parallel.\n memory : instance of Memory\n The object to use to cache some computations. If cachedir is None, no\n caching is performed.\n verbose : bool\n verbose mode\n\n Returns\n -------\n X_denoise : array, shape (n_channels, n_times)\n The denoised data.\n \"\"\"\n X, prepad = _pad(X)\n # Computing Lambda_W(Phi, p_above)\n n_channels = X.shape[0]\n n_samples = float(X.shape[1])\n # size of the full shift-invariant dictionary\n M = np.sum(np.array(scales) / 2) * n_samples\n sigma = sqrt((1.0 - (2.0 / np.pi)) / float(n_samples))\n Lambda_W = sigma * sqrt(2.0) * erfinv((1.0 - p_above) ** (1.0 / float(M)))\n\n lint = int(n_channels * p_active)\n\n this_stop_crit = partial(stop_crit, lint=lint) # XXX : check lint here\n this_selection_rule = partial(selection_rule, lint=lint)\n\n print(\"Starting S-BIRD with MDCT dictionary of %d Atoms.\"\n \" Lambda_W=%1.3f, n_runs=%d, p_active=%1.1f\" % (M, Lambda_W,\n n_runs, p_active))\n denoised = _bird_core(X, scales, n_runs, Lambda_W, verbose=verbose,\n stop_crit=this_stop_crit, n_jobs=n_jobs,\n selection_rule=this_selection_rule,\n max_iter=max_iter,\n indep=False, memory=memory)\n\n return denoised[:, prepad:]\n" ]
[ [ "numpy.log2", "numpy.sqrt", "numpy.abs", "numpy.sort", "numpy.array_split", "numpy.mean", "numpy.zeros_like", "scipy.linalg.norm", "numpy.array", "numpy.random.RandomState", "numpy.zeros" ] ]
hzclarksm/hublib
[ "e8f2168d80464b6343b980e30fdd552d1b0c2479" ]
[ "hublib/rappture/test/test_number.py" ]
[ "from __future__ import print_function\nimport pytest\nimport os, sys\nimport numpy as np\n\nsys.path.insert(0, os.path.abspath('../../..'))\nimport hublib.rappture as rappture\nfrom hublib import ureg, Q_\n\n\nclass TestNumber:\n\n @classmethod\n def setup_class(cls):\n print(\"cls\", cls)\n cls.io = rappture.RapXML('number.xml')\n\n def test_input_list(self):\n inputs = str(self.io.inputs())\n expected = \"\"\"input.number(temperature)\\t\"Ambient temperature\"\ninput.number(temperature2)\\t\"Ambient Temperature Celsius\"\ninput.number(temperature3)\\t\"Ambient Temperature Current No Units\"\ninput.number(temperature4)\\t\"Ambient Temperature Unitless\"\ninput.number(vsweep)\\t\"Voltage Sweep +/-\"\n\"\"\"\n\n assert expected == inputs\n\n def test_output_list(self):\n outputs = str(self.io.outputs())\n expected = \"\"\"output.number(outt)\\t\"Ambient temperature\"\noutput.number(outv)\\t\"Voltage Sweep +/-\"\n\"\"\"\n assert expected == outputs\n\n def test_read_value1(self):\n val = self.io['input.number(temperature)'].value\n assert val.m == 300\n assert val.u == ureg.kelvin\n assert '{:~}'.format(val) == '300 K'\n assert self.io['input.number(temperature)'].rvalue == '300K'\n\n def test_read_value2(self):\n val = self.io['input.number(temperature2)'].value\n assert np.isclose(val.m, 26.85)\n assert val.u == ureg.degC\n assert self.io['input.number(temperature2)'].rvalue == '300K'\n assert self.io['input.number(temperature2).units'].rvalue == 'C'\n\n def test_read_value3(self):\n val = self.io['input.number(temperature3)'].value\n assert val.m == 300\n assert val.u == ureg.degC\n assert '{:~}'.format(val) == '300 celsius'\n\n assert self.io['input.number(temperature3)'].rvalue == '300'\n assert self.io['input.number(temperature3).units'].rvalue == 'C'\n\n def test_read_value4(self):\n assert self.io['input.number(temperature4)'].value == 300.0\n assert self.io['input.number(temperature4)'].rvalue == '300'\n\n def test_write_value1(self):\n # set without units\n self.io['input.number(temperature)'] = 270\n val = self.io['input.number(temperature)'].value\n assert val.m == 270\n assert val.u == ureg.kelvin\n\n def test_write_value1b(self):\n # set with units\n self.io['input.number(temperature)'] = \"260 K\"\n val = self.io['input.number(temperature)'].value\n assert val.m == 260\n assert val.u == ureg.kelvin\n\n def test_write_value1c(self):\n # Set default and check that current is not affected\n self.io['input.number(temperature).default'] = \"305 K\"\n val = self.io['input.number(temperature).default'].value\n assert val.m == 305\n assert val.u == ureg.kelvin\n val = self.io['input.number(temperature)'].value\n assert val.m == 260\n assert val.u == ureg.kelvin\n\n def test_write_value2(self):\n # set without units\n self.io['input.number(temperature2)'] = 270\n val = self.io['input.number(temperature2)'].value\n assert val.m == 270\n assert val.u == ureg.degC\n\n def test_write_value2b(self):\n # set without units\n self.io['input.number(temperature2)'] = \"270 K\"\n val = self.io['input.number(temperature2)'].value\n assert np.allclose(val.m, -3.15)\n assert val.u == ureg.degC\n\n def test_write_value2c(self):\n # change units\n self.io['input.number(temperature2).units'] = 'K'\n val = self.io['input.number(temperature2)'].value\n assert val.m == 270\n assert val.u == ureg.kelvin\n\n def test_write_value4(self):\n # set without units\n self.io['input.number(temperature4)'] = 270\n val = self.io['input.number(temperature4)'].value\n assert val == 270\n\n def 
test_write_value4b(self):\n # set without units\n self.io['input.number(temperature4)'] = '270'\n val = self.io['input.number(temperature4)'].value\n assert val == 270\n\n" ]
[ [ "numpy.allclose", "numpy.isclose" ] ]
Shubham-0212/ga-learner-dsmp-repo
[ "ed2ebf13fb959746be3b97a6ece0a1784ebb0166" ]
[ "Banking-Inference/code.py" ]
[ "# --------------\nimport pandas as pd\r\nimport scipy.stats as stats\r\nimport math\r\nimport numpy as np\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n#Sample_Size\r\nsample_size=2000\r\n\r\n#Z_Critical Score\r\nz_critical = stats.norm.ppf(q = 0.95) \r\n\r\n\r\n# path [File location variable]\r\ndata=pd.read_csv(path)\r\ndata_sample=data.sample(n=sample_size,random_state=0)\r\nsample_mean=data_sample.installment.mean()\r\nsample_std=data_sample.installment.std()\r\nmargin_of_error=z_critical*sample_std/np.sqrt(sample_size)\r\nconfidence_interval=[]\r\nconfidence_interval.append(sample_mean-margin_of_error)\r\nconfidence_interval.append(sample_mean+margin_of_error)\r\ntrue_mean=data.installment.mean()\r\nif true_mean>confidence_interval[0] and true_mean<confidence_interval[1]:\r\n print('true',true_mean)\r\nelse:\r\n print('false')#Code starts here\r\n\n\n\n# --------------\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n#Different sample sizes to take\r\nsample_size=np.array([20,50,100])\r\n\r\n#Code starts here\r\nfig, axes = plt.subplots(3,1, figsize=(20,10))\r\nfor i in range(len(sample_size)):\r\n m=[]\r\n for j in range(1000):\r\n m.append(data['installment'].sample(n=sample_size[i]).mean())\r\n mean_series=pd.Series(m) \r\nprint(mean_series)\n\n\n# --------------\n#Importing header files\r\n\r\nfrom statsmodels.stats.weightstats import ztest\r\n\r\n#Code starts here\r\ndata['int.rate']=data['int.rate'].str.replace('%','').astype(float)/100\r\nprint(data['int.rate'].head())\r\nz_statistic, p_value = ztest(data[data['purpose']=='small_business']['int.rate'],value=data['int.rate'].mean(),alternative='larger')\r\nprint(z_statistic, p_value)\r\nif p_value<0.05:\r\n print('reject')\r\nelse:\r\n print('accept')\n\n\n# --------------\n#Importing header files\r\nfrom statsmodels.stats.weightstats import ztest\r\n\r\n#Code starts here\r\nz_statistic, p_value=ztest(data[data['paid.back.loan']=='No']['installment'],data[data['paid.back.loan']=='Yes']['installment'])\r\nprint(z_statistic,p_value)\r\nif p_value<0.05:\r\n print('reject')\r\nelse:\r\n print('accept')\r\n\n\n\n# --------------\nfrom scipy.stats import chi2_contingency\r\n\r\n#Critical value \r\ncritical_value = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*\r\n df = 6) # Df = number of variable categories(in purpose) - 1\r\n\r\n#Code starts here\r\nyes=data[data['paid.back.loan']=='Yes']['purpose'].value_counts()\r\n#print(yes)\r\nno=data[data['paid.back.loan']=='No']['purpose'].value_counts()\r\nobserved=pd.concat([yes.transpose(),no.transpose()],keys=['Yes','No'],axis=1)\r\nprint(observed)\r\nchi2, p, dof, ex = stats.chi2_contingency(observed)\r\nif chi2>critical_value:\r\n print('reject')\r\nelse:\r\n print('accept')\r\n\r\n\n\n\n" ]
[ [ "scipy.stats.chi2.ppf", "scipy.stats.norm.ppf", "pandas.read_csv", "numpy.sqrt", "scipy.stats.chi2_contingency", "pandas.Series", "matplotlib.pyplot.subplots", "numpy.array" ] ]
HuchieWuchie/Mask_RCNN
[ "93f74c5fae72852563b1d3e0e22428d6abf86dc2" ]
[ "mrcnn/utils.py" ]
[ "\"\"\"\nMask R-CNN\nCommon utility functions and classes.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport sys\nimport os\nimport logging\nimport math\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport scipy\nimport skimage.color\nimport skimage.io\nimport skimage.transform\nimport urllib.request\nimport shutil\nimport warnings\nfrom distutils.version import LooseVersion\n\n# URL from which to download the latest COCO trained weights\nCOCO_MODEL_URL = \"https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5\"\n\n\n############################################################\n# Bounding Boxes\n############################################################\n\ndef extract_bboxes(mask):\n \"\"\"Compute bounding boxes from masks.\n mask: [num_instances, height, width, num_affordances]. Mask pixels are either 1 or 0.\n\n Returns: bbox array [num_instances, (y1, x1, y2, x2)].\n \"\"\"\n # Albert was here\n boxes = np.zeros([mask.shape[0], 4], dtype=np.int32)\n for i in range(mask.shape[0]):\n m = mask[i, :, :, 1:]\n #m_viz = np.zeros((m.shape[0], m.shape[1]))\n # Bounding box.\n #for i in range(m.shape[-1]):\n # m_viz[m[:,:, i] == 1] = 255\n \n #cv2.imwrite(os.path.join(\"mask_gt\", str(np.random.randint(10000, size=1)[0]) + \".png\"), m_viz)\n horizontal_indicies = np.where(np.any(m, axis=0))[0]\n vertical_indicies = np.where(np.any(m, axis=1))[0]\n if horizontal_indicies.shape[0]:\n x1, x2 = horizontal_indicies[[0, -1]]\n y1, y2 = vertical_indicies[[0, -1]]\n # x2 and y2 should not be part of the box. Increment by 1.\n x2 += 1\n y2 += 1\n else:\n # No mask for this instance. Might happen due to\n # resizing or cropping. Set bbox to zeros\n x1, x2, y1, y2 = 0, 0, 0, 0\n boxes[i] = np.array([y1, x1, y2, x2])\n return boxes.astype(np.int32)\n\n\ndef compute_iou(box, boxes, box_area, boxes_area):\n \"\"\"Calculates IoU of the given box with the array of the given boxes.\n box: 1D vector [y1, x1, y2, x2]\n boxes: [boxes_count, (y1, x1, y2, x2)]\n box_area: float. the area of 'box'\n boxes_area: array of length boxes_count.\n\n Note: the areas are passed in rather than calculated here for\n efficiency. 
Calculate once in the caller to avoid duplicate work.\n \"\"\"\n # Calculate intersection areas\n y1 = np.maximum(box[0], boxes[:, 0])\n y2 = np.minimum(box[2], boxes[:, 2])\n x1 = np.maximum(box[1], boxes[:, 1])\n x2 = np.minimum(box[3], boxes[:, 3])\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n union = box_area + boxes_area[:] - intersection[:]\n iou = intersection / union\n return iou\n\n\ndef compute_overlaps(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n\n For better performance, pass the largest set first and the smaller second.\n \"\"\"\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps\n\n\ndef compute_overlaps_masks(masks1, masks2):\n \"\"\"Computes IoU overlaps between two sets of masks.\n masks1, masks2: [Height, Width, instances]\n \"\"\"\n\n # If either set of masks is empty return empty result\n #if masks1.shape[-1] == 0 or masks2.shape[-1] == 0:\n # return np.zeros((masks1.shape[-1], masks2.shape[-1]))\n if np.sum(masks1.shape[:,:, 1:]) == 0 or np.sum(masks2.shape[:,:, 1:]) == 0:\n return np.zeros((masks1.shape[-1], masks2.shape[-1]))\n # flatten masks and compute their areas\n masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)\n masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)\n area1 = np.sum(masks1, axis=0)\n area2 = np.sum(masks2, axis=0)\n\n # intersections and union\n intersections = np.dot(masks1.T, masks2)\n union = area1[:, None] + area2[None, :] - intersections\n overlaps = intersections / union\n\n return overlaps\n\n\ndef non_max_suppression(boxes, scores, threshold):\n \"\"\"Performs non-maximum suppression and returns indices of kept boxes.\n boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.\n scores: 1-D array of box scores.\n threshold: Float. IoU threshold to use for filtering.\n \"\"\"\n assert boxes.shape[0] > 0\n if boxes.dtype.kind != \"f\":\n boxes = boxes.astype(np.float32)\n\n # Compute box areas\n y1 = boxes[:, 0]\n x1 = boxes[:, 1]\n y2 = boxes[:, 2]\n x2 = boxes[:, 3]\n area = (y2 - y1) * (x2 - x1)\n\n # Get indicies of boxes sorted by scores (highest first)\n ixs = scores.argsort()[::-1]\n\n pick = []\n while len(ixs) > 0:\n # Pick top box and add its index to the list\n i = ixs[0]\n pick.append(i)\n # Compute IoU of the picked box with the rest\n iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n # Identify boxes with IoU over the threshold. This\n # returns indices into ixs[1:], so add 1 to get\n # indices into ixs.\n remove_ixs = np.where(iou > threshold)[0] + 1\n # Remove indices of the picked and overlapped boxes.\n ixs = np.delete(ixs, remove_ixs)\n ixs = np.delete(ixs, 0)\n return np.array(pick, dtype=np.int32)\n\n\ndef apply_box_deltas(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)]. 
Note that (y2, x2) is outside the box.\n deltas: [N, (dy, dx, log(dh), log(dw))]\n \"\"\"\n boxes = boxes.astype(np.float32)\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= np.exp(deltas[:, 2])\n width *= np.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n return np.stack([y1, x1, y2, x2], axis=1)\n\n\ndef box_refinement_graph(box, gt_box):\n \"\"\"Compute refinement needed to transform box to gt_box.\n box and gt_box are [N, (y1, x1, y2, x2)]\n \"\"\"\n box = tf.cast(box, tf.float32)\n gt_box = tf.cast(gt_box, tf.float32)\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = tf.log(gt_height / height)\n dw = tf.log(gt_width / width)\n\n result = tf.stack([dy, dx, dh, dw], axis=1)\n return result\n\n\ndef box_refinement(box, gt_box):\n \"\"\"Compute refinement needed to transform box to gt_box.\n box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is\n assumed to be outside the box.\n \"\"\"\n box = box.astype(np.float32)\n gt_box = gt_box.astype(np.float32)\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = np.log(gt_height / height)\n dw = np.log(gt_width / width)\n\n return np.stack([dy, dx, dh, dw], axis=1)\n\n\n############################################################\n# Dataset\n############################################################\n\nclass Dataset(object):\n \"\"\"The base class for dataset classes.\n To use it, create a new class that adds functions specific to the dataset\n you want to use. 
For example:\n\n class CatsAndDogsDataset(Dataset):\n def load_cats_and_dogs(self):\n ...\n def load_mask(self, image_id):\n ...\n def image_reference(self, image_id):\n ...\n\n See COCODataset and ShapesDataset as examples.\n \"\"\"\n\n def __init__(self, class_map=None):\n self._image_ids = []\n self.image_info = []\n\n #CHANGE\n # Background is always the first class\n #self.class_info = [{\"source\": \"\", \"id\": 0, \"name\": \"BG\"}] # outcommented since there should be no background\n self.class_info = []\n self.affordance_info = [] # new\n self.source_class_ids = {}\n \n def add_affordance(self, source, affordance_id, affordance_name):\n assert \".\" not in source, \"Source name cannot contain a dot\"\n # Does the class exist already?\n for info in self.affordance_info:\n if info['source'] == source and info[\"id\"] == affordance_id:\n # source.class_id combination already available, skip\n return\n # Add the class\n self.affordance_info.append({\n \"source\": source,\n \"id\": affordance_id,\n \"name\": affordance_name,\n })\n\n def add_class(self, source, class_id, class_name):\n assert \".\" not in source, \"Source name cannot contain a dot\"\n # Does the class exist already?\n for info in self.class_info:\n if info['source'] == source and info[\"id\"] == class_id:\n # source.class_id combination already available, skip\n return\n # Add the class\n self.class_info.append({\n \"source\": source,\n \"id\": class_id,\n \"name\": class_name,\n })\n\n def add_image(self, source, image_id, path, **kwargs):\n image_info = {\n \"id\": image_id,\n \"source\": source,\n \"path\": path,\n }\n image_info.update(kwargs)\n self.image_info.append(image_info)\n\n def image_reference(self, image_id):\n \"\"\"Return a link to the image in its source Website or details about\n the image that help looking it up or debugging it.\n\n Override for your dataset, but pass to this function\n if you encounter images not in your dataset.\n \"\"\"\n return \"\"\n\n def prepare(self, class_map=None):\n \"\"\"Prepares the Dataset class for use.\n\n TODO: class map is not supported yet. 
When done, it should handle mapping\n classes from different datasets to the same class ID.\n \"\"\"\n\n def clean_name(name):\n \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n return \",\".join(name.split(\",\")[:1])\n\n # Build (or rebuild) everything else from the info dicts.\n self.num_classes = len(self.class_info)\n self.class_ids = np.arange(self.num_classes)\n self.class_names = [clean_name(c[\"name\"]) for c in self.class_info]\n self.num_affordances = len(self.affordance_info)\n self.affordance_ids = np.arange(self.num_affordances)\n self.affordances_names = [clean_name(c[\"name\"]) for c in self.affordance_info]\n self.num_images = len(self.image_info)\n self._image_ids = np.arange(self.num_images)\n\n # Mapping from source class and image IDs to internal IDs\n # ALBERT: Maybe there is something to do here\n self.class_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.class_info, self.class_ids)}\n self.image_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.image_info, self.image_ids)}\n\n # Map sources to class_ids they support\n self.sources = list(set([i['source'] for i in self.class_info]))\n self.source_class_ids = {}\n # Loop over datasets\n for source in self.sources:\n self.source_class_ids[source] = []\n # Find classes that belong to this dataset\n for i, info in enumerate(self.class_info):\n # Include BG class in all datasets\n if i == 0 or source == info['source']:\n # ALBERT: Maybe there is something to do here\n self.source_class_ids[source].append(i)\n\n def map_source_class_id(self, source_class_id):\n \"\"\"Takes a source class ID and returns the int class ID assigned to it.\n\n For example:\n dataset.map_source_class_id(\"coco.12\") -> 23\n \"\"\"\n return self.class_from_source_map[source_class_id]\n\n def get_source_class_id(self, class_id, source):\n \"\"\"Map an internal class ID to the corresponding class ID in the source dataset.\"\"\"\n info = self.class_info[class_id]\n assert info['source'] == source\n return info['id']\n\n @property\n def image_ids(self):\n return self._image_ids\n\n def source_image_link(self, image_id):\n \"\"\"Returns the path or URL to the image.\n Override this to return a URL to the image if it's available online for easy\n debugging.\n \"\"\"\n return self.image_info[image_id][\"path\"]\n\n def load_image(self, image_id):\n \"\"\"Load the specified image and return a [H,W,3] Numpy array.\n \"\"\"\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image\n\n def load_mask(self, image_id):\n \"\"\"Load instance masks for the given image.\n\n Different datasets use different ways to store masks. 
Override this\n method to load instance masks and return them in the form of am\n array of binary masks of shape [height, width, instances].\n\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n a binary mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n logging.warning(\"You are using the default load_mask(), maybe you need to define your own one.\")\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids\n\n\ndef resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode=\"square\"):\n \"\"\"Resizes an image keeping the aspect ratio unchanged.\n\n min_dim: if provided, resizes the image such that it's smaller\n dimension == min_dim\n max_dim: if provided, ensures that the image longest side doesn't\n exceed this value.\n min_scale: if provided, ensure that the image is scaled up by at least\n this percent even if min_dim doesn't require it.\n mode: Resizing mode.\n none: No resizing. Return the image unchanged.\n square: Resize and pad with zeros to get a square image\n of size [max_dim, max_dim].\n pad64: Pads width and height with zeros to make them multiples of 64.\n If min_dim or min_scale are provided, it scales the image up\n before padding. max_dim is ignored in this mode.\n The multiple of 64 is needed to ensure smooth scaling of feature\n maps up and down the 6 levels of the FPN pyramid (2**6=64).\n crop: Picks random crops from the image. First, scales the image based\n on min_dim and min_scale, then picks a random crop of\n size min_dim x min_dim. Can be used in training only.\n max_dim is not used in this mode.\n\n Returns:\n image: the resized image\n window: (y1, x1, y2, x2). If max_dim is provided, padding might\n be inserted in the returned image. If so, this window is the\n coordinates of the image part of the full image (excluding\n the padding). 
The x2, y2 pixels are not included.\n scale: The scale factor used to resize the image\n padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n # Keep track of image dtype and return results in the same dtype\n image_dtype = image.dtype\n # Default window (y1, x1, y2, x2) and default scale == 1.\n h, w = image.shape[:2]\n window = (0, 0, h, w)\n scale = 1\n padding = [(0, 0), (0, 0), (0, 0)]\n crop = None\n\n if mode == \"none\":\n return image, window, scale, padding, crop\n\n # Scale?\n if min_dim:\n # Scale up but not down\n scale = max(1, min_dim / min(h, w))\n if min_scale and scale < min_scale:\n scale = min_scale\n\n # Does it exceed max dim?\n if max_dim and mode == \"square\":\n image_max = max(h, w)\n if round(image_max * scale) > max_dim:\n scale = max_dim / image_max\n\n # Resize image using bilinear interpolation\n if scale != 1:\n image = resize(image, (round(h * scale), round(w * scale)),\n preserve_range=True)\n\n # Need padding or cropping?\n if mode == \"square\":\n # Get new height and width\n h, w = image.shape[:2]\n top_pad = (max_dim - h) // 2\n bottom_pad = max_dim - h - top_pad\n left_pad = (max_dim - w) // 2\n right_pad = max_dim - w - left_pad\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"pad64\":\n h, w = image.shape[:2]\n # Both sides must be divisible by 64\n assert min_dim % 64 == 0, \"Minimum dimension must be a multiple of 64\"\n # Height\n if h % 64 > 0:\n max_h = h - (h % 64) + 64\n top_pad = (max_h - h) // 2\n bottom_pad = max_h - h - top_pad\n else:\n top_pad = bottom_pad = 0\n # Width\n if w % 64 > 0:\n max_w = w - (w % 64) + 64\n left_pad = (max_w - w) // 2\n right_pad = max_w - w - left_pad\n else:\n left_pad = right_pad = 0\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"crop\":\n # Pick a random crop\n h, w = image.shape[:2]\n y = random.randint(0, (h - min_dim))\n x = random.randint(0, (w - min_dim))\n crop = (y, x, min_dim, min_dim)\n image = image[y:y + min_dim, x:x + min_dim]\n window = (0, 0, min_dim, min_dim)\n else:\n raise Exception(\"Mode {} not supported\".format(mode))\n return image.astype(image_dtype), window, scale, padding, crop\n\n\ndef resize_mask(mask, scale, padding, crop=None):\n \"\"\"Resizes a mask using the given scale and padding.\n Typically, you get the scale and padding from resize_image() to\n ensure both, the image and the mask, are resized consistently.\n\n scale: mask scaling factor\n padding: Padding to add to the mask in the form\n [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n # Suppress warning from scipy 0.13.0, the output shape of zoom() is\n # calculated with round() instead of int()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n # mask [instance, height, width, affordance]\n # only scale height and width\n mask = scipy.ndimage.zoom(mask, zoom=[1, scale, scale, 1], order=0)\n if crop is not None:\n y, x, h, w = crop\n mask = mask[:, y:y + h, x:x + w]\n else:\n mask = np.pad(mask, padding, mode='constant', constant_values=0)\n return mask\n\n\ndef minimize_mask(bbox, mask, mini_shape):\n \"\"\"Resize masks to a smaller version to reduce memory load.\n Mini-masks can be resized back to image scale using expand_masks()\n\n See 
inspect_data.ipynb notebook for more details.\n \"\"\"\n mini_mask = np.zeros((mask.shape[0],) + mini_shape + (mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[0]):\n # Pick slice and cast to bool in case load_mask() returned wrong dtype\n m = mask[i, :, :, :].astype(bool)\n y1, x1, y2, x2 = bbox[i][:4]\n m = m[y1:y2, x1:x2]\n if m.size == 0:\n raise Exception(\"Invalid bounding box with area of zero\")\n # Resize with bilinear interpolation\n m = resize(m, mini_shape)\n mini_mask[i, :, :, :] = np.around(m).astype(np.bool)\n return mini_mask\n\n\ndef expand_mask(bbox, mini_mask, image_shape):\n \"\"\"Resizes mini masks back to image size. Reverses the change\n of minimize_mask().\n\n See inspect_data.ipynb notebook for more details.\n \"\"\"\n mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n m = mini_mask[:, :, i]\n y1, x1, y2, x2 = bbox[i][:4]\n h = y2 - y1\n w = x2 - x1\n # Resize with bilinear interpolation\n m = resize(m, (h, w))\n mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool)\n return mask\n\n\n# TODO: Build and use this function to reduce code duplication\ndef mold_mask(mask, config):\n pass\n\n\ndef unmold_mask(mask, bbox, image_shape):\n \"\"\"Converts a mask generated by the neural network to a format similar\n to its original shape.\n original: mask: [height, width] of type float. A small, typically 28x28 mask.\n new: mask: [height, width, NUM_AFFORDANCES] of type float. A small, typically 28x28 mask.\n bbox: [y1, x1, y2, x2]. The box to fit the mask in.\n\n Returns a binary mask with the same size as the original image.\n \"\"\"\n #threshold = 0.1\n threshold = 0.05\n y1, x1, y2, x2 = bbox\n mask = resize(mask, (y2 - y1, x2 - x1)) \n print(mask)\n print(mask.shape)\n #mask = mask * 255\n #mask = np.where(mask >= threshold, 1, 0).astype(np.bool)\n\n # Put the mask in the right location.\n full_mask = np.zeros(image_shape[:2] + (mask.shape[-1],), dtype=np.float)\n #full_mask = np.zeros(image_shape[:2] + (mask.shape[-1],), dtype=np.bool)\n full_mask[y1:y2, x1:x2] = mask\n return full_mask\n\n\n############################################################\n# Anchors\n############################################################\n\ndef generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):\n \"\"\"\n scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]\n ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]\n shape: [height, width] spatial shape of the feature map over which\n to generate anchors.\n feature_stride: Stride of the feature map relative to the image in pixels.\n anchor_stride: Stride of anchors on the feature map. 
For example, if the\n value is 2 then generate anchors for every other feature map pixel.\n \"\"\"\n # Get all combinations of scales and ratios\n scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))\n scales = scales.flatten()\n ratios = ratios.flatten()\n\n # Enumerate heights and widths from scales and ratios\n heights = scales / np.sqrt(ratios)\n widths = scales * np.sqrt(ratios)\n\n # Enumerate shifts in feature space\n shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride\n shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride\n shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)\n\n # Enumerate combinations of shifts, widths, and heights\n box_widths, box_centers_x = np.meshgrid(widths, shifts_x)\n box_heights, box_centers_y = np.meshgrid(heights, shifts_y)\n\n # Reshape to get a list of (y, x) and a list of (h, w)\n box_centers = np.stack(\n [box_centers_y, box_centers_x], axis=2).reshape([-1, 2])\n box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])\n\n # Convert to corner coordinates (y1, x1, y2, x2)\n boxes = np.concatenate([box_centers - 0.5 * box_sizes,\n box_centers + 0.5 * box_sizes], axis=1)\n return boxes\n\n\ndef generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides,\n anchor_stride):\n \"\"\"Generate anchors at different levels of a feature pyramid. Each scale\n is associated with a level of the pyramid, but each ratio is used in\n all levels of the pyramid.\n\n Returns:\n anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted\n with the same order of the given scales. So, anchors of scale[0] come\n first, then anchors of scale[1], and so on.\n \"\"\"\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n anchors = []\n for i in range(len(scales)):\n anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i],\n feature_strides[i], anchor_stride))\n return np.concatenate(anchors, axis=0)\n\n\n############################################################\n# Miscellaneous\n############################################################\n\ndef trim_zeros(x):\n \"\"\"It's common to have tensors larger than the available data and\n pad with zeros. This function removes rows that are all zeros.\n\n x: [rows, columns].\n \"\"\"\n assert len(x.shape) == 2\n return x[~np.all(x == 0, axis=1)]\n\n\ndef compute_matches(gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold=0.5, score_threshold=0.0):\n \"\"\"Finds matches between prediction and ground truth instances.\n\n Returns:\n gt_match: 1-D array. For each GT box it has the index of the matched\n predicted box.\n pred_match: 1-D array. 
For each predicted box, it has the index of\n the matched ground truth box.\n overlaps: [pred_boxes, gt_boxes] IoU overlaps.\n \"\"\"\n # Trim zero padding\n # TODO: cleaner to do zero unpadding upstream\n gt_boxes = trim_zeros(gt_boxes)\n gt_masks = gt_masks[:gt_boxes.shape[0], :, :, :]\n pred_boxes = trim_zeros(pred_boxes)\n pred_scores = pred_scores[:pred_boxes.shape[0]]\n # Sort predictions by score from high to low\n indices = np.argsort(pred_scores)[::-1]\n pred_boxes = pred_boxes[indices]\n pred_class_ids = pred_class_ids[indices]\n pred_scores = pred_scores[indices]\n pred_masks = pred_masks[indices]\n\n # Compute IoU overlaps [pred_masks, gt_masks]\n overlaps = compute_overlaps_masks(pred_masks, gt_masks)\n\n # Loop through predictions and find matching ground truth boxes\n match_count = 0\n pred_match = -1 * np.ones([pred_boxes.shape[0]])\n gt_match = -1 * np.ones([gt_boxes.shape[0]])\n for i in range(len(pred_boxes)):\n # Find best matching ground truth box\n # 1. Sort matches by score\n sorted_ixs = np.argsort(overlaps[i])[::-1]\n # 2. Remove low scores\n low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]\n if low_score_idx.size > 0:\n sorted_ixs = sorted_ixs[:low_score_idx[0]]\n # 3. Find the match\n for j in sorted_ixs:\n # If ground truth box is already matched, go to next one\n if gt_match[j] > -1:\n continue\n # If we reach IoU smaller than the threshold, end the loop\n iou = overlaps[i, j]\n if iou < iou_threshold:\n break\n # Do we have a match?\n if pred_class_ids[i] == gt_class_ids[j]:\n match_count += 1\n gt_match[j] = i\n pred_match[i] = j\n break\n\n return gt_match, pred_match, overlaps\n\n\ndef compute_ap(gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold=0.5):\n \"\"\"Compute Average Precision at a set IoU threshold (default 0.5).\n\n Returns:\n mAP: Mean Average Precision\n precisions: List of precisions at different class score thresholds.\n recalls: List of recall values at different class score thresholds.\n overlaps: [pred_boxes, gt_boxes] IoU overlaps.\n \"\"\"\n # Get matches and overlaps\n gt_match, pred_match, overlaps = compute_matches(\n gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold)\n\n # Compute precision and recall at each prediction box step\n precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)\n recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)\n\n # Pad with start and end values to simplify the math\n precisions = np.concatenate([[0], precisions, [0]])\n recalls = np.concatenate([[0], recalls, [1]])\n\n # Ensure precision values decrease but don't increase. This way, the\n # precision value at each recall threshold is the maximum it can be\n # for all following recall thresholds, as specified by the VOC paper.\n for i in range(len(precisions) - 2, -1, -1):\n precisions[i] = np.maximum(precisions[i], precisions[i + 1])\n\n # Compute mean AP over recall range\n indices = np.where(recalls[:-1] != recalls[1:])[0] + 1\n mAP = np.sum((recalls[indices] - recalls[indices - 1]) *\n precisions[indices])\n\n return mAP, precisions, recalls, overlaps\n\n\ndef compute_ap_range(gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_thresholds=None, verbose=1):\n \"\"\"Compute AP over a range or IoU thresholds. 
Default range is 0.5-0.95.\"\"\"\n # Default is 0.5 to 0.95 with increments of 0.05\n iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)\n\n # Compute AP over range of IoU thresholds\n AP = []\n for iou_threshold in iou_thresholds:\n ap, precisions, recalls, overlaps =\\\n compute_ap(gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_threshold=iou_threshold)\n if verbose:\n print(\"AP @{:.2f}:\\t {:.3f}\".format(iou_threshold, ap))\n AP.append(ap)\n AP = np.array(AP).mean()\n if verbose:\n print(\"AP @{:.2f}-{:.2f}:\\t {:.3f}\".format(\n iou_thresholds[0], iou_thresholds[-1], AP))\n return AP\n\n\ndef compute_recall(pred_boxes, gt_boxes, iou):\n \"\"\"Compute the recall at the given IoU threshold. It's an indication\n of how many GT boxes were found by the given prediction boxes.\n\n pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates\n gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates\n \"\"\"\n # Measure overlaps\n overlaps = compute_overlaps(pred_boxes, gt_boxes)\n iou_max = np.max(overlaps, axis=1)\n iou_argmax = np.argmax(overlaps, axis=1)\n positive_ids = np.where(iou_max >= iou)[0]\n matched_gt_boxes = iou_argmax[positive_ids]\n\n recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]\n return recall, positive_ids\n\n\n# ## Batch Slicing\n# Some custom layers support a batch size of 1 only, and require a lot of work\n# to support batches greater than 1. This function slices an input tensor\n# across the batch dimension and feeds batches of size 1. Effectively,\n# an easy way to support batches > 1 quickly with little code modification.\n# In the long run, it's more efficient to modify the code to support large\n# batches and getting rid of this function. Consider this a temporary solution\ndef batch_slice(inputs, graph_fn, batch_size, names=None):\n \"\"\"Splits inputs into slices and feeds each slice to a copy of the given\n computation graph and then combines the results. It allows you to run a\n graph on a batch of inputs even if the graph is written to support one\n instance only.\n\n inputs: list of tensors. All must have the same first dimension length\n graph_fn: A function that returns a TF tensor that's part of a graph.\n batch_size: number of slices to divide the data into.\n names: If provided, assigns names to the resulting tensors.\n \"\"\"\n if not isinstance(inputs, list):\n inputs = [inputs]\n\n outputs = []\n for i in range(batch_size):\n inputs_slice = [x[i] for x in inputs]\n output_slice = graph_fn(*inputs_slice)\n if not isinstance(output_slice, (tuple, list)):\n output_slice = [output_slice]\n outputs.append(output_slice)\n # Change outputs from a list of slices where each is\n # a list of outputs to a list of outputs and each has\n # a list of slices\n outputs = list(zip(*outputs))\n\n if names is None:\n names = [None] * len(outputs)\n\n result = [tf.stack(o, axis=0, name=n)\n for o, n in zip(outputs, names)]\n if len(result) == 1:\n result = result[0]\n\n return result\n\n\ndef download_trained_weights(coco_model_path, verbose=1):\n \"\"\"Download COCO trained weights from Releases.\n\n coco_model_path: local path of COCO trained weights\n \"\"\"\n if verbose > 0:\n print(\"Downloading pretrained model to \" + coco_model_path + \" ...\")\n with urllib.request.urlopen(COCO_MODEL_URL) as resp, open(coco_model_path, 'wb') as out:\n shutil.copyfileobj(resp, out)\n if verbose > 0:\n print(\"... 
done downloading pretrained model!\")\n\n\ndef norm_boxes(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [N, (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [N, (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.divide((boxes - shift), scale).astype(np.float32)\n\n\ndef denorm_boxes(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [N, (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [N, (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)\n\n\ndef resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,\n preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):\n \"\"\"A wrapper for Scikit-Image resize().\n\n Scikit-Image generates warnings on every call to resize() if it doesn't\n receive the right parameters. The right parameters depend on the version\n of skimage. This solves the problem by using different parameters per\n version. And it provides a central place to control resizing defaults.\n \"\"\"\n if LooseVersion(skimage.__version__) >= LooseVersion(\"0.14\"):\n # New in 0.14: anti_aliasing. Default it to False for backward\n # compatibility with skimage 0.13.\n return skimage.transform.resize(\n image, output_shape,\n order=order, mode=mode, cval=cval, clip=clip,\n preserve_range=preserve_range, anti_aliasing=anti_aliasing,\n anti_aliasing_sigma=anti_aliasing_sigma)\n else:\n return skimage.transform.resize(\n image, output_shape,\n order=order, mode=mode, cval=cval, clip=clip,\n preserve_range=preserve_range)\n" ]
[ [ "numpy.dot", "numpy.minimum", "numpy.sqrt", "tensorflow.stack", "numpy.around", "tensorflow.cast", "numpy.cumsum", "numpy.concatenate", "numpy.max", "numpy.all", "numpy.any", "numpy.exp", "numpy.where", "numpy.divide", "numpy.pad", "numpy.reshape", "numpy.arange", "scipy.ndimage.zoom", "numpy.stack", "numpy.argmax", "numpy.zeros", "numpy.log", "numpy.multiply", "numpy.delete", "numpy.argsort", "numpy.meshgrid", "numpy.array", "numpy.sum", "numpy.maximum", "numpy.ones", "tensorflow.log", "numpy.empty" ] ]
hduliufan/work
[ "951a69aad5de3387c26fabe417a939349def3df6" ]
[ "one_one.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom getlen_bit import getlen\nfrom begain import getbegain\nfrom x1x2 import x1_x2\nfrom exchange_normal import variation\nfrom fitvalue import fitvalue\n#计算精度(人为设定)0.001\ns=0.0001\na1=-3\na2=4.1\nb1=12.1\nb2=5.8\n#种群规模\nN=20\n#-3<=x1<=12.1 4.1<=x2<=5.8\n#二进制长度t1,t2\nt1=getlen(a1,b1,s)\nt2=getlen(a2,b2,s)\n#print(t1,t2)\nt=t1+t2\n#print(t)\n#二进制种群(N*t)\npop=getbegain(N,t)\n#print(pop)\nx1,x2=x1_x2(pop,t,t1,t2,a1,b1,a2,b2)\n#print(x1,x2)\n\ndef one_one(x1,x2,N):\n T=0\n #记录变异前的最大适应值\n fit1_=[]\n #记录变异后的最大适应值\n fit2_=[]\n #记录最终的适应值\n fit=[]\n while(T<10):\n #父本个体适应值(N*1)\n fit1=fitvalue(x1,x2)\n fit1_.append(np.max(fit1))\n #变异采用高斯算子即N(0,1)标准正太分布\n x11,x22=variation(N,x1,x2)\n #变异后个体适应值(N*1)\n fit2=fitvalue(x11,x22)\n fit2_.append(np.max(fit2))\n #记录索引\n i=0\n for fit_1,fit_2 in zip(fit1,fit2):\n if fit_1<fit_2:\n #变异前与变异后交换\n x1[i]=x11[i]\n x2[i]=x22[i]\n i=i+1\n T=T+1\n #输出最大适应值变化\n for b,a in zip (fit1_,fit2_):\n fit.append(np.where(b>a,b,a))\n #输出图形变异前后的最大适应值变化\n plt.title('1+1')\n plt.subplot(111)\n plt.xlabel('T')\n plt.ylabel(\"fitvalue\")\n plt.plot(range(1,T+1),fit1_,'b--',label='bef')\n plt.plot(range(1,T+1),fit2_,'g--',label='aft')\n plt.legend(loc='upper left')\n plt.plot(range(1,T+1),fit,'r-',label='maxfit')\n plt.show()\none_one(x1,x2,N)\n#print(x1,x2)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.max", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.where", "matplotlib.pyplot.ylabel" ] ]
buf1024/OpenData
[ "6268a5f7bee88cc943b3a05858b8ab6f371e8e3b" ]
[ "opendatatools/stock/stock_interface.py" ]
[ "# encoding: utf-8\n\nimport datetime\n\nfrom .stock_agent import SHExAgent, SZExAgent, CSIAgent, XueqiuAgent, SinaAgent, CNInfoAgent, EastMoneyAgent\nfrom opendatatools.common import get_current_day\n\nshex_agent = SHExAgent()\nszex_agent = SZExAgent()\ncsi_agent = CSIAgent()\nxq_agent = XueqiuAgent()\nsina_agent = SinaAgent()\ncninfo_agent = CNInfoAgent()\neastmoney_agent = EastMoneyAgent()\n\nxq_count_map = {\n '1m': -142,\n '5m': -142,\n '15m': -142,\n '30m': -142,\n '60m': -142,\n 'day' : -142,\n}\n\nbar_span_map = {\n '1m' : 1,\n '5m' : 5,\n '15m' : 15,\n '30m' : 30,\n '60m' : 60,\n 'day' : 1440,\n}\n\n\ndef make_index(period, trade_date):\n bar_index = list()\n span = bar_span_map[period]\n dt = datetime.datetime.strptime(trade_date,'%Y-%m-%d')\n bar_index.extend(pd.DatetimeIndex(start=\"%s 09:30:00\" % trade_date, end=\"%s 11:30:00\" % trade_date, freq='%sT' % span)[1:])\n bar_index.extend(pd.DatetimeIndex(start=\"%s 13:00:00\" % trade_date, end=\"%s 15:00:00\" % trade_date, freq='%sT' % span)[1:])\n return bar_index\n\ndef set_proxies(proxies):\n shex_agent.set_proxies(proxies)\n szex_agent.set_proxies(proxies)\n csi_agent.set_proxies(proxies)\n xq_agent.set_proxies(proxies)\n\ndef get_index_list(market='SH'):\n if market == 'SH':\n return shex_agent.get_index_list()\n\n if market == 'SZ':\n return szex_agent.get_index_list()\n\n if market == 'CSI':\n return csi_agent.get_index_list()\n\ndef get_index_component(symbol):\n temp = symbol.split(\".\")\n\n if len(temp) == 2:\n market = temp[1]\n index = temp[0]\n if market == 'SH':\n return shex_agent.get_index_component(index)\n elif market == 'SZ':\n return szex_agent.get_index_component(index)\n elif market == 'CSI':\n return csi_agent.get_index_component(index)\n else:\n return None\n else:\n return None\n\ndef get_rzrq_info(market='SH', date = None):\n if date is None:\n date = get_current_day(format = '%Y-%m-%d')\n\n if market == 'SH':\n return shex_agent.get_rzrq_info(date)\n\n if market == 'SZ':\n return szex_agent.get_rzrq_info(date)\n\n return None, None\n\ndef get_pledge_info(market='SH', date = None):\n if date is None:\n date = get_current_day(format = '%Y-%m-%d')\n\n if market == 'SH':\n return shex_agent.get_pledge_info(date)\n\n if market == 'SZ':\n return szex_agent.get_pledge_info(date)\n\n return None, None\n\ndef get_dividend(symbol):\n temp = symbol.split(\".\")\n\n if len(temp) == 2:\n market = temp[1]\n code = temp[0]\n if market == 'SH':\n return shex_agent.get_dividend(code)\n if market == 'SZ':\n return cninfo_agent.get_dividend(code)\n\ndef get_quote(symbols):\n return xq_agent.get_quote(symbols)\n\ndef fill_df(df, period, trade_date, symbol):\n df.index = df['time']\n index = make_index(period, trade_date)\n df_new = pd.DataFrame(index=index, columns=['last'])\n df_new['last'] = df['last']\n df_new.fillna(method='ffill', inplace=True)\n df_new['high'] = df['high']\n df_new['low'] = df['low']\n df_new['open'] = df['open']\n df_new.fillna(method='ffill', axis=1, inplace=True)\n df_new['change'] = df['change']\n df_new['percent'] = df['percent']\n df_new['symbol'] = symbol\n df_new['turnover_rate'] = df['turnover_rate']\n df_new['volume'] = df['volume']\n df_new['time'] = df_new.index\n df_new.fillna(0, inplace=True)\n return df_new\n\n# period 1m, 5m, 15m, 30m, 60m, day\ndef get_kline(symbol, trade_date, period):\n curr_date = datetime.datetime.strptime(trade_date, '%Y-%m-%d')\n next_date = datetime.datetime.strptime(trade_date, '%Y-%m-%d') + datetime.timedelta(days=1)\n timestamp = 
next_date.timestamp()\n\n timestamp = int ( timestamp * 1000)\n df, msg = xq_agent.get_kline(symbol, timestamp, period, xq_count_map[period])\n if len(df) == 0:\n return df, msg\n\n df = df[(df.time < next_date) & (df.time >= curr_date)]\n if len(df) < abs(xq_count_map[period]):\n df_new = fill_df(df, period, trade_date, symbol)\n return df_new, ''\n else:\n return df, ''\n\ndef get_kline_multisymbol(symbols, trade_date, period):\n\n symbol_list = symbols.split(',')\n\n timestamp = datetime.datetime.strptime(trade_date, '%Y-%m-%d').timestamp()\n timestamp = int ( timestamp * 1000)\n df, msg = xq_agent.get_kline_multisymbol(symbol_list, timestamp, period, xq_count_map[period])\n next_date = datetime.datetime.strptime(trade_date, '%Y-%m-%d') + datetime.timedelta(days=1)\n if df is None:\n return df, msg\n\n df = df[df.time < next_date]\n gp = df.groupby('symbol')\n df_list = list()\n for symbol, df_item in gp:\n if len(df_item) < xq_count_map[period]:\n df_list.append(fill_df(df_item, period, trade_date, symbol))\n else:\n df_list(df_item)\n\n return pd.concat(df_list), ''\n\ndef get_timestamp_list(start_date, end_date):\n timestamp_list = []\n curr_date = start_date\n while curr_date <= end_date:\n curr_datetime = datetime.datetime.strptime(curr_date, '%Y-%m-%d')\n timestamp = curr_datetime.timestamp()\n timestamp_list.append(int(timestamp * 1000))\n next_time = curr_datetime + datetime.timedelta(days=1)\n curr_date = datetime.datetime.strftime(next_time, '%Y-%m-%d')\n\n return timestamp_list\n\ndef get_kline_multidate(symbol, start_date, end_date, period):\n timestamp_list = get_timestamp_list(start_date, end_date)\n return xq_agent.get_kline_multitimestamp(symbol, timestamp_list, period, xq_count_map[period])\n\nimport pandas as pd\ndef get_daily(symbol, start_date, end_date):\n curr_date = start_date\n df_result = []\n while curr_date <= end_date:\n curr_datetime = datetime.datetime.strptime(curr_date, '%Y-%m-%d')\n next_time = curr_datetime + datetime.timedelta(days=100)\n next_date = datetime.datetime.strftime(next_time, '%Y-%m-%d')\n\n timestamp = curr_datetime.timestamp()\n df, msg = xq_agent.get_kline(symbol, int(timestamp*1000), 'day', 100)\n if df is not None and len(df) != 0:\n df_result.append(df[df['time']<next_time])\n\n curr_date = next_date\n\n if len(df_result) > 0:\n df = pd.concat(df_result)\n df = df[(df['time'] >= start_date) & (df['time'] <= end_date) ]\n return df, ''\n else:\n return None, '没有获取到数据'\n\ndef get_adj_factor(symbol):\n return sina_agent.get_adj_factor(symbol)\n\ndef get_trade_detail(symbol, trade_date):\n return sina_agent.get_trade_detail(symbol, trade_date)\n\ndef get_report_data(symbol='600000.SH', type='资产负债表'):\n\n dict_type = {\n '利润表' : 'lrb',\n '资产负债表' : 'fzb',\n '现金流量表' : 'llb',\n }\n\n if type not in dict_type:\n return None, 'type输入错误,可以输入 %s' % dict_type.keys()\n\n data = symbol.split(sep='.')\n market = data[1].lower()\n code = data[0]\n return cninfo_agent.get_report_data(market, code, dict_type[type])\n\ndef get_shareholder_structure(symbol='600000.SH'):\n data = symbol.split(sep='.')\n market = data[1].lower()\n code = data[0]\n return cninfo_agent.get_shareholder_structure(market, code)\n\n# 单位:百万元\ndef get_hist_money_flow(symbol):\n data = symbol.split(sep='.')\n market = data[1]\n if market == 'SH':\n marketnum = '1'\n else:\n marketnum = '2'\n code = data[0]+marketnum\n return eastmoney_agent.get_hist_money_flow(code)\n\n# 单位:万元\ndef get_realtime_money_flow(symbol):\n data = symbol.split(sep='.')\n market = data[1]\n if market == 
'SH':\n marketnum = '1'\n else:\n marketnum = '2'\n code = data[0]+marketnum\n return eastmoney_agent.get_realtime_money_flow(code)\n\n# 单位:亿元\ndef get_realtime_money_flow_market():\n return eastmoney_agent.get_realtime_money_flow_market()\n\ndef get_hist_money_flow_market():\n return eastmoney_agent.get_hist_money_flow_market()\ndef get_allstock_flow():\n return eastmoney_agent.get_allstock_flow()\n\n\n" ]
[ [ "pandas.DatetimeIndex", "pandas.concat", "pandas.DataFrame" ] ]
AWehrhahn/PyPSG
[ "adaa1e50998b3366541e16143034d6acdc379bb3" ]
[ "pypsg/psg.py" ]
[ "# ---------------------------------------------------\n# Planet Spectrum Generator Interface\n# PSG multiple-scattering model: https://psg.gsfc.nasa.gov/helpmodel.php\n# PSG databases: https://psg.gsfc.nasa.gov/helpatm.php\n# PSG API driver: https://psg.gsfc.nasa.gov/helpapi.php\n# ---------------------------------------------------\nfrom io import StringIO\nfrom os.path import dirname, join\nimport subprocess\nfrom datetime import datetime\n\nimport re\nimport hashlib\nfrom tempfile import NamedTemporaryFile\nimport numpy as np\n\nfrom astropy.utils.data import (\n import_file_to_cache,\n download_file,\n clear_download_cache,\n is_url_in_cache,\n)\nfrom astropy import units as u\n\n# Add Atmosphere scale units\nppb = u.def_unit(\n [\"ppb\", \"ppbv\"], 1e-9 * u.one, namespace=globals(), doc=\"Parts Per Billion\"\n)\nppm = u.def_unit(\n [\"ppm\", \"ppmv\", \"ppv\"],\n 1e3 * ppb,\n namespace=globals(),\n doc=\"Parts Per Million Volume\",\n)\nppt = u.def_unit(\n [\"ppt\", \"pptv\"], 1e6 * ppb, namespace=globals(), doc=\"Parts Per Thousand Volume\"\n)\nm2 = u.def_unit(\n [\"m2\", \"m-2\"], None, namespace=globals(), doc=\"Molecules per square meter\"\n)\ndiameter = u.def_unit(\n [\"diameter\"], None, namespace=globals(), doc=\"Diameter of the telescope\"\n)\ndiffrac = u.def_unit(\n [\"diffrac\"],\n None,\n namespace=globals(),\n doc=\"defined by the telescope diameter and center wavelength\",\n)\n\nscl = u.def_unit([\"scl\"], None, namespace=globals(), doc=\"Relative Scale\")\nu.add_enabled_units([ppb, ppm, ppt, m2, scl, diameter, diffrac])\n\n# Package Name for the Astropy cache\nPKGNAME = \"planet-spectrum-generator\"\n\n\nclass PSG_Config:\n def __init__(self, config) -> None:\n # Store all the other atmosphere parameters\n self.other = {}\n for key, value in config.items():\n self.other[key] = value\n\n @staticmethod\n def get_value(config, key, func=None):\n try:\n value = config[key]\n if func is not None:\n value = func(value)\n except KeyError:\n value = None\n return value\n\n def get_quantity(self, config, key, unit):\n return self.get_value(config, key, lambda x: float(x) * unit)\n\n def get_bool(self, config, key, true_value=\"Y\"):\n return self.get_value(config, key, lambda x: x == true_value)\n\n def get_list(self, config, key, func=None, array=False, sep=\",\"):\n value = self.get_value(config, key, lambda x: x.split(\",\"))\n if value is None:\n return None\n if func is not None:\n value = [func(v) for v in value]\n if array:\n value = np.array(value)\n return value\n\n @staticmethod\n def parse_units(unit, units, names):\n if unit is None:\n return None\n for u, n in zip(units, names):\n if unit == n:\n return u\n raise ValueError(\"Could not parse unit\")\n\n @staticmethod\n def get_units(unit, units, names):\n if unit is None:\n return None, None\n for un, n in zip(units, names):\n if unit == un:\n return un, n\n\n for un, n in zip(units, names):\n try:\n unit.to(un)\n return un, n\n except u.core.UnitConversionError:\n continue\n raise ValueError(\"Could not determine units\")\n\n def to_config(self):\n return self.other\n\n\nclass PSG_Object(PSG_Config):\n def __init__(self, config) -> None:\n #:str: Object type (e.g., Exoplanet, Planet, Asteroid, Moon, Comet, Object)\n self.object = config.get(\"OBJECT\")\n #:str: Object name\n self.name = config.get(\"OBJECT-NAME\")\n # Datetime\n if \"OBJECT-DATE\" in config.keys():\n match = re.match(\n r\"(\\d{4})/(\\d{2})/(\\d{2}) (\\d{2}):(\\d{2})\", config[\"OBJECT-DATE\"]\n )\n date = datetime(\n int(match.group(1)),\n 
int(match.group(2)),\n int(match.group(3)),\n int(match.group(4)),\n int(match.group(5)),\n )\n else:\n date = None\n #:datetime: Date of the observation (yyyy/mm/dd hh:mm) in Universal time [UT]\n self.date = date\n #:Quantity: Diameter of the object [km]\n self.diameter = self.get_quantity(config, \"OBJECT-DIAMETER\", u.km)\n # Gravity\n gravity_unit = config.get(\"OBJECT-GRAVITY-UNIT\")\n if gravity_unit == \"g\": # Surface Gravity\n gravity_unit = u.m / u.s ** 2\n elif gravity_unit == \"rho\": # Mean Density\n gravity_unit = u.g / u.cm ** 3\n elif gravity_unit == \"kg\": # Total Mass\n gravity_unit = u.kg\n #:Quantity: Gravity/density/mass of the object\n self.gravity = self.get_quantity(config, \"OBJECT-GRAVITY\", gravity_unit)\n #:Quantity: Distance of the planet to the Sun [AU], and for exoplanets the semi-major axis [AU]\n self.star_distance = self.get_quantity(config, \"OBJECT-STAR-DISTANCE\", u.AU)\n #:Quantity: Velocity of the planet to the Sun [km/s], and for exoplanets the RV amplitude [km/s]\n self.star_velocity = self.get_quantity(config, \"OBJECT-STAR-VELOCITY\", (u.km / u.s))\n #:Quantity: Sub-solar east longitude [degrees]\n self.solar_longitude = self.get_quantity(config, \"OBJECT-SOLAR-LONGITUDE\", u.deg)\n #:Quantity: Sub-solar latitude [degrees]\n self.solar_latitude = self.get_quantity(config, \"OBJECT-SOLAR-LATITUDE\", u.deg)\n #:Quantity: Angular parameter (season/phase) that defines the position of the planet moving along its Keplerian orbit. For exoplanets, 0:Secondary transit, 180:Primary transit, 90/270:Opposition. For solar-system bodies, 0:'N spring equinox', 90:'N summer solstice', 180:'N autumn equinox', 270:'N winter solstice' [degrees]\n self.season = self.get_quantity(config, \"OBJECT-SEASON\", u.deg)\n #:Quantity: Orbital inclination [degree], mainly relevant for exoplanets. Zero is phase on, 90 is a transiting orbit\n self.inclination = self.get_quantity(config, \"OBJECT-INCLINATION\", u.deg)\n #:float: Orbital eccentricity, mainly relevant for exoplanets\n self.eccentricity = self.get_value(config, \"OBJECT-ECCENTRICITY\", float)\n #:Quantity: Orbital longitude of periapse [degrees]. 
It indicates the phase at which the planet reaches periapsis\n self.periapsis = self.get_quantity(config, \"OBJECT-PERIAPSIS\", u.deg)\n #:str: Stellar type of the parent star [O/B/A/F/G/K/M]\n self.star_type = config.get(\"OBJECT-STAR-TYPE\")\n #:Quantity: Temperature of the parent star [K]\n self.star_temperature = self.get_quantity(config, \"OBJECT-STAR-TEMPERATURE\", u.K)\n #:Quantity: Radius of the parent star [Rsun]\n self.star_radius = self.get_quantity(config, \"OBJECT-STAR-RADIUS\", u.Rsun)\n #:float: Metallicity of the parent star and object with respect to the Sun in log [dex]\n self.star_metallicity = self.get_value(config, \"OBJECT-STAR-METALLICITY\", float)\n #:Quantity: Sub-observer east longitude [degrees]\n self.obs_longitude = self.get_quantity(config, \"OBJECT-OBS-LONGITUDE\", u.deg)\n #:Quantity: Sub-observer latitude, for exoplanets inclination [degrees]\n self.obs_latitude = self.get_quantity(config, \"OBJECT-OBS-LATITUDE\", u.deg)\n #:Quantity: Relative velocity between the observer and the object [km/s]\n self.obs_velocity = self.get_quantity(config, \"OBJECT-OBS-VELOCITY\", u.km / u.s)\n #:Quantity: This field is computed by the geometry module - It is the apparent rotational period of the object as seen from the observer [days]\n self.period = self.get_quantity(config, \"OBJECT-PERIOD\", u.day)\n #:str: This field reports the orbital parameters for small bodies. It is only relevant for display purposes of the orbital diagram.\n self.orbit = config.get(\"OBJECT-ORBIT\")\n\n @property\n def gravity_unit(self):\n \"\"\" Unit for the OBJECT-GRAVITY field, g:'Surface gravity [m/s2]', rho:'Mean density [g/cm3]', or kg:'Total mass [kg]' \"\"\"\n return self.gravity.unit\n\n def to_config(self):\n gravity_unit_loc, gravity_unit = self.get_units(\n self.gravity_unit,\n [u.m / u.s ** 2, u.g / u.cm ** 3, u.kg],\n [\"g\", \"rho\", \"kg\"],\n )\n\n config = {\n \"OBJECT\": self.object,\n \"OBJECT-NAME\": self.name,\n \"OBJECT-DATE\": f\"{self.date.year:04}/{self.date.month:02}/{self.date.day:02} {self.date.hour:02}:{self.date.minute:02}\" if self.date is not None else None,\n \"OBJECT-DIAMETER\": self.diameter.to_value(u.km) if self.diameter is not None else None,\n \"OBJECT-GRAVITY\": self.gravity.to_value(gravity_unit_loc) if self.gravity is not None and gravity_unit_loc is not None else None,\n \"OBJECT-GRAVITY-UNIT\": gravity_unit,\n \"OBJECT-STAR-DISTANCE\": self.star_distance.to_value(u.AU) if self.star_distance is not None else None,\n \"OBJECT-STAR-VELOCITY\": self.star_velocity.to_value(u.km / u.s) if self.star_velocity is not None else None,\n \"OBJECT-SOLAR-LONGITUDE\": self.solar_longitude.to_value(u.deg) if self.solar_longitude is not None else None,\n \"OBJECT-SOLAR-LATITUDE\": self.solar_latitude.to_value(u.deg) if self.solar_latitude is not None else None,\n \"OBJECT-SEASON\": self.season.to_value(u.deg) if self.season is not None else None,\n \"OBJECT-INCLINATION\": self.inclination.to_value(u.deg) if self.inclination is not None else None,\n \"OBJECT-ECCENTRICITY\": self.eccentricity,\n \"OBJECT-PERIAPSIS\": self.periapsis.to_value(u.deg) if self.periapsis is not None else None,\n \"OBJECT-STAR-TYPE\": self.star_type,\n \"OBJECT-STAR-TEMPERATURE\": self.star_temperature.to_value(u.K) if self.star_temperature is not None else None,\n \"OBJECT-STAR-RADIUS\": self.star_radius.to_value(u.Rsun) if self.star_radius is not None else None,\n \"OBJECT-STAR-METALLICITY\": self.star_metallicity,\n \"OBJECT-OBS-LONGITUDE\": self.obs_longitude.to_value(u.deg) if 
self.obs_longitude is not None else None,\n \"OBJECT-OBS-LATITUDE\": self.obs_latitude.to_value(u.deg) if self.obs_latitude is not None else None,\n \"OBJECT-OBS-VELOCITY\": self.obs_velocity.to_value(u.km / u.s) if self.obs_velocity is not None else None,\n \"OBJECT-PERIOD\": self.period.to_value(u.day) if self.period is not None else None,\n \"OBJECT-ORBIT\": self.orbit,\n }\n config = {k: str(v) for k, v in config.items() if v is not None}\n return config\n\n\nclass PSG_Geometry(PSG_Config):\n def __init__(self, config) -> None:\n #:str: Type of observing geometry\n self.geometry = config.get(\"GEOMETRY\")\n #:str: Reference geometry (e.g., ExoMars, Maunakea), default is user defined or 'User'\n self.ref = config.get(\"GEOMETRY-REF\")\n\n offset_unit = config.get(\"GEOMETRY-OFFSET-UNIT\")\n offset_unit = self.parse_units(offset_unit, [u.arcsec, u.arcmin, u.deg, u.km, u.one],\n [\"arcsec\", \"arcmin\", \"degree\", \"km\", \"diameter\"],)\n #:quantity: Vertical offset with respect to the sub-observer location\n self.offset_ns = self.get_quantity(config, \"GEOMETRY-OFFSET-NS\", offset_unit)\n #:quantity: Horizontal offset with respect to the sub-observer location\n self.offset_ew = self.get_quantity(config, \"GEOMETRY-OFFSET-EW\", offset_unit)\n\n altitude_unit = config.get(\"GEOMETRY-ALTITUDE-UNIT\")\n altitude_unit = self.parse_units(altitude_unit, [u.AU, u.km, u.one, u.pc], [\"AU\", \"km\", \"diameter\", \"pc\"])\n #:quantity: Distance between the observer and the surface of the planet\n self.obs_altitude = self.get_quantity(config, \"GEOMETRY-OBS-ALTITUDE\", altitude_unit)\n #:quantity: The azimuth angle between the observational projected vector and the solar vector on the reference plane\n self.azimuth = self.get_quantity(config, \"GEOMETRY-AZIMUTH\", u.deg)\n #:float: Parameter for the selected geometry, for Nadir / Lookingup this field indicates the zenith angle [degrees], for limb / occultations this field indicates the atmospheric height [km] being sampled\n self.user_param = self.get_value(config, \"GEOMETRY-USER-PARAM\", float)\n #:str: For stellar occultations, this field indicates the type of the occultation star [O/B/A/F/G/K/M]\n self.stellar_type = config.get(\"GEOMETRY-STELLAR-TYPE\")\n #:quantity: For stellar occultations, this field indicates the temperature [K] of the occultation star\n self.stellar_temperature = self.get_quantity(config, \"GEOMETRY-STELLAR-TEMPERATURE\", u.K)\n #:quantity: For stellar occultations, this field indicates the brightness [magnitude] of the occultation star\n self.stellar_magnitude = self.get_quantity(config, \"GEOMETRY-STELLAR-MAGNITUDE\", u.mag)\n #:str: This field is computed by the geometry module - It indicates the angle between the observer and the planetary surface\n self.obs_angle = config.get(\"GEOMETRY-OBS-ANGLE\")\n #:str: This field is computed by the geometry module - It indicates the angle between the Sun and the planetary surface\n self.solar_angle = config.get(\"GEOMETRY-SOLAR-ANGLE\")\n #:int: This field allows to divide the observable disk in finite rings so radiative-transfer calculations are performed with higher accuracy\n self.disk_angles = int(config.get(\"GEOMETRY-DISK-ANGLES\"))\n #:quantity: This field is computed by the geometry module - It indicates the phase between the Sun and observer\n self.phase = self.get_quantity(config, \"GEOMETRY-PHASE\", u.deg)\n #:str: This field is computed by the geometry module - It indicates how much the beam fills the planetary area (1:maximum)\n self.planet_fraction = 
config.get(\"GEOMETRY-PLANET-FRACTION\")\n #:float: This field is computed by the geometry module - It indicates how much the beam fills the parent star (1:maximum)\n self.star_fraction = self.get_value(config, \"GEOMETRY-STAR-FRACTION\", float)\n #:float: This field is computed by the geometry module - It indicates the projected distance between the beam and the parent star in arcsceconds\n self.star_distance = self.get_value(config, \"GEOMETRY-STAR-DISTANCE\", float)\n #:str: This field is computed by the geometry module - It indicates the rotational Doppler shift [km/s] affecting the spectra and the spread of rotational velocities [km/s] within the FOV\n self.rotation = config.get(\"GEOMETRY-ROTATION\")\n #:str: This field is computed by the geometry module - It indicates the scaling factor between the integrated reflectance for the FOV with respect to the BRDF as computed using the geometry indidence/emission angles\n self.brdfscaler = config.get(\"GEOMETRY-BRDFSCALER\")\n\n @property\n def offset_unit(self):\n \"\"\" Unit of the GEOMETRY-OFFSET field, arcsec / arcmin / degree / km / diameter \"\"\"\n return self.offset_ns.unit\n\n @property\n def altitude_unit(self):\n \"\"\" Unit of the GEOMETRY-OBS-ALTITUDE field, AU / km / diameter and pc:'parsec' \"\"\"\n return self.obs_altitude.unit\n\n def to_config(self):\n loc_offset_unit, offset_unit = self.get_units(\n self.offset_unit,\n [u.arcsec, u.arcmin, u.deg, u.km, u.one],\n [\"arcsec\", \"arcmin\", \"degree\", \"km\", \"diameter\"],\n )\n loc_altitude_unit, altitude_unit = self.get_units(\n self.altitude_unit,\n [u.AU, u.km, u.pc, u.one],\n [\"AU\", \"km\", \"pc\", \"diameter\"],\n )\n\n config = {\n \"GEOMETRY\": self.geometry,\n \"GEOMETRY-REF\": self.ref,\n \"GEOMETRY-OFFSET-NS\": self.offset_ns.to_value(loc_offset_unit) if self.offset_ns is not None and loc_offset_unit is not None else None,\n \"GEOMETRY-OFFSET-EW\": self.offset_ew.to_value(loc_offset_unit) if self.offset_ew is not None and loc_offset_unit is not None else None,\n \"GEOMETRY-OFFSET-UNIT\": offset_unit,\n \"GEOMETRY-OBS-ALTITUDE\": self.obs_altitude.to_value(loc_altitude_unit) if self.obs_altitude is not None and loc_altitude_unit is not None else None,\n \"GEOMETRY-ALTITUDE-UNIT\": altitude_unit,\n \"GEOMETRY-AZIMUTH\": self.azimuth.to_value(u.deg) if self.azimuth is not None else None,\n \"GEOMETRY-USER-PARAM\": self.user_param,\n \"GEOMETRY-STELLAR-TYPE\": self.stellar_type,\n \"GEOMETRY-STELLAR-TEMPERATURE\": self.stellar_temperature.to_value(u.K) if self.stellar_temperature is not None else None,\n \"GEOMETRY-STELLAR-MAGNITUDE\": self.stellar_magnitude.to_value(u.mag) if self.stellar_magnitude is not None else None,\n \"GEOMETRY-OBS-ANGLE\": self.obs_angle,\n \"GEOMETRY-SOLAR-ANGLE\": self.solar_angle,\n \"GEOMETRY-DISK-ANGLES\": self.disk_angles,\n \"GEOMETRY-PHASE\": self.phase.to_value(u.deg) if self.phase is not None else None,\n \"GEOMETRY-PLANET-FRACTION\": self.planet_fraction,\n \"GEOMETRY-STAR-FRACTION\": self.star_fraction,\n \"GEOMETRY-STAR-DISTANCE\": self.star_distance,\n \"GEOMETRY-ROTATION\": self.rotation,\n \"GEOMETRY-BRDFSCALER\": self.brdfscaler,\n }\n config = {k: str(v) for k, v in config.items() if v is not None}\n return config\n\n\nclass PSG_Atmosphere(PSG_Config):\n # from https://hitran.org/docs/molec-meta/\n hitran_molecule_id = {\n \"H2O\": 1,\n \"CO2\": 2,\n \"O3\": 3,\n \"N2O\": 4,\n \"CO\": 5,\n \"CH4\": 6,\n \"O2\": 7,\n \"NO\": 8,\n \"SO2\": 9,\n \"NO2\": 10,\n \"NH3\": 11,\n \"HNO3\": 12,\n \"OH\": 13,\n \"HF\": 14,\n 
\"HCl\": 15,\n \"HBr\": 16,\n \"HI\": 17,\n \"ClO\": 18,\n \"OCS\": 19,\n \"H2CO\": 20,\n \"HOCl\": 21,\n \"N2\": 22,\n \"HCN\": 23,\n \"CH3Cl\": 24,\n \"H2O2\": 25,\n \"C2H2\": 26,\n \"C2H6\": 27,\n \"PH3\": 28,\n \"COF2\": 29,\n \"SF6\": 30,\n \"H2S\": 31,\n \"HCOOH\": 32,\n \"HO2\": 33,\n \"O\": 34,\n \"ClONO2\": 35,\n \"NO+\": 36,\n \"HOBr\": 37,\n \"C2H4\": 38,\n \"CH3OH\": 39,\n \"CH3Br\": 40,\n \"CH3CN\": 41,\n \"CF4\": 42,\n \"C4H2\": 43,\n \"HC3N\": 44,\n \"H2\": 45,\n \"CS\": 46,\n \"SO3\": 47,\n \"C2N2\": 48,\n \"COCl2\": 49,\n \"CS2\": 53,\n \"NF3\": 55,\n }\n\n def __init__(self, config) -> None:\n # Handle the component molecules\n self._gas = self.get_list(config, \"ATMOSPHERE-GAS\")\n #:list(str): Sub-type of the gases, e.g. 'HIT[1], HIT[2]'\n self.type = self.get_list(config, \"ATMOSPHERE-TYPE\")\n abun = self.get_list(config, \"ATMOSPHERE-ABUN\", func=float)\n unit = self.get_list(config, \"ATMOSPHERE-UNIT\", func=u.Unit)\n #:list(quantity): Abundance of gases. The values can be assumed to be same across all altitudes/layers [%,ppmv,ppbv,pptv,m-2], or as a multiplier [scl] to the provided vertical profile\n self.abun = [a * u for a, u in zip(abun, unit)] if abun is not None and unit is not None else None\n\n # Handle the Aerosols\n self._aeros = self.get_list(config, \"ATMOSPHERE-AEROS\")\n #:list(str): Sub-type of the aerosols\n self.atype = self.get_list(config, \"ATMOSPHERE-ATYPE\")\n abun = self.get_list(config, \"ATMOSPHERE-AABUN\", func=float)\n unit = self.get_list(config, \"ATMOSPHERE-AUNIT\", func=u.Unit)\n #:list(quantity): Abundance of aerosols. The values can be assumed to be same across all altitudes/layers [%,ppm,ppb,ppt,Kg/m2], or as a multiplier [scaler] to the provided vertical profile\n self.aabun = [a * u for a, u in zip(abun, unit)] if abun is not None and unit is not None else None\n size = self.get_list(config, \"ATMOSPHERE-ASIZE\", func=float)\n unit = self.get_list(config, \"ATMOSPHERE-ASUNI\", func=float)\n #:list(quantity): Effective radius of the aerosol particles. 
The values can be assumed to be same across all layers [um, m, log(um)], or as a multiplier [scaler] to the provided size vertical profile\n self.asize = [a * u for a, u in zip(size, unit)] if size is not None and unit is not None else None\n\n # Handle the atmosphere layers\n #:list(str): Molecules quantified by the vertical profile\n self.layers_molecules = self.get_list(config, \"ATMOSPHERE-LAYERS-MOLECULES\")\n nlayers = self.get_value(config, \"ATMOSPHERE-LAYERS\", int)\n if nlayers is not None:\n try:\n layers = [config[f\"ATMOSPHERE-LAYER-{i}\"] for i in range(1, nlayers + 1)]\n layers = StringIO(\"\\n\".join(layers))\n layers = np.genfromtxt(layers, delimiter=\",\")\n except KeyError:\n layers = None\n else:\n layers = None\n #:array: Values for that specific layer: Pressure[bar], Temperature[K], gases[mol/mol], aerosols [kg/kg] - Optional fields: Altitude[km], X_size[m, aerosol X particle size]\n self.layer = layers\n #:str: Parameters defining the 3D General Circulation Model grid: num_lons, num_lats, num_alts, lon0, lat0, delta_lon, delta_lat, variables (csv)\n self.gcm_parameters = config.get(\"ATMOSPHERE-GCM-PARAMETERS\")\n #:str: The structure of the atmosphere, None / Equilibrium:'Hydrostatic equilibrium' / Coma:'Cometary expanding coma'\n self.structure = config.get(\"ATMOSPHERE-STRUCTURE\")\n #:float: For equilibrium atmospheres, this field defines the surface pressure; while for cometary coma, this field indicates the gas production rate\n self.pressure = self.get_value(config, \"ATMOSPHERE-PRESSURE\", float)\n #:str: The unit of the ATMOSPHERE-PRESSURE field, Pa:Pascal / bar / kbar / mbar / ubar / at / atm / torr / psi / gas:'molecules / second' / gasau:'molecules / second at rh=1AU'\n self.punit = config.get(\"ATMOSPHERE-PUNIT\")\n #:quantity: For atmospheres without a defined P/T profile, this field indicates the temperature across all altitudes\n self.temperature = self.get_quantity(config, \"ATMOSPHERE-TEMPERATURE\", u.K)\n #:float: Molecular weight of the atmosphere [g/mol] or expansion velocity [m/s] for expanding atmospheres\n self.weight = self.get_value(config, \"ATMOSPHERE-WEIGHT\", float)\n #:str: Continuum processes to be included in the calculation\n self.continuum = config.get(\"ATMOSPHERE-CONTINUUM\")\n #:str: For expanding cometary coma, this field indicates the photodissociation lifetime of the molecules [s]\n self.tau = config.get(\"ATMOSPHERE-TAU\")\n #:int: When performing scattering aerosols calculations, this parameter indicates the number of n-stream pairs - Use 0 for extinction calculations only (e.g. transit, occultation)\n self.nmax = self.get_value(config, \"ATMOSPHERE-NMAX\", int)\n #:int: When performing scattering aerosols calculations, this parameter indicates the number of scattering Legendre polynomials used for describing the phase function - Use 0 for extinction calculations only (e.g. 
transit, occultation)\n self.lmax = self.get_value(config, \"ATMOSPHERE-LMAX\", int)\n #:str: Description establishing the source/reference for the vertical profile\n self.description = config.get(\"ATMOSPHERE-DESCRIPTION\")\n\n def to_config(self):\n config = {\n \"ATMOSPHERE-NGAS\": self.ngas,\n \"ATMOSPHERE-GAS\": \",\".join([str(v) for v in self.gas]) if self.gas is not None else None,\n \"ATMOSPHERE-TYPE\": \",\".join([str(v) for v in self.type]) if self.type is not None else None,\n \"ATMOSPHERE-ABUN\": \",\".join([str(v.value) for v in self.abun]) if self.abun is not None else None,\n \"ATMOSPHERE-UNIT\": \",\".join([str(v.unit) for v in self.abun]) if self.abun is not None else None,\n \"ATMOSPHERE-NAERO\": self.naero,\n \"ATMOSPHERE-AEROS\": \",\".join([str(v) for v in self.aeros]) if self.aeros is not None else None,\n \"ATMOSPHERE-ATYPE\": \",\".join([str(v) for v in self.atype]) if self.atype is not None else None,\n \"ATMOSPHERE-AABUN\": \",\".join([str(v.value) for v in self.aabun]) if self.aabun is not None else None,\n \"ATMOSPHERE-AUNIT\": \",\".join([str(v.unit) for v in self.aabun]) if self.aabun is not None else None,\n \"ATMOSPHERE-ASIZE\": \",\".join([str(v.value) for v in self.asize]) if self.asize is not None else None,\n \"ATMOSPHERE-ASUNI\": \",\".join([str(v.unit) for v in self.asize]) if self.asize is not None else None,\n \"ATMOSPHERE-LAYERS-MOLECULES\": \",\".join(\n [str(v) for v in self.layers_molecules]\n ) if self.layers_molecules is not None else None,\n \"ATMOSPHERE-LAYERS\": self.layers ,\n \"ATMOSPHERE-STRUCTURE\": self.structure,\n \"ATMOSPHERE-PRESSURE\": self.pressure,\n \"ATMOSPHERE-PUNIT\": self.punit,\n \"ATMOSPHERE-TEMPERATURE\": self.temperature.to_value(u.K) if self.temperature is not None else None,\n \"ATMOSPHERE-WEIGHT\": self.weight,\n \"ATMOSPHERE-CONTINUUM\": self.continuum,\n \"ATMOSPHERE-TAU\": self.tau,\n \"ATMOSPHERE-NMAX\": self.nmax,\n \"ATMOSPHERE-LMAX\": self.lmax,\n \"ATMOSPHERE-DESCRIPTION\": self.description,\n \"ATMOSPHERE-GCM-PARAMETERS\": self.gcm_parameters,\n }\n if self.layers is not None:\n for i in range(1, self.layers + 1):\n config[f\"ATMOSPHERE-LAYER-{i}\"] = np.array2string(\n self.layer[i - 1], separator=\",\", max_line_width=np.inf\n )[1:-1]\n\n config = {k: str(v) for k, v in config.items() if v is not None}\n return config\n\n @property\n def gas(self):\n #:list(str): Name of the gases to include in the simulation, e.g 'H2O, CO2'. 
Only these will considered for the radiative transfer\n return self._gas\n\n @gas.setter\n def gas(self, value):\n self._gas = value\n self.abun = [1 * scl] * len(value)\n self.type = [f\"HIT[{self.hitran_molecule_id[v]}]\" for v in self.gas]\n\n @property\n def unit(self):\n #:list(unit): Unit of the ATMOSPHERE-ABUN field, % / ppmv / ppbv / pptv / m2:'molecules/m2' / scl:'scaler of profile'\n return [a.unit for a in self.abun]\n\n @property\n def ngas(self):\n #:int: Number of gases to include in the simulation, maximum 20\n return len(self.gas)\n\n @property\n def aeros(self):\n return self._aeros\n\n @aeros.setter\n def aeros(self, value):\n self._aeros = value\n self.aabun = [1 * scl] * len(value)\n self.atype = [\"\"] * len(value)\n self.asize = [1 * scl] * len(value)\n\n @property\n def aunit(self):\n #:list(unit): Unit of the ATMOSPHERE-AABUN field, % / ppmv / ppbv / pptv / m2:'molecules/m2' / scl:'scaler of profile'\n return [a.unit for a in self.aabun]\n\n @property\n def asuni(self):\n #:list(init): Unit of the size of the aerosol particles\n if self.asize is None:\n return None\n return [a.unit for a in self.asize]\n\n @property\n def naero(self):\n #:int: Number of aerosols to include in the simulation, maximum 20\n if self.aeros is None:\n return None\n return len(self.aeros)\n\n @property\n def layers(self):\n return self.layer.shape[0]\n\n\nclass PSG_Surface(PSG_Config):\n def __init__(self, config) -> None:\n #:str: Type of scattering model describing the surface, and the model parameters\n self.model = self.get_value(config, \"SURFACE-MODEL\")\n #:quantity: Temperature of the surface [K]\n self.temperature = self.get_quantity(config, \"SURFACE-TEMPERATURE\", u.K)\n #:float: Albedo the surface [0:non-reflectance, 1:fully-reflective]\n self.albedo = self.get_value(config, \"SURFACE-ALBEDO\", float)\n #:float: Emissivity of the surface [0:non-emitting, 1:perfect-emitter]\n self.emissivity = self.get_value(config, \"SURFACE-EMISSIVITY\", float)\n #:float: For expanding cometary coma, this value indicates an scaling value for the dust in the coma\n self.gas_ratio = self.get_value(config, \"SURFACE-GAS-RATIO\", float)\n #:str: Unit of the dust abundance, [ratio] is dust/gas mass ratio, while [afrho] is the Afrho value and [lafrho] is log[Afrho/Q]\n self.gas_unit = config.get(\"SURFACE-GAS-UNIT\")\n #:int: Number of components describing the surface properties [areal mixing]\n self.nsurf = self.get_value(config, \"SURFACE-NSURF\", int)\n #:str: Name of surface components to be included in the simulation\n self.surf = config.get(\"SURFACE-SURF\")\n #:str: Sub-type of the surface components\n self.type = config.get(\"SURFACE-TYPE\")\n #:str: Relative abundance of the surface components. 
For the remaining abundance, average surface albedo/emissivity will be considered\n self.abun = config.get(\"SURFACE-ABUN\")\n #:Unit: Unit of the SURFACE-ABUN field, % / ppm / ppv\n self.unit = self.get_value(config, \"SURFACE-UNIT\", u.Unit)\n #:str: Thickness for each surface component [um]\n self.thick = self.get_quantity(config, \"SURFACE-THICK\", u.um)\n\n def to_config(self):\n config = {\n \"SURFACE-MODEL\": self.model,\n \"SURFACE-TEMPERATURE\": self.temperature.to_value(u.K)\n if self.temperature is not None\n else None,\n \"SURFACE-ALBEDO\": self.albedo,\n \"SURFACE-EMISSIVITY\": self.emissivity,\n \"SURFACE-GAS-RATIO\": self.gas_ratio,\n \"SURFACE-GAS-UNIT\": self.gas_unit,\n \"SURFACE-NSURF\": self.nsurf,\n \"SURFACE-SURF\": self.surf,\n \"SURFACE-TYPE\": self.type,\n \"SURFACE-ABUN\": self.abun,\n \"SURFACE-UNIT\": self.unit,\n \"SURFACE-THICK\": self.thick.to_value(u.um)\n if self.thick is not None\n else None,\n }\n config = {k: str(v) for k, v in config.items() if v is not None}\n return config\n\n\nclass PSG_Generator(PSG_Config):\n def __init__(self, config) -> None:\n # Unit of the GENERATOR-RANGE fields, um / nm / mm / An:'Angstrom' / cm:'Wavenumber [cm-1]' / MHz / GHz / kHz\n range_unit = config.get(\"GENERATOR-RANGEUNIT\")\n range_unit = self.parse_units(range_unit, [u.um, u.nm, u.mm, u.AA, 1 / u.cm, u.MHz, u.GHz, u.kHz],\n [\"um\", \"nm\", \"An\", \"cm\", \"MHz\", \"GHz\", \"kHz\"],)\n #:quantity: Lower spectral range for the simulation\n self.range1 = self.get_quantity(config, \"GENERATOR-RANGE1\", range_unit)\n #:quantity: Upper spectral range for the simulation\n self.range2 = self.get_quantity(config, \"GENERATOR-RANGE2\", range_unit)\n\n resolution_unit = config.get(\"GENERATOR-RESOLUTIONUNIT\")\n resolution_unit = self.parse_units(resolution_unit, [u.one, u.um, u.nm, u.mm, u.AA, 1/u.cm, u.MHz, u.GHz, u.kHz], [\"RP\", \"um\", \"nm\", \"mm\", \"An\", \"cm\", \"MHz\", \"GHz\", \"kHz\"])\n #:quantity: Spectral resolution for the simulation. PSG assumes that the sampling resolution is equal is to the instrumental resolution, yet radiative transfer resolutions are always performed at the necessary/higher resolutions in order to accurately describe the lineshapes\n self.resolution = self.get_quantity(config, \"GENERATOR-RESOLUTION\", resolution_unit)\n #:bool: Convolution kernel applied to the spectra, default is 'N'\n self.resolution_kernel = self.get_bool(config, \"GENERATOR-RESOLUTIONKERNEL\")\n #:bool: Flag indicating whether to include molecular signatures as generated with PUMAS or CEM [Y/N]\n self.gas_model = self.get_bool(config, \"GENERATOR-GAS-MODEL\")\n #:bool: Flag indicating whether to include continuum signatures as generated by the surface, the star (when in the field) and dust/nucleus (when synthesizing comets) [Y/N]\n self.cont_model = self.get_bool(config, \"GENERATOR-CONT-MODEL\")\n #:bool: Flag indicating whether to include stellar absorption signatures in the reflected sunlight / stellar spectra [Y/N]\n self.cont_stellar = self.get_bool(config, \"GENERATOR-CONT-STELLAR\")\n #:bool: Flag indicating whether we are synthesizing planetary spectra as observed with a ground-based telescope. 
This flag will ensure that the noise module properly includes telluric signatures\n self.trans_show = self.get_bool(config, \"GENERATOR-TRANS-SHOW\")\n #:bool: Flag indicating whether to show the spectra as observed and multiplied by the telluric transmittance [Y]\n self.trans_apply = self.get_bool(config, \"GENERATOR-TRANS-APPLY\")\n #:str: Keyword [SS-WW] indicating the site [SS] and water abundance [WW]. Values of SS are 00:'0m (sea level)', 01:'2,600m (8,500 feet)', 02:'4,200m (14,000 feet)', 03:'14,000m (46,000 feet)', 04:'35,000m (120,000 feet)'. Values of WW are 00:'10% tropical', 01:'30% tropical', 02:'70% tropical', 03:'100% tropical'\n self.trans = config.get(\"GENERATOR-TRANS\")\n #:str: Radiation unit for the generated spectra, see full list of permitted keywords in the 'Modeling > Radiation Units' section\n self.rad_units = config.get(\"GENERATOR-RADUNITS\")\n #:bool: Flag indicating whether to show the spectra employing a logarithmic scale\n self.lograd = self.get_bool(config, \"GENERATOR-LOGRAD\")\n #:str: Type of telescope, SINGLE:'single dish telescope or instrument', ARRAY:'Interferometric array', CORONA:'Coronagraph', AOTF or LIDAR\n self.telescope = config.get(\"GENERATOR-TELESCOPE\")\n\n beam_unit = self.get_value(config, \"GENERATOR-BEAM-UNIT\", u.Unit)\n #:quantity: Full width half-maximum (FWHM) of the instrument's beam or field-of-view (FOV)\n self.beam = self.get_quantity(config, \"GENERATOR-BEAM\", beam_unit)\n #:quantity: Diameter of the main reflecting surface of the telescope or instrument [m]\n self.diam_tele = self.get_quantity(config, \"GENERATOR-DIAMTELE\", u.m)\n #:str: For interferometers, the number of telescopes; for coronagraphs, the instrument's contrast\n self.telescope1 = config.get(\"GENERATOR-TELESCOPE1\")\n #:str: This field indicates the zodi-level (1.0:Ecliptic pole/minimum, 2.0:HST/JWST low values, 10.0:Normal values, 100.0:Close to ecliptic/Sun), or order number for the AOTF system. 
For coronagraphs, this field indicates allows two entries: the exozodi level and the local zodiacal dust level\n self.telescope2 = config.get(\"GENERATOR-TELESCOPE2\")\n #:str: For coronagraphic observations, the inner working angle (IWA) in units of [L/D]\n self.telescope3 = config.get(\"GENERATOR-TELESCOPE3\")\n #:str: Keyword identifying the noise model to consider, NO:'None', TRX:'Receiver temperature', RMS:'Constant noise in radiation units', BKG:'Constant noise with added background', NEP:'Power equivalent noise detector model', D*:'Detectability noise detector model', CCD:'Image sensor'\n self.noise = config.get(\"GENERATOR-NOISE\")\n #:quantity: Exposure time per frame [sec]\n self.noise_time = self.get_quantity(config, \"GENERATOR-NOISETIME\", u.s)\n #:int: Number of exposures\n self.noise_frames = self.get_value(config, \"GENERATOR-NOISEFRAMES\", int)\n #:int: Total number of pixels that encompass the beam (GENERATOR-BEAM) and the spectral unit (GENERATOR-RESOLUTION)\n self.noise_pixels = self.get_value(config, \"GENERATOR-NOISEPIXELS\", int)\n #:str: First noise model parameter - For RMS, 1-sigma noise; for TRX, the receiver temperature; for BKG, the 1-sigma noise; for NEP, the sensitivity in W/sqrt(Hz); for DET, the sensitivity in cm.sqrt(Hz)/W; for CCD, the read noise [e-]\n self.noise1 = config.get(\"GENERATOR-NOISE1\")\n #:str: Second noise model parameter - For RMS, not used; for TRX, the sideband g-factor; for BKG, the not used; for NEP, not used; for DET, the pixel size [um]; for CCD, the dark rate [e-/s]\n self.noise2 = config.get(\"GENERATOR-NOISE2\")\n #:float: Total throughput of the telescope+instrument, from photons arriving to the main mirror to photons being quantified by the detector [0:none to 1:perfect]. The user can provide wavelength dependent values as neff@wavelength[um] (e.g., '[email protected],[email protected],[email protected],[email protected],...')\n self.noise_oeff = config.get(\"GENERATOR-NOISEOEFF\")\n #:float: Emissivity of the telescope+instrument optics [0 to 1]\n self.noise_oemis = self.get_value(config, \"GENERATOR-NOISEOEMIS\", float)\n #:float: Temperature of the telescope+instrument optics [K]\n self.noise_otemp = self.get_quantity(config, \"GENERATOR-NOISEOTEMP\", u.K)\n #:str: Text describing if an instrument template was used to define the GENERATOR parameters\n self.instrument = config.get(\"GENERATOR-INSTRUMENT\")\n #:float: Well depth [e-] of each pixel detector\n self.noise_well = self.get_value(config, \"GENERATOR-NOISEWELL\", float)\n #:float: Spatial binning applied to the GCM data when computing spectra. 
1: Full resolution\n self.gcm_binning = self.get_value(config, \"GENERATOR-GCM-BINNING\", float)\n\n @property\n def range_unit(self):\n return self.range1.unit\n\n @property\n def resolution_unit(self):\n return self.resolution.unit\n\n @property\n def beam_unit(self):\n return self.beam.unit\n\n def to_config(self):\n range_unit_loc, range_unit = self.get_units(\n self.range_unit,\n [u.um, u.nm, u.mm, u.AA, 1 / u.cm, u.MHz, u.GHz, u.kHz],\n [\"um\", \"nm\", \"An\", \"cm\", \"MHz\", \"GHz\", \"kHz\"],\n )\n resolution_unit_loc, resolution_unit = self.get_units(\n self.resolution_unit,\n [u.one, u.um, u.nm, u.mm, u.AA, 1 / u.cm, u.MHz, u.GHz, u.kHz],\n [\"RP\", \"um\", \"nm\", \"mm\", \"An\", \"cm\", \"MHz\", \"GHz\", \"kHz\"],\n )\n beam_unit_loc, beam_unit = self.get_units(\n self.beam_unit,\n [u.arcsec, u.arcmin, u.deg, u.km, diameter, diffrac],\n [\"arcsec\", \"arcmin\", \"degree\", \"km\", \"diameter\", \"diffrac\"],\n )\n config = {\n \"GENERATOR-RANGE1\": self.range1.to_value(range_unit_loc) if self.range1 is not None and range_unit_loc is not None else None,\n \"GENERATOR-RANGE2\": self.range2.to_value(range_unit_loc) if self.range2 is not None and range_unit_loc is not None else None,\n \"GENERATOR-RANGEUNIT\": range_unit,\n \"GENERATOR-RESOLUTION\": self.resolution.to_value(resolution_unit_loc) if self.resolution is not None and resolution_unit_loc is not None else None,\n \"GENERATOR-RESOLUTIONUNIT\": resolution_unit,\n \"GENERATOR-RESOLUTIONKERNEL\": \"Y\" if self.resolution_kernel else \"N\" if self.resolution_kernel is not None else None,\n \"GENERATOR-GAS-MODEL\": \"Y\" if self.gas_model else \"N\" if self.gas_model is not None else None,\n \"GENERATOR-CONT-MODEL\": \"Y\" if self.cont_model else \"N\" if self.cont_model is not None else None,\n \"GENERATOR-CONT-STELLAR\": \"Y\" if self.cont_stellar else \"N\" if self.cont_stellar is not None else None,\n \"GENERATOR-TRANS-SHOW\": \"Y\" if self.trans_show else \"N\" if self.trans_show is not None else None,\n \"GENERATOR-TRANS-APPLY\": \"Y\" if self.trans_apply else \"N\" if self.trans_apply is not None else None,\n \"GENERATOR-TRANS\": self.trans,\n \"GENERATOR-RADUNITS\": self.rad_units,\n \"GENERATOR-LOGRAD\": \"Y\" if self.lograd else \"N\" if self.lograd is not None else None,\n \"GENERATOR-TELESCOPE\": self.telescope,\n \"GENERATOR-BEAM\": self.beam.to_value(beam_unit_loc) if self.beam is not None and beam_unit_loc is not None else None,\n \"GENERATOR-BEAM-UNIT\": beam_unit,\n \"GENERATOR-DIAMTELE\": self.diam_tele.to_value(u.m) if self.diam_tele is not None else None,\n \"GENERATOR-TELESCOPE1\": self.telescope1,\n \"GENERATOR-TELESCOPE2\": self.telescope2,\n \"GENERATOR-TELESCOPE3\": self.telescope3,\n \"GENERATOR-NOISE\": self.noise,\n \"GENERATOR-NOISETIME\": self.noise_time.to_value(u.s) if self.noise_time is not None else None,\n \"GENERATOR-NOISEFRAMES\": self.noise_frames,\n \"GENERATOR-NOISEPIXELS\": self.noise_pixels,\n \"GENERATOR-NOISE1\": self.noise1,\n \"GENERATOR-NOISE2\": self.noise2,\n \"GENERATOR-NOISEOEFF\": self.noise_oeff,\n \"GENERATOR-NOISEOEMIS\": self.noise_oemis,\n \"GENERATOR-NOISEOTEMP\": self.noise_otemp.to_value(u.K) if self.noise_otemp is not None else None,\n \"GENERATOR-INSTRUMENT\": self.instrument,\n \"GENERATOR-NOISEWELL\": self.noise_well,\n \"GENERATOR-GCM-BINNING\": self.gcm_binning,\n }\n config = {k: str(v) for k, v in config.items() if v is not None}\n return config\n\n\nclass PSG_Retrieval(PSG_Config):\n def __init__(self, config) -> None:\n #:float: The parameter Gamma (or 
Levenberg-Marquart parameter) is the extra regularization parameter (e.g., 0:Classic LM, 1:classic Rodgers' formalism, 10:Heavily tailored to the a-priori)\n self.gamma = self.get_value(config, \"RETRIEVAL-GAMMA\", float)\n #:str: Parameters for the nested sampling retrieval method\n self.nest = config.get(\"RETRIEVAL-NEST\")\n\n range_unit = config.get(\"RETRIEVAL-RANGEUNIT\")\n #:Unit: Spectral unit of the user-provided data for the retrieval, um / nm / mm / An:'Angstrom' / cm:'Wavenumber [cm-1]' / MHz / GHz / kHz\n self.range_unit = self.parse_units(\n range_unit,\n [u.um, u.nm, u.mm, u.AA, 1 / u.cm, u.MHz, u.GHz, u.kHz],\n [\"um\", \"nm\", \"mm\", \"An\", \"cm\", \"MHz\", \"GHz\", \"kHz\"],\n )\n\n resolution_unit = config.get(\"RETRIEVAL-RESOLUTIONUNIT\")\n resolution_unit = self.parse_units(\n resolution_unit,\n [u.one, u.um, u.nm, u.mm, u.AA, 1 / u.cm, u.MHz, u.GHz, u.kHz],\n [\"RP\", \"um\", \"nm\", \"mm\", \"An\", \"cm\", \"MHz\", \"GHz\", \"kHz\"],\n )\n #:quantity: Instrument's spectral resolution [FWHM] of the user-provided data. This value is independent of the sampling rate of the data, and refers to the actual spectral resolution of the instrument\n self.resolution = self.get_quantity(config, \"RETRIEVAL-RESOLUTION\", resolution_unit)\n #:float: Scaling value to be applied to all fluxes of the user-provided data file\n self.flux_scaler = self.get_value(config, \"RETRIEVAL-FLUXSCALER\", float)\n #:str: Frequency/wavelength corrections (0th, 1st and 2nd orders) to be applied to the data\n self.freq_shift = config.get(\"RETRIEVAL-FREQSHIFT\")\n #:str: Labels for the columns of the data file\n self.flux_labels = config.get(\"RETRIEVAL-FLUXLABELS\")\n #:int: Polynomical degree of the instrument's gain function, -1:None, 0:Constant, 1:Sloped, 2:Quadratic, etc\n self.fit_gain = self.get_value(config, \"RETRIEVAL-FITGAIN\", int)\n #:bool: Flag indicating whether to preserve the photometric information of the data (zeroth order of gain fitting) [Y:Disable 0th order / N:Enable]\n self.fit_gain_photometric = self.get_bool(config, \"RETRIEVAL-FITGAIN-PHOTOMETRIC\")\n #:int: Polynomical degree of the residual offset, -1:None, 0:Constant, 1:Sloped, 2:Quadratic, etc\n self.remove_offset = self.get_value(config, \"RETRIEVAL-REMOVEOFFSET\", int)\n #:int: Maximum number of spectral fringes to be removed from the data\n self.remove_fringe = self.get_value(config, \"RETRIEVAL-REMOVEFRINGE\", int)\n #:bool: Flag indicating whether to fit the intensity of the solar/stellar features [Y/N]\n self.fit_stellar = self.get_bool(config, \"RETRIEVAL-FITSTELLAR\")\n #:bool: Flag indicating whether to refine the spectral calibration [Y/N]\n self.fit_freq = self.get_bool(config, \"RETRIEVAL-FITFREQ\")\n #:bool: Flag indicating whether to fit the spectral resolution [Y/N]\n self.fit_resolution = self.get_bool(config, \"RETRIEVAL-FITRESOLUTION\")\n #:bool: Flag indicating whether to fit the telluric features [Y/N]. 
This is done by perturbing the selected telluric column/water abundances\n self.fit_telluric = self.get_bool(config, \"RETRIEVAL-FITTELLURIC\")\n #:list: Name of the variables of the retrieval (comma separated)\n self.variables = self.get_list(config, \"RETRIEVAL-VARIABLES\")\n #:array: A-priori and resulting values of the retrieval parameters (comma separated)\n self.values = self.get_list(config, \"RETRIEVAL-VALUES\", func=float, array=True)\n #:array: Resulting 1-sigma uncertainties (corrected by chi-square) of the retrieval parameters (comma separated)\n self.sigmas = self.get_list(config, \"RETRIEVAL-SIGMAS\", func=float, array=True)\n #:array: Lower boundary permitted for each parameter (comma separated)\n self.min = self.get_list(config, \"RETRIEVAL-MIN\", func=float, array=True)\n #:array: Upper boundary permitted for each parameter (comma separated)\n self.max = self.get_list(config, \"RETRIEVAL-MAX\", func=float, array=True)\n #:list: Magnitude unit of the a-priori and boundary entries (comma separated)\n self.units = self.get_list(config, \"RETRIEVAL-UNITS\")\n #:str: Flag indicating the status of the retrieval suite (e.g., RUNNING, OK)\n self.status = self.get_bool(config, \"RETRIEVAL-STATUS\")\n\n @property\n def resolution_unit(self):\n if self.resolution is None:\n return None\n return self.resolution.unit\n\n @property\n def nvars(self):\n if self.variables is None:\n return None\n return len(self.variables)\n\n def to_config(self):\n range_unit_loc, range_unit = self.get_units(\n self.range_unit,\n [u.um, u.nm, u.mm, u.AA, 1 / u.cm, u.MHz, u.GHz, u.kHz],\n [\"um\", \"nm\", \"mm\", \"An\", \"cm\", \"MHz\", \"GHz\", \"kHz\"],\n )\n resolution_unit_loc, resolution_unit = self.get_units(\n self.resolution_unit,\n [u.one, u.um, u.nm, u.mm, u.AA, 1 / u.cm, u.MHz, u.GHz, u.kHz],\n [\"RP\", \"um\", \"nm\", \"mm\", \"An\", \"cm\", \"MHz\", \"GHz\", \"kHz\"],\n )\n\n config = {\n \"RETRIEVAL-GAMMA\": self.gamma,\n \"RETRIEVAL-NEST\": self.nest,\n \"RETRIEVAL-RANGEUNIT\": self.range_unit,\n \"RETRIEVAL-RESOLUTION\": self.resolution.to_value(resolution_unit_loc) if self.resolution is not None and resolution_unit_loc is not None else None,\n \"RETRIEVAL-RESOLUTIONUNIT\": resolution_unit,\n \"RETRIEVAL-FLUXSCALER\": self.flux_scaler,\n \"RETRIEVAL-FREQSHIFT\": self.freq_shift,\n \"RETRIEVAL-FLUXLABELS\": self.flux_labels,\n \"RETRIEVAL-FITGAIN\": self.fit_gain,\n \"RETRIEVAL-FITGAIN-PHOTOMETRIC\": \"Y\" if self.fit_gain_photometric else \"N\" if self.fit_gain_photometric is not None else None,\n \"RETRIEVAL-REMOVEOFFSET\": self.remove_offset,\n \"RETRIEVAL-REMOVEFRINGE\": self.remove_fringe,\n \"RETRIEVAL-FITSTELLAR\": \"Y\" if self.fit_stellar else \"N\" if self.fit_stellar is not None else None,\n \"RETRIEVAL-FITFREQ\": \"Y\" if self.fit_freq else \"N\" if self.fit_freq is not None else None,\n \"RETRIEVAL-FITRESOLUTION\": \"Y\" if self.fit_resolution else \"N\" if self.fit_resolution is not None else None,\n \"RETRIEVAL-FITTELLURIC\": \"Y\" if self.fit_telluric else \"N\" if self.fit_telluric is not None else None,\n \"RETRIEVAL-NVARS\": self.nvars,\n \"RETRIEVAL-VARIABLES\": \",\".join(self.variables) if self.variables is not None else None,\n \"RETRIEVAL-VALUES\": \",\".join([str(v) for v in self.values]) if self.values is not None else None,\n \"RETRIEVAL-SIGMAS\": \",\".join([str(v) for v in self.sigmas]) if self.sigmas is not None else None,\n \"RETRIEVAL-MIN\": \",\".join([str(v) for v in self.min]) if self.min is not None else None,\n \"RETRIEVAL-MAX\": \",\".join([str(v) for 
v in self.max]) if self.max is not None else None,\n \"RETRIEVAL-UNITS\": \",\".join(self.units) if self.units is not None else None,\n \"RETRIEVAL-STATUS\": self.status,\n }\n config = {k: str(v) for k, v in config.items() if v is not None}\n return config\n\n\nclass PSG_Package:\n \"\"\" Abstract Class for a PSG package \"\"\"\n\n name = None\n\n def __init__(self, server, version=None) -> None:\n self.server = server\n if version is None:\n try:\n version = server.get_package_version(self.name)\n except:\n version = \"\"\n self.version = version\n\n def __str__(self):\n return f\"{self.name.upper()} - version ({self.version})\"\n\n def update(self):\n result = self.server.update_package(self.name)\n self.version = self.server.get_package_version(self.name)\n return result\n\n def install(self):\n result = self.server.install_package(self.name)\n self.version = self.server.get_package_version(self.name)\n return result\n\n def remove(self):\n self.version = None\n return self.server.remove_package(self.name)\n\n def help(self):\n \"\"\" Return a formatted docstring \"\"\"\n return self.__doc__\n\n\n\nclass Programs_Package(PSG_Package):\n \"\"\"\n Operational software and programs required by PSG. This package includes all the necessary binaries\n to perform the radiative transfer calculations and retrievals, together with the PHP server interpreter modules. \n This package cannot be removed (it is fundamental for the operation of PSG), and it should be constantly\n updated to reflect the upgrades performed to the suite.\n \"\"\"\n\n name = \"programs\"\n\n\nclass Base_Package(PSG_Package):\n \"\"\"\n This package includes the basic spectroscopic data for performing line-by-line calculations across a broad range of wavelengths and domains. \n It includes:\n\n - HITRAN line-by-line database formatted for PSG (binary) with collisional information for CO2, H2, He when available.\n - HITRAN Collision-Induced-Absorption (CIA) database for many species due to various collisionally interacting atoms or molecules. \n Some CIA spectra are given over an extended range of frequencies.\n - UV cross-sections database for multiple species (ingested automatically by the RT modules when applicable).\n - Kurucz's stellar templates.\n - Scattering models for hundreds of optical constants (HRI, GSFC, Mie scattering), and for typical Mars aerosols \n (dust, water-ice, T-max based on Wolff et al.)\n \"\"\"\n\n name = \"base\"\n\n\nclass Surfaces_Package(PSG_Package):\n \"\"\"\n The surface spectroscopic package includes the complete repository of optical constants and reflectances \n from all databases handled by PSG (e.g., RELAB, USGS, JENA).\n \"\"\"\n\n name = \"surfaces\"\n\n\nclass Atmospheres_Package(PSG_Package):\n \"\"\"\n This package includes the climatological and atmospheric files needed by the 'atmosphere' module. \n Specifically, it includes:\n\n - Atmospheric templates for most Solar-System bodies.\n - Earth MERRA2 auxiliary data (e.g., topography), Mars-MCD, and Mars-GEM databases.\n - Exoplanet basic templates.\n - Exoplanet Parmentier T/P model and Kempton equilibrium chemistry modules.\n - Exoplanet Terrestrial LAPS model (Turbet+2015).\n \"\"\"\n\n name = \"atmospheres\"\n\n\nclass Ephm_Package(PSG_Package):\n \"\"\"\n The ephemerides package includes orbital and geometric information used by the 'geometry' module. 
\n Specifically, it includes:\n\n - Ephemerides information for hundreds of Solar-System bodies (1960-2050).\n - Ephemerides information for dozens of planetary missions (e.g., Cassini, MAVEN).\n \"\"\"\n\n name = \"ephm\"\n\n\nclass Telluric_Package(PSG_Package):\n \"\"\"\n This package includes the database of telluric transmittances necessary when computing spectra as observed with ground-based observatories. \n It includes:\n\n - Database of telluric transmittances pre-computed for 5 altitudes and 4 columns of water for each case.\n - The altitudes include that of Mauna-Kea/Hawaii (4200 m), Paranal/Chile (2600 m), SOFIA (14,000 m) and balloon observatories (35,000 m).\n - The water vapor column was established by scaling the tropical water profile by a factor of 0.1, 0.3 and 0.7 and 1.\n \"\"\"\n\n name = \"telluric\"\n\n\nclass Xcross_Package(PSG_Package):\n \"\"\"\n This package contains hundreds of absorption cross-sections for \n complex volatiles as reported by the latest HITRAN release. \n \"\"\"\n\n name = \"xcross\"\n\n\nclass Lines_Package(PSG_Package):\n \"\"\"\n This package contains line-by-line spectroscopic information from several databases.\n\n - GEISA database.\n - JPL Molecular spectroscopy database.\n - CDMS molecular spectroscopy database.\n - CFA/Harvard Kurucz atomic database.\n \"\"\"\n\n name = \"lines\"\n\n\nclass Fluor_Package(PSG_Package):\n \"\"\"\n This package contains non-LTE fluorescence efficiencies for dozens of species, \n suitable when synthesizing cometary spectra in the UV/optical/IR range. \n \"\"\"\n\n name = \"fluor\"\n\n\nclass Exo_Package(PSG_Package):\n \"\"\"\n This package contains molecular and atomic cross-sections applicable for exoplanetary modeling, \n and it is based on the database employed by the open source 'Exo-Transmit' code (Kempton et al. 2017). \n \"\"\"\n\n name = \"exo\"\n\n\nclass Mass_Package(PSG_Package):\n \"\"\"\n The mass spectrometry package provides access to the MS fragmentation pattern \n database for >20,000 species computed based on the NIST Standard Reference Database Number 69 library.\n \"\"\"\n\n name = \"mass\"\n\n\nclass Corrklow_Package(PSG_Package):\n \"\"\"\n This package contains correlated-k tables for the main HITRAN species \n (H2O, CO2, O3, N2O, CO, CH4, O2, SO2, NO2, NH3, HCl, OCS, H2CO, N2, HCN, C2H2, C2H4, PH3, H2S, C2H4, H2), \n and for different collissional partners (e.g., CO2, H2, He) when available. The tables were computed with PUMAS \n assuming wings of 25 cm-1 and a fine core of 1 cm-1 where maximum resolution calculations are applied.\n This is the 'low' resolution package applicable to synthesis of spectra with a resolving power lower/equal than 500. \n \"\"\"\n\n name = \"corrklow\"\n\n\nclass Corrkmed_Package(PSG_Package):\n \"\"\"\n This package contains correlated-k tables for the main HITRAN species \n (H2O, CO2, O3, N2O, CO, CH4, O2, SO2, NO2, NH3, HCl, OCS, H2CO, N2, HCN, C2H2, C2H4, PH3, H2S, C2H4, H2), \n and for different collissional partners (e.g., CO2, H2, He) when available. 
The tables were computed with PUMAS \n assuming wings of 25 cm-1 and a fine core of 1 cm-1 where maximum resolution calculations are applied.\n This is the 'med' resolution package applicable to synthesis of spectra with a resolving power greater than 500 and lower/equal to 5000.\n \"\"\"\n\n name = \"corrkmed\"\n\n\nclass PSG:\n # Assign package names to package classes\n _packages = {\n \"programs\": Programs_Package,\n \"base\": Base_Package,\n \"surfaces\": Surfaces_Package,\n \"atmospheres\": Atmospheres_Package,\n \"ephm\": Ephm_Package,\n \"telluric\": Telluric_Package,\n \"xcross\": Xcross_Package,\n \"lines\": Lines_Package,\n \"fluor\": Fluor_Package,\n \"exo\": Exo_Package,\n \"mass\": Mass_Package,\n \"corrklow\": Corrklow_Package,\n \"corrkmed\": Corrkmed_Package,\n }\n\n def __init__(self, server=None, config=None) -> None:\n # self.server = 'https://psg.gsfc.nasa.gov'\n if server is None:\n server = \"http://localhost:3000\"\n self.server = server\n\n # Read configuration from file\n if config is None:\n config_file = join(dirname(__file__), \"psg_cfg.txt\")\n config = self.read_config(config_file)\n else:\n try:\n config = self.read_config(config)\n except FileNotFoundError:\n config = config\n self._config = config\n\n # Pass config to substructures\n self.object = PSG_Object(self._config)\n self.geometry = PSG_Geometry(self._config)\n self.atmosphere = PSG_Atmosphere(self._config)\n self.surface = PSG_Surface(self._config)\n self.generator = PSG_Generator(self._config)\n self.retrieval = PSG_Retrieval(self._config)\n\n # Load the individual packages for object oriented interface\n versions = self.get_package_version()\n self.packages = {\n name: cls(self, versions[name.upper()])\n for name, cls in self._packages.items() if name.upper() in versions.keys()\n }\n\n @property\n def config(self):\n return self.to_config()\n\n @staticmethod\n def read_config(config_file):\n with open(config_file, \"r\") as f:\n lines = f.read()\n\n matches = re.findall(r\"<(.*?)>(.*)\\n\", lines)\n config = {k: v for k, v in matches}\n return config\n\n def to_config(self):\n self._config.update(self.object.to_config())\n self._config.update(self.geometry.to_config())\n self._config.update(self.atmosphere.to_config())\n self._config.update(self.surface.to_config())\n self._config.update(self.generator.to_config())\n self._config.update(self.retrieval.to_config())\n return self._config\n\n def write_config(self, config_file=None):\n config = self.to_config()\n lines = [f\"<{k}>{v}\\n\" for k, v in config.items()]\n text = \"\".join(lines)\n if config_file is not None:\n with open(config_file, \"w\") as f:\n f.write(text)\n f.flush()\n return text\n\n @staticmethod\n def read_datafile(datafile):\n # Read the header\n # and split into the seperate parts\n with open(datafile, \"r\") as f:\n columns = None\n for i, line in enumerate(f):\n if not line.startswith(\"#\"):\n # Skip after the header\n break\n if line.startswith(\"# WARNING\"):\n print(line[2:])\n continue\n columns = line\n if columns is not None:\n columns = columns[2:-1].split(\" \")\n # Read the data\n data = np.genfromtxt(datafile, names=columns)\n return data\n\n @staticmethod\n def download_file(\n server, config_text, psg_type=\"rad\", wgeo=\"y\", wephm=\"n\", watm=\"n\", cache=True\n ):\n hash = hashlib.sha256((psg_type + wephm + watm + config_text).encode(\"utf-8\"))\n url = join(server, f\"{hash.hexdigest()}.txt\")\n\n if not is_url_in_cache(url, pkgname=PKGNAME) or not cache:\n with NamedTemporaryFile(\"w\") as cf:\n 
cf.write(config_text)\n cf.flush()\n\n result = subprocess.run(\n f\"curl -s -d type={psg_type} -d wgeo={wgeo} -d wephm={wephm} -d watm={watm} --data-urlencode file@{cf.name} {server}/api.php\",\n capture_output=True,\n shell=True,\n )\n text = result.stdout.decode()\n if text == \"\":\n raise RuntimeError(\"The PSG server did not return a result\")\n with NamedTemporaryFile(\"w\") as output:\n output.write(text)\n output.flush()\n import_file_to_cache(url, output.name, pkgname=PKGNAME)\n\n # Return the filename in the cache\n result = download_file(url, cache=True, pkgname=PKGNAME)\n return result\n\n @staticmethod\n def clear_cache():\n clear_download_cache(pkgname=PKGNAME)\n\n @staticmethod\n def run_curl_command(server, page=\"index.php\", command=None):\n call = f\"curl {server}/index.php\"\n if command is not None:\n call = f\"{call}?{command}\"\n\n result = subprocess.run(call, capture_output=True, shell=True,)\n text = result.stdout.decode()\n return text\n\n def get_info(self):\n return self.run_curl_command(self.server)\n\n def get_installed_packages(self):\n text = self.get_info()\n lines = text.splitlines()\n packages = [l.split(\"-\", 1)[0].strip().lower() for l in lines]\n return packages\n\n def get_package_version(self, package=None):\n text = self.get_info()\n if package is not None:\n match = re.match(\n package.upper() + r\" - .*version \\((\\d{4}-\\d{2}-\\d{2})\\)\", text\n )\n version = match.group(1)\n return version\n else:\n match = re.findall(r\"(\\w*) - .*version \\((\\d{4}-\\d{2}-\\d{2})\\)\", text)\n version = {m[0]: m[1] for m in match}\n return version\n\n def install_package(self, package):\n text = self.run_curl_command(self.server, command=f\"install={package}\")\n # TODO: Check that the result is successful\n return text\n\n def update_package(self, package):\n text = self.run_curl_command(self.server, command=f\"update={package}\")\n # TODO: Check that the result is successful\n return text\n\n def remove_package(self, package):\n text = self.run_curl_command(self.server, command=f\"remove={package}\")\n # TODO: Check that the result is successful\n return text\n\n def update_all_packages(self):\n text = self.run_curl_command(self.server)\n lines = text.splitlines()\n lines = [l.split(\"-\", 1) for l in lines]\n packages = [l[0].strip().lower() for l in lines if \"Update available\" in l[1]]\n for package in packages:\n self.update_package(package)\n return None\n\n def request(self, psg_type=\"rad\", wgeo=\"y\", wephm=\"n\", watm=\"n\"):\n # Create the configuration for the PSG\n config_text = self.write_config()\n # Get the filename from the cache\n output_name = self.download_file(\n self.server,\n config_text,\n psg_type=psg_type,\n wgeo=wgeo,\n wephm=wephm,\n watm=watm,\n )\n # Read the results from file\n data = self.read_datafile(output_name)\n return data\n" ]
[ [ "numpy.array2string", "numpy.array", "numpy.genfromtxt" ] ]
nybupt/athena
[ "2808f5060831382e603e5dc5ec6a9e9d8901a3b2", "2808f5060831382e603e5dc5ec6a9e9d8901a3b2" ]
[ "src/models/test.py", "src/models/detection_as_defense.py" ]
[ "import os\nimport sys\nimport time\n\nimport numpy as np\n\nfrom utils.config import *\nfrom utils.util import *\n\n\ndef usage():\n print(\n \"====================================================================================================================\")\n print(\n \"python <this script> samplesDir experimentRootDir modelsDir numOfSamples testResultFoldName datasetName numOfClasses\")\n print(\n \"====================================================================================================================\")\n\n\nif len(sys.argv) != 8:\n usage()\n exit(1)\n\nsamplesDir = sys.argv[1]\nexperimentRootDir = sys.argv[2]\nmodelsDir = sys.argv[3]\nnumOfSamples = int(sys.argv[4])\ntestResultFoldName = sys.argv[5]\ndatasetName = sys.argv[6]\nnumOfClasses = int(sys.argv[7])\n\nDATA.set_current_dataset_name(datasetName)\n# Basic parameters for k-fold experiment setup\narchitecture = MODEL.ARCHITECTURE\ntestDir = os.path.join(experimentRootDir, testResultFoldName)\n\nAETypes = ATTACK.get_AETypes()\n\nnumOfAETypes = len(AETypes)\nsampleTypes = [\"BS\"]\nsampleTypes.extend(AETypes)\nnumOfSampleTypes = numOfAETypes + 1\n\ntargetModelName = \"clean\"\ntransformConfig = TRANSFORMATION()\ntransformationList = transformConfig.supported_types()\n\n# Create fold directories for evaluation\npredictionResultDir = os.path.join(testDir, \"prediction_result\")\n\n# Prediction : needs a new prediction function\npredictionForTest(\n predictionResultDir,\n datasetName,\n architecture,\n numOfClasses,\n targetModelName,\n modelsDir,\n samplesDir,\n numOfSamples,\n AETypes,\n transformationList)\n\nnumOfTrans = len(transformationList) - 1\nnumOfModels = 1 + numOfTrans # clean model + transform models\n\n# Evaluation: training and testing\npredProbBS = np.load(os.path.join(predictionResultDir, \"BS/predProb.npy\"))\n# predProbBS = predProbBS[1:]\npredLogitBS = np.load(os.path.join(predictionResultDir, \"BS/predLogit.npy\"))\n# predLogitBS = predLogitBS[1:]\nlabels = np.load(os.path.join(samplesDir, \"Label-\" + datasetName + \"-\" + targetModelName + \".npy\"))\nlabels = np.argmax(labels, axis=1)\npredLCBS = np.zeros((predProbBS.shape[0], predProbBS.shape[1], 2))\npredLCBS[:, :, 0] = np.argmax(predProbBS, axis=2)\npredLCBS[:, :, 1] = np.max(predProbBS, axis=2)\nlabelsBS = labels\n\ntrainModelDir = os.path.join(experimentRootDir, \"train_models\")\n\nnumOfDefenses = numOfCVDefenses + 2 * numOfWCDefenses\n\nacc1Model = np.zeros((numOfAETypes + 1, numOfModels))\nacc1Model[0, :] = calAccuracyAllSingleModels(labelsBS, predProbBS)\n\n# the 1st dimension maps to a kind of ensemble model trained on the specific type of AE\ndefenseAccAEs = np.zeros((numOfAETypes, numOfDefenses))\ndefenseAccBSs = np.zeros((numOfAETypes, numOfDefenses))\ndefenseTCAEs = np.zeros((numOfAETypes, numOfDefenses))\ndefenseTCBSs = np.zeros((numOfAETypes, numOfDefenses))\n# accuracies of clean model, random defense and upper bound\nrdCleanUPAcc = np.zeros((numOfAETypes + 1, 3))\nclusters = []\nfor tmID in range(numOfTrans):\n clusters.append([tmID])\n# BS - accuracy of clean model, random defense and upper bound\n# accuracy of clean model\nrdCleanUPAcc[0, 0] = acc1Model[0, 0]\n# accuracy of random defense\nrdCleanUPAcc[0, 1] = np.mean(acc1Model[0, 1:])\n# upper-bound accuracy\nrdCleanUPAcc[0, 2] = getUpperBoundAccuracy(\n predLCBS[1:, :, :],\n clusters,\n labelsBS)\n\n# Test each ensemble model trained by each type of AEs\nfor AETypeIdx in range(numOfAETypes):\n AEType = AETypes[AETypeIdx]\n curTrainModelDir = os.path.join(trainModelDir, 
AEType)\n curPredictionResultDir = os.path.join(predictionResultDir, AEType)\n\n print(\"Evaluating AE type: \" + AEType)\n predProbAE = np.load(os.path.join(curPredictionResultDir, \"predProb.npy\"))\n predLogitAE = np.load(os.path.join(curPredictionResultDir, \"predLogit.npy\"))\n predProbLC = np.zeros((numOfModels, numOfSamples, 2))\n predProbLC[:, :, 0] = np.argmax(predProbAE, axis=2)\n predProbLC[:, :, 1] = np.max(predProbAE, axis=2)\n\n # accuracy of AE on clean model and all transform models\n acc1Model[AETypeIdx + 1, :] = calAccuracyAllSingleModels(labels, predProbAE)\n\n # accuracy of clean model\n rdCleanUPAcc[AETypeIdx + 1, 0] = acc1Model[AETypeIdx + 1, 0]\n # accuracy of random defense\n rdCleanUPAcc[AETypeIdx + 1, 1] = np.mean(acc1Model[AETypeIdx + 1, 1:])\n # upper-bound accuracy\n rdCleanUPAcc[AETypeIdx + 1, 2] = getUpperBoundAccuracy(\n predProbLC[1:, :, :],\n clusters,\n labels)\n\n # accuracy of clustering-and-voting based defenses\n for defenseIdx in range(numOfCVDefenses):\n defenseName = cvDefenseNames[defenseIdx]\n clusters = loadCAVModel(os.path.join(curTrainModelDir, defenseName + \".txt\"))\n\n # testing AE\n votedResults, defenseTCAEs[AETypeIdx, defenseIdx] = votingAsDefense(\n predProbLC[1:, :, :],\n clusters,\n vsac=cvDefenseNames[defenseIdx],\n measureTC=True)\n defenseAccAEs[AETypeIdx, defenseIdx] = calAccuracy(votedResults[:, 0], labels)\n\n # tesing BS\n votedResults, defenseTCBSs[AETypeIdx, defenseIdx] = votingAsDefense(\n predLCBS[1:, :, :],\n clusters,\n vsac=cvDefenseNames[defenseIdx],\n measureTC=True)\n defenseAccBSs[AETypeIdx, defenseIdx] = calAccuracy(votedResults[:, 0], labelsBS)\n\n # accuracy of weithed-confidence based defenses\n for defenseIdx in range(numOfWCDefenses):\n defenseName = wcDefenseNames[defenseIdx]\n for plIdx in range(2):\n wcMatFilename = defenseName + \"_EM.npy\"\n mIDsFilename = defenseName + \"_modelIDs.npy\"\n predAE = predProbAE[1:, :, :]\n predBS = predProbBS[1:, :, :]\n if plIdx == 1: # predict logit instead of probability\n wcMatFilename = \"LG_\" + wcMatFilename\n mIDsFilename = \"LG_\" + mIDsFilename\n predAE = predLogitAE[1:, :, :]\n predBS = predLogitBS[1:, :, :]\n\n wcMat = np.load(os.path.join(curTrainModelDir, wcMatFilename))\n # ID of transform models: starts from 0.\n mIDs = np.load(os.path.join(curTrainModelDir, mIDsFilename))\n\n curPredAE = predAE[mIDs]\n curPredBS = predBS[mIDs]\n dIdx = numOfCVDefenses + plIdx * numOfWCDefenses + defenseIdx\n\n # testing AE\n predLabels, defenseTCAEs[AETypeIdx, dIdx] = wcdefenses(\n curPredAE, wcMat, defenseName, measureTC=True)\n defenseAccAEs[AETypeIdx, dIdx] = calAccuracy(predLabels, labels)\n\n # testing BS\n predLabels, defenseTCBSs[AETypeIdx, dIdx] = wcdefenses(\n curPredBS, wcMat, defenseName, measureTC=True)\n defenseAccBSs[AETypeIdx, dIdx] = calAccuracy(predLabels, labels)\n\n# Report accuracy data\n# accuracies of random defense, clean model and upper bound\n# rdCleanUPAcc = np.zeros((numOfAETypes+1, 3))\n# defenseAccBSs, defenseAccAEs: (numOfAETypes, numofDefenses)\nrdCleanUPAccFP = os.path.join(testDir, \"acc_randomDefense_cleanModel_upperBound.txt\")\nwith open(rdCleanUPAccFP, \"w\") as fp:\n sformat = \"{}\\t{}\\t{}\\t{}\\n\"\n fp.write(sformat.format(\"Type\", \"Clean_Model\", \"Rrandom_Defense\", \"Upper_Bound\"))\n for sampleTypeIdx in range(numOfSampleTypes):\n fp.write(sformat.format(\n sampleTypes[sampleTypeIdx],\n rdCleanUPAcc[sampleTypeIdx, 0],\n rdCleanUPAcc[sampleTypeIdx, 1],\n rdCleanUPAcc[sampleTypeIdx, 2]))\n\ndefenseAccAEsFP = 
os.path.join(testDir, \"acc_AEs_ensembles.txt\")\nwith open(defenseAccAEsFP, \"w\") as fp:\n sformat = \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\"\n fp.write(sformat.format(\n \"Type\",\n \"CV_Maj\",\n \"CV_Max\",\n \"1s_Mean\",\n \"EM_Mean\",\n \"EM_MXMV\",\n \"1s_Mean_L\",\n \"EM_Mean_L\",\n \"EM_MXMV_L\"))\n for AETypeIdx in range(numOfAETypes):\n fp.write(sformat.format(\n AETypes[AETypeIdx],\n defenseAccAEs[AETypeIdx, 0],\n defenseAccAEs[AETypeIdx, 1],\n defenseAccAEs[AETypeIdx, 2],\n defenseAccAEs[AETypeIdx, 3],\n defenseAccAEs[AETypeIdx, 4],\n defenseAccAEs[AETypeIdx, 5],\n defenseAccAEs[AETypeIdx, 6],\n defenseAccAEs[AETypeIdx, 7]))\n\ndefenseAccBSsFP = os.path.join(testDir, \"acc_BSs_ensembles.txt\")\nwith open(defenseAccBSsFP, \"w\") as fp:\n sformat = \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\"\n fp.write(sformat.format(\n \"Type\",\n \"CV_Maj\",\n \"CV_Max\",\n \"1s_Mean\",\n \"EM_Mean\",\n \"EM_MXMV\",\n \"1s_Mean_L\",\n \"EM_Mean_L\",\n \"EM_MXMV_L\"))\n for AETypeIdx in range(numOfAETypes):\n fp.write(sformat.format(\n AETypes[AETypeIdx],\n defenseAccBSs[AETypeIdx, 0],\n defenseAccBSs[AETypeIdx, 1],\n defenseAccBSs[AETypeIdx, 2],\n defenseAccBSs[AETypeIdx, 3],\n defenseAccBSs[AETypeIdx, 4],\n defenseAccBSs[AETypeIdx, 5],\n defenseAccBSs[AETypeIdx, 6],\n defenseAccBSs[AETypeIdx, 7]))\n\n# Report latency\n# defenseTCBSs , defenseTCAEs : (numOfAETypes, numofDefenses)\n# predTCs: (numOfSampleTypes, numOfModels, 3)\npredTCs = np.load(os.path.join(predictionResultDir, \"predTCs.npy\"))\npredAndTransTCs = np.zeros((predTCs.shape[0], predTCs.shape[1], 2))\npredAndTransTCs[:, :, 0] = predTCs[:, :, 0] + predTCs[:, :, 1]\npredAndTransTCs[:, :, 1] = predTCs[:, :, 0] + predTCs[:, :, 2]\n\nmaxTCTransModels = np.argmax(predAndTransTCs[:, 1:, :], axis=1)\nmaxTCTransModelsFP = os.path.join(testDir, \"maxTCTransModels.txt\")\nwith open(maxTCTransModelsFP, \"w\") as fp:\n sformat = \"{}\\t{}\\t{}\\n\"\n fp.write(sformat.format(\n \"Type\",\n \"ProbPred\",\n \"LogitPred\"))\n fp.write(sformat.format(\n \"BS\",\n maxTCTransModels[0, 0],\n maxTCTransModels[0, 1]))\n for AETypeIdx in range(numOfAETypes):\n fp.write(sformat.format(\n AETypes[AETypeIdx],\n maxTCTransModels[1 + AETypeIdx, 0],\n maxTCTransModels[1 + AETypeIdx, 1]))\n\npredAndTransTCs = np.max(predAndTransTCs[:, 1:, :],\n axis=1) # find the largest time cost of transformation and inference across models\nCAVEnsembleTCs = np.zeros((numOfAETypes, 2))\nCAVEnsembleTCs[:, 0] = predAndTransTCs[1:, 0] + defenseTCAEs[:, 0]\nCAVEnsembleTCs[:, 1] = predAndTransTCs[1:, 0] + defenseTCAEs[:, 1]\nWCEnsemblesTCs = np.zeros((numOfAETypes, 6))\nWCEnsemblesTCs[:, 0] = predAndTransTCs[1:, 0] + defenseTCAEs[:, 2]\nWCEnsemblesTCs[:, 1] = predAndTransTCs[1:, 0] + defenseTCAEs[:, 3]\nWCEnsemblesTCs[:, 2] = predAndTransTCs[1:, 0] + defenseTCAEs[:, 4]\nWCEnsemblesTCs[:, 3] = predAndTransTCs[1:, 1] + defenseTCAEs[:, 5]\nWCEnsemblesTCs[:, 4] = predAndTransTCs[1:, 1] + defenseTCAEs[:, 6]\nWCEnsemblesTCs[:, 5] = predAndTransTCs[1:, 1] + defenseTCAEs[:, 7]\n\n# probability inference on clean model\n# defense time costs\ntotalTCsAE = np.zeros((numOfAETypes, 1 + numOfDefenses))\ntotalTCsAE[:, 0] = predTCs[1:, 0, 1]\ntotalTCsAE[:, 1:3] = CAVEnsembleTCs\ntotalTCsAE[:, 3:] = WCEnsemblesTCs\ntotalTCAEFP = os.path.join(testDir, \"time_cost_of_each_ensemble_model.txt\")\nwith open(totalTCAEFP, \"w\") as fp:\n sformat = \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\"\n fp.write(sformat.format(\n \"Type\",\n \"Clean\",\n \"CV_Maj\",\n \"CV_Max\",\n 
\"1s_Mean\",\n \"EM_Mean\",\n \"EM_MXMV\",\n \"1s_Mean_L\",\n \"EM_Mean_L\",\n \"EM_MXMV_L\"))\n for AETypeIdx in range(numOfAETypes):\n fp.write(sformat.format(\n AETypes[AETypeIdx],\n totalTCsAE[AETypeIdx, 0],\n totalTCsAE[AETypeIdx, 1],\n totalTCsAE[AETypeIdx, 2],\n totalTCsAE[AETypeIdx, 3],\n totalTCsAE[AETypeIdx, 4],\n totalTCsAE[AETypeIdx, 5],\n totalTCsAE[AETypeIdx, 6],\n totalTCsAE[AETypeIdx, 7],\n totalTCsAE[AETypeIdx, 8]))\nensembleModelNames = [\n \"CV_Maj\",\n \"CV_Max\",\n \"1s_Mean\",\n \"EM_Mean\",\n \"EM_MXMV\",\n \"1s_Mean_L\",\n \"EM_Mean_L\",\n \"EM_MXMV_L\"]\nxLabel = [\"Clean\"]\nxLabel.extend(ensembleModelNames)\nyLabel = \"Latency (ms)\"\ntitle = \"Latency of clean model and ensemble models\"\nsaveFP = os.path.join(testDir, \"latency.pdf\")\nxtickSize = 8\nboxPlot(totalTCsAE * 1000, title, xLabel, yLabel, saveFP, xtickSize, 45)\n\nrelativeTotTCAE = totalTCsAE / totalTCsAE[:, 0][:, None]\nrelativeTotTCAEFP = os.path.join(testDir, \"relative_time_cost_of_each_ensemble_model.txt\")\nwith open(relativeTotTCAEFP, \"w\") as fp:\n sformat = \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\"\n fp.write(sformat.format(\n \"Type\",\n \"CV_Maj\",\n \"CV_Max\",\n \"1s_Mean\",\n \"EM_Mean\",\n \"EM_MXMV\",\n \"1s_Mean_L\",\n \"EM_Mean_L\",\n \"EM_MXMV_L\"))\n for AETypeIdx in range(numOfAETypes):\n fp.write(sformat.format(\n AETypes[AETypeIdx],\n relativeTotTCAE[AETypeIdx, 1],\n relativeTotTCAE[AETypeIdx, 2],\n relativeTotTCAE[AETypeIdx, 3],\n relativeTotTCAE[AETypeIdx, 4],\n relativeTotTCAE[AETypeIdx, 5],\n relativeTotTCAE[AETypeIdx, 6],\n relativeTotTCAE[AETypeIdx, 7],\n relativeTotTCAE[AETypeIdx, 8]))\n\nxLabel = ensembleModelNames\nyLabel = \"Latency Percentage\"\ntitle = \"Latency of ensemble models relative to clean model\"\nsaveFP = os.path.join(testDir, \"relative_latency.pdf\")\nboxPlot(relativeTotTCAE[:, 1:], title, xLabel, yLabel, saveFP, xtickSize, 45)\n\n# backup raw data of time cost\nnp.save(os.path.join(testDir, \"defenseTC_BS.npy\"), defenseTCBSs)\nnp.save(os.path.join(testDir, \"defenseTC_AE.npy\"), defenseTCAEs)\n\n# backup accuracy of BS and AEs on clean models and all transform models\nnp.save(os.path.join(testDir, \"acc1Model.npy\"), acc1Model)\n", "import os\nimport sys\nimport time\n\nimport numpy as np\nfrom scipy.spatial import distance_matrix\n\nimport seaborn as sns; sns.set()\n\nfrom utils.config import *\nfrom utils.util import *\n\ndef usage():\n print(\"==========================================================================================================\")\n print(\"python <this script> samplesDir rootDir modelsDir numOfSamples testResultFoldName datasetName numOfClasses\")\n print(\"==========================================================================================================\")\n\nif len(sys.argv) != 8:\n usage()\n exit(1)\n\n\nsamplesDir = sys.argv[1]\nrootDir = sys.argv[2]\nmodelsDir = sys.argv[3]\nnumOfSamples = int(sys.argv[4])\ntestResultFoldName = sys.argv[5]\ndatasetName = sys.argv[6]\nnumOfClasses = int(sys.argv[7])\n\nDATA.set_current_dataset_name(datasetName)\n\n# Basic parameters for k-fold experiment setup\ntimeStamp=time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.gmtime())\nexperimentRootDir=os.path.join(rootDir,timeStamp)\ncreateDirSafely(experimentRootDir)\n\narchitecture = MODEL.ARCHITECTURE\ntestDir = os.path.join(experimentRootDir, testResultFoldName)\n\nAETypes = ATTACK.get_AETypes()\n\n\nnumOfAETypes = len(AETypes)\nsampleTypes =[\"BS\"]\nsampleTypes.extend(AETypes)\nnumOfSampleTypes = numOfAETypes + 
1\n\ntargetModelName = \"clean\"\ntransformConfig = TRANSFORMATION()\ntransformationList = transformConfig.supported_types() \n\n# Create fold directories for evaluation\npredictionResultDir = os.path.join(testDir, \"prediction_result\")\n\n\n# Prediction : needs a new prediction function\npredictionForTest(\n predictionResultDir,\n datasetName,\n architecture,\n numOfClasses,\n targetModelName,\n modelsDir,\n samplesDir,\n numOfSamples,\n AETypes,\n transformationList)\n\nnumOfTrans = len(transformationList) - 1\nnumOfModels = 1 + numOfTrans # clean model + transform models\n\n\n# Evaluation: training and testing\nlabels = np.load(os.path.join(samplesDir, \"Label-\"+datasetName+\"-\"+targetModelName+\".npy\"))\nlabels = np.argmax(labels, axis=1)\n\ntrainModelDir = os.path.join(experimentRootDir, \"train_models\")\n\nnumOfDefenses = numOfCVDefenses+2*numOfWCDefenses\n\nacc1Model = np.zeros((numOfAETypes+1, numOfModels))\n\nsampleTypes = [\"BS\"]\nsampleTypes.extend(AETypes)\nnumOfSampleTypes = len(sampleTypes)\n\n\n\ndef majorityVote(participants):\n '''\n Input:\n participants: a list of opinions. Each element in the list is a numpy array, N X 2.\n N is the number of events. The second dimension contains (opinion/label, confidence)\n Output:\n voteResult : a numapy array NX2 represents opinion and confidence across N events \n '''\n numOfEvents = participants[0].shape[0]\n voteResult = np.zeros((numOfEvents, 2))\n misCount = 0\n for eventID in range(numOfEvents):\n countDict={}\n # counting\n for participant in participants:\n vote = participant[eventID][0] \n if vote in countDict:\n countDict[vote] = (1+countDict[vote][0], participant[eventID][1]+countDict[vote][1])\n else:\n countDict[vote] = (1, participant[eventID][1])\n\n # voting\n votingResult=None\n count = 0\n for key, value in countDict.items():\n if (value[0] > count) or (value[0]==count and (value[1]/value[0] > countDict[votingResult][1] / count)):\n count = value[0]\n votingResult = key\n \n voteResult[eventID, 0] = votingResult\n voteResult[eventID, 1] = count \n return voteResult\n\ndef saveDATable(sampleTypes, dA, vA, cA, filepath):\n with open(filepath, \"w\") as fp:\n sf = \"{}\\t{}\\t{}\\t{}\\n\"\n fp.write(sf.format(\"SampleType\", \"DetectionAcc\", \"VotingAcc\", \"Acc(Clean)\"))\n for sIdx in range(len(sampleTypes)):\n fp.write(sf.format(\n sampleTypes[sIdx],\n dA[sIdx],\n vA[sIdx],\n cA[sIdx]))\n\n# calculate the accuracy of each model for all type of samples\naccEachModelEachSampleType = np.zeros((numOfModels, numOfSampleTypes))\nfor sampleTypeIdx in range(numOfSampleTypes):\n sampleType = sampleTypes[sampleTypeIdx]\n curPredictionResultDir = os.path.join(predictionResultDir, sampleType)\n print(\"calculating accuracy of each model for sample type: \"+sampleType)\n predProb = np.load(os.path.join(curPredictionResultDir, \"predProb.npy\"))\n accEachModelEachSampleType[:, sampleTypeIdx] = calAccuracyAllSingleModels(labels, predProb)\nwith open(os.path.join(testDir, \"acc_each_model_each_sample_type.txt\"), \"w\") as fp:\n fp.write(\"Model\")\n for sampleType in sampleTypes:\n fp.write(\"\\t\"+sampleType)\n fp.write(\"\\n\")\n\n for mIdx in range(numOfModels):\n fp.write(transformationList[mIdx])\n for sampleTypeIdx in range(numOfSampleTypes):\n fp.write(\"\\t\"+str(accEachModelEachSampleType[mIdx, sampleTypeIdx]))\n fp.write(\"\\n\")\n\n\n \n\nthresholdRatios = [0.5, 0.6, 0.7, 0.8]\n\nsampleKinds = [True]\nfor _ in AETypes:\n sampleKinds.append(False)\n\nfor thresholdRatio in thresholdRatios:\n threshold = 
int(thresholdRatio*numOfModels)\n print(\"Threshold Ratio: \"+str(thresholdRatio))\n # the 1st dimension maps to a kind of ensemble model trained on the specific type of AE\n detectionAccs = np.zeros((numOfSampleTypes))\n votingAccs = np.zeros((numOfSampleTypes))\n cleanAccs = np.zeros((numOfSampleTypes))\n # Test each ensemble model trained by each type of AEs\n for sampleTypeIdx in range(numOfSampleTypes):\n sampleType = sampleTypes[sampleTypeIdx]\n sampleKind = sampleKinds[sampleTypeIdx] # True - BS, False - AE\n curPredictionResultDir = os.path.join(predictionResultDir, sampleType)\n \n print(\"\\tTesting sample type: \"+sampleType)\n predProb = np.load(os.path.join(curPredictionResultDir, \"predProb.npy\"))\n predLogit = np.load(os.path.join(curPredictionResultDir, \"predLogit.npy\"))\n predProbLC = np.zeros((numOfModels, numOfSamples, 2))\n predProbLC[:, :, 0] = np.argmax(predProb, axis=2)\n predProbLC[:, : ,1] = np.max(predProb, axis=2)\n\n # clean model accuracy\n cleanAccs[sampleTypeIdx] = accEachModelEachSampleType[0, sampleTypeIdx]\n\n # majority voting as detection - threshold 80% of transform models\n participants = []\n for mIdx in range(numOfModels):\n participants.append(predProbLC[mIdx, :, :])\n votedResult2 = util.majorityVote(participants)\n votingAccs[sampleTypeIdx] = calAccuracy(labels, votedResult2[:, 0]) \n\n votedResult = majorityVote(participants)\n detectedAECnt = 0\n for sIdx in range(numOfSamples):\n if votedResult[sIdx, 1] < threshold:\n detectedAECnt += 1\n if sampleKind:\n detectionAccs[sampleTypeIdx] = 1 - (detectedAECnt)/numOfSamples\n else:\n detectionAccs[sampleTypeIdx] = (detectedAECnt)/numOfSamples\n filepath = os.path.join(testDir, \"detection_acc_\"+str(thresholdRatio)+\".txt\")\n saveDATable(sampleTypes, detectionAccs, votingAccs, cleanAccs, filepath)\n # distance matrix - heatmap\n print(\"\\tThreshold of number of Models that have the same vote: \"+str(threshold))\n print(\"\\t\"+str(sampleTypes))\n print(\"\\tdetection accuracy: \"+str(detectionAccs))\n print(\"\\tvoting accuracy: \"+str(votingAccs))\n print(\"\\tclean model accuracy: \"+str(cleanAccs))\n print(\"\\n\")\n\ndef distMatrx(groupOfPoints, metric=\"l2\"):\n '''\n nPoints X nGroup X nDim\n '''\n nGroups = groupOfPoints[0].shape[0]\n nPoints = groupOfPoints.shape[0]\n dms = np.zeros((nGroups, nPoints, nPoints))\n for gIdx in range(nGroups):\n points = groupOfPoints[:, gIdx, :]\n dms[gIdx]=distance_matrix(points, points, p=2)\n meanDM = dms.mean(axis=0)\n stdDM = dms.std(axis=0)\n return meanDM, stdDM\n\ndef dumpMat(mat, filepath):\n with open(filepath, \"w\") as fp:\n for rIdx in range(mat.shape[0]):\n for cIdx in range(mat.shape[1]):\n fp.write(str(mat[rIdx, cIdx])+\"\\t\")\n fp.write(\"\\n\")\n\nfor sampleTypeIdx in range(numOfSampleTypes):\n sampleType = sampleTypes[sampleTypeIdx]\n sampleKind = sampleKinds[sampleTypeIdx] # True - BS, False - AE\n curPredictionResultDir = os.path.join(predictionResultDir, sampleType)\n \n print(\"[DM] Testing sample type: \"+sampleType)\n predProb = np.load(os.path.join(curPredictionResultDir, \"predProb.npy\"))\n predLogit = np.load(os.path.join(curPredictionResultDir, \"predLogit.npy\"))\n\n meanDM, stdDM = distMatrx(predProb, metric=\"l2\")\n dumpMat(meanDM, os.path.join(testDir, \"meanDM_\"+sampleType+\"_Prob.txt\"))\n dumpMat(stdDM, os.path.join(testDir, \"stdDM_\"+sampleType+\"_Prob.txt\"))\n\n ax = sns.heatmap(meanDM) \n fig = ax.get_figure()\n fig.savefig(os.path.join(testDir, \"DM_\"+sampleType+\"_Prob.pdf\"))\n fig.clf()\n\n\n meanDM, stdDM 
= distMatrx(predLogit, metric=\"l2\")\n dumpMat(meanDM, os.path.join(testDir, \"meanDM_\"+sampleType+\"_Logit.txt\"))\n dumpMat(stdDM, os.path.join(testDir, \"stdDM_\"+sampleType+\"_Logit.txt\"))\n\n ax = sns.heatmap(meanDM) \n fig = ax.get_figure()\n fig.savefig(os.path.join(testDir, \"DM_\"+sampleType+\"_Logit.pdf\"))\n fig.clf()\n" ]
[ [ "numpy.max", "numpy.argmax", "numpy.zeros", "numpy.mean" ], [ "numpy.max", "numpy.argmax", "numpy.zeros", "scipy.spatial.distance_matrix" ] ]
And0k/vaex
[ "298d0d5c6ace0ea4c335339fef10ba7ee54cc077" ]
[ "packages/vaex-core/vaex/dataframe.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function\nimport io\nimport difflib\nimport base64\nfrom typing import Iterable\nimport os\nimport math\nimport time\nimport itertools\nimport functools\nimport collections\nimport sys\nimport platform\nimport warnings\nimport re\nfrom functools import reduce\nimport threading\nimport six\nimport vaex.utils\n# import vaex.image\nimport numpy as np\nimport concurrent.futures\nimport numbers\nimport pyarrow as pa\n\nfrom vaex.utils import Timer\nimport vaex.events\n# import vaex.ui.undo\nimport vaex.grids\nimport vaex.hash\nimport vaex.multithreading\nimport vaex.promise\nimport vaex.execution\nimport vaex.expresso\nimport logging\nimport vaex.kld\nfrom . import selections, tasks, scopes\nfrom .expression import expression_namespace\nfrom .delayed import delayed, delayed_args, delayed_list\nfrom .column import Column, ColumnIndexed, ColumnSparse, ColumnString, ColumnConcatenatedLazy, supported_column_types\nfrom . import array_types\nimport vaex.events\nfrom .datatype import DataType\nfrom .docstrings import docsubst\n\n\nastropy = vaex.utils.optional_import(\"astropy.units\")\nxarray = vaex.utils.optional_import(\"xarray\")\n\n# py2/p3 compatibility\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\n_DEBUG = os.environ.get('VAEX_DEBUG', False) # extra sanity checks that might hit performance\n_REPORT_EXECUTION_TRACES = vaex.utils.get_env_type(int, 'VAEX_EXECUTE_TRACE', 0)\nDEFAULT_REPR_FORMAT = 'plain'\nFILTER_SELECTION_NAME = '__filter__'\n\nsys_is_le = sys.byteorder == 'little'\n\nlogger = logging.getLogger(\"vaex\")\nlock = threading.Lock()\ndefault_shape = 128\ndefault_chunk_size = 1024**2\n# executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)\n# executor = vaex.execution.default_executor\n\ndef _len(o):\n return o.__len__()\n\n\ndef _requires(name):\n def wrap(*args, **kwargs):\n raise RuntimeError('this function is wrapped by a placeholder, you probably want to install vaex-' + name)\n return wrap\n\nfrom .utils import (_ensure_strings_from_expressions,\n _ensure_string_from_expression,\n _ensure_list,\n _is_limit,\n _isnumber,\n _issequence,\n _is_string, _normalize_selection,\n _parse_reduction,\n _parse_n,\n _normalize_selection_name,\n _normalize,\n _parse_f,\n _expand,\n _expand_shape,\n _expand_limits,\n as_flat_float,\n as_flat_array,\n _split_and_combine_mask)\n\nmain_executor = None # vaex.execution.Executor(vaex.multithreading.pool)\nfrom vaex.execution import Executor\n\n\ndef get_main_executor():\n global main_executor\n if main_executor is None:\n main_executor = vaex.execution.ExecutorLocal(vaex.multithreading.get_main_pool())\n return main_executor\n\n\n# we import after function_mapping is defined\nfrom .expression import Expression\n\n\n_functions_statistics_1d = []\n\n\ndef stat_1d(f):\n _functions_statistics_1d.append(f)\n return f\n\ndef _hidden(meth):\n \"\"\"Mark a method as hidden\"\"\"\n meth.__hidden__ = True\n return meth\n\[email protected](\"dataframe\")\nclass _DataFrameEncoder:\n @staticmethod\n def encode(encoding, df):\n state = df.state_get(skip=[df.dataset])\n return {\n 'state': encoding.encode('dataframe-state', state),\n 'dataset': encoding.encode('dataset', df.dataset)\n }\n\n @staticmethod\n def decode(encoding, spec):\n dataset = encoding.decode('dataset', spec['dataset'])\n state = encoding.decode('dataframe-state', spec['state'])\n df = vaex.from_dataset(dataset)._future()\n df.state_set(state)\n return df\n\n\nclass 
DataFrame(object):\n \"\"\"All local or remote datasets are encapsulated in this class, which provides a pandas\n like API to your dataset.\n\n Each DataFrame (df) has a number of columns, and a number of rows, the length of the DataFrame.\n\n All DataFrames have multiple 'selection', and all calculations are done on the whole DataFrame (default)\n or for the selection. The following example shows how to use the selection.\n\n >>> df.select(\"x < 0\")\n >>> df.sum(df.y, selection=True)\n >>> df.sum(df.y, selection=[df.x < 0, df.x > 0])\n\n :type signal_selection_changed: events.Signal\n :type executor: Executor\n \"\"\"\n\n def __init__(self, name=None, executor=None):\n self.executor = executor or get_main_executor()\n self.name = name\n self._init()\n\n def _init(self):\n self.column_names = []\n self.signal_pick = vaex.events.Signal(\"pick\")\n self.signal_sequence_index_change = vaex.events.Signal(\"sequence index change\")\n self.signal_selection_changed = vaex.events.Signal(\"selection changed\")\n self.signal_active_fraction_changed = vaex.events.Signal(\"active fraction changed\")\n self.signal_column_changed = vaex.events.Signal(\"a column changed\") # (df, column_name, change_type=[\"add\", \"remove\", \"change\"])\n self.signal_variable_changed = vaex.events.Signal(\"a variable changed\")\n\n self.variables = {}\n self.virtual_columns = {}\n # we also store the virtual columns as expressions, for performance reasons\n # the expression object can cache the ast, making renaming/rewriting faster\n self._virtual_expressions = {}\n self.functions = {}\n self._length_original = None\n self._length_unfiltered = None\n self._cached_filtered_length = None\n self._filter_filled = False\n self._active_fraction = 1\n self._current_row = None\n self._index_start = 0\n self._index_end = None\n\n self.description = None\n self.ucds = {}\n self.units = {}\n self.descriptions = {}\n\n self.favorite_selections = {}\n\n # this is to be backward compatible with v4 for now\n self._future_behaviour = False\n\n self.mask = None # a bitmask for the selection does not work for server side\n\n # maps from name to list of Selection objets\n self.selection_histories = collections.defaultdict(list)\n # after an undo, the last one in the history list is not the active one, -1 means no selection\n self.selection_history_indices = collections.defaultdict(lambda: -1)\n assert self.filtered is False\n self._auto_fraction = False\n\n self._sparse_matrices = {} # record which sparse columns belong to which sparse matrix\n\n self._categories = {}\n self._selection_mask_caches = collections.defaultdict(dict)\n self._selection_masks = {} # maps to vaex.superutils.Mask object\n self._renamed_columns = []\n\n # weak refs of expression that we keep to rewrite expressions\n self._expressions = []\n\n self.local = threading.local()\n # a check to avoid nested aggregator calls, which make stack traces very difficult\n # like the ExecutorLocal.local.executing, this needs to be thread local\n self.local._aggregator_nest_count = 0\n\n def fingerprint(self, dependencies=None, treeshake=False):\n '''Id that uniquely identifies a dataframe (cross runtime).\n\n :param set[str] dependencies: set of column, virtual column, function or selection names to be used.\n :param bool treeshake: Get rid of unused variables before calculating the fingerprint.\n '''\n df = self.copy(treeshake=True) if treeshake else self\n selections = {name: self.get_selection(name) for name, history in self.selection_histories.items() if 
self.has_selection(name)}\n if dependencies is not None:\n dependencies = set(dependencies) # copy\n # these are implicit dependencies that we need to add\n for selection in selections.values():\n dependencies.update(selection.dependencies(self))\n\n # we only use the state parts that affect data (no metadata)\n encoding = vaex.encoding.Encoding()\n def dep_filter(d : dict):\n if dependencies is None:\n return d\n return {k: v for k, v in d.items() if k in dependencies}\n\n state = dict(\n column_names=[k for k in list(self.column_names) if dependencies is None or k in dependencies],\n virtual_columns=dep_filter(self.virtual_columns),\n # variables go unencoded\n variables=dep_filter(self.variables),\n # for functions it should be fast enough (not large amounts of data)\n functions={name: encoding.encode(\"function\", value) for name, value in dep_filter(self.functions).items()},\n active_range=[self._index_start, self._index_end]\n )\n # selections can affect the filter, so put them all in\n state['selections'] = {name: selection.to_dict() if selection is not None else None for name, selection in selections.items()}\n fp = vaex.cache.fingerprint(state, df.dataset.fingerprint)\n return f'dataframe-{fp}'\n\n def __dataframe__(self, nan_as_null : bool = False, allow_copy : bool = True):\n \"\"\"\n \"\"\"\n import vaex.dataframe_protocol\n return vaex.dataframe_protocol._VaexDataFrame(self, nan_as_null=nan_as_null, allow_copy=allow_copy)\n\n def _future(self, version=5, inplace=False):\n '''Act like a Vaex dataframe version 5.\n\n meaning:\n * A dataframe with automatically encoded categorical data\n * state version 5 (which stored the dataset)\n '''\n df = self if inplace else self.copy()\n df._future_behaviour = 5\n return df\n\n _auto_encode = _hidden(vaex.utils.deprecated('use _future')(_future))\n\n def __getattr__(self, name):\n # will support the hidden methods\n if name in self.__hidden__:\n return self.__hidden__[name].__get__(self)\n else:\n return object.__getattribute__(self, name)\n\n def _ipython_key_completions_(self):\n return self.get_column_names()\n\n @property\n def func(self):\n class Functions(object):\n pass\n\n functions = Functions()\n for name, value in expression_namespace.items():\n # f = vaex.expression.FunctionBuiltin(self, name)\n def closure(name=name, value=value):\n local_name = name\n def wrap(*args, **kwargs):\n def myrepr(k):\n if isinstance(k, Expression):\n return str(k)\n elif isinstance(k, np.ndarray) and k.ndim == 0:\n # to support numpy scalars\n return myrepr(k.item())\n elif isinstance(k, np.ndarray):\n # to support numpy arrays\n var = self.add_variable('arg_numpy_array', k, unique=True)\n return var\n elif isinstance(k, list):\n # to support numpy scalars\n return '[' + ', '.join(myrepr(i) for i in k) + ']'\n else:\n return repr(k)\n arg_string = \", \".join([myrepr(k) for k in args] + ['{}={}'.format(name, myrepr(value)) for name, value in kwargs.items()])\n expression = \"{}({})\".format(local_name, arg_string)\n return vaex.expression.Expression(self, expression)\n return wrap\n f = closure()\n try:\n f = functools.wraps(value)(f)\n except AttributeError:\n pass # python2 quicks.. 
?\n setattr(functions, name, f)\n for name, value in self.functions.items():\n setattr(functions, name, value)\n\n return functions\n\n @_hidden\n @vaex.utils.deprecated('use is_category')\n def iscategory(self, column):\n return self.is_category(column)\n\n def is_datetime(self, expression):\n dtype = self.data_type(expression)\n return isinstance(dtype, np.dtype) and dtype.kind == 'M'\n\n def is_string(self, expression):\n return vaex.array_types.is_string_type(self.data_type(expression))\n\n def is_category(self, column):\n \"\"\"Returns true if column is a category.\"\"\"\n column = _ensure_string_from_expression(column)\n # TODO: we don't support DictionaryType for remote dataframes\n if self.is_local() and column in self.columns:\n # TODO: we don't support categories as expressions\n dtype = vaex.dtype_of(self.columns[column])\n if dtype.is_encoded:\n return True\n return column in self._categories\n\n def _category_dictionary(self, column):\n '''Return the dictionary for a column if it is an arrow dict type'''\n if column in self.columns:\n x = self.columns[column]\n dtype = vaex.dtype_of(x)\n if dtype.is_encoded:\n x = x[:1] # could be a proxy\n # we're interested in the type of the dictionary or the indices?\n if isinstance(x, pa.ChunkedArray):\n # take the first dictionaryu\n x = x.chunks[0]\n dictionary = x.dictionary\n return dictionary\n\n def category_labels(self, column, aslist=True):\n column = _ensure_string_from_expression(column)\n if column in self._categories:\n return self._categories[column]['labels']\n dictionary = self._category_dictionary(column)\n if dictionary is not None:\n if aslist:\n dictionary = dictionary.to_pylist()\n return dictionary\n else:\n raise ValueError(f'Column {column} is not a categorical')\n\n def category_values(self, column):\n column = _ensure_string_from_expression(column)\n return self._categories[column]['values']\n\n def category_count(self, column):\n column = _ensure_string_from_expression(column)\n if column in self._categories:\n return self._categories[column]['N']\n dictionary = self._category_dictionary(column)\n if dictionary is not None:\n return len(dictionary)\n else:\n raise ValueError(f'Column {column} is not a categorical')\n\n def category_offset(self, column):\n column = _ensure_string_from_expression(column)\n if column in self._categories:\n return self._categories[column]['min_value']\n dictionary = self._category_dictionary(column)\n if dictionary is not None:\n return 0\n else:\n raise ValueError(f'Column {column} is not a categorical')\n\n def execute(self):\n '''Execute all delayed jobs.'''\n # make sure we only add the tasks at the last moment, after all operations are added (for cache keys)\n if not self.executor.tasks:\n logger.info('no task to execute')\n return\n if _REPORT_EXECUTION_TRACES:\n import traceback\n trace = ''.join(traceback.format_stack(limit=_REPORT_EXECUTION_TRACES))\n print('Execution triggerd from:\\n', trace)\n print(\"Tasks:\")\n for task in self.executor.tasks:\n print(repr(task))\n if self.executor.tasks:\n self.executor.execute()\n\n async def execute_async(self):\n '''Async version of execute'''\n await self.executor.execute_async()\n\n @property\n def filtered(self):\n return self.has_selection(FILTER_SELECTION_NAME)\n\n def map_reduce(self, map, reduce, arguments, progress=False, delay=False, info=False, to_numpy=True, ignore_filter=False, pre_filter=False, name='map reduce (custom)', selection=None):\n # def map_wrapper(*blocks):\n pre_filter = pre_filter and self.filtered\n task = 
tasks.TaskMapReduce(self, arguments, map, reduce, info=info, to_numpy=to_numpy, ignore_filter=ignore_filter, selection=selection, pre_filter=pre_filter)\n progressbar = vaex.utils.progressbars(progress)\n progressbar.add_task(task, f'map reduce: {name}')\n task = self.executor.schedule(task)\n return self._delay(delay, task)\n\n def apply(self, f, arguments=None, vectorize=False, multiprocessing=True):\n \"\"\"Apply a function on a per row basis across the entire DataFrame.\n\n Example:\n\n >>> import vaex\n >>> df = vaex.example()\n >>> def func(x, y):\n ... return (x+y)/(x-y)\n ...\n >>> df.apply(func, arguments=[df.x, df.y])\n Expression = lambda_function(x, y)\n Length: 330,000 dtype: float64 (expression)\n -------------------------------------------\n 0 -0.460789\n 1 3.90038\n 2 -0.642851\n 3 0.685768\n 4 -0.543357\n\n\n :param f: The function to be applied\n :param arguments: List of arguments to be passed on to the function f.\n :param vectorize: Call f with arrays instead of a scalars (for better performance).\n :param bool multiprocessing: Use multiple processes to avoid the GIL (Global interpreter lock).\n :return: A function that is lazily evaluated.\n \"\"\"\n assert arguments is not None, 'for now, you need to supply arguments'\n import types\n if isinstance(f, types.LambdaType):\n name = 'lambda_function'\n else:\n name = f.__name__\n if not vectorize:\n f = vaex.expression.FunctionToScalar(f, multiprocessing)\n else:\n f = vaex.expression.FunctionSerializablePickle(f, multiprocessing)\n lazy_function = self.add_function(name, f, unique=True)\n arguments = _ensure_strings_from_expressions(arguments)\n return lazy_function(*arguments)\n\n @docsubst\n def nop(self, expression=None, progress=False, delay=False):\n \"\"\"Evaluates expression or a list of expressions, and drops the result. 
Usefull for benchmarking, since vaex is usually lazy.\n\n :param expression: {expression}\n :param progress: {progress}\n :param delay: {delay}\n :returns: None\n \"\"\"\n if expression is None:\n expressions = self.get_column_names()\n else:\n expressions = _ensure_list(_ensure_strings_from_expressions(expression))\n def map(*ar):\n pass\n def reduce(a, b):\n pass\n return self.map_reduce(map, reduce, expressions, delay=delay, progress=progress, name='nop', to_numpy=False)\n\n def _hash_map_unique(self, expression, progress=False, selection=None, flatten=True, delay=False, limit=None, limit_raise=True, return_inverse=False):\n if selection is not None:\n selection = str(selection)\n expression = _ensure_string_from_expression(expression)\n task = vaex.tasks.TaskHashmapUniqueCreate(self, expression, flatten, limit=limit, selection=selection, return_inverse=return_inverse, limit_raise=limit_raise)\n task = self.executor.schedule(task)\n progressbar = vaex.utils.progressbars(progress)\n progressbar.add_task(task, f\"set for {str(expression)}\")\n return self._delay(delay, task)\n\n # kept for compatibility\n _set = _hash_map_unique\n\n def _index(self, expression, progress=False, delay=False, prime_growth=False, cardinality=None):\n column = _ensure_string_from_expression(expression)\n # TODO: this does not seem needed\n # column = vaex.utils.valid_expression(self.dataset, column)\n columns = [column]\n from .hash import index_type_from_dtype\n from vaex.column import _to_string_sequence\n\n transient = self[column].transient or self.filtered or self.is_masked(column)\n if self.is_string(expression) and not transient:\n # string is a special case, only ColumnString are not transient\n ar = self.columns[str(self[column].expand())]\n if not isinstance(ar, ColumnString):\n transient = True\n\n dtype = self.data_type(column)\n index_type = index_type_from_dtype(dtype, transient, prime_growth=prime_growth)\n import queue\n if cardinality is not None:\n N_index = min(self.executor.thread_pool.nthreads, max(1, len(self)//cardinality))\n capacity_initial = len(self) // N_index\n else:\n N_index = self.executor.thread_pool.nthreads\n capacity_initial = 10\n indices = queue.Queue()\n # we put None to lazily create them\n for i in range(N_index):\n indices.put(None)\n def map(thread_index, i1, i2, selection_masks, blocks):\n ar = blocks[0]\n index = indices.get()\n if index is None:\n index = index_type(1)\n if hasattr(index, 'reserve'):\n index.reserve(capacity_initial)\n if vaex.array_types.is_string_type(dtype):\n previous_ar = ar\n ar = _to_string_sequence(ar)\n if not transient:\n assert ar is previous_ar.string_sequence\n if np.ma.isMaskedArray(ar):\n mask = np.ma.getmaskarray(ar)\n index.update(ar, mask, i1)\n else:\n index.update(ar, i1)\n indices.put(index)\n # cardinality_estimated = sum()\n def reduce(a, b):\n pass\n self.map_reduce(map, reduce, columns, delay=delay, name='index', info=True, to_numpy=False)\n index_list = [] #[k for k in index_list if k is not None]\n while not indices.empty():\n index = indices.get(timeout=10)\n if index is not None:\n index_list.append(index)\n index0 = index_list[0]\n for other in index_list[1:]:\n index0.merge(other)\n return index0\n\n @docsubst\n def unique(self, expression, return_inverse=False, dropna=False, dropnan=False, dropmissing=False, progress=False, selection=None, axis=None, delay=False, limit=None, limit_raise=True, array_type='python'):\n \"\"\"Returns all unique values.\n\n :param expression: {expression}\n :param return_inverse: Return 
the inverse mapping from unique values to original values.\n :param dropna: {dropna}\n :param dropnan: {dropnan}\n :param dropmissing: {dropmissing}\n :param progress: {progress}\n :param selection: {selection}\n :param int axis: Axis over which to determine the unique elements (None will flatten arrays or lists)\n :param delay: {delay}\n :param int limit: {limit}\n :param bool limit_raise: {limit_raise}\n :param str array_type: {array_type}\n \"\"\"\n if dropna:\n dropnan = True\n dropmissing = True\n if axis is not None:\n raise ValueError('only axis=None is supported')\n expression = _ensure_string_from_expression(expression)\n if self._future_behaviour and self.is_category(expression):\n if self.filtered:\n keys = pa.array(self.category_labels(expression))\n @delayed\n def encode(codes):\n used_keys = keys.take(codes)\n return vaex.array_types.convert(used_keys, array_type)\n codes = self[expression].index_values().unique(delay=True)\n return self._delay(delay, encode(codes))\n else:\n keys = pa.array(self.category_labels(expression))\n keys = vaex.array_types.convert(keys, array_type)\n return self._delay(delay, vaex.promise.Promise.fulfilled(keys))\n else:\n @delayed\n def process(hash_map_unique):\n transient = True\n data_type_item = self.data_type(expression, axis=-1)\n if return_inverse:\n # inverse type can be smaller, depending on length of set\n inverse = np.zeros(self._length_unfiltered, dtype=np.int64)\n dtype = self.data_type(expression)\n from vaex.column import _to_string_sequence\n def map(thread_index, i1, i2, selection_mask, blocks):\n ar = blocks[0]\n if vaex.array_types.is_string_type(dtype):\n previous_ar = ar\n ar = _to_string_sequence(ar)\n if not transient:\n assert ar is previous_ar.string_sequence\n # TODO: what about masked values?\n inverse[i1:i2] = hash_map_unique.map(ar)\n def reduce(a, b):\n pass\n self.map_reduce(map, reduce, [expression], delay=delay, name='unique_return_inverse', progress=progress_inverse, info=True, to_numpy=False, selection=selection)\n # ordered_set.seal()\n # if array_type == 'python':\n if data_type_item.is_object:\n key_values = hash_map_unique._internal.extract()\n keys = list(key_values.keys())\n counts = list(key_values.values())\n if hash_map_unique.has_nan and not dropnan:\n keys = [np.nan] + keys\n counts = [hash_map_unique.nan_count] + counts\n if hash_map_unique.has_null and not dropmissing:\n keys = [None] + keys\n counts = [hash_map_unique.null_count] + counts\n if dropmissing and None in keys:\n # we still can have a None in the values\n index = keys.index(None)\n keys.pop(index)\n counts.pop(index)\n counts = np.array(counts)\n keys = np.array(keys)\n else:\n keys = hash_map_unique.keys()\n # TODO: we might want to put the dropmissing in .keys(..)\n deletes = []\n if dropmissing and hash_map_unique.has_null:\n deletes.append(hash_map_unique.null_index)\n if dropnan and hash_map_unique.has_nan:\n deletes.append(hash_map_unique.nan_index)\n if isinstance(keys, (vaex.strings.StringList32, vaex.strings.StringList64)):\n keys = vaex.strings.to_arrow(keys)\n indices = np.delete(np.arange(len(keys)), deletes)\n keys = keys.take(indices)\n else:\n keys = np.delete(keys, deletes)\n if not dropmissing and hash_map_unique.has_null:\n mask = np.zeros(len(keys), dtype=np.uint8)\n mask[hash_map_unique.null_index] = 1\n keys = np.ma.array(keys, mask=mask)\n if data_type_item == str and isinstance(keys, np.ndarray):\n # the np.delete will cast to dtype object\n keys = pa.array(keys)\n keys = vaex.array_types.convert(keys, 
array_type)\n if return_inverse:\n return keys, inverse\n else:\n return keys\n progressbar = vaex.utils.progressbars(progress, title=\"unique\")\n hash_map_result = self._hash_map_unique(expression, progress=progressbar, selection=selection, flatten=axis is None, delay=True, limit=limit, limit_raise=limit_raise)\n if return_inverse:\n progress_inverse = progressbar.add(\"find inverse\")\n return self._delay(delay, progressbar.exit_on(process(hash_map_result)))\n\n\n @docsubst\n def mutual_information(self, x, y=None, dimension=2, mi_limits=None, mi_shape=256, binby=[], limits=None, shape=default_shape, sort=False, selection=False, delay=False):\n \"\"\"Estimate the mutual information between and x and y on a grid with shape mi_shape and mi_limits, possibly on a grid defined by binby.\n\n The `x` and `y` arguments can be single expressions of lists of expressions:\n - If `x` and `y` are single expression, it computes the mutual information between `x` and `y`;\n - If `x` is a list of expressions and `y` is a single expression, it computes the mutual information between each expression in `x` and the expression in `y`;\n - If `x` is a list of expressions and `y` is None, it computes the mutual information matrix amongst all expressions in `x`;\n - If `x` is a list of tuples of length 2, it computes the mutual information for the specified dimension pairs;\n - If `x` and `y` are lists of expressions, it computes the mutual information matrix defined by the two expression lists.\n\n If sort is True, the mutual information is returned in sorted (descending) order and the list of expressions is returned in the same order.\n\n Example:\n\n >>> import vaex\n >>> df = vaex.example()\n >>> df.mutual_information(\"x\", \"y\")\n array(0.1511814526380327)\n >>> df.mutual_information([[\"x\", \"y\"], [\"x\", \"z\"], [\"E\", \"Lz\"]])\n array([ 0.15118145, 0.18439181, 1.07067379])\n >>> df.mutual_information([[\"x\", \"y\"], [\"x\", \"z\"], [\"E\", \"Lz\"]], sort=True)\n (array([ 1.07067379, 0.18439181, 0.15118145]),\n [['E', 'Lz'], ['x', 'z'], ['x', 'y']])\n >>> df.mutual_information(x=['x', 'y', 'z'])\n array([[3.53535106, 0.06893436, 0.11656418],\n [0.06893436, 3.49414866, 0.14089177],\n [0.11656418, 0.14089177, 3.96144906]])\n >>> df.mutual_information(x=['x', 'y', 'z'], y=['E', 'Lz'])\n array([[0.32316291, 0.16110026],\n [0.36573065, 0.17802792],\n [0.35239151, 0.21677695]])\n\n\n :param x: {expression}\n :param y: {expression}\n :param limits: {limits}\n :param shape: {shape}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param sort: return mutual information in sorted (descending) order, and also return the correspond list of expressions when sorted is True\n :param selection: {selection}\n :param delay: {delay}\n :return: {return_stat_scalar},\n \"\"\"\n # either a list of tuples with custom combinations\n if y is None and _issequence(x) and all([_issequence(k) for k in x]):\n waslist, [combinations, ] = vaex.utils.listify(x)\n shape_result = (len(combinations),)\n elif _issequence(x) and (_issequence(y) or y is None):\n # or ask for a matrix of combinations\n if y is None:\n combinations = list(itertools.product(x, repeat=dimension))\n shape_result = (len(x), ) * dimension\n else:\n shape_result = (len(x), len(y))\n combinations = np.array([[(i, j) for i in y] for j in x]).reshape((-1, 2)).tolist()\n waslist = True\n elif _issequence(x):\n shape_result = (len(x),)\n combinations = [(i, y) for i in x]\n waslist = True\n elif _issequence(y):\n shape_result = 
(len(y),)\n combinations = [(i, y) for i in x]\n waslist = True\n else:\n shape_result = tuple()\n combinations = [(x, y)]\n waslist = False\n if mi_limits:\n mi_limits = [mi_limits]\n\n limits = self.limits(binby, limits, delay=True)\n # make sure we only do the unique combinations\n combinations_sorted = [tuple(sorted(k)) for k in combinations]\n combinations_unique, unique_reverse = np.unique(combinations_sorted, return_inverse=True, axis=0)\n combinations_unique = list(map(tuple, combinations_unique.tolist()))\n mi_limits = self.limits(combinations_unique, mi_limits, delay=True)\n\n @delayed\n def calculate(counts):\n # TODO: mutual information doesn't take axis arguments, so ugly solution for now\n counts = counts.astype(np.float64)\n fullshape = _expand_shape(shape, len(binby))\n out = np.zeros((fullshape), dtype=float)\n if len(fullshape) == 0:\n out = vaex.kld.mutual_information(counts)\n # print(\"count> \", np.sum(counts))\n elif len(fullshape) == 1:\n for i in range(fullshape[0]):\n out[i] = vaex.kld.mutual_information(counts[..., i])\n # print(\"counti> \", np.sum(counts[...,i]))\n # print(\"countt> \", np.sum(counts))\n elif len(fullshape) == 2:\n for i in range(fullshape[0]):\n for j in range(fullshape[1]):\n out[i, j] = vaex.kld.mutual_information(counts[..., i, j])\n elif len(fullshape) == 3:\n for i in range(fullshape[0]):\n for j in range(fullshape[1]):\n for k in range(fullshape[2]):\n out[i, j, k] = vaex.kld.mutual_information(counts[..., i, j, k])\n else:\n raise ValueError(\"binby with dim > 3 is not yet supported\")\n return out\n\n @delayed\n def has_limits(limits, mi_limits):\n if not _issequence(binby):\n limits = [list(limits)]\n values = []\n for expressions, expression_limits in zip(combinations_unique, mi_limits):\n total_shape = _expand_shape(mi_shape, len(expressions)) + _expand_shape(shape, len(binby))\n counts = self.count(binby=list(expressions) + list(binby), limits=list(expression_limits) + list(limits),\n shape=total_shape, delay=True, selection=selection)\n values.append(calculate(counts))\n return values\n\n @delayed\n def finish(mi_list):\n if sort:\n mi_list = np.array(mi_list)\n indices = np.argsort(mi_list)[::-1]\n sorted_x = list([x[k] for k in indices])\n return mi_list[indices], sorted_x\n else:\n mi_list = np.array(mi_list)\n # reconstruct original ordering\n mi_list = mi_list[unique_reverse]\n total_shape = _expand_shape(shape, len(binby))\n total_shape += shape_result\n return np.array(vaex.utils.unlistify(waslist, mi_list)).reshape(total_shape)\n values = finish(delayed_list(has_limits(limits, mi_limits)))\n return self._delay(delay, values)\n\n def bin_edges(self, expression, limits, shape=default_shape):\n return self.bins(expression, limits, shape=shape, edges=True)\n\n def bin_centers(self, expression, limits, shape=default_shape):\n return self.bins(expression, limits, shape=shape, edges=False)\n\n def bins(self, expression, limits, shape=default_shape, edges=True):\n vmin, vmax = limits\n if edges:\n bins = np.ogrid[limits[0]:limits[1]:(shape + 1) * 1j]\n return bins\n else:\n dx = (limits[1] - limits[0]) / shape\n bins = np.ogrid[limits[0]:limits[1] - dx:(shape) * 1j]\n return bins + dx / 2\n\n def nearest_bin(self, value, limits, shape):\n bins = self.bins('', limits=limits, edges=False, shape=shape)\n index = np.argmin(np.abs(bins - value))\n return index\n\n def _compute_agg(self, name, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None, extra_expressions=None, 
array_type=None):\n logger.debug(\"aggregate %s(%r, binby=%r, limits=%r)\", name, expression, binby, limits)\n expression = _ensure_strings_from_expressions(expression)\n if extra_expressions:\n extra_expressions = _ensure_strings_from_expressions(extra_expressions)\n expression_waslist, [expressions, ] = vaex.utils.listify(expression)\n # TODO: doesn't seemn needed anymore?\n # expressions = [self._column_aliases.get(k, k) for k in expressions]\n import traceback\n trace = ''.join(traceback.format_stack())\n for expression in expressions:\n if expression and expression != \"*\":\n self.validate_expression(expression)\n if not hasattr(self.local, '_aggregator_nest_count'):\n self.local._aggregator_nest_count = 0\n if self.local._aggregator_nest_count != 0:\n raise RuntimeError(\"nested aggregator call: \\nlast trace:\\n%s\\ncurrent trace:\\n%s\" % (self.local.last_trace, trace))\n else:\n self.local.last_trace = trace\n # Instead of 'expression is not None', we would like to have 'not virtual'\n # but in agg.py we do some casting, which results in calling .dtype(..) with a non-column\n # expression even though all expressions passed here are column references\n # virtual = [k for k in expressions if k and k not in self.columns]\n if self._future_behaviour != 5 and (self.filtered and expression not in [None, '*']):\n # When our dataframe is filtered, and we have expressions, we may end up calling\n # df.dtype(..) which in turn may call df.evaluate(..) which in turn needs to have\n # the filter cache filled in order to compute the first non-missing row. This last\n # item could call df.count() again, leading to nested aggregators, which we do not\n # support. df.dtype() needs to call evaluate with filtering enabled since we consider\n # it invalid that expressions are evaluate with filtered data. 
Sklearn for instance may\n # give errors when evaluated with NaN's present.\n # TODO: GET RID OF THIS\n # TODO: temporary disabled\n # len(self) # fill caches and masks\n pass\n progressbar = vaex.utils.progressbars(progress, title=name)\n if not isinstance(binby, (list, tuple)) or len(binby) > 0:\n progressbar_limits = progressbar.add(\"binners\")\n binners = self._create_binners(binby, limits, shape, selection=selection, delay=True, progress=progressbar_limits)\n else:\n binners = ()\n progressbar_agg = progressbar\n @delayed\n def compute(expression, binners, selection, edges):\n binners = tuple(binners)\n if not hasattr(self.local, '_aggregator_nest_count'):\n self.local._aggregator_nest_count = 0\n self.local._aggregator_nest_count += 1\n try:\n if expression in [\"*\", None]:\n agg = vaex.agg.aggregates[name](selection=selection, edges=edges)\n else:\n if extra_expressions:\n agg = vaex.agg.aggregates[name](expression, *extra_expressions, selection=selection, edges=edges)\n else:\n agg = vaex.agg.aggregates[name](expression, selection=selection, edges=edges)\n tasks, result = agg.add_tasks(self, binners, progress=progressbar)\n @delayed\n def finish(counts):\n return np.asanyarray(counts)\n return finish(result)\n finally:\n self.local._aggregator_nest_count -= 1\n @delayed\n def finish(binners, *counts):\n if array_type == 'xarray':\n dims = [binner.expression for binner in binners]\n if expression_waslist:\n dims = ['expression'] + dims\n\n def to_coord(binner):\n if isinstance(binner, BinnerOrdinal):\n return self.category_labels(binner.expression)\n elif isinstance(binner, BinnerScalar):\n return self.bin_centers(binner.expression, [binner.minimum, binner.maximum], binner.count)\n coords = [to_coord(binner) for binner in binners]\n if expression_waslist:\n coords = [expressions] + coords\n counts = np.asanyarray(counts)\n else:\n counts = counts[0]\n return xarray.DataArray(counts, dims=dims, coords=coords)\n elif array_type == 'list':\n return vaex.utils.unlistify(expression_waslist, counts).tolist()\n elif array_type in [None, 'numpy']:\n def possibly_masked_array(ar):\n if isinstance(ar, (list, tuple)):\n has_mask = any(np.ma.isMaskedArray(k) for k in ar)\n else:\n has_mask = np.ma.isMaskedArray(ar)\n if has_mask:\n return np.ma.array(ar)\n else:\n return np.asanyarray(ar)\n return possibly_masked_array(vaex.utils.unlistify(expression_waslist, counts))\n else:\n raise RuntimeError(f'Unknown array_type {format}')\n stats = [compute(expression, binners, selection=selection, edges=edges) for expression in expressions]\n var = finish(binners, *stats)\n return self._delay(delay, progressbar.exit_on(var))\n\n @docsubst\n def count(self, expression=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None, array_type=None):\n \"\"\"Count the number of non-NaN values (or all, if expression is None or \"*\").\n\n Example:\n\n >>> df.count()\n 330000\n >>> df.count(\"*\")\n 330000.0\n >>> df.count(\"*\", binby=[\"x\"], shape=4)\n array([ 10925., 155427., 152007., 10748.])\n\n :param expression: Expression or column for which to count non-missing values, or None or '*' for counting the rows\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param edges: {edges}\n :param array_type: {array_type}\n :return: {return_stat_scalar}\n \"\"\"\n return self._compute_agg('count', expression, binby, limits, shape, selection, delay, edges, 
progress, array_type=array_type)\n\n @delayed\n def _first_calculation(self, expression, order_expression, binby, limits, shape, selection, edges, progressbar):\n if shape:\n limits, shapes = limits\n else:\n limits, shapes = limits, shape\n task = tasks.TaskStatistic(self, binby, shapes, limits, weights=[expression, order_expression], op=tasks.OP_FIRST, selection=selection, edges=edges)\n task = self.executor.schedule(task)\n progressbar.add_task(task, \"count for %s\" % expression)\n @delayed\n def finish(counts):\n counts = np.array(counts)\n return counts\n return finish(task)\n\n @docsubst\n def first(self, expression, order_expression=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None, array_type=None):\n \"\"\"Return the first element of a binned `expression`, where the values each bin are sorted by `order_expression`.\n\n Example:\n\n >>> import vaex\n >>> df = vaex.example()\n >>> df.first(df.x, df.y, shape=8)\n >>> df.first(df.x, df.y, shape=8, binby=[df.y])\n >>> df.first(df.x, df.y, shape=8, binby=[df.y])\n array([-4.81883764, 11.65378 , 9.70084476, -7.3025589 , 4.84954977,\n 8.47446537, -5.73602629, 10.18783 ])\n\n :param expression: {expression}\n :param order_expression: Order the values in the bins by this expression.\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param edges: {edges}\n :param array_type: {array_type}\n :return: Ndarray containing the first elements.\n :rtype: numpy.array\n \"\"\"\n return self._compute_agg('first', expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions=[order_expression], array_type=array_type)\n logger.debug(\"count(%r, binby=%r, limits=%r)\", expression, binby, limits)\n logger.debug(\"count(%r, binby=%r, limits=%r)\", expression, binby, limits)\n expression = _ensure_strings_from_expressions(expression)\n order_expression = _ensure_string_from_expression(order_expression)\n binby = _ensure_strings_from_expressions(binby)\n waslist, [expressions,] = vaex.utils.listify(expression)\n @delayed\n def finish(*counts):\n counts = np.asarray(counts)\n return vaex.utils.unlistify(waslist, counts)\n progressbar = vaex.utils.progressbars(progress)\n limits = self.limits(binby, limits, delay=True, shape=shape)\n stats = [self._first_calculation(expression, order_expression, binby=binby, limits=limits, shape=shape, selection=selection, edges=edges, progressbar=progressbar) for expression in expressions]\n var = finish(*stats)\n return self._delay(delay, var)\n\n @docsubst\n def last(self, expression, order_expression=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None, array_type=None):\n \"\"\"Return the last element of a binned `expression`, where the values each bin are sorted by `order_expression`.\n\n :param expression: The value to be placed in the bin.\n :param order_expression: Order the values in the bins by this expression.\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param edges: {edges}\n :param array_type: {array_type}\n :return: Ndarray containing the first elements.\n :rtype: numpy.array\n \"\"\"\n return self._compute_agg('last', expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions=[order_expression], array_type=array_type)\n\n 
@docsubst\n @stat_1d\n def mean(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False, array_type=None):\n \"\"\"Calculate the mean for expression, possibly on a grid defined by binby.\n\n Example:\n\n >>> df.mean(\"x\")\n -0.067131491264005971\n >>> df.mean(\"(x**2+y**2)**0.5\", binby=\"E\", shape=4)\n array([ 2.43483742, 4.41840721, 8.26742458, 15.53846476])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param array_type: {array_type}\n :return: {return_stat_scalar}\n \"\"\"\n return self._compute_agg('mean', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)\n logger.debug(\"mean of %r, with binby=%r, limits=%r, shape=%r, selection=%r, delay=%r\", expression, binby, limits, shape, selection, delay)\n expression = _ensure_strings_from_expressions(expression)\n selection = _ensure_strings_from_expressions(selection)\n binby = _ensure_strings_from_expressions(binby)\n\n @delayed\n def calculate(expression, limits):\n task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_ADD_WEIGHT_MOMENTS_01, selection=selection)\n task = self.executor.schedule(task)\n progressbar.add_task(task, \"mean for %s\" % expression)\n return task\n\n @delayed\n def finish(*stats_args):\n stats = np.array(stats_args)\n counts = stats[..., 0]\n with np.errstate(divide='ignore', invalid='ignore'):\n mean = stats[..., 1] / counts\n return vaex.utils.unlistify(waslist, mean)\n waslist, [expressions, ] = vaex.utils.listify(expression)\n progressbar = vaex.utils.progressbars(progress)\n limits = self.limits(binby, limits, delay=True)\n stats = [calculate(expression, limits) for expression in expressions]\n var = finish(*stats)\n return self._delay(delay, var)\n\n @delayed\n def _sum_calculation(self, expression, binby, limits, shape, selection, progressbar):\n task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_ADD_WEIGHT_MOMENTS_01, selection=selection)\n task = self.executor.schedule(task)\n progressbar.add_task(task, \"sum for %s\" % expression)\n @delayed\n def finish(sum_grid):\n stats = np.array(sum_grid)\n return stats[...,1]\n return finish(task)\n\n @docsubst\n @stat_1d\n def sum(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False, array_type=None):\n \"\"\"Calculate the sum for the given expression, possible on a grid defined by binby\n\n Example:\n\n >>> df.sum(\"L\")\n 304054882.49378014\n >>> df.sum(\"L\", binby=\"E\", shape=4)\n array([ 8.83517994e+06, 5.92217598e+07, 9.55218726e+07,\n 1.40008776e+08])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param array_type: {array_type}\n :return: {return_stat_scalar}\n \"\"\"\n return self._compute_agg('sum', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)\n @delayed\n def finish(*sums):\n return vaex.utils.unlistify(waslist, sums)\n expression = _ensure_strings_from_expressions(expression)\n binby = _ensure_strings_from_expressions(binby)\n waslist, [expressions, ] = vaex.utils.listify(expression)\n progressbar = vaex.utils.progressbars(progress)\n limits = self.limits(binby, limits, 
delay=True)\n # stats = [calculate(expression, limits) for expression in expressions]\n sums = [self._sum_calculation(expression, binby=binby, limits=limits, shape=shape, selection=selection, progressbar=progressbar) for expression in expressions]\n s = finish(*sums)\n return self._delay(delay, s)\n\n @docsubst\n @stat_1d\n def std(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, array_type=None):\n \"\"\"Calculate the standard deviation for the given expression, possible on a grid defined by binby\n\n\n >>> df.std(\"vz\")\n 110.31773397535071\n >>> df.std(\"vz\", binby=[\"(x**2+y**2)**0.5\"], shape=4)\n array([ 123.57954851, 85.35190177, 61.14345748, 38.0740619 ])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param array_type: {array_type}\n :return: {return_stat_scalar}\n \"\"\"\n @delayed\n def finish(var):\n return var**0.5\n return self._delay(delay, finish(self.var(expression, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progress)))\n\n @docsubst\n @stat_1d\n def var(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, array_type=None):\n \"\"\"Calculate the sample variance for the given expression, possible on a grid defined by binby\n\n Example:\n\n >>> df.var(\"vz\")\n 12170.002429456246\n >>> df.var(\"vz\", binby=[\"(x**2+y**2)**0.5\"], shape=4)\n array([ 15271.90481083, 7284.94713504, 3738.52239232, 1449.63418988])\n >>> df.var(\"vz\", binby=[\"(x**2+y**2)**0.5\"], shape=4)**0.5\n array([ 123.57954851, 85.35190177, 61.14345748, 38.0740619 ])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param array_type: {array_type}\n :return: {return_stat_scalar}\n \"\"\"\n edges = False\n return self._compute_agg('var', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)\n\n @docsubst\n def skew(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False, array_type=None):\n '''\n Calculate the skew for the given expression, possible on a grid defined by binby.\n\n Example:\n\n >>> df.skew(\"vz\")\n 0.02116528\n >>> df.skew(\"vz\", binby=[\"E\"], shape=4)\n array([-0.069976 , -0.01003445, 0.05624177, -2.2444322 ])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param array_type: {array_type}\n :return: {return_stat_scalar}\n '''\n edges=False\n return self._compute_agg('skew', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)\n\n @docsubst\n def kurtosis(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False, array_type=None):\n '''\n Calculate the kurtosis for the given expression, possible on a grid defined by binby.\n\n Example:\n\n >>> df.kurtosis('vz')\n 0.33414303\n >>> df.kurtosis(\"vz\", binby=[\"E\"], shape=4)\n array([0.35286113, 0.14455428, 0.52955107, 5.06716345])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param 
selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param array_type: {array_type}\n :return: {return_stat_scalar}\n '''\n edges=False\n return self._compute_agg('kurtosis', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)\n\n @docsubst\n def covar(self, x, y, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):\n \"\"\"Calculate the covariance cov[x,y] between x and y, possibly on a grid defined by binby.\n\n Example:\n\n >>> df.covar(\"x**2+y**2+z**2\", \"-log(-E+1)\")\n array(52.69461456005138)\n >>> df.covar(\"x**2+y**2+z**2\", \"-log(-E+1)\")/(df.std(\"x**2+y**2+z**2\") * df.std(\"-log(-E+1)\"))\n 0.63666373822156686\n >>> df.covar(\"x**2+y**2+z**2\", \"-log(-E+1)\", binby=\"Lz\", shape=4)\n array([ 10.17387143, 51.94954078, 51.24902796, 20.2163929 ])\n\n\n\n :param x: {expression}\n :param y: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}\n \"\"\"\n @delayed\n def cov(mean_x, mean_y, mean_xy):\n return mean_xy - mean_x * mean_y\n\n waslist, [xlist, ylist] = vaex.utils.listify(x, y)\n # print(\"limits\", limits)\n limits = self.limits(binby, limits, selection=selection, delay=True)\n # print(\"limits\", limits)\n\n @delayed\n def calculate(limits):\n results = []\n for x, y in zip(xlist, ylist):\n mx = self.mean(x, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)\n my = self.mean(y, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)\n cxy = self.mean(\"(%s)*(%s)\" % (x, y), binby=binby, limits=limits, shape=shape, selection=selection,\n delay=True, progress=progressbar)\n results.append(cov(mx, my, cxy))\n return results\n\n progressbar = vaex.utils.progressbars(progress, title=\"covar\")\n covars = calculate(limits)\n\n @delayed\n def finish(covars):\n value = np.array(vaex.utils.unlistify(waslist, covars))\n return value\n return self._delay(delay, finish(delayed_list(covars)))\n\n @docsubst\n def correlation(self, x, y=None, binby=[], limits=None, shape=default_shape, sort=False, sort_key=np.abs, selection=False, delay=False, progress=None, array_type=None):\n \"\"\"Calculate the correlation coefficient cov[x,y]/(std[x]*std[y]) between x and y, possibly on a grid defined by binby.\n\n The `x` and `y` arguments can be single expressions of lists of expressions.\n - If `x` and `y` are single expression, it computes the correlation between `x` and `y`;\n - If `x` is a list of expressions and `y` is a single expression, it computes the correlation between each expression in `x` and the expression in `y`;\n - If `x` is a list of expressions and `y` is None, it computes the correlation matrix amongst all expressions in `x`;\n - If `x` is a list of tuples of length 2, it computes the correlation for the specified dimension pairs;\n - If `x` and `y` are lists of expressions, it computes the correlation matrix defined by the two expression lists.\n\n Example:\n\n >>> import vaex\n >>> df = vaex.example()\n >>> df.correlation(\"x**2+y**2+z**2\", \"-log(-E+1)\")\n array(0.6366637382215669)\n >>> df.correlation(\"x**2+y**2+z**2\", \"-log(-E+1)\", binby=\"Lz\", shape=4)\n array([ 0.40594394, 0.69868851, 0.61394099, 0.65266318])\n >>> df.correlation(x=['x', 'y', 'z'])\n array([[ 1. , -0.06668907, -0.02709719],\n [-0.06668907, 1. 
, 0.03450365],\n [-0.02709719, 0.03450365, 1. ]])\n >>> df.correlation(x=['x', 'y', 'z'], y=['E', 'Lz'])\n array([[-0.01116315, -0.00369268],\n [-0.0059848 , 0.02472491],\n [ 0.01428211, -0.05900035]])\n\n :param x: {expression}\n :param y: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}\n \"\"\"\n selection = _normalize_selection(selection)\n progressbar = vaex.utils.progressbars(progress, title=\"correlation\")\n if y is None:\n if not _issequence(x):\n raise ValueError(\"if y not given, x is expected to be a list or tuple, not %r\" % x)\n if all([_issequence(k) and len(k) == 2 for k in x]):\n values = []\n pairs = x\n x = []\n y = []\n for col1, col2 in pairs:\n x.append(col1)\n y.append(col2)\n values.append(self.correlation(col1, col2, delay=True, progress=progressbar))\n @vaex.delayed\n def finish(values):\n return vaex.from_arrays(x=x, y=y, correlation=values)\n result = finish(values)\n else:\n result = self._correlation_matrix(x, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar, array_type=array_type)\n elif _issequence(x) and _issequence(y):\n result = delayed(np.array)([[self.correlation(x_, y_, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar) for y_ in y] for x_ in x])\n elif _issequence(x):\n combinations = [(k, y) for k in x]\n result = delayed(np.array)([self.correlation(x_, y, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)for x_ in x])\n elif _issequence(y):\n combinations = [(x, k) for k in y]\n result = self.correlation(combinations, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)\n else:\n @vaex.delayed\n def finish(matrix):\n return matrix[...,0,1]\n matrix = self._correlation_matrix([x, y], binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)\n result = finish(matrix)\n return self._delay(delay, result)\n\n\n @docsubst\n def _correlation_matrix(self, column_names=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, array_type=None):\n if column_names is None:\n column_names = self.get_column_names()\n @delayed\n def normalize(cov_matrix):\n norm = cov_matrix[:]\n diag = np.diagonal(cov_matrix, axis1=-2, axis2=-1)\n # generalized outer product\n norm = (diag[...,np.newaxis,:] * diag[...,np.newaxis]) ** 0.5\n # norm = np.outer(diag, diag)**0.5\n return cov_matrix/norm\n result = normalize(self.cov(column_names, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progress))\n\n @vaex.delayed\n def finish(array):\n if array_type == 'xarray':\n dims = binby + ['x', 'y']\n coords = [column_names, column_names]\n return xarray.DataArray(array, dims=dims, coords=coords)\n else:\n return vaex.array_types.convert(array, array_type)\n\n return self._delay(delay, finish(result))\n\n @docsubst\n def cov(self, x, y=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):\n \"\"\"Calculate the covariance matrix for x and y or more expressions, possibly on a grid defined by binby.\n\n Either x and y are expressions, e.g.:\n\n >>> df.cov(\"x\", \"y\")\n\n Or only the x argument is given with a list of expressions, e.g.:\n\n >>> df.cov([\"x, \"y, \"z\"])\n\n Example:\n\n >>> df.cov(\"x\", 
\"y\")\n array([[ 53.54521742, -3.8123135 ],\n [ -3.8123135 , 60.62257881]])\n >>> df.cov([\"x\", \"y\", \"z\"])\n array([[ 53.54521742, -3.8123135 , -0.98260511],\n [ -3.8123135 , 60.62257881, 1.21381057],\n [ -0.98260511, 1.21381057, 25.55517638]])\n\n >>> df.cov(\"x\", \"y\", binby=\"E\", shape=2)\n array([[[ 9.74852878e+00, -3.02004780e-02],\n [ -3.02004780e-02, 9.99288215e+00]],\n [[ 8.43996546e+01, -6.51984181e+00],\n [ -6.51984181e+00, 9.68938284e+01]]])\n\n\n :param x: {expression}\n :param y: {expression_single}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :return: {return_stat_scalar}, the last dimensions are of shape (2,2)\n \"\"\"\n selection = _ensure_strings_from_expressions(selection)\n selection = _normalize_selection(selection)\n if y is None:\n if not _issequence(x):\n raise ValueError(\"if y argument is not given, x is expected to be sequence, not %r\", x)\n expressions = x\n else:\n expressions = [x, y]\n expressions = _ensure_strings_from_expressions(expressions)\n N = len(expressions)\n binby = _ensure_list(binby)\n shape = _expand_shape(shape, len(binby))\n limits = self.limits(binby, limits, selection=selection, delay=True)\n\n @delayed\n def calculate(expressions, limits):\n # print('limits', limits)\n task = tasks.TaskStatistic(self, binby, shape, limits, weights=expressions, op=tasks.OP_COV, selection=selection)\n task = self.executor.schedule(task)\n progressbar.add_task(task, \"covariance values for %r\" % expressions)\n return task\n\n @delayed\n def finish(values):\n N = len(expressions)\n counts = values[..., :N]\n sums = values[..., N:2 * N]\n with np.errstate(divide='ignore', invalid='ignore'):\n means = sums / counts\n # matrix of means * means.T\n meansxy = means[..., None] * means[..., None, :]\n\n counts = values[..., 2 * N:2 * N + N**2]\n sums = values[..., 2 * N + N**2:]\n shape = counts.shape[:-1] + (N, N)\n counts = counts.reshape(shape)\n sums = sums.reshape(shape)\n with np.errstate(divide='ignore', invalid='ignore'):\n moments2 = sums / counts\n cov_matrix = moments2 - meansxy\n return cov_matrix\n progressbar = vaex.utils.progressbars(progress, title=\"cov\")\n values = calculate(expressions, limits)\n cov_matrix = finish(values)\n return self._delay(delay, cov_matrix)\n\n @docsubst\n @stat_1d\n def minmax(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):\n \"\"\"Calculate the minimum and maximum for expressions, possibly on a grid defined by binby.\n\n\n Example:\n\n >>> df.minmax(\"x\")\n array([-128.293991, 271.365997])\n >>> df.minmax([\"x\", \"y\"])\n array([[-128.293991 , 271.365997 ],\n [ -71.5523682, 146.465836 ]])\n >>> df.minmax(\"x\", binby=\"x\", shape=5, limits=[-10, 10])\n array([[-9.99919128, -6.00010443],\n [-5.99972439, -2.00002384],\n [-1.99991322, 1.99998057],\n [ 2.0000093 , 5.99983597],\n [ 6.0004878 , 9.99984646]])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}, the last dimension is of shape (2)\n \"\"\"\n # vmin = self._compute_agg('min', expression, binby, limits, shape, selection, delay, edges, progress)\n # vmax = self._compute_agg('max', expression, binby, limits, shape, selection, delay, edges, progress)\n selection = _ensure_strings_from_expressions(selection)\n selection = 
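
    # --- usage sketch (added; illustrative only) ---
    # cov() returns full covariance matrices; the trailing dimensions of the
    # result are (N, N) for N expressions:
    #
    #   df.cov('x', 'y')                       # 2x2 matrix
    #   df.cov(['x', 'y', 'z'])                # 3x3 matrix
    #   df.cov('x', 'y', binby='E', shape=2)   # one 2x2 matrix per E bin
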
_normalize_selection(selection)\n @delayed\n def calculate(expression, limits):\n task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_MIN_MAX, selection=selection)\n task = self.executor.schedule(task)\n progressbar.add_task(task, \"minmax for %s\" % expression)\n return task\n @delayed\n def finish(*minmax_list):\n value = vaex.utils.unlistify(waslist, np.array(minmax_list))\n value = vaex.array_types.to_numpy(value)\n value = value.astype(data_type0.numpy)\n return value\n expression = _ensure_strings_from_expressions(expression)\n binby = _ensure_strings_from_expressions(binby)\n waslist, [expressions, ] = vaex.utils.listify(expression)\n column_names = self.get_column_names(hidden=True)\n expressions = [vaex.utils.valid_expression(column_names, k) for k in expressions]\n data_types = [self.data_type(expr) for expr in expressions]\n data_type0 = data_types[0]\n # special case that we supported mixed endianness for ndarrays\n all_same_kind = all(isinstance(data_type.internal, np.dtype) for data_type in data_types) and all([k.kind == data_type0.kind for k in data_types])\n if not (all_same_kind or all([k == data_type0 for k in data_types])):\n raise TypeError(\"cannot mix different dtypes in 1 minmax call\")\n progressbar = vaex.utils.progressbars(progress, title=\"minmaxes\")\n limits = self.limits(binby, limits, selection=selection, delay=True)\n all_tasks = [calculate(expression, limits) for expression in expressions]\n result = finish(*all_tasks)\n return self._delay(delay, result)\n\n @docsubst\n @stat_1d\n def min(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False, array_type=None):\n \"\"\"Calculate the minimum for given expressions, possibly on a grid defined by binby.\n\n\n Example:\n\n >>> df.min(\"x\")\n array(-128.293991)\n >>> df.min([\"x\", \"y\"])\n array([-128.293991 , -71.5523682])\n >>> df.min(\"x\", binby=\"x\", shape=5, limits=[-10, 10])\n array([-9.99919128, -5.99972439, -1.99991322, 2.0000093 , 6.0004878 ])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param array_type: {array_type}\n :return: {return_stat_scalar}, the last dimension is of shape (2)\n \"\"\"\n return self._compute_agg('min', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)\n @delayed\n def finish(result):\n return result[..., 0]\n return self._delay(delay, finish(self.minmax(expression, binby=binby, limits=limits, shape=shape, selection=selection, delay=delay, progress=progress)))\n\n @docsubst\n @stat_1d\n def max(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False, array_type=None):\n \"\"\"Calculate the maximum for given expressions, possibly on a grid defined by binby.\n\n\n Example:\n\n >>> df.max(\"x\")\n array(271.365997)\n >>> df.max([\"x\", \"y\"])\n array([ 271.365997, 146.465836])\n >>> df.max(\"x\", binby=\"x\", shape=5, limits=[-10, 10])\n array([-6.00010443, -2.00002384, 1.99998057, 5.99983597, 9.99984646])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :param array_type: {array_type}\n :return: {return_stat_scalar}, the last dimension is of shape (2)\n \"\"\"\n return 
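
    # --- note (added) ---
    # Both min() and max() return through the generic _compute_agg('min'/'max', ...)
    # call; the @delayed finish() blocks that follow those return statements are
    # unreachable remnants of the older minmax()-based implementation, and the
    # "last dimension is of shape (2)" remark in their docstrings is likewise a
    # leftover from minmax().
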
self._compute_agg('max', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)\n @delayed\n def finish(result):\n return result[..., 1]\n return self._delay(delay, finish(self.minmax(expression, binby=binby, limits=limits, shape=shape, selection=selection, delay=delay, progress=progress)))\n\n @docsubst\n @stat_1d\n def median_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=256, percentile_limits=\"minmax\", selection=False, delay=False, progress=None):\n \"\"\"Calculate the median, possibly on a grid defined by binby.\n\n NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by\n percentile_shape and percentile_limits\n\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param percentile_limits: {percentile_limits}\n :param percentile_shape: {percentile_shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}\n \"\"\"\n return self.percentile_approx(expression, 50, binby=binby, limits=limits, shape=shape, percentile_shape=percentile_shape, percentile_limits=percentile_limits, selection=selection, delay=delay, progress=progress)\n\n @docsubst\n def percentile_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=1024, percentile_limits=\"minmax\", selection=False, delay=False, progress=None):\n \"\"\"Calculate the percentile given by percentage, possibly on a grid defined by binby.\n\n NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by\n percentile_shape and percentile_limits.\n\n\n Example:\n\n >>> df.percentile_approx(\"x\", 10), df.percentile_approx(\"x\", 90)\n (array([-8.3220355]), array([ 7.92080358]))\n >>> df.percentile_approx(\"x\", 50, binby=\"x\", shape=5, limits=[-10, 10])\n array([[-7.56462982],\n [-3.61036641],\n [-0.01296306],\n [ 3.56697863],\n [ 7.45838367]])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param percentile_limits: {percentile_limits}\n :param percentile_shape: {percentile_shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}\n \"\"\"\n waslist, [expressions, ] = vaex.utils.listify(expression)\n if not isinstance(binby, (tuple, list)):\n binby = [binby]\n else:\n binby = binby\n\n @delayed\n def calculate(expression, shape, limits):\n # task = TaskStatistic(self, [expression] + binby, shape, limits, op=OP_ADD1, selection=selection)\n # self.executor.schedule(task)\n # return task\n return self.count(binby=list(binby) + [expression], shape=shape, limits=limits, selection=selection, delay=True, edges=True, progress=progress)\n\n @delayed\n def finish(percentile_limits, counts_list):\n results = []\n for i, counts in enumerate(counts_list):\n counts = counts.astype(np.float64)\n # remove the nan and boundary edges from the first dimension,\n nonnans = list([slice(2, -1, None) for k in range(len(counts.shape) - 1)])\n nonnans.append(slice(1, None, None)) # we're gonna get rid only of the nan's, and keep the overflow edges\n nonnans = tuple(nonnans)\n cumulative_grid = np.cumsum(counts.__getitem__(nonnans), -1) # convert to cumulative grid\n\n totalcounts = np.sum(counts.__getitem__(nonnans), -1)\n empty = totalcounts == 0\n\n original_shape = counts.shape\n shape = 
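
    # --- usage sketch (added; illustrative only) ---
    #   df.median_approx('x')                       # shorthand for percentile_approx(..., 50)
    #   df.percentile_approx('x', [10, 50, 90])     # several percentages in one pass
    #   df.percentile_approx('x', 50, binby='x', shape=5, limits=[-10, 10])
    # Accuracy is governed by percentile_shape / percentile_limits, because the
    # value is read off a cumulative histogram rather than obtained from a sort.
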
cumulative_grid.shape # + (original_shape[-1] - 1,) #\n\n counts = np.sum(counts, -1)\n edges_floor = np.zeros(shape[:-1] + (2,), dtype=np.int64)\n edges_ceil = np.zeros(shape[:-1] + (2,), dtype=np.int64)\n # if we have an off # of elements, say, N=3, the center is at i=1=(N-1)/2\n # if we have an even # of elements, say, N=4, the center is between i=1=(N-2)/2 and i=2=(N/2)\n # index = (shape[-1] -1-3) * percentage/100. # the -3 is for the edges\n waslist_percentage, [percentages, ] = vaex.utils.listify(percentage)\n percentiles = []\n for p in percentages:\n if p == 0:\n percentiles.append(percentile_limits[i][0])\n continue\n if p == 100:\n percentiles.append(percentile_limits[i][1])\n continue\n values = np.array((totalcounts + 1) * p / 100.) # make sure it's an ndarray\n values[empty] = 0\n floor_values = np.array(np.floor(values))\n ceil_values = np.array(np.ceil(values))\n vaex.vaexfast.grid_find_edges(cumulative_grid, floor_values, edges_floor)\n vaex.vaexfast.grid_find_edges(cumulative_grid, ceil_values, edges_ceil)\n\n def index_choose(a, indices):\n # alternative to np.choise, which doesn't like the last dim to be >= 32\n # print(a, indices)\n out = np.zeros(a.shape[:-1])\n # print(out.shape)\n for i in np.ndindex(out.shape):\n # print(i, indices[i])\n out[i] = a[i + (indices[i],)]\n return out\n\n def calculate_x(edges, values):\n left, right = edges[..., 0], edges[..., 1]\n left_value = index_choose(cumulative_grid, left)\n right_value = index_choose(cumulative_grid, right)\n with np.errstate(divide='ignore', invalid='ignore'):\n u = np.array((values - left_value) / (right_value - left_value))\n # TODO: should it really be -3? not -2\n xleft, xright = percentile_limits[i][0] + (left - 0.5) * (percentile_limits[i][1] - percentile_limits[i][0]) / (shape[-1] - 3),\\\n percentile_limits[i][0] + (right - 0.5) * (percentile_limits[i][1] - percentile_limits[i][0]) / (shape[-1] - 3)\n x = xleft + (xright - xleft) * u # /2\n return x\n\n x1 = calculate_x(edges_floor, floor_values)\n x2 = calculate_x(edges_ceil, ceil_values)\n u = values - floor_values\n x = x1 + (x2 - x1) * u\n percentiles.append(x)\n percentile = vaex.utils.unlistify(waslist_percentage, np.array(percentiles))\n results.append(percentile)\n\n return results\n\n shape = _expand_shape(shape, len(binby))\n percentile_shapes = _expand_shape(percentile_shape, len(expressions))\n if percentile_limits:\n percentile_limits = _expand_limits(percentile_limits, len(expressions))\n limits = self.limits(binby, limits, selection=selection, delay=True)\n percentile_limits = self.limits(expressions, percentile_limits, selection=selection, delay=True)\n\n @delayed\n def calculation(limits, percentile_limits):\n # print(\">>>\", expressions, percentile_limits)\n # print(percentile_limits[0], list(percentile_limits[0]))\n # print(list(np.array(limits).tolist()) + list(percentile_limits[0]))\n # print(\"limits\", limits, expressions, percentile_limits, \">>\", list(limits) + [list(percentile_limits[0]))\n tasks = [calculate(expression, tuple(shape) + (percentile_shape, ), list(limits) + [list(percentile_limit)])\n for percentile_shape, percentile_limit, expression\n in zip(percentile_shapes, percentile_limits, expressions)]\n return finish(percentile_limits, delayed_args(*tasks))\n # return tasks\n result = calculation(limits, percentile_limits)\n\n @delayed\n def finish2(grid):\n value = vaex.utils.unlistify(waslist, np.array(grid))\n return value\n return self._delay(delay, finish2(result))\n\n def _use_delay(self, delay):\n return delay 
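
    # --- conceptual sketch (added; plain numpy, not the vaex internals) ---
    # The approximate percentile above inverts a cumulative histogram and
    # interpolates between bin edges; the underlying idea, stripped of the
    # binby and edge bookkeeping, looks roughly like:
    #
    #   import numpy as np
    #   def percentile_from_histogram(data, p, bins=1024):
    #       counts, edges = np.histogram(data, bins=bins)
    #       cdf = np.cumsum(counts) / counts.sum()       # cumulative distribution on the grid
    #       return np.interp(p / 100.0, cdf, edges[1:])  # invert the CDF by interpolation
    #
    #   # percentile_from_histogram(np.random.randn(10_000), 50)   # close to 0.0
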
== True\n\n def _delay(self, delay, task, progressbar=False):\n if task.isRejected:\n task.get()\n if delay:\n return task\n else:\n self.execute()\n return task.get()\n\n @docsubst\n def limits_percentage(self, expression, percentage=99.73, square=False, selection=False, progress=None, delay=False):\n \"\"\"Calculate the [min, max] range for expression, containing approximately a percentage of the data as defined\n by percentage.\n\n The range is symmetric around the median, i.e., for a percentage of 90, this gives the same results as:\n\n Example:\n\n >>> df.limits_percentage(\"x\", 90)\n array([-12.35081376, 12.14858052]\n >>> df.percentile_approx(\"x\", 5), df.percentile_approx(\"x\", 95)\n (array([-12.36813152]), array([ 12.13275818]))\n\n NOTE: this value is approximated by calculating the cumulative distribution on a grid.\n NOTE 2: The values above are not exactly the same, since percentile and limits_percentage do not share the same code\n\n :param expression: {expression_limits}\n :param float percentage: Value between 0 and 100\n :param progress: {progress}\n :param delay: {delay}\n :return: {return_limits}\n \"\"\"\n logger.info(\"limits_percentage for %r, with percentage=%r\", expression, percentage)\n progressbar = vaex.utils.progressbars(progress, title=\"limits_percentage\")\n waslist, [expressions, ] = vaex.utils.listify(expression)\n limits = []\n for expr in expressions:\n @delayed\n def compute(limits_minmax, expr=expr):\n @delayed\n def compute_limits(counts):\n cumcounts = np.concatenate([[0], np.cumsum(counts)])\n cumcounts = cumcounts / cumcounts.max()\n # TODO: this is crude.. see the details!\n f = (1 - percentage / 100.) / 2\n x = np.linspace(vmin, vmax, size + 1)\n l = np.interp([f, 1 - f], cumcounts, x)\n return l\n vmin, vmax = limits_minmax\n size = 1024 * 16\n counts = self.count(binby=expr, shape=size, limits=limits_minmax, selection=selection, progress=progressbar, delay=delay)\n return compute_limits(counts)\n # limits.append(l)\n limits_minmax = self.minmax(expr, selection=selection, delay=delay)\n limits1 = compute(limits_minmax=limits_minmax)\n limits.append(limits1)\n return self._delay(delay, progressbar.exit_on(delayed(vaex.utils.unlistify)(waslist, limits)))\n\n @docsubst\n def limits(self, expression, value=None, square=False, selection=None, delay=False, progress=None, shape=None):\n \"\"\"Calculate the [min, max] range for expression, as described by value, which is 'minmax' by default.\n\n If value is a list of the form [minvalue, maxvalue], it is simply returned, this is for convenience when using mixed\n forms.\n\n Example:\n\n >>> import vaex\n >>> df = vaex.example()\n >>> df.limits(\"x\")\n array([-128.293991, 271.365997])\n >>> df.limits(\"x\", \"99.7%\")\n array([-28.86381927, 28.9261226 ])\n >>> df.limits([\"x\", \"y\"])\n (array([-128.293991, 271.365997]), array([ -71.5523682, 146.465836 ]))\n >>> df.limits([\"x\", \"y\"], \"99.7%\")\n (array([-28.86381927, 28.9261226 ]), array([-28.60476934, 28.96535249]))\n >>> df.limits([\"x\", \"y\"], [\"minmax\", \"90%\"])\n (array([-128.293991, 271.365997]), array([-13.37438402, 13.4224423 ]))\n >>> df.limits([\"x\", \"y\"], [\"minmax\", [0, 10]])\n (array([-128.293991, 271.365997]), [0, 10])\n\n :param expression: {expression_limits}\n :param value: {limits}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_limits}\n \"\"\"\n if expression == []:\n return [] if shape is None else ([], [])\n waslist, [expressions, ] = 
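
    # --- usage sketch (added; illustrative only) ---
    #   df.limits_percentage('x', 90)    # symmetric-around-the-median [min, max] range
    #   # roughly comparable to, but (see the NOTEs above) not identical with:
    #   # df.percentile_approx('x', 5), df.percentile_approx('x', 95)
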
vaex.utils.listify(expression)\n expressions = _ensure_strings_from_expressions(expressions)\n selection = _ensure_strings_from_expressions(selection)\n if value is None:\n value = \"minmax\"\n if _is_limit(value) or not _issequence(value):\n values = (value,) * len(expressions)\n else:\n values = value\n # we cannot hash arrow arrays\n values = [vaex.array_types.to_numpy(k) if isinstance(k, vaex.array_types.supported_arrow_array_types) else k for k in values]\n progressbar = vaex.utils.progressbars(progress, title=\"limits\")\n\n initial_expressions, initial_values = expressions, values\n expression_values = dict()\n expression_shapes = dict()\n for i, (expression, value) in enumerate(zip(expressions, values)):\n if _issequence(expression):\n expressions = expression\n nested = True\n else:\n expressions = [expression]\n nested = False\n if _is_limit(value) or not _issequence(value):\n values = (value,) * len(expressions)\n else:\n values = value\n for j, (expression, value) in enumerate(zip(expressions, values)):\n if shape is not None:\n if _issequence(shape):\n shapes = shape\n else:\n shapes = (shape, ) * (len(expressions) if nested else len(initial_expressions))\n\n shape_index = j if nested else i\n\n if not _is_limit(value):\n expression_values[(expression, value)] = None\n if self.is_category(expression):\n N = self._categories[_ensure_string_from_expression(expression)]['N']\n expression_shapes[expression] = min(N, shapes[shape_index] if shape is not None else default_shape)\n else:\n expression_shapes[expression] = shapes[shape_index] if shape is not None else default_shape\n\n limits_list = []\n for expression, value in expression_values.keys():\n if self.is_category(expression):\n N = self._categories[_ensure_string_from_expression(expression)]['N']\n limits = [-0.5, N-0.5]\n else:\n if isinstance(value, six.string_types):\n if value == \"minmax\":\n limits = self.minmax(expression, selection=selection, progress=progressbar, delay=True)\n else:\n match = re.match(r\"([\\d.]*)(\\D*)\", value)\n if match is None:\n raise ValueError(\"do not understand limit specifier %r, examples are 90%, 3sigma\")\n else:\n number, type = match.groups()\n import ast\n number = ast.literal_eval(number)\n type = type.strip()\n if type in [\"s\", \"sigma\"]:\n limits = self.limits_sigma(number)\n elif type in [\"ss\", \"sigmasquare\"]:\n limits = self.limits_sigma(number, square=True)\n elif type in [\"%\", \"percent\"]:\n limits = self.limits_percentage(expression, number, selection=selection, delay=True, progress=progressbar)\n elif type in [\"%s\", \"%square\", \"percentsquare\"]:\n limits = self.limits_percentage(expression, number, selection=selection, square=True, delay=True)\n elif value is None:\n limits = self.minmax(expression, selection=selection, delay=True)\n else:\n limits = value\n limits_list.append(limits)\n if limits is None:\n raise ValueError(\"limit %r not understood\" % value)\n expression_values[(expression, value)] = limits\n\n limits_list = delayed_args(*limits_list)\n\n @delayed\n def finish(limits_list):\n # print(\"##### 2)\", expression_values.keys())\n limits_outer = []\n shapes_list = []\n for expression, value in zip(initial_expressions, initial_values):\n if _issequence(expression):\n expressions = expression\n waslist2 = True\n else:\n expressions = [expression]\n waslist2 = False\n if _is_limit(value) or not _issequence(value):\n values = (value,) * len(expressions)\n else:\n values = value\n # print(\"expressions 3)\", expressions)\n # print(\"values 3)\", 
values)\n limits = []\n shapes = []\n for expression, value in zip(expressions, values):\n if not _is_limit(value):\n value = expression_values[(expression, value)]\n if not _is_limit(value):\n # print(\">>> value\", value)\n value = value.get()\n limits.append(value)\n shapes.append(expression_shapes[expression])\n # if not _is_limit(value): # if a\n # #value = tuple(value) # list is not hashable\n # expression_values[(expression, value)] = expression_values[(expression, value)].get()\n # else:\n # #value = tuple(value) # list is not hashable\n # expression_values[(expression, value)] = ()\n if waslist2:\n limits_outer.append(limits)\n shapes_list.append(shapes)\n else:\n limits_outer.append(limits[0])\n shapes_list.append(shapes[0])\n # logger.debug(\">>>>>>>> complete list of limits: %r %r\", limits_list, np.array(limits_list).shape)\n\n # print(\"limits\", limits_outer)\n if shape:\n return vaex.utils.unlistify(waslist, limits_outer), vaex.utils.unlistify(waslist, shapes_list)\n else:\n return vaex.utils.unlistify(waslist, limits_outer)\n return self._delay(delay, progressbar.exit_on(finish(limits_list)))\n\n def mode(self, expression, binby=[], limits=None, shape=256, mode_shape=64, mode_limits=None, progressbar=False, selection=None):\n \"\"\"Calculate/estimate the mode.\"\"\"\n if len(binby) == 0:\n raise ValueError(\"only supported with binby argument given\")\n else:\n # todo, fix progressbar into two...\n try:\n len(shape)\n shape = tuple(shape)\n except:\n shape = len(binby) * (shape,)\n shape = (mode_shape,) + shape\n subspace = self(*(list(binby) + [expression]))\n if selection:\n subspace = subspace.selected()\n\n limits = self.limits(list(binby), limits)\n mode_limits = self.limits([expression], mode_limits)\n limits = list(limits) + list(mode_limits)\n counts = subspace.histogram(limits=limits, size=shape, progressbar=progressbar)\n\n indices = np.argmax(counts, axis=0)\n pmin, pmax = limits[-1]\n centers = np.linspace(pmin, pmax, mode_shape + 1)[:-1] # ignore last bin\n centers += (centers[1] - centers[0]) / 2 # and move half a bin to the right\n\n modes = centers[indices]\n ok = counts.sum(axis=0) > 0\n modes[~ok] = np.nan\n return modes\n\n @vaex.utils.deprecated('use df.widget.heatmap')\n def plot_widget(self, x, y, limits=None, f=\"identity\", **kwargs):\n return self.widget.heatmap(x, y, limits=limits, transform=f, **kwargs)\n\n @vaex.utils.deprecated('use plot_widget')\n def plot_bq(self, x, y, grid=None, shape=256, limits=None, what=\"count(*)\", figsize=None,\n f=\"identity\", figure_key=None, fig=None, axes=None, xlabel=None, ylabel=None, title=None,\n show=True, selection=[None, True], colormap=\"afmhot\", grid_limits=None, normalize=\"normalize\",\n grid_before=None,\n what_kwargs={}, type=\"default\",\n scales=None, tool_select=False, bq_cleanup=True,\n **kwargs):\n import vaex.ext.bqplot\n cls = vaex.ext.bqplot.get_class(type)\n plot2d = cls(df=self, x=x, y=y, grid=grid, shape=shape, limits=limits, what=what,\n f=f, figure_key=figure_key, fig=fig,\n selection=selection, grid_before=grid_before,\n grid_limits=grid_limits, normalize=normalize, colormap=colormap, what_kwargs=what_kwargs, **kwargs)\n if show:\n plot2d.show()\n return plot2d\n\n # @_hidden\n def healpix_count(self, expression=None, healpix_expression=None, healpix_max_level=12, healpix_level=8, binby=None, limits=None, shape=default_shape, delay=False, progress=None, selection=None):\n \"\"\"Count non missing value for expression on an array which represents healpix data.\n\n :param expression: 
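
    # --- usage sketch (added; illustrative only) ---
    # limits() accepts the specifier forms parsed above:
    #
    #   df.limits('x')                                 # default: 'minmax'
    #   df.limits('x', '99.7%')                        # central fraction of the data
    #   df.limits('x', '3s')                           # or '3sigma': delegated to limits_sigma(3)
    #   df.limits(['x', 'y'], ['minmax', [0, 10]])     # mix specifiers with explicit ranges
    #   df.limits('x', shape=128)                      # also returns the (possibly capped) shape
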
Expression or column for which to count non-missing values, or None or '*' for counting the rows\n :param healpix_expression: {healpix_max_level}\n :param healpix_max_level: {healpix_max_level}\n :param healpix_level: {healpix_level}\n :param binby: {binby}, these dimension follow the first healpix dimension.\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return:\n \"\"\"\n # if binby is None:\n import healpy as hp\n if healpix_expression is None:\n if self.ucds.get(\"source_id\", None) == 'meta.id;meta.main': # we now assume we have gaia data\n healpix_expression = \"source_id/34359738368\"\n\n if healpix_expression is None:\n raise ValueError(\"no healpix_expression given, and was unable to guess\")\n\n reduce_level = healpix_max_level - healpix_level\n NSIDE = 2**healpix_level\n nmax = hp.nside2npix(NSIDE)\n scaling = 4**reduce_level\n expr = \"%s/%s\" % (healpix_expression, scaling)\n binby = [expr] + ([] if binby is None else _ensure_list(binby))\n shape = (nmax,) + _expand_shape(shape, len(binby) - 1)\n epsilon = 1. / scaling / 2\n limits = [[-epsilon, nmax - epsilon]] + ([] if limits is None else limits)\n return self.count(expression, binby=binby, limits=limits, shape=shape, delay=delay, progress=progress, selection=selection)\n\n @docsubst\n @stat_1d\n def _stat(self, what=\"count(*)\", what_kwargs={}, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):\n waslist_what, [whats, ] = vaex.utils.listify(what)\n limits = self.limits(binby, limits, delay=True)\n waslist_selection, [selections] = vaex.utils.listify(selection)\n binby = _ensure_list(binby)\n\n what_labels = []\n shape = _expand_shape(shape, len(binby))\n total_grid = np.zeros((len(whats), len(selections)) + shape, dtype=float)\n\n @delayed\n def copy_grids(grids):\n total_grid[index] = grid\n\n @delayed\n def get_whats(limits):\n grids = []\n for j, what in enumerate(whats):\n what = what.strip()\n index = what.index(\"(\")\n groups = re.match(r\"(.*)\\((.*)\\)\", what).groups()\n if groups and len(groups) == 2:\n function = groups[0]\n arguments = groups[1].strip()\n if \",\" in arguments:\n arguments = arguments.split(\",\")\n functions = [\"mean\", \"sum\", \"std\", \"var\", \"correlation\", \"covar\", \"min\", \"max\"]\n unit_expression = None\n if function in [\"mean\", \"sum\", \"std\", \"min\", \"max\"]:\n unit_expression = arguments\n if function in [\"var\"]:\n unit_expression = \"(%s) * (%s)\" % (arguments, arguments)\n if function in [\"covar\"]:\n unit_expression = \"(%s) * (%s)\" % arguments\n if unit_expression:\n unit = self.unit(unit_expression)\n if unit:\n what_units = unit.to_string('latex_inline')\n if function in functions:\n grid = getattr(self, function)(arguments, binby=binby, limits=limits, shape=shape,\n selection=selections, progress=progress, delay=delay)\n elif function == \"count\":\n grid = self.count(arguments, binby, shape=shape, limits=limits, selection=selections,\n progress=progress, delay=delay)\n else:\n raise ValueError(\"Could not understand method: %s, expected one of %r'\" % (function, functions))\n # what_labels.append(what_label)\n grids.append(grid)\n\n # else:\n # raise ValueError(\"Could not understand 'what' argument %r, expected something in form: 'count(*)', 'mean(x)'\" % what)\n return grids\n grids = get_whats(limits)\n # print grids\n # grids = delayed_args(*grids)\n\n @delayed\n def finish(grids):\n for i, grid in enumerate(grids):\n 
total_grid[i] = grid\n return total_grid[slice(None, None, None) if waslist_what else 0, slice(None, None, None) if waslist_selection else 0]\n s = finish(delayed_list(grids))\n return self._delay(delay, s)\n\n plot = _requires('viz')\n plot1d = _requires('viz')\n scatter = _requires('viz')\n\n def plot3d(self, x, y, z, vx=None, vy=None, vz=None, vwhat=None, limits=None, grid=None, what=\"count(*)\", shape=128, selection=[None, True], f=None,\n vcount_limits=None,\n smooth_pre=None, smooth_post=None, grid_limits=None, normalize=\"normalize\", colormap=\"afmhot\",\n figure_key=None, fig=None,\n lighting=True, level=[0.1, 0.5, 0.9], opacity=[0.01, 0.05, 0.1], level_width=0.1,\n show=True, **kwargs):\n \"\"\"Use at own risk, requires ipyvolume\"\"\"\n import vaex.ext.ipyvolume\n # vaex.ext.ipyvolume.\n cls = vaex.ext.ipyvolume.PlotDefault\n plot3d = cls(df=self, x=x, y=y, z=z, vx=vx, vy=vy, vz=vz,\n grid=grid, shape=shape, limits=limits, what=what,\n f=f, figure_key=figure_key, fig=fig,\n selection=selection, smooth_pre=smooth_pre, smooth_post=smooth_post,\n grid_limits=grid_limits, vcount_limits=vcount_limits, normalize=normalize, colormap=colormap, **kwargs)\n if show:\n plot3d.show()\n return plot3d\n\n @property\n def col(self):\n \"\"\"Gives direct access to the columns only (useful for tab completion).\n\n Convenient when working with ipython in combination with small DataFrames, since this gives tab-completion.\n\n Columns can be accessed by their names, which are attributes. The attributes are currently expressions, so you can\n do computations with them.\n\n Example\n\n >>> ds = vaex.example()\n >>> df.plot(df.col.x, df.col.y)\n\n \"\"\"\n class ColumnList(object):\n pass\n data = ColumnList()\n for name in self.get_column_names():\n if name != 'col': # avoid recursion\n expression = getattr(self, name, None)\n if not isinstance(expression, Expression):\n expression = Expression(self, name)\n else:\n expression = Expression(self, name)\n setattr(data, name, expression)\n return data\n\n def close(self):\n \"\"\"Close any possible open file handles or other resources, the DataFrame will not be in a usable state afterwards.\"\"\"\n self.dataset.close()\n\n def byte_size(self, selection=False, virtual=False):\n \"\"\"Return the size in bytes the whole DataFrame requires (or the selection), respecting the active_fraction.\"\"\"\n bytes_per_row = 0\n N = self.count(selection=selection)\n extra = 0\n for column in list(self.get_column_names(virtual=virtual)):\n dtype = self.data_type(column)\n #if dtype in [str_type, str] and dtype_internal.kind == 'O':\n if dtype == str:\n # TODO: document or fix this\n # is it too expensive to calculate this exactly?\n extra += self.columns[column].nbytes\n else:\n bytes_per_row += dtype.numpy.itemsize\n if np.ma.isMaskedArray(self.columns[column]):\n bytes_per_row += 1\n return bytes_per_row * self.count(selection=selection) + extra\n\n @property\n def nbytes(self):\n \"\"\"Alias for `df.byte_size()`, see :meth:`DataFrame.byte_size`.\"\"\"\n return self.byte_size()\n\n def _shape_of(self, expression, filtered=True):\n # TODO: we don't seem to need it anymore, would expect a valid_expression() call\n # if check_alias:\n # if str(expression) in self._column_aliases:\n # expression = self._column_aliases[str(expression)] # translate the alias name into the real name\n sample = self.evaluate(expression, 0, 1, filtered=False, array_type=\"numpy-arrow\", parallel=False)\n dtype = vaex.dtype_of(sample)\n rows = len(self) if filtered else 
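
    # --- usage sketch (added; illustrative only) ---
    #   df.col.x              # tab-completable Expression handle for the column
    #   df.col.x * df.col.y   # expressions can be combined arithmetically
    #   df.byte_size()        # bytes needed for the rows in the active range
    #   df.nbytes             # property alias for df.byte_size()
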
self.length_unfiltered()\n if dtype.is_arrow: # for arrow, we don't have nd arrays yet\n return (rows,)\n else:\n return (rows,) + sample.shape[1:]\n\n # TODO: remove array_type and internal arguments?\n def data_type(self, expression, array_type=None, internal=False, axis=0):\n \"\"\"Return the datatype for the given expression, if not a column, the first row will be evaluated to get the data type.\n\n Example:\n\n >>> df = vaex.from_scalars(x=1, s='Hi')\n\n :param str array_type: 'numpy', 'arrow' or None, to indicate if the data type should be converted\n :param int axis: If a nested type (like list), it will return the value_type of the nested type, axis levels deep.\n \"\"\"\n if isinstance(expression, vaex.expression.Expression):\n expression = expression._label\n expression = _ensure_string_from_expression(expression)\n data_type = None\n if expression in self.variables:\n data_type = np.float64(1).dtype\n elif self.is_local() and expression in self.columns.keys():\n column = self.columns[expression]\n if hasattr(column, 'dtype'):\n # TODO: this probably would use data_type\n # to support Columns that wrap arrow arrays\n data_type = column.dtype\n data_type = self._auto_encode_type(expression, data_type)\n if isinstance(data_type, vaex.datatype.DataType):\n data_type = data_type.internal\n else:\n data = column[0:1]\n data = self._auto_encode_data(expression, data)\n else:\n expression = vaex.utils.valid_expression(self.get_column_names(hidden=True), expression)\n try:\n data = self.evaluate(expression, 0, 1, filtered=False, array_type=array_type, parallel=False)\n except:\n data = self.evaluate(expression, 0, 1, filtered=True, array_type=array_type, parallel=False)\n if data_type is None:\n # means we have to determine it from the data\n if isinstance(data, np.ndarray):\n data_type = data.dtype\n elif isinstance(data, Column):\n data = data.to_arrow()\n data_type = data.type\n else:\n # when we eval constants, let arrow find it out\n if isinstance(data, numbers.Number):\n data_type = pa.array([data]).type\n else:\n data_type = data.type # assuming arrow\n\n if array_type == \"arrow\":\n data_type = array_types.to_arrow_type(data_type)\n elif array_type == \"numpy\":\n data_type = array_types.to_numpy_type(data_type)\n elif array_type == \"numpy-arrow\":\n data_type = array_types.to_numpy_type(data_type, strict=False)\n elif array_type is None:\n data_type = data_type\n else:\n raise ValueError(f'Unknown array_type {array_type}')\n data_type = DataType(data_type)\n\n # ugly, but fixes df.x.apply(lambda x: str(x))\n if not internal:\n if isinstance(data_type.internal, np.dtype) and data_type.kind in 'US':\n return DataType(pa.string())\n\n if axis != 0:\n axis_data_type = [data_type]\n while data_type.is_list:\n data_type = data_type.value_type\n axis_data_type.append(data_type)\n data_type = axis_data_type[axis]\n return data_type\n\n @property\n def dtypes(self):\n \"\"\"Gives a Pandas series object containing all numpy dtypes of all columns (except hidden).\"\"\"\n from pandas import Series\n return Series({column_name:self.data_type(column_name) for column_name in self.get_column_names()})\n\n def schema(self):\n '''Similar to df.dtypes, but returns a dict'''\n return {column_name:self.data_type(column_name) for column_name in self.get_column_names()}\n\n @docsubst\n def schema_arrow(self, reduce_large=False):\n '''Similar to :method:`schema`, but returns an arrow schema\n\n :param bool reduce_large: change large_string to normal string\n '''\n def reduce(type):\n if reduce_large 
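
    # --- usage sketch (added; illustrative only) ---
    #   df.data_type('x')                          # vaex DataType of a column or expression
    #   df.data_type('x', array_type='arrow')      # coerced to the arrow type
    #   df.dtypes                                  # pandas Series: column name -> DataType
    #   df.schema()                                # the same mapping as a plain dict
    #   df.schema_arrow(reduce_large=True)         # pyarrow schema, large_string -> string
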
and type == pa.large_string():\n type = pa.string()\n return type\n return pa.schema({name: reduce(dtype.arrow) for name, dtype in self.schema().items()})\n\n def is_masked(self, column):\n '''Return if a column is a masked (numpy.ma) column.'''\n column = _ensure_string_from_expression(column)\n if column in self.dataset:\n return self.dataset.is_masked(column)\n else:\n ar = self.evaluate(column, i1=0, i2=1, parallel=False)\n if isinstance(ar, np.ndarray) and np.ma.isMaskedArray(ar):\n return True\n return False\n\n def label(self, expression, unit=None, output_unit=None, format=\"latex_inline\"):\n label = expression\n unit = unit or self.unit(expression)\n try: # if we can convert the unit, use that for the labeling\n if output_unit and unit: # avoid unnecessary error msg'es\n output_unit.to(unit)\n unit = output_unit\n except:\n logger.exception(\"unit error\")\n if unit is not None:\n label = \"%s (%s)\" % (label, unit.to_string('latex_inline'))\n return label\n\n def unit(self, expression, default=None):\n \"\"\"Returns the unit (an astropy.unit.Units object) for the expression.\n\n Example\n\n >>> import vaex\n >>> ds = vaex.example()\n >>> df.unit(\"x\")\n Unit(\"kpc\")\n >>> df.unit(\"x*L\")\n Unit(\"km kpc2 / s\")\n\n\n :param expression: Expression, which can be a column name\n :param default: if no unit is known, it will return this\n :return: The resulting unit of the expression\n :rtype: astropy.units.Unit\n \"\"\"\n expression = _ensure_string_from_expression(expression)\n try:\n # if an expression like pi * <some_expr> it will evaluate to a quantity instead of a unit\n unit_or_quantity = eval(expression, expression_namespace, scopes.UnitScope(self))\n unit = unit_or_quantity.unit if hasattr(unit_or_quantity, \"unit\") else unit_or_quantity\n unit_types = (astropy.units.core.UnitBase, )\n return unit if isinstance(unit, unit_types) else None\n except:\n # logger.exception(\"error evaluating unit expression: %s\", expression)\n # astropy doesn't add units, so we try with a quatiti\n try:\n return eval(expression, expression_namespace, scopes.UnitScope(self, 1.)).unit\n except:\n # logger.exception(\"error evaluating unit expression: %s\", expression)\n return default\n\n def ucd_find(self, ucds, exclude=[]):\n \"\"\"Find a set of columns (names) which have the ucd, or part of the ucd.\n\n Prefixed with a ^, it will only match the first part of the ucd.\n\n Example\n\n >>> df.ucd_find('pos.eq.ra', 'pos.eq.dec')\n ['RA', 'DEC']\n >>> df.ucd_find('pos.eq.ra', 'doesnotexist')\n >>> df.ucds[df.ucd_find('pos.eq.ra')]\n 'pos.eq.ra;meta.main'\n >>> df.ucd_find('meta.main')]\n 'dec'\n >>> df.ucd_find('^meta.main')]\n \"\"\"\n if isinstance(ucds, six.string_types):\n ucds = [ucds]\n if len(ucds) == 1:\n ucd = ucds[0]\n if ucd[0] == \"^\": # we want it to start with\n ucd = ucd[1:]\n columns = [name for name in self.get_column_names() if self.ucds.get(name, \"\").startswith(ucd) and name not in exclude]\n else:\n columns = [name for name in self.get_column_names() if ucd in self.ucds.get(name, \"\") and name not in exclude]\n return None if len(columns) == 0 else columns[0]\n else:\n columns = [self.ucd_find([ucd], exclude=exclude) for ucd in ucds]\n return None if None in columns else columns\n\n @vaex.utils.deprecated('Will most likely disappear or move')\n @_hidden\n def selection_favorite_add(self, name, selection_name=\"default\"):\n selection = self.get_selection(name=selection_name)\n if selection:\n self.favorite_selections[name] = selection\n self.selections_favorite_store()\n 
else:\n raise ValueError(\"no selection exists\")\n\n @vaex.utils.deprecated('Will most likely disappear or move')\n @_hidden\n def selection_favorite_remove(self, name):\n del self.favorite_selections[name]\n self.selections_favorite_store()\n\n @vaex.utils.deprecated('Will most likely disappear or move')\n @_hidden\n def selection_favorite_apply(self, name, selection_name=\"default\", executor=None):\n self.set_selection(self.favorite_selections[name], name=selection_name, executor=executor)\n\n @vaex.utils.deprecated('Will most likely disappear or move')\n @_hidden\n def selections_favorite_store(self):\n path = os.path.join(self.get_private_dir(create=True), \"favorite_selection.yaml\")\n selections = collections.OrderedDict([(key, value.to_dict()) for key, value in self.favorite_selections.items()])\n vaex.utils.write_json_or_yaml(path, selections)\n\n @vaex.utils.deprecated('Will most likely disappear or move')\n @_hidden\n def selections_favorite_load(self):\n try:\n path = os.path.join(self.get_private_dir(create=True), \"favorite_selection.yaml\")\n if os.path.exists(path):\n selections_dict = vaex.utils.read_json_or_yaml(path)\n for key, value in selections_dict.items():\n self.favorite_selections[key] = selections.selection_from_dict(self, value)\n except:\n logger.exception(\"non fatal error\")\n\n def get_private_dir(self, create=False):\n \"\"\"Each DataFrame has a directory where files are stored for metadata etc.\n\n Example\n\n >>> import vaex\n >>> ds = vaex.example()\n >>> vaex.get_private_dir()\n '/Users/users/breddels/.vaex/dfs/_Users_users_breddels_vaex-testing_data_helmi-dezeeuw-2000-10p.hdf5'\n\n :param bool create: is True, it will create the directory if it does not exist\n \"\"\"\n if self.is_local():\n name = os.path.abspath(self.path).replace(os.path.sep, \"_\")[:250] # should not be too long for most os'es\n name = name.replace(\":\", \"_\") # for windows drive names\n else:\n server = self.server\n name = \"%s_%s_%s_%s\" % (server.hostname, server.port, server.base_path.replace(\"/\", \"_\"), self.name)\n dir = os.path.join(vaex.utils.get_private_dir(), \"dfs\", name)\n if create and not os.path.exists(dir):\n os.makedirs(dir)\n return dir\n\n def state_get(self, skip=None):\n if self._future_behaviour == 5:\n return self._state_get_vaex_5(skip=skip)\n else:\n if not ((skip is None) or (len(skip) == 1 and skip[0] is self.dataset)):\n raise ValueError(f'skip should be None or its own dataset')\n return self._state_get_pre_vaex_5()\n\n def state_set(self, state, use_active_range=False, keep_columns=None, set_filter=True, trusted=True, warn=True, delete_unused_columns = True):\n if self._future_behaviour == 5:\n return self._state_set_vaex_5(state, use_active_range=use_active_range, keep_columns=keep_columns, set_filter=set_filter, trusted=trusted, warn=warn)\n else:\n return self._state_set_pre_vaex_5(state, use_active_range=use_active_range, keep_columns=keep_columns, set_filter=set_filter, trusted=trusted, warn=warn, delete_unused_columns=delete_unused_columns)\n\n def _state_get_vaex_5(self, skip=None):\n \"\"\"Return the internal state of the DataFrame in a dictionary\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_scalars(x=1, y=2)\n >>> df['r'] = (df.x**2 + df.y**2)**0.5\n >>> df.state_get()\n {'active_range': [0, 1],\n 'column_names': ['x', 'y', 'r'],\n 'description': None,\n 'descriptions': {},\n 'functions': {},\n 'renamed_columns': [],\n 'selections': {'__filter__': None},\n 'ucds': {},\n 'units': {},\n 'variables': {},\n 'virtual_columns': 
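
    # --- usage sketch (added; illustrative only) ---
    # The state dictionary transfers virtual columns, selections, units, etc.
    # to another DataFrame with the same physical columns (as in the docstrings):
    #
    #   df = vaex.from_scalars(x=1, y=2)
    #   df['r'] = (df.x**2 + df.y**2)**0.5
    #   state = df.state_get()
    #   df2 = vaex.from_scalars(x=3, y=4)
    #   df2.state_set(state)       # df2 now evaluates the virtual column 'r' too
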
{'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}}\n \"\"\"\n\n virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys())\n units = {key: str(value) for key, value in self.units.items()}\n ucds = {key: value for key, value in self.ucds.items() if key in virtual_names}\n descriptions = {key: value for key, value in self.descriptions.items()}\n selections = {name: self.get_selection(name) for name, history in self.selection_histories.items() if self.has_selection(name)}\n encoding = vaex.encoding.Encoding()\n state = dict(virtual_columns=dict(self.virtual_columns),\n column_names=list(self.column_names),\n variables={name: encoding.encode(\"variable\", value) for name, value in self.variables.items()},\n functions={name: encoding.encode(\"function\", value) for name, value in self.functions.items()},\n selections={name: encoding.encode(\"selection\", value) for name, value in selections.items()},\n description=self.description,\n ucds=ucds,\n units=units,\n descriptions=descriptions,\n active_range=[self._index_start, self._index_end]\n )\n datasets = self.dataset.leafs() if skip is None else skip\n for dataset in datasets:\n # mark leafs to not encode\n encoding._object_specs[dataset.id] = None\n assert encoding.has_object_spec(dataset.id)\n if len(datasets) != 1:\n raise ValueError('Multiple datasets present, please pass skip= argument so we know which dataset not to include in the state.')\n dataset_main = datasets[0]\n if dataset_main is not self.dataset:\n # encode without the leafs\n data = encoding.encode('dataset', self.dataset)\n # remove the dummy leaf data\n for dataset in datasets:\n assert encoding._object_specs[dataset.id] is None\n del encoding._object_specs[dataset.id]\n if data is not None:\n state['dataset'] = data\n state['dataset_missing'] = {'main': dataset_main.id}\n state['blobs'] = {key: base64.b64encode(value).decode('ascii') for key, value in encoding.blobs.items()}\n if encoding._object_specs:\n state['objects'] = encoding._object_specs\n return state\n\n def _state_set_vaex_5(self, state, use_active_range=False, keep_columns=None, set_filter=True, trusted=True, warn=True):\n \"\"\"Sets the internal state of the df\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_scalars(x=1, y=2)\n >>> df\n # x y r\n 0 1 2 2.23607\n >>> df['r'] = (df.x**2 + df.y**2)**0.5\n >>> state = df.state_get()\n >>> state\n {'active_range': [0, 1],\n 'column_names': ['x', 'y', 'r'],\n 'description': None,\n 'descriptions': {},\n 'functions': {},\n 'renamed_columns': [],\n 'selections': {'__filter__': None},\n 'ucds': {},\n 'units': {},\n 'variables': {},\n 'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}}\n >>> df2 = vaex.from_scalars(x=3, y=4)\n >>> df2.state_set(state) # now the virtual functions are 'copied'\n >>> df2\n # x y r\n 0 3 4 5\n\n :param state: dict as returned by :meth:`DataFrame.state_get`.\n :param bool use_active_range: Whether to use the active range or not.\n :param list keep_columns: List of columns that should be kept if the state to be set contains less columns.\n :param bool set_filter: Set the filter from the state (default), or leave the filter as it is it.\n :param bool warn: Give warning when issues are found in the state transfer that are recoverable.\n \"\"\"\n self.description = state['description']\n if use_active_range:\n self._index_start, self._index_end = state['active_range']\n self._length_unfiltered = self._index_end - self._index_start\n if keep_columns:\n all_columns = self.get_column_names()\n for column_name in 
keep_columns:\n if column_name not in all_columns:\n raise KeyError(f'Column name {column_name} does not exist')\n encoding = vaex.encoding.Encoding()\n if 'blobs' in state:\n encoding.blobs = {key: base64.b64decode(value.encode('ascii')) for key, value in state['blobs'].items()}\n if 'objects' in state:\n encoding._object_specs = state['objects']\n if 'dataset' in state:\n encoding.set_object(state['dataset_missing']['main'], self.dataset)\n self.dataset = encoding.decode('dataset', state['dataset'])\n\n for name, value in state['functions'].items():\n self.add_function(name, encoding.decode(\"function\", value, trusted=trusted))\n # we clear all columns, and add them later on, since otherwise self[name] = ... will try\n # to rename the columns (which is unsupported for remote dfs)\n self.column_names = []\n self.virtual_columns = {}\n self.column_names = list(set(self.dataset) & set(state['column_names'])) # initial values not to have virtual column trigger missing column values\n if 'variables' in state:\n self.variables = {name: encoding.decode(\"variable\", value) for name, value in state['variables'].items()}\n for name, value in state['virtual_columns'].items():\n self[name] = self._expr(value)\n # self._save_assign_expression(name)\n self.column_names = list(state['column_names'])\n if keep_columns:\n self.column_names += list(keep_columns)\n for name in self.column_names:\n self._save_assign_expression(name)\n if \"units\" in state:\n units = {key: astropy.units.Unit(value) for key, value in state[\"units\"].items()}\n self.units.update(units)\n if 'selections' in state:\n for name, selection_dict in state['selections'].items():\n selection = encoding.decode('selection', selection_dict)\n if name == FILTER_SELECTION_NAME and not set_filter:\n continue\n self.set_selection(selection, name=name)\n if self.is_local():\n for name in self.dataset:\n if name not in self.column_names:\n del self.columns[name]\n\n def _state_get_pre_vaex_5(self):\n \"\"\"Return the internal state of the DataFrame in a dictionary\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_scalars(x=1, y=2)\n >>> df['r'] = (df.x**2 + df.y**2)**0.5\n >>> df.state_get()\n {'active_range': [0, 1],\n 'column_names': ['x', 'y', 'r'],\n 'description': None,\n 'descriptions': {},\n 'functions': {},\n 'renamed_columns': [],\n 'selections': {'__filter__': None},\n 'ucds': {},\n 'units': {},\n 'variables': {},\n 'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}}\n \"\"\"\n\n virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys())\n units = {key: str(value) for key, value in self.units.items()}\n ucds = {key: value for key, value in self.ucds.items() if key in virtual_names}\n descriptions = {key: value for key, value in self.descriptions.items()}\n import vaex.serialize\n\n def check(key, value):\n if not vaex.serialize.can_serialize(value.f):\n warnings.warn('Cannot serialize function for virtual column {} (use vaex.serialize.register)'.format(key))\n return False\n return True\n\n def clean(value):\n return vaex.serialize.to_dict(value.f)\n functions = {key: clean(value) for key, value in self.functions.items() if check(key, value)}\n virtual_columns = {key: value for key, value in self.virtual_columns.items()}\n selections = {name: self.get_selection(name) for name, history in self.selection_histories.items()}\n selections = {name: selection.to_dict() if selection is not None else None for name, selection in selections.items()}\n # if selection is not None}\n state = 
dict(virtual_columns=virtual_columns,\n column_names=self.column_names,\n renamed_columns=self._renamed_columns,\n variables=self.variables,\n functions=functions,\n selections=selections,\n ucds=ucds,\n units=units,\n descriptions=descriptions,\n description=self.description,\n active_range=[self._index_start, self._index_end])\n return state\n\n def _state_set_pre_vaex_5(self, state, use_active_range=False, keep_columns=None, set_filter=True, trusted=True, warn=True, delete_unused_columns = True):\n \"\"\"Sets the internal state of the df\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_scalars(x=1, y=2)\n >>> df\n # x y r\n 0 1 2 2.23607\n >>> df['r'] = (df.x**2 + df.y**2)**0.5\n >>> state = df.state_get()\n >>> state\n {'active_range': [0, 1],\n 'column_names': ['x', 'y', 'r'],\n 'description': None,\n 'descriptions': {},\n 'functions': {},\n 'renamed_columns': [],\n 'selections': {'__filter__': None},\n 'ucds': {},\n 'units': {},\n 'variables': {},\n 'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}}\n >>> df2 = vaex.from_scalars(x=3, y=4)\n >>> df2.state_set(state) # now the virtual functions are 'copied'\n >>> df2\n # x y r\n 0 3 4 5\n\n :param state: dict as returned by :meth:`DataFrame.state_get`.\n :param bool use_active_range: Whether to use the active range or not.\n :param list keep_columns: List of columns that should be kept if the state to be set contains less columns.\n :param bool set_filter: Set the filter from the state (default), or leave the filter as it is it.\n :param bool warn: Give warning when issues are found in the state transfer that are recoverable.\n :param bool delete_unused_columns: Whether to delete columns from the DataFrame that are not in the column_names. Useful to set to False during prediction time.\n \"\"\"\n if 'description' in state:\n self.description = state['description']\n if use_active_range:\n if 'active_range' in state:\n self._index_start, self._index_end = state['active_range']\n self._length_unfiltered = self._index_end - self._index_start\n if keep_columns:\n all_columns = self.get_column_names()\n for column_name in keep_columns:\n if column_name not in all_columns:\n raise KeyError(f'Column name {column_name} does not exist')\n if 'renamed_columns' in state:\n for old, new in state['renamed_columns']:\n if old in self:\n self._rename(old, new)\n elif warn:\n warnings.warn(f'The state wants to rename {old} to {new}, but {new} was not found, ignoring the rename')\n if 'functions' in state:\n for name, value in state['functions'].items():\n self.add_function(name, vaex.serialize.from_dict(value, trusted=trusted))\n if 'variables' in state:\n self.variables = state['variables']\n if 'column_names' in state:\n # we clear all columns, and add them later on, since otherwise self[name] = ... 
will try\n # to rename the columns (which is unsupported for remote dfs)\n self.column_names = []\n self.virtual_columns = {}\n self.column_names = list(set(self.dataset) & set(state['column_names'])) # initial values not to have virtual column trigger missing column values\n if 'virtual_columns' in state:\n for name, value in state['virtual_columns'].items():\n self[name] = self._expr(value)\n self.column_names = list(state['column_names'])\n if keep_columns:\n self.column_names += list(keep_columns)\n for name in self.column_names:\n self._save_assign_expression(name)\n else:\n # old behaviour\n self.virtual_columns = {}\n for name, value in state['virtual_columns'].items():\n self[name] = self._expr(value)\n if 'units' in state:\n units = {key: astropy.units.Unit(value) for key, value in state[\"units\"].items()}\n self.units.update(units)\n if 'selections' in state:\n for name, selection_dict in state['selections'].items():\n if name == FILTER_SELECTION_NAME and not set_filter:\n continue\n # TODO: make selection use the vaex.serialize framework\n if selection_dict is None:\n selection = None\n else:\n selection = selections.selection_from_dict(selection_dict)\n self.set_selection(selection, name=name)\n if self.is_local() and delete_unused_columns:\n for name in self.dataset:\n if name not in self.column_names:\n del self.columns[name]\n\n\n def state_write(self, file, fs_options=None, fs=None):\n \"\"\"Write the internal state to a json or yaml file (see :meth:`DataFrame.state_get`)\n\n Example\n\n >>> import vaex\n >>> df = vaex.from_scalars(x=1, y=2)\n >>> df['r'] = (df.x**2 + df.y**2)**0.5\n >>> df.state_write('state.json')\n >>> print(open('state.json').read())\n {\n \"virtual_columns\": {\n \"r\": \"(((x ** 2) + (y ** 2)) ** 0.5)\"\n },\n \"column_names\": [\n \"x\",\n \"y\",\n \"r\"\n ],\n \"renamed_columns\": [],\n \"variables\": {\n \"pi\": 3.141592653589793,\n \"e\": 2.718281828459045,\n \"km_in_au\": 149597870.7,\n \"seconds_per_year\": 31557600\n },\n \"functions\": {},\n \"selections\": {\n \"__filter__\": null\n },\n \"ucds\": {},\n \"units\": {},\n \"descriptions\": {},\n \"description\": null,\n \"active_range\": [\n 0,\n 1\n ]\n }\n >>> df.state_write('state.yaml')\n >>> print(open('state.yaml').read())\n active_range:\n - 0\n - 1\n column_names:\n - x\n - y\n - r\n description: null\n descriptions: {}\n functions: {}\n renamed_columns: []\n selections:\n __filter__: null\n ucds: {}\n units: {}\n variables:\n pi: 3.141592653589793\n e: 2.718281828459045\n km_in_au: 149597870.7\n seconds_per_year: 31557600\n virtual_columns:\n r: (((x ** 2) + (y ** 2)) ** 0.5)\n\n :param str file: filename (ending in .json or .yaml)\n :param dict fs_options: arguments to pass the the file system handler (s3fs or gcsfs)\n :param fs: 'Pass a file system object directly, see :func:`vaex.open`'\n \"\"\"\n fs_options = fs_options or {}\n vaex.utils.write_json_or_yaml(file, self.state_get(), fs_options=fs_options, fs=fs, old_style=not self._future_behaviour)\n\n def state_load(self, file, use_active_range=False, keep_columns=None, set_filter=True, trusted=True, fs_options=None, fs=None):\n \"\"\"Load a state previously stored by :meth:`DataFrame.state_write`, see also :meth:`DataFrame.state_set`.\n\n :param str file: filename (ending in .json or .yaml)\n :param bool use_active_range: Whether to use the active range or not.\n :param list keep_columns: List of columns that should be kept if the state to be set contains less columns.\n :param bool set_filter: Set the filter from the state 
(default), or leave the filter as it is it.\n :param dict fs_options: arguments to pass the the file system handler (s3fs or gcsfs)\n :param fs: 'Pass a file system object directly, see :func:`vaex.open`'\n \"\"\"\n state = vaex.utils.read_json_or_yaml(file, fs_options=fs_options, fs=fs, old_style=not self._future_behaviour)\n self.state_set(state, use_active_range=use_active_range, keep_columns=keep_columns, set_filter=set_filter, trusted=trusted)\n\n def remove_virtual_meta(self):\n \"\"\"Removes the file with the virtual column etc, it does not change the current virtual columns etc.\"\"\"\n dir = self.get_private_dir(create=True)\n path = os.path.join(dir, \"virtual_meta.yaml\")\n try:\n if os.path.exists(path):\n os.remove(path)\n if not os.listdir(dir):\n os.rmdir(dir)\n except:\n logger.exception(\"error while trying to remove %s or %s\", path, dir)\n # def remove_meta(self):\n # path = os.path.join(self.get_private_dir(create=True), \"meta.yaml\")\n # os.remove(path)\n\n @_hidden\n def write_virtual_meta(self):\n \"\"\"Writes virtual columns, variables and their ucd,description and units.\n\n The default implementation is to write this to a file called virtual_meta.yaml in the directory defined by\n :func:`DataFrame.get_private_dir`. Other implementation may store this in the DataFrame file itself.\n\n This method is called after virtual columns or variables are added. Upon opening a file, :func:`DataFrame.update_virtual_meta`\n is called, so that the information is not lost between sessions.\n\n Note: opening a DataFrame twice may result in corruption of this file.\n\n \"\"\"\n path = os.path.join(self.get_private_dir(create=True), \"virtual_meta.yaml\")\n virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys())\n units = {key: str(value) for key, value in self.units.items() if key in virtual_names}\n ucds = {key: value for key, value in self.ucds.items() if key in virtual_names}\n descriptions = {key: value for key, value in self.descriptions.items() if key in virtual_names}\n meta_info = dict(virtual_columns=self.virtual_columns,\n variables=self.variables,\n ucds=ucds, units=units, descriptions=descriptions)\n vaex.utils.write_json_or_yaml(path, meta_info)\n\n @_hidden\n def update_virtual_meta(self):\n \"\"\"Will read back the virtual column etc, written by :func:`DataFrame.write_virtual_meta`. This will be done when opening a DataFrame.\"\"\"\n try:\n path = os.path.join(self.get_private_dir(create=False), \"virtual_meta.yaml\")\n if os.path.exists(path):\n meta_info = vaex.utils.read_json_or_yaml(path)\n if 'virtual_columns' not in meta_info:\n return\n self.virtual_columns.update(meta_info[\"virtual_columns\"])\n self.variables.update(meta_info[\"variables\"])\n self.ucds.update(meta_info[\"ucds\"])\n self.descriptions.update(meta_info[\"descriptions\"])\n units = {key: astropy.units.Unit(value) for key, value in meta_info[\"units\"].items()}\n self.units.update(units)\n except:\n logger.exception(\"non fatal error\")\n\n @_hidden\n def write_meta(self):\n \"\"\"Writes all meta data, ucd,description and units\n\n The default implementation is to write this to a file called meta.yaml in the directory defined by\n :func:`DataFrame.get_private_dir`. Other implementation may store this in the DataFrame file itself.\n (For instance the vaex hdf5 implementation does this)\n\n This method is called after virtual columns or variables are added. 
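
    # --- usage sketch (added; illustrative only) ---
    #   df.state_write('state.json')      # or 'state.yaml'; serializes state_get()
    #   df_new.state_load('state.json')   # re-applies it through state_set()
    #   # remote storage goes through fs_options / fs; the option names below are
    #   # s3fs assumptions, not vaex's own:
    #   # df.state_write('s3://bucket/state.json', fs_options={'anon': False})
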
Upon opening a file, :func:`DataFrame.update_meta`\n is called, so that the information is not lost between sessions.\n\n Note: opening a DataFrame twice may result in corruption of this file.\n\n \"\"\"\n # raise NotImplementedError\n path = os.path.join(self.get_private_dir(create=True), \"meta.yaml\")\n units = {key: str(value) for key, value in self.units.items()}\n meta_info = dict(description=self.description,\n ucds=self.ucds, units=units, descriptions=self.descriptions,\n )\n vaex.utils.write_json_or_yaml(path, meta_info)\n\n @_hidden\n def update_meta(self):\n \"\"\"Will read back the ucd, descriptions, units etc, written by :func:`DataFrame.write_meta`. This will be done when opening a DataFrame.\"\"\"\n try:\n path = os.path.join(self.get_private_dir(create=False), \"meta.yaml\")\n if os.path.exists(path):\n meta_info = vaex.utils.read_json_or_yaml(path)\n self.description = meta_info[\"description\"]\n self.ucds.update(meta_info[\"ucds\"])\n self.descriptions.update(meta_info[\"descriptions\"])\n # self.virtual_columns.update(meta_info[\"virtual_columns\"])\n # self.variables.update(meta_info[\"variables\"])\n units = {key: astropy.units.Unit(value) for key, value in meta_info[\"units\"].items()}\n self.units.update(units)\n except:\n logger.exception(\"non fatal error, but could read/understand %s\", path)\n\n def is_local(self):\n \"\"\"Returns True if the DataFrame is local, False when a DataFrame is remote.\"\"\"\n raise NotImplementedError\n\n def get_auto_fraction(self):\n return self._auto_fraction\n\n def set_auto_fraction(self, enabled):\n self._auto_fraction = enabled\n\n @classmethod\n def can_open(cls, path, *args, **kwargs):\n # \"\"\"Tests if this class can open the file given by path\"\"\"\n return False\n\n @classmethod\n def get_options(cls, path):\n return []\n\n @classmethod\n def option_to_args(cls, option):\n return []\n\n def combinations(self, expressions_list=None, dimension=2, exclude=None, **kwargs):\n \"\"\"Generate a list of combinations for the possible expressions for the given dimension.\n\n :param expressions_list: list of list of expressions, where the inner list defines the subspace\n :param dimensions: if given, generates a subspace with all possible combinations for that dimension\n :param exclude: list of\n \"\"\"\n if dimension is not None:\n expressions_list = list(itertools.combinations(self.get_column_names(), dimension))\n if exclude is not None:\n import six\n\n def excluded(expressions):\n if callable(exclude):\n return exclude(expressions)\n elif isinstance(exclude, six.string_types):\n return exclude in expressions\n elif isinstance(exclude, (list, tuple)):\n # $#expressions = set(expressions)\n for e in exclude:\n if isinstance(e, six.string_types):\n if e in expressions:\n return True\n elif isinstance(e, (list, tuple)):\n if set(e).issubset(expressions):\n return True\n else:\n raise ValueError(\"elements of exclude should contain a string or a sequence of strings\")\n else:\n raise ValueError(\"exclude should contain a string, a sequence of strings, or should be a callable\")\n return False\n # test if any of the elements of exclude are a subset of the expression\n expressions_list = [expr for expr in expressions_list if not excluded(expr)]\n logger.debug(\"expression list generated: %r\", expressions_list)\n return expressions_list\n\n def set_variable(self, name, expression_or_value, write=True):\n \"\"\"Set the variable to an expression or value defined by expression_or_value.\n\n Example\n\n >>> df.set_variable(\"a\", 2.)\n 
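>>> df.get_variable(\"a\")  # returns the stored value as-is, without evaluating it\n 2.0\n 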
>>> df.set_variable(\"b\", \"a**2\")\n >>> df.get_variable(\"b\")\n 'a**2'\n >>> df.evaluate_variable(\"b\")\n 4.0\n\n :param name: Name of the variable\n :param write: write variable to meta file\n :param expression: value or expression\n \"\"\"\n self.variables[name] = expression_or_value\n # if write:\n # self.write_virtual_meta()\n\n def get_variable(self, name):\n \"\"\"Returns the variable given by name, it will not evaluate it.\n\n For evaluation, see :func:`DataFrame.evaluate_variable`, see also :func:`DataFrame.set_variable`\n\n \"\"\"\n return self.variables[name]\n\n def evaluate_variable(self, name):\n \"\"\"Evaluates the variable given by name.\"\"\"\n if isinstance(self.variables[name], six.string_types):\n # TODO: this does not allow more than one level deep variable, like a depends on b, b on c, c is a const\n value = eval(self.variables[name], expression_namespace, self.variables)\n return value\n else:\n return self.variables[name]\n\n @docsubst\n def evaluate(self, expression, i1=None, i2=None, out=None, selection=None, filtered=True, array_type=None, parallel=True, chunk_size=None, progress=None):\n \"\"\"Evaluate an expression, and return a numpy array with the results for the full column or a part of it.\n\n Note that this is not how vaex should be used, since it means a copy of the data needs to fit in memory.\n\n To get partial results, use i1 and i2\n\n :param str expression: Name/expression to evaluate\n :param int i1: Start row index, default is the start (0)\n :param int i2: End row index, default is the length of the DataFrame\n :param ndarray out: Output array, to which the result may be written (may be used to reuse an array, or write to\n a memory mapped array)\n :param progress: {{progress}}\n :param selection: selection to apply\n :return:\n \"\"\"\n if chunk_size is not None:\n return self.evaluate_iterator(expression, s1=i1, s2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size, progress=progress)\n else:\n return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size, progress=progress)\n\n @docsubst\n def evaluate_iterator(self, expression, s1=None, s2=None, out=None, selection=None, filtered=True, array_type=None, parallel=True, chunk_size=None, prefetch=True, progress=None):\n \"\"\"Generator to efficiently evaluate expressions in chunks (number of rows).\n\n See :func:`DataFrame.evaluate` for other arguments.\n\n Example:\n\n >>> import vaex\n >>> df = vaex.example()\n >>> for i1, i2, chunk in df.evaluate_iterator(df.x, chunk_size=100_000):\n ... 
print(f\"Total of {{i1}} to {{i2}} = {{chunk.sum()}}\")\n ...\n Total of 0 to 100000 = -7460.610158279056\n Total of 100000 to 200000 = -4964.85827154921\n Total of 200000 to 300000 = -7303.271340043915\n Total of 300000 to 330000 = -2424.65234724951\n\n :param progress: {{progress}}\n :param prefetch: Prefetch/compute the next chunk in parallel while the current value is yielded/returned.\n \"\"\"\n progressbar = vaex.utils.progressbars(progress, title=\"evaluate iterator\")\n import concurrent.futures\n self._fill_filter_mask()\n progressbar(0)\n if not prefetch:\n # this is the simple implementation\n for l1, l2, i1, i2 in self._unfiltered_chunk_slices(chunk_size):\n yield l1, l2, self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, raw=True)\n progressbar(l2/len(self))\n # But this implementation is faster if the main thread work is single threaded\n else:\n with concurrent.futures.ThreadPoolExecutor(1) as executor:\n iter = self._unfiltered_chunk_slices(chunk_size)\n def f(i1, i2):\n return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, raw=True)\n try:\n previous_l1, previous_l2, previous_i1, previous_i2 = next(iter)\n except StopIteration:\n # empty dataframe/filter\n return\n # we submit the 1st job\n previous = executor.submit(f, previous_i1, previous_i2)\n for l1, l2, i1, i2 in iter:\n # and we submit the next job before returning the previous, so they run in parallel\n # but make sure the previous is done\n previous_chunk = previous.result()\n current = executor.submit(f, i1, i2)\n yield previous_l1, previous_l2, previous_chunk\n progressbar(previous_l2/len(self))\n previous = current\n previous_l1, previous_l2 = l1, l2\n previous_chunk = previous.result()\n yield previous_l1, previous_l2, previous_chunk\n progressbar(previous_l2/len(self))\n\n @docsubst\n def to_records(self, index=None, selection=None, column_names=None, strings=True, virtual=True, parallel=True,\n chunk_size=None, array_type='python'):\n \"\"\"Return a list of [{{column_name: value}}, ...)] \"records\" where each dict is an evaluated row.\n\n :param index: an index to use to get the record of a specific row when provided\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param parallel: {evaluate_parallel}\n :param chunk_size: {chunk_size}\n :param array_type: {array_type}\n :return: list of [{{column_name:value}}, ...] 
records\n \"\"\"\n if isinstance(index, int):\n return {key: value[0] for key, value in\n self[index:index + 1].to_dict(selection=selection, column_names=column_names, strings=strings,\n virtual=virtual, parallel=parallel, array_type=array_type).items()}\n if index is not None:\n raise RuntimeError(f\"index can be None or an int - {type(index)} provided\")\n\n if chunk_size is not None:\n def iterator():\n for i1, i2, chunk in self.to_dict(selection=selection, column_names=column_names, strings=strings,\n virtual=virtual, parallel=parallel, chunk_size=chunk_size,\n array_type=array_type):\n keys = list(chunk.keys())\n yield i1, i2, [{key: value for key, value in zip(keys, values)} for values in zip(*chunk.values())]\n\n return iterator()\n chunk = self.to_dict(selection=selection, column_names=column_names, strings=strings,\n virtual=virtual, parallel=parallel, chunk_size=chunk_size,\n array_type=array_type)\n keys = list(chunk.keys())\n return [{key: value for key, value in zip(keys, values)} for values in zip(*chunk.values())]\n\n\n @docsubst\n def to_items(self, column_names=None, selection=None, strings=True, virtual=True, parallel=True, chunk_size=None, array_type=None):\n \"\"\"Return a list of [(column_name, ndarray), ...)] pairs where the ndarray corresponds to the evaluated data\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param parallel: {evaluate_parallel}\n :param chunk_size: {chunk_size}\n :param array_type: {array_type}\n :return: list of (name, ndarray) pairs or iterator of\n \"\"\"\n column_names = column_names or self.get_column_names(strings=strings, virtual=virtual)\n column_names = _ensure_strings_from_expressions(column_names)\n if chunk_size is not None:\n def iterator():\n for i1, i2, chunks in self.evaluate_iterator(column_names, selection=selection, parallel=parallel, chunk_size=chunk_size):\n yield i1, i2, list(zip(column_names, [array_types.convert(chunk, array_type) for chunk in chunks]))\n return iterator()\n else:\n return list(zip(column_names, [array_types.convert(chunk, array_type) for chunk in self.evaluate(column_names, selection=selection, parallel=parallel)]))\n\n @docsubst\n def to_arrays(self, column_names=None, selection=None, strings=True, virtual=True, parallel=True, chunk_size=None, array_type=None):\n \"\"\"Return a list of ndarrays\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param parallel: {evaluate_parallel}\n :param chunk_size: {chunk_size}\n :param array_type: {array_type}\n :return: list of arrays\n \"\"\"\n column_names = column_names or self.get_column_names(strings=strings, virtual=virtual)\n column_names = _ensure_strings_from_expressions(column_names)\n if chunk_size is not None:\n def iterator():\n for i1, i2, chunks in self.evaluate_iterator(column_names, selection=selection, parallel=parallel, chunk_size=chunk_size):\n yield i1, i2, [array_types.convert(chunk, array_type) for chunk in chunks]\n return iterator()\n return 
[array_types.convert(chunk, array_type) for chunk in self.evaluate(column_names, selection=selection, parallel=parallel)]\n\n @docsubst\n def to_dict(self, column_names=None, selection=None, strings=True, virtual=True, parallel=True, chunk_size=None, array_type=None):\n \"\"\"Return a dict containing the ndarray corresponding to the evaluated data\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param parallel: {evaluate_parallel}\n :param chunk_size: {chunk_size}\n :param array_type: {array_type}\n :return: dict\n \"\"\"\n column_names = column_names or self.get_column_names(strings=strings, virtual=virtual)\n column_names = _ensure_strings_from_expressions(column_names)\n if chunk_size is not None:\n def iterator():\n for i1, i2, chunks in self.evaluate_iterator(column_names, selection=selection, parallel=parallel, chunk_size=chunk_size):\n yield i1, i2, dict(list(zip(column_names, [array_types.convert(chunk, array_type) for chunk in chunks])))\n return iterator()\n return dict(list(zip(column_names, [array_types.convert(chunk, array_type) for chunk in self.evaluate(column_names, selection=selection, parallel=parallel)])))\n\n @_hidden\n @docsubst\n @vaex.utils.deprecated('`.to_copy()` is deprecated and it will be removed in version 5.x. Please use `.copy()` instead.')\n def to_copy(self, column_names=None, selection=None, strings=True, virtual=True, selections=True):\n \"\"\"Return a copy of the DataFrame, if selection is None, it does not copy the data, it just has a reference\n\n :param column_names: list of column names, to copy, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param selections: copy selections to a new DataFrame\n :return: DataFrame\n \"\"\"\n if column_names:\n column_names = _ensure_strings_from_expressions(column_names)\n df = vaex.from_items(*self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=False))\n if virtual:\n for name, value in self.virtual_columns.items():\n df.add_virtual_column(name, value)\n if selections:\n # the filter selection does not need copying\n for key, value in self.selection_histories.items():\n if key != FILTER_SELECTION_NAME:\n df.selection_histories[key] = list(value)\n for key, value in self.selection_history_indices.items():\n if key != FILTER_SELECTION_NAME:\n df.selection_history_indices[key] = value\n df.functions.update(self.functions)\n df.copy_metadata(self)\n return df\n\n def copy_metadata(self, other):\n for name in self.get_column_names(strings=True):\n if name in other.units:\n self.units[name] = other.units[name]\n if name in other.descriptions:\n self.descriptions[name] = other.descriptions[name]\n if name in other.ucds:\n self.ucds[name] = other.ucds[name]\n self.description = other.description\n\n @docsubst\n def to_pandas_df(self, column_names=None, selection=None, strings=True, virtual=True, index_name=None, parallel=True, chunk_size=None, array_type=None):\n \"\"\"Return a pandas DataFrame containing the ndarray corresponding to the evaluated 
data\n\n If index is given, that column is used for the index of the dataframe.\n\n Example\n\n >>> df_pandas = df.to_pandas_df([\"x\", \"y\", \"z\"])\n >>> df_copy = vaex.from_pandas(df_pandas)\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param index_column: if this column is given it is used for the index of the DataFrame\n :param parallel: {evaluate_parallel}\n :param chunk_size: {chunk_size}\n :param array_type: {array_type}\n :return: pandas.DataFrame object or iterator of\n \"\"\"\n import pandas as pd\n column_names = column_names or self.get_column_names(strings=strings, virtual=virtual)\n column_names = _ensure_strings_from_expressions(column_names)\n if index_name not in column_names and index_name is not None:\n column_names = column_names + [index_name]\n\n def create_pdf(data):\n if index_name is not None:\n index = data.pop(index_name)\n else:\n index = None\n df = pd.DataFrame(data=data, index=index)\n if index is not None:\n df.index.name = index_name\n return df\n if chunk_size is not None:\n def iterator():\n for i1, i2, chunks in self.evaluate_iterator(column_names, selection=selection, parallel=parallel, chunk_size=chunk_size, array_type=array_type):\n yield i1, i2, create_pdf(dict(zip(column_names, chunks)))\n return iterator()\n else:\n return create_pdf(self.to_dict(column_names=column_names, selection=selection, parallel=parallel, array_type=array_type))\n\n @docsubst\n def to_arrow_table(self, column_names=None, selection=None, strings=True, virtual=True, parallel=True, chunk_size=None, reduce_large=False):\n \"\"\"Returns an arrow Table object containing the arrays corresponding to the evaluated data\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param parallel: {evaluate_parallel}\n :param chunk_size: {chunk_size}\n :param bool reduce_large: If possible, cast large_string to normal string\n :return: pyarrow.Table object or iterator of\n \"\"\"\n import pyarrow as pa\n column_names = column_names or self.get_column_names(strings=strings, virtual=virtual)\n column_names = _ensure_strings_from_expressions(column_names)\n if chunk_size is not None:\n def iterator():\n for i1, i2, chunks in self.evaluate_iterator(column_names, selection=selection, parallel=parallel, chunk_size=chunk_size):\n chunks = list(map(vaex.array_types.to_arrow, chunks))\n if reduce_large:\n chunks = list(map(vaex.array_types.arrow_reduce_large, chunks))\n yield i1, i2, pa.Table.from_arrays(chunks, column_names)\n return iterator()\n else:\n chunks = self.evaluate(column_names, selection=selection, parallel=parallel)\n chunks = list(map(vaex.array_types.to_arrow, chunks))\n if reduce_large:\n chunks = list(map(vaex.array_types.arrow_reduce_large, chunks))\n return pa.Table.from_arrays(chunks, column_names)\n\n @docsubst\n def to_astropy_table(self, column_names=None, selection=None, strings=True, virtual=True, index=None, parallel=True):\n \"\"\"Returns a astropy table object containing the 
ndarrays corresponding to the evaluated data\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :param index: if this column is given it is used for the index of the DataFrame\n :return: astropy.table.Table object\n \"\"\"\n from astropy.table import Table, Column, MaskedColumn\n meta = dict()\n meta[\"description\"] = self.description\n\n table = Table(meta=meta)\n for name, data in self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual, parallel=parallel):\n if self.is_string(name): # for astropy we convert it to unicode, it seems to ignore object type\n data = np.array(data).astype('U')\n meta = dict()\n if name in self.ucds:\n meta[\"ucd\"] = self.ucds[name]\n if np.ma.isMaskedArray(data):\n cls = MaskedColumn\n else:\n cls = Column\n table[name] = cls(data, unit=self.unit(name), description=self.descriptions.get(name), meta=meta)\n return table\n\n def to_dask_array(self, chunks=\"auto\"):\n \"\"\"Lazily expose the DataFrame as a dask.array\n\n Example\n\n >>> df = vaex.example()\n >>> A = df[['x', 'y', 'z']].to_dask_array()\n >>> A\n dask.array<vaex-df-1f048b40-10ec-11ea-9553, shape=(330000, 3), dtype=float64, chunksize=(330000, 3), chunktype=numpy.ndarray>\n >>> A+1\n dask.array<add, shape=(330000, 3), dtype=float64, chunksize=(330000, 3), chunktype=numpy.ndarray>\n\n :param chunks: How to chunk the array, similar to :func:`dask.array.from_array`.\n :return: :class:`dask.array.Array` object.\n \"\"\"\n import dask.array as da\n import uuid\n dtype = self._dtype\n chunks = da.core.normalize_chunks(chunks, shape=self.shape, dtype=dtype.numpy)\n name = 'vaex-df-%s' % str(uuid.uuid1())\n def getitem(df, item):\n return np.array(df.__getitem__(item).to_arrays(parallel=False)).T\n # broken since https://github.com/dask/dask/pull/7417\n if hasattr(da.core, \"getem\"):\n dsk = da.core.getem(name, chunks, getitem=getitem, shape=self.shape, dtype=dtype.numpy)\n dsk[name] = self\n return da.Array(dsk, name, chunks, dtype=dtype.numpy)\n else:\n dsk = da.core.graph_from_arraylike(self, name=name, chunks=chunks, getitem=getitem, shape=self.shape, dtype=dtype.numpy)\n return da.Array(dsk, name, chunks, dtype=dtype.numpy)\n\n def validate_expression(self, expression):\n \"\"\"Validate an expression (may throw Exceptions)\"\"\"\n # return self.evaluate(expression, 0, 2)\n if str(expression) in self.virtual_columns:\n return\n if self.is_local() and str(expression) in self.columns:\n return\n vars = set(self.get_names(hidden=True)) | {'df'}\n funcs = set(expression_namespace.keys()) | set(self.functions.keys())\n try:\n return vaex.expresso.validate_expression(expression, vars, funcs)\n except NameError as e:\n raise NameError(str(e)) from None\n\n def _block_scope(self, i1, i2):\n variables = {key: self.evaluate_variable(key) for key in self.variables.keys()}\n return scopes._BlockScope(self, i1, i2, **variables)\n\n def select(self, boolean_expression, mode=\"replace\", name=\"default\"):\n \"\"\"Select rows based on the boolean_expression, if there was a previous selection, the mode is taken into account.\n\n if boolean_expression is None, remove the selection, has_selection() will returns false\n\n Note that per DataFrame, multiple selections are possible, and 
one filter (see :func:`DataFrame.select`).\n\n :param str boolean_expression: boolean expression, such as 'x < 0', '(x < 0) || (y > -10)' or None to remove the selection\n :param str mode: boolean operation to perform with the previous selection, \"replace\", \"and\", \"or\", \"xor\", \"subtract\"\n :return: None\n \"\"\"\n raise NotImplementedError\n\n def add_column(self, name, f_or_array, dtype=None):\n \"\"\"Add an in memory array as a column.\"\"\"\n column_position = len(self.column_names)\n if name in self.get_column_names():\n column_position = self.column_names.index(name)\n renamed = '__' +vaex.utils.find_valid_name(name, used=self.get_column_names())\n self._rename(name, renamed)\n\n if isinstance(f_or_array, supported_column_types):\n data = ar = f_or_array\n # it can be None when we have an 'empty' DataFrameArrays\n if self._length_original is None:\n self._length_unfiltered = _len(data)\n self._length_original = _len(data)\n self._index_end = self._length_unfiltered\n if _len(ar) != self.length_original():\n if self.filtered:\n # give a better warning to avoid confusion\n if len(self) == len(ar):\n raise ValueError(\"Array is of length %s, while the length of the DataFrame is %s due to the filtering, the (unfiltered) length is %s.\" % (len(ar), len(self), self.length_unfiltered()))\n raise ValueError(\"array is of length %s, while the length of the DataFrame is %s\" % (len(ar), self.length_original()))\n valid_name = vaex.utils.find_valid_name(name, used=self.get_column_names(hidden=True))\n self.columns[valid_name] = ar\n if valid_name not in self.column_names:\n self.column_names.insert(column_position, valid_name)\n else:\n raise ValueError(\"functions not yet implemented\")\n # self._save_assign_expression(valid_name, Expression(self, valid_name))\n self._initialize_column(valid_name)\n\n def _initialize_column(self, name):\n self._save_assign_expression(name)\n\n def _sparse_matrix(self, column):\n column = _ensure_string_from_expression(column)\n return self._sparse_matrices.get(column)\n\n def add_columns(self, names, columns):\n from scipy.sparse import csc_matrix, csr_matrix\n if isinstance(columns, csr_matrix):\n if len(names) != columns.shape[1]:\n raise ValueError('number of columns ({}) does not match number of column names ({})'.format(columns.shape[1], len(names)))\n for i, name in enumerate(names):\n valid_name = vaex.utils.find_valid_name(name, used=self.get_column_names(hidden=True))\n self.columns[valid_name] = ColumnSparse(columns, i)\n self.column_names.append(valid_name)\n self._sparse_matrices[valid_name] = columns\n self._save_assign_expression(valid_name)\n else:\n raise ValueError('only scipy.sparse.csr_matrix is supported')\n\n def _save_assign_expression(self, name, expression=None):\n obj = getattr(self, name, None)\n # it's ok to set it if it does not exist, or we overwrite an older expression\n if obj is None or isinstance(obj, Expression):\n if expression is None:\n expression = name\n if isinstance(expression, str):\n expression = vaex.utils.valid_expression(self.get_column_names(hidden=True), expression)\n expression = Expression(self, expression)\n setattr(self, name, expression)\n\n @_hidden\n def add_column_healpix(self, name=\"healpix\", longitude=\"ra\", latitude=\"dec\", degrees=True, healpix_order=12, nest=True):\n \"\"\"Add a healpix (in memory) column based on a longitude and latitude\n\n :param name: Name of column\n :param longitude: longitude expression\n :param latitude: latitude expression (astronomical convenction latitude=90 
is north pole)\n :param degrees: If lon/lat are in degrees (default) or radians.\n :param healpix_order: healpix order, >= 0\n :param nest: Nested healpix (default) or ring.\n \"\"\"\n import healpy as hp\n if degrees:\n scale = \"*pi/180\"\n else:\n scale = \"\"\n # TODO: multithread this\n phi = self.evaluate(\"(%s)%s\" % (longitude, scale))\n theta = self.evaluate(\"pi/2-(%s)%s\" % (latitude, scale))\n hp_index = hp.ang2pix(hp.order2nside(healpix_order), theta, phi, nest=nest)\n self.add_column(\"healpix\", hp_index)\n\n\n @_hidden\n def add_virtual_columns_matrix3d(self, x, y, z, xnew, ynew, znew, matrix, matrix_name='deprecated', matrix_is_expression=False, translation=[0, 0, 0], propagate_uncertainties=False):\n \"\"\"\n\n :param str x: name of x column\n :param str y:\n :param str z:\n :param str xnew: name of transformed x column\n :param str ynew:\n :param str znew:\n :param list[list] matrix: 2d array or list, with [row,column] order\n :param str matrix_name:\n :return:\n \"\"\"\n m = matrix\n x, y, z = self._expr(x, y, z)\n\n self[xnew] = m[0][0] * x + m[0][1] * y + m[0][2] * z + translation[0]\n self[ynew] = m[1][0] * x + m[1][1] * y + m[1][2] * z + translation[1]\n self[znew] = m[2][0] * x + m[2][1] * y + m[2][2] * z + translation[2]\n\n if propagate_uncertainties:\n self.propagate_uncertainties([self[xnew], self[ynew], self[znew]], [x, y, z])\n\n # wrap these with an informative msg\n # add_virtual_columns_eq2ecl = _requires('astro')\n # add_virtual_columns_eq2gal = _requires('astro')\n # add_virtual_columns_distance_from_parallax = _requires('astro')\n # add_virtual_columns_cartesian_velocities_to_pmvr = _requires('astro')\n # add_virtual_columns_proper_motion_eq2gal = _requires('astro')\n # add_virtual_columns_lbrvr_proper_motion2vcartesian = _requires('astro')\n # add_virtual_columns_equatorial_to_galactic_cartesian = _requires('astro')\n # add_virtual_columns_celestial = _requires('astro')\n # add_virtual_columns_proper_motion2vperpendicular = _requires('astro')\n\n def _covariance_matrix_guess(self, columns, full=False, as_expression=False):\n all_column_names = self.get_column_names()\n columns = _ensure_strings_from_expressions(columns)\n\n def _guess(x, y):\n if x == y:\n postfixes = [\"_error\", \"_uncertainty\", \"e\", \"_e\"]\n prefixes = [\"e\", \"e_\"]\n for postfix in postfixes:\n if x + postfix in all_column_names:\n return x + postfix\n for prefix in prefixes:\n if prefix + x in all_column_names:\n return prefix + x\n if full:\n raise ValueError(\"No uncertainty found for %r\" % x)\n else:\n\n postfixes = [\"_cov\", \"_covariance\"]\n for postfix in postfixes:\n if x + \"_\" + y + postfix in all_column_names:\n return x + \"_\" + y + postfix\n if y + \"_\" + x + postfix in all_column_names:\n return y + \"_\" + x + postfix\n postfixes = [\"_correlation\", \"_corr\"]\n for postfix in postfixes:\n if x + \"_\" + y + postfix in all_column_names:\n return x + \"_\" + y + postfix + \" * \" + _guess(x, x) + \" * \" + _guess(y, y)\n if y + \"_\" + x + postfix in all_column_names:\n return y + \"_\" + x + postfix + \" * \" + _guess(y, y) + \" * \" + _guess(x, x)\n if full:\n raise ValueError(\"No covariance or correlation found for %r and %r\" % (x, y))\n return \"0\"\n N = len(columns)\n cov_matrix = [[\"\"] * N for i in range(N)]\n for i in range(N):\n for j in range(N):\n cov = _guess(columns[i], columns[j])\n if i == j and cov:\n cov += \"**2\" # square the diagnal\n cov_matrix[i][j] = cov\n if as_expression:\n return [[self[k] for k in row] for row in 
cov_matrix]\n else:\n return cov_matrix\n\n def _jacobian(self, expressions, variables):\n expressions = _ensure_strings_from_expressions(expressions)\n return [[self[expression].expand(stop=[var]).derivative(var) for var in variables] for expression in expressions]\n\n def propagate_uncertainties(self, columns, depending_variables=None, cov_matrix='auto',\n covariance_format=\"{}_{}_covariance\",\n uncertainty_format=\"{}_uncertainty\"):\n \"\"\"Propagates uncertainties (full covariance matrix) for a set of virtual columns.\n\n Covariance matrix of the depending variables is guessed by finding columns prefixed by \"e\"\n or `\"e_\"` or postfixed by \"_error\", \"_uncertainty\", \"e\" and `\"_e\"`.\n Off diagonals (covariance or correlation) by postfixes with \"_correlation\" or \"_corr\" for\n correlation or \"_covariance\" or \"_cov\" for covariances.\n (Note that x_y_cov = x_e * y_e * x_y_correlation.)\n\n\n Example\n\n >>> df = vaex.from_scalars(x=1, y=2, e_x=0.1, e_y=0.2)\n >>> df[\"u\"] = df.x + df.y\n >>> df[\"v\"] = np.log10(df.x)\n >>> df.propagate_uncertainties([df.u, df.v])\n >>> df.u_uncertainty, df.v_uncertainty\n\n :param columns: list of columns for which to calculate the covariance matrix.\n :param depending_variables: If not given, it is found out automatically, otherwise a list of columns which have uncertainties.\n :param cov_matrix: List of list with expressions giving the covariance matrix, in the same order as depending_variables. If 'full' or 'auto',\n the covariance matrix for the depending_variables will be guessed, where 'full' gives an error if an entry was not found.\n \"\"\"\n\n names = _ensure_strings_from_expressions(columns)\n virtual_columns = self._expr(*columns, always_list=True)\n\n if depending_variables is None:\n depending_variables = set()\n for expression in virtual_columns:\n depending_variables |= expression.expand().variables()\n depending_variables = list(sorted(list(depending_variables)))\n\n fs = [self[self.virtual_columns[name]] for name in names]\n jacobian = self._jacobian(fs, depending_variables)\n m = len(fs)\n n = len(depending_variables)\n\n # n x n matrix\n cov_matrix = self._covariance_matrix_guess(depending_variables, full=cov_matrix == \"full\", as_expression=True)\n\n # empty m x m matrix\n cov_matrix_out = [[self['0'] for __ in range(m)] for __ in range(m)]\n for i in range(m):\n for j in range(m):\n for k in range(n):\n for l in range(n):\n if jacobian[i][k].expression == '0' or jacobian[j][l].expression == '0' or cov_matrix[k][l].expression == '0':\n pass\n else:\n cov_matrix_out[i][j] = cov_matrix_out[i][j] + jacobian[i][k] * cov_matrix[k][l] * jacobian[j][l]\n for i in range(m):\n for j in range(i + 1):\n sigma = cov_matrix_out[i][j]\n sigma = self._expr(vaex.expresso.simplify(_ensure_string_from_expression(sigma)))\n if i != j:\n self.add_virtual_column(covariance_format.format(names[i], names[j]), sigma)\n else:\n self.add_virtual_column(uncertainty_format.format(names[i]), np.sqrt(sigma))\n\n @_hidden\n def add_virtual_columns_cartesian_to_polar(self, x=\"x\", y=\"y\", radius_out=\"r_polar\", azimuth_out=\"phi_polar\",\n propagate_uncertainties=False,\n radians=False):\n kwargs = dict(**locals())\n del kwargs['self']\n return self.geo.cartesian_to_polar(inplace=True, **kwargs)\n\n @_hidden\n def add_virtual_columns_cartesian_velocities_to_spherical(self, x=\"x\", y=\"y\", z=\"z\", vx=\"vx\", vy=\"vy\", vz=\"vz\", vr=\"vr\", vlong=\"vlong\", vlat=\"vlat\", distance=None):\n kwargs = dict(**locals())\n del kwargs['self']\n 
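# forward the keyword arguments captured via locals() (minus 'self') to the geo accessor,\n # which implements the actual coordinate transformation; the neighbouring\n # add_virtual_columns_* wrappers follow the same delegation pattern\n 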
return self.geo.velocity_cartesian2spherical(inplace=True, **kwargs)\n\n def _expr(self, *expressions, **kwargs):\n always_list = kwargs.pop('always_list', False)\n return self[str(expressions[0])] if len(expressions) == 1 and not always_list else [self[str(k)] for k in expressions]\n\n def _selection_expression(self, expression):\n return vaex.expression.Expression(self, str(expression), _selection=True)\n\n @_hidden\n def add_virtual_columns_cartesian_velocities_to_polar(self, x=\"x\", y=\"y\", vx=\"vx\", radius_polar=None, vy=\"vy\", vr_out=\"vr_polar\", vazimuth_out=\"vphi_polar\",\n propagate_uncertainties=False,):\n kwargs = dict(**locals())\n del kwargs['self']\n return self.geo.velocity_cartesian2polar(inplace=True, **kwargs)\n\n @_hidden\n def add_virtual_columns_polar_velocities_to_cartesian(self, x='x', y='y', azimuth=None, vr='vr_polar', vazimuth='vphi_polar', vx_out='vx', vy_out='vy', propagate_uncertainties=False):\n kwargs = dict(**locals())\n del kwargs['self']\n return self.geo.velocity_polar2cartesian(inplace=True, **kwargs)\n\n @_hidden\n def add_virtual_columns_rotation(self, x, y, xnew, ynew, angle_degrees, propagate_uncertainties=False):\n kwargs = dict(**locals())\n del kwargs['self']\n return self.geo.rotation_2d(inplace=True, **kwargs)\n\n @docsubst\n @_hidden\n def add_virtual_columns_spherical_to_cartesian(self, alpha, delta, distance, xname=\"x\", yname=\"y\", zname=\"z\",\n propagate_uncertainties=False,\n center=[0, 0, 0], radians=False):\n kwargs = dict(**locals())\n del kwargs['self']\n return self.geo.spherical2cartesian(inplace=True, **kwargs)\n\n @_hidden\n def add_virtual_columns_cartesian_to_spherical(self, x=\"x\", y=\"y\", z=\"z\", alpha=\"l\", delta=\"b\", distance=\"distance\", radians=False, center=None, center_name=\"solar_position\"):\n kwargs = dict(**locals())\n del kwargs['self']\n return self.geo.cartesian2spherical(inplace=True, **kwargs)\n\n @_hidden\n def add_virtual_columns_aitoff(self, alpha, delta, x, y, radians=True):\n kwargs = dict(**locals())\n del kwargs['self']\n return self.geo.project_aitoff(inplace=True, **kwargs)\n\n @_hidden\n def add_virtual_columns_projection_gnomic(self, alpha, delta, alpha0=0, delta0=0, x=\"x\", y=\"y\", radians=False, postfix=\"\"):\n kwargs = dict(**locals())\n del kwargs['self']\n return self.geo.project_gnomic(inplace=True, **kwargs)\n\n def add_function(self, name, f, unique=False):\n name = vaex.utils.find_valid_name(name, used=[] if not unique else self.functions.keys())\n function = vaex.expression.Function(self, name, f)\n self.functions[name] = function\n return function\n\n def add_virtual_column(self, name, expression, unique=False):\n \"\"\"Add a virtual column to the DataFrame.\n\n Example:\n\n >>> df.add_virtual_column(\"r\", \"sqrt(x**2 + y**2 + z**2)\")\n >>> df.select(\"r < 10\")\n\n :param: str name: name of virtual column\n :param: expression: expression for the column\n :param str unique: if name is already used, make it unique by adding a postfix, e.g. 
_1, or _2\n \"\"\"\n if isinstance(expression, Expression):\n if expression.df is not self:\n expression = expression.copy(self)\n column_position = len(self.column_names)\n # if the current name is an existing column name....\n if name in self.get_column_names(hidden=True):\n column_position = self.column_names.index(name)\n renamed = vaex.utils.find_valid_name('__' +name, used=self.get_column_names(hidden=True))\n # we rewrite all existing expressions (including the passed down expression argument)\n self._rename(name, renamed)\n expression = _ensure_string_from_expression(expression)\n\n if vaex.utils.find_valid_name(name) != name:\n # if we have to rewrite the name, we need to make it unique\n unique = True\n valid_name = vaex.utils.find_valid_name(name, used=None if not unique else self.get_column_names(hidden=True))\n\n self.virtual_columns[valid_name] = expression\n self._virtual_expressions[valid_name] = Expression(self, expression)\n if name not in self.column_names:\n self.column_names.insert(column_position, valid_name)\n self._save_assign_expression(valid_name)\n self.signal_column_changed.emit(self, valid_name, \"add\")\n\n def rename(self, name, new_name, unique=False):\n \"\"\"Renames a column or variable, and rewrite expressions such that they refer to the new name\"\"\"\n if name == new_name:\n return\n new_name = vaex.utils.find_valid_name(new_name, used=None if not unique else self.get_column_names(hidden=True))\n self._rename(name, new_name, rename_meta_data=True)\n return new_name\n\n def _rename(self, old, new, rename_meta_data=False):\n is_variable = False\n is_function = False\n if old in self.variables:\n self.variables[new] = self.variables.pop(old)\n is_variable = True\n if old in self.functions:\n self.functions[new] = self.functions.pop(old)\n is_function = True\n elif old in self.virtual_columns:\n # renaming a column should not change the internal order, otherwise virtual\n # columns do not resolve (it will reference an unknown column)\n self.virtual_columns = vaex.utils.dict_replace_key(self.virtual_columns, old, new)\n self._virtual_expressions = vaex.utils.dict_replace_key(self._virtual_expressions, old, new)\n elif self.is_local() and old in self.columns:\n # we only have to do this locally\n # if we don't do this locally, we still store this info\n # in self._renamed_columns, so it will happen at the server\n self.dataset = self.dataset.renamed({old: new})\n if rename_meta_data:\n for d in [self.ucds, self.units, self.descriptions]:\n if old in d:\n d[new] = d[old]\n del d[old]\n for key, value in self.selection_histories.items():\n self.selection_histories[key] = list([k if k is None else k._rename(self, old, new) for k in value])\n if not (is_variable or is_function):\n if new not in self.virtual_columns:\n self._renamed_columns.append((old, new))\n self.column_names[self.column_names.index(old)] = new\n if hasattr(self, old):\n if isinstance(getattr(self, old), Expression):\n try:\n delattr(self, old)\n except:\n pass\n self._save_assign_expression(new)\n existing_expressions = [k() for k in self._expressions]\n existing_expressions = [k for k in existing_expressions if k is not None]\n for expression in existing_expressions:\n expression._rename(old, new, inplace=True)\n self.virtual_columns = {k:self._virtual_expressions[k].expression for k, v in self.virtual_columns.items()}\n\n def delete_virtual_column(self, name):\n \"\"\"Deletes a virtual column from a DataFrame.\"\"\"\n self.drop(name, inplace=True)\n self.signal_column_changed.emit(self, 
name, \"delete\")\n\n def add_variable(self, name, expression, overwrite=True, unique=True):\n \"\"\"Add a variable to a DataFrame.\n\n A variable may refer to other variables, and virtual columns and expression may refer to variables.\n\n Example\n\n >>> df.add_variable('center', 0)\n >>> df.add_virtual_column('x_prime', 'x-center')\n >>> df.select('x_prime < 0')\n\n :param: str name: name of virtual varible\n :param: expression: expression for the variable\n \"\"\"\n if unique or overwrite or name not in self.variables:\n existing_names = self.get_column_names(virtual=False) + list(self.variables.keys())\n name = vaex.utils.find_valid_name(name, used=[] if not unique else existing_names)\n self.variables[name] = expression\n self.signal_variable_changed.emit(self, name, \"add\")\n if unique:\n return name\n\n def delete_variable(self, name):\n \"\"\"Deletes a variable from a DataFrame.\"\"\"\n del self.variables[name]\n self.signal_variable_changed.emit(self, name, \"delete\")\n\n def info(self, description=True):\n from IPython import display\n self._output_css()\n display.display(display.HTML(self._info(description=description)))\n\n def _info(self, description=True):\n parts = [\"\"\"<div><h2>{}</h2> <b>rows</b>: {:,}</div>\"\"\".format(self.name, len(self))]\n if hasattr(self, 'path'):\n parts += [\"\"\"<div><b>path</b>: <i>%s</i></div>\"\"\" % (self.path)]\n if self.description:\n parts += [\"\"\"<div><b>Description</b>: {}</div>\"\"\".format(self.description)]\n parts += [\"<h2>Columns:</h2>\"]\n parts += [\"<table class='table-striped'>\"]\n parts += [\"<thead><tr>\"]\n for header in \"column type unit description expression\".split():\n if description or header != \"description\":\n parts += [\"<th>%s</th>\" % header]\n parts += [\"</tr></thead>\"]\n for name in self.get_column_names():\n parts += [\"<tr>\"]\n parts += [\"<td>%s</td>\" % name]\n virtual = name in self.virtual_columns\n if not virtual:\n dtype = str(self.data_type(name)) if self.data_type(name) != str else 'str'\n else:\n dtype = \"</i>virtual column</i>\"\n parts += [\"<td>%s</td>\" % dtype]\n units = self.unit(name)\n units = units.to_string(\"latex_inline\") if units else \"\"\n parts += [\"<td>%s</td>\" % units]\n if description:\n parts += [\"<td ><pre>%s</pre></td>\" % self.descriptions.get(name, \"\")]\n if virtual:\n parts += [\"<td><code>%s</code></td>\" % self.virtual_columns[name]]\n else:\n parts += [\"<td></td>\"]\n parts += [\"</tr>\"]\n parts += \"</table>\"\n\n ignore_list = 'pi e km_in_au seconds_per_year'.split()\n variable_names = [name for name in self.variables.keys() if name not in ignore_list]\n if variable_names:\n parts += [\"<h2>Variables:</h2>\"]\n parts += [\"<table class='table-striped'>\"]\n parts += [\"<thead><tr>\"]\n for header in \"variable type unit description expression\".split():\n if description or header != \"description\":\n parts += [\"<th>%s</th>\" % header]\n parts += [\"</tr></thead>\"]\n for name in variable_names:\n parts += [\"<tr>\"]\n parts += [\"<td>%s</td>\" % name]\n parts += [\"<td>%r</td>\" % type]\n units = self.unit(name)\n units = units.to_string(\"latex_inline\") if units else \"\"\n parts += [\"<td>%s</td>\" % units]\n if description:\n parts += [\"<td ><pre>%s</pre></td>\" % self.descriptions.get(name, \"\")]\n parts += [\"<td><code>%s</code></td>\" % (self.variables[name], )]\n parts += [\"</tr>\"]\n parts += \"</table>\"\n\n return \"\".join(parts) + \"<h2>Data:</h2>\" + self._head_and_tail_table()\n\n def head(self, n=10):\n \"\"\"Return a shallow 
copy a DataFrame with the first n rows.\"\"\"\n return self[:min(n, len(self))]\n\n def tail(self, n=10):\n \"\"\"Return a shallow copy a DataFrame with the last n rows.\"\"\"\n N = len(self)\n # self.cat(i1=max(0, N-n), i2=min(len(self), N))\n return self[max(0, N - n):min(len(self), N)]\n\n def _head_and_tail_table(self, n=None, format='html'):\n n = n or vaex.settings.display.max_rows\n N = _len(self)\n if N <= n:\n return self._as_table(0, N, format=format)\n else:\n return self._as_table(0, math.ceil(n / 2), N - math.floor(n / 2), N, format=format)\n\n def head_and_tail_print(self, n=5):\n \"\"\"Display the first and last n elements of a DataFrame.\"\"\"\n from IPython import display\n display.display(display.HTML(self._head_and_tail_table(n)))\n\n def describe(self, strings=True, virtual=True, selection=None):\n \"\"\"Give a description of the DataFrame.\n\n >>> import vaex\n >>> df = vaex.example()[['x', 'y', 'z']]\n >>> df.describe()\n x y z\n dtype float64 float64 float64\n count 330000 330000 330000\n missing 0 0 0\n mean -0.0671315 -0.0535899 0.0169582\n std 7.31746 7.78605 5.05521\n min -128.294 -71.5524 -44.3342\n max 271.366 146.466 50.7185\n >>> df.describe(selection=df.x > 0)\n x y z\n dtype float64 float64 float64\n count 164060 164060 164060\n missing 165940 165940 165940\n mean 5.13572 -0.486786 -0.0868073\n std 5.18701 7.61621 5.02831\n min 1.51635e-05 -71.5524 -44.3342\n max 271.366 78.0724 40.2191\n\n :param bool strings: Describe string columns or not\n :param bool virtual: Describe virtual columns or not\n :param selection: Optional selection to use.\n :return: Pandas dataframe\n\n \"\"\"\n import pandas as pd\n N = len(self)\n columns = {}\n for feature in self.get_column_names(strings=strings, virtual=virtual)[:]:\n data_type = self.data_type(feature)\n if data_type == str:\n count = self.count(feature, selection=selection, delay=True)\n self.execute()\n count = count.get()\n columns[feature] = ((data_type, count, N-count, '--', '--', '--', '--'))\n elif data_type.kind in 'SU':\n # TODO: this blocks is the same as the string block above, can we avoid SU types?\n count = self.count(feature, selection=selection, delay=True)\n self.execute()\n count = count.get()\n columns[feature] = ((data_type, count, N-count, '--', '--', '--', '--'))\n elif data_type.kind in 'O':\n # this will also properly count NaN-like objects like NaT\n count_na = self[feature].isna().astype('int').sum(delay=True)\n self.execute()\n count_na = count_na.get()\n columns[feature] = ((data_type, N-count_na, count_na, '--', '--', '--', '--'))\n elif data_type.is_primitive or data_type.is_temporal:\n mean = self.mean(feature, selection=selection, delay=True)\n std = self.std(feature, selection=selection, delay=True)\n minmax = self.minmax(feature, selection=selection, delay=True)\n if data_type.is_datetime: # this path tests using isna, which test for nat\n count_na = self[feature].isna().astype('int').sum(delay=True)\n else:\n count = self.count(feature, selection=selection, delay=True)\n self.execute()\n if data_type.is_datetime:\n count_na, mean, std, minmax = count_na.get(), mean.get(), std.get(), minmax.get()\n count = N - int(count_na)\n else:\n count, mean, std, minmax = count.get(), mean.get(), std.get(), minmax.get()\n count = int(count)\n columns[feature] = ((data_type, count, N-count, mean, std, minmax[0], minmax[1]))\n else:\n raise NotImplementedError(f'Did not implement describe for data type {data_type}')\n return pd.DataFrame(data=columns, index=['data_type', 'count', 'NA', 'mean', 
'std', 'min', 'max'])\n\n def cat(self, i1, i2, format='html'):\n \"\"\"Display the DataFrame from row i1 till i2\n\n For format, see https://pypi.org/project/tabulate/\n\n :param int i1: Start row\n :param int i2: End row.\n :param str format: Format to use, e.g. 'html', 'plain', 'latex'\n \"\"\"\n from IPython import display\n if format == 'html':\n output = self._as_html_table(i1, i2)\n display.display(display.HTML(output))\n else:\n output = self._as_table(i1, i2, format=format)\n print(output)\n\n def _as_table(self, i1, i2, j1=None, j2=None, format='html', ellipsis=\"...\"):\n from .formatting import _format_value\n parts = [] # \"\"\"<div>%s (length=%d)</div>\"\"\" % (self.name, len(self))]\n parts += [\"<table class='table-striped'>\"]\n\n # we need to get the underlying names since we use df.evaluate\n column_names = self.get_column_names()\n max_columns = vaex.settings.display.max_columns\n if (max_columns is not None) and (max_columns > 0):\n if max_columns < len(column_names):\n columns_sliced = math.ceil(max_columns/2)\n column_names = column_names[:columns_sliced] + column_names[-math.floor(max_columns/2):]\n else:\n columns_sliced = None\n values_list = []\n values_list.append(['#', []])\n # parts += [\"<thead><tr>\"]\n for i, name in enumerate(column_names):\n if columns_sliced == i:\n values_list.append([ellipsis, []])\n values_list.append([name, []])\n # parts += [\"<th>%s</th>\" % name]\n # parts += [\"</tr></thead>\"]\n def table_part(k1, k2, parts):\n N = k2 - k1\n # slicing will invoke .extract which will make the evaluation\n # much quicker\n df = self[k1:k2]\n try:\n values = dict(zip(column_names, df.evaluate(column_names)))\n except:\n values = {}\n for i, name in enumerate(column_names):\n try:\n values[name] = df.evaluate(name)\n except:\n values[name] = [\"error\"] * (N)\n logger.exception('error evaluating: %s at rows %i-%i' % (name, k1, k2))\n for i in range(k2 - k1):\n # parts += [\"<tr>\"]\n # parts += [\"<td><i style='opacity: 0.6'>{:,}</i></td>\".format(i + k1)]\n if format == 'html':\n value = \"<i style='opacity: 0.6'>{:,}</i>\".format(i + k1)\n else:\n value = \"{:,}\".format(i + k1)\n values_list[0][1].append(value)\n for j, name in enumerate(column_names):\n column_index = j\n if columns_sliced == j:\n values_list[column_index+1][1].append(ellipsis)\n if columns_sliced is not None and j >= columns_sliced:\n column_index += 1 # skip over the slice/ellipsis\n value = values[name][i]\n value = _format_value(value)\n values_list[column_index+1][1].append(value)\n # parts += [\"</tr>\"]\n # return values_list\n if i2 - i1 > 0:\n parts = table_part(i1, i2, parts)\n if j1 is not None and j2 is not None:\n values_list[0][1].append(ellipsis)\n for i in range(len(column_names)):\n # parts += [\"<td>...</td>\"]\n values_list[i+1][1].append(ellipsis)\n\n # parts = table_part(j1, j2, parts)\n table_part(j1, j2, parts)\n else:\n for header, values in values_list:\n values.append(None)\n # parts += \"</table>\"\n # html = \"\".join(parts)\n # return html\n values_list = dict(values_list)\n # print(values_list)\n import tabulate\n table_text = str(tabulate.tabulate(values_list, headers=\"keys\", tablefmt=format))\n # Tabulate 0.8.7+ escapes html :()\n table_text = table_text.replace('&lt;i style=&#x27;opacity: 0.6&#x27;&gt;', \"<i style='opacity: 0.6'>\")\n table_text = table_text.replace('&lt;/i&gt;', \"</i>\")\n if i2 - i1 == 0:\n if self._length_unfiltered != len(self):\n footer_text = 'No rows to display (because of filtering).'\n else:\n footer_text = 'No rows 
to display.'\n if format == 'html':\n table_text += f'<i>{footer_text}</i>'\n if format == 'plain':\n table_text += f'\\n{footer_text}'\n return table_text\n\n def _as_html_table(self, i1, i2, j1=None, j2=None):\n # TODO: this method can be replaced by _as_table\n from .formatting import _format_value\n parts = [] # \"\"\"<div>%s (length=%d)</div>\"\"\" % (self.name, len(self))]\n parts += [\"<table class='table-striped'>\"]\n\n column_names = self.get_column_names()\n parts += [\"<thead><tr>\"]\n for name in [\"#\"] + column_names:\n parts += [\"<th>%s</th>\" % name]\n parts += [\"</tr></thead>\"]\n\n def table_part(k1, k2, parts):\n data_parts = {}\n N = k2 - k1\n for name in column_names:\n try:\n data_parts[name] = self.evaluate(name, i1=k1, i2=k2)\n except:\n data_parts[name] = [\"error\"] * (N)\n logger.exception('error evaluating: %s at rows %i-%i' % (name, k1, k2))\n for i in range(k2 - k1):\n parts += [\"<tr>\"]\n parts += [\"<td><i style='opacity: 0.6'>{:,}</i></td>\".format(i + k1)]\n for name in column_names:\n value = data_parts[name][i]\n value = _format_value(value)\n parts += [\"<td>%r</td>\" % value]\n parts += [\"</tr>\"]\n return parts\n parts = table_part(i1, i2, parts)\n if j1 is not None and j2 is not None:\n for i in range(len(column_names) + 1):\n parts += [\"<td>...</td>\"]\n parts = table_part(j1, j2, parts)\n parts += \"</table>\"\n html = \"\".join(parts)\n return html\n\n def _output_css(self):\n css = \"\"\".vaex-description pre {\n max-width : 450px;\n white-space : nowrap;\n overflow : hidden;\n text-overflow: ellipsis;\n }\n\n .vex-description pre:hover {\n max-width : initial;\n white-space: pre;\n }\"\"\"\n from IPython import display\n style = \"<style>%s</style>\" % css\n display.display(display.HTML(style))\n\n def _repr_mimebundle_(self, include=None, exclude=None, **kwargs):\n # TODO: optimize, since we use the same data in both versions\n # TODO: include latex version\n return {'text/html':self._head_and_tail_table(format='html'), 'text/plain': self._head_and_tail_table(format='plain')}\n\n def _repr_html_(self):\n \"\"\"Representation for Jupyter.\"\"\"\n self._output_css()\n return self._head_and_tail_table()\n\n def __str__(self):\n return self._head_and_tail_table(format='plain')\n\n if not _DEBUG:\n def __repr__(self):\n return self._head_and_tail_table(format='plain')\n\n def __current_sequence_index(self):\n \"\"\"TODO\"\"\"\n return 0\n\n def has_current_row(self):\n \"\"\"Returns True/False if there currently is a picked row.\"\"\"\n return self._current_row is not None\n\n def get_current_row(self):\n \"\"\"Individual rows can be 'picked', this is the index (integer) of the current row, or None there is nothing picked.\"\"\"\n return self._current_row\n\n def set_current_row(self, value):\n \"\"\"Set the current row, and emit the signal signal_pick.\"\"\"\n if (value is not None) and ((value < 0) or (value >= len(self))):\n raise IndexError(\"index %d out of range [0,%d]\" % (value, len(self)))\n self._current_row = value\n self.signal_pick.emit(self, value)\n\n def __has_snapshots(self):\n # currenly disabled\n return False\n\n def column_count(self, hidden=False):\n \"\"\"Returns the number of columns (including virtual columns).\n\n :param bool hidden: If True, include hidden columns in the tally\n :returns: Number of columns in the DataFrame\n \"\"\"\n return len(self.get_column_names(hidden=hidden))\n\n def get_names(self, hidden=False):\n \"\"\"Return a list of column names and variable names.\"\"\"\n names = 
self.get_column_names(hidden=hidden)\n return names +\\\n [k for k in self.variables.keys() if not hidden or not k.startswith('__')] +\\\n [k for k in self.functions.keys() if not hidden or not k.startswith('__')]\n\n def get_column_names(self, virtual=True, strings=True, hidden=False, regex=None):\n \"\"\"Return a list of column names\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_scalars(x=1, x2=2, y=3, s='string')\n >>> df['r'] = (df.x**2 + df.y**2)**2\n >>> df.get_column_names()\n ['x', 'x2', 'y', 's', 'r']\n >>> df.get_column_names(virtual=False)\n ['x', 'x2', 'y', 's']\n >>> df.get_column_names(regex='x.*')\n ['x', 'x2']\n\n :param virtual: If False, skip virtual columns\n :param hidden: If False, skip hidden columns\n :param strings: If False, skip string columns\n :param regex: Only return column names matching the (optional) regular expression\n :param alias: Return the alias (True) or internal name (False).\n :rtype: list of str\n \"\"\"\n def column_filter(name):\n '''Return True if column with specified name should be returned'''\n if regex and not re.match(regex, name):\n return False\n if not virtual and name in self.virtual_columns:\n return False\n if not strings and self.is_string(name):\n return False\n if not hidden and name.startswith('__'):\n return False\n return True\n if hidden and virtual and regex is None and strings is True:\n return list(self.column_names) # quick path\n if not hidden and virtual and regex is None and strings is True:\n return [k for k in self.column_names if not k.startswith('__')] # also a quick path\n return [name for name in self.column_names if column_filter(name)]\n\n def __bool__(self):\n return True # we are always true :) otherwise Python might call __len__, which can be expensive\n\n def __len__(self):\n \"\"\"Returns the number of rows in the DataFrame (filtering applied).\"\"\"\n if not self.filtered:\n return self._length_unfiltered\n else:\n if self._cached_filtered_length is None:\n self._cached_filtered_length = int(self.count())\n return self._cached_filtered_length\n\n def selected_length(self):\n \"\"\"Returns the number of rows that are selected.\"\"\"\n raise NotImplementedError\n\n def length_original(self):\n \"\"\"the full length of the DataFrame, independent what active_fraction is, or filtering. 
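In general, ``len(df) <= df.length_unfiltered() <= df.length_original()``. 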
This is the real length of the underlying ndarrays.\"\"\"\n return self._length_original\n\n def length_unfiltered(self):\n \"\"\"The length of the arrays that should be considered (respecting active range), but without filtering.\"\"\"\n return self._length_unfiltered\n\n def active_length(self):\n return self._length_unfiltered\n\n def get_active_fraction(self):\n \"\"\"Value in the range (0, 1], to work only with a subset of rows.\n \"\"\"\n return self._active_fraction\n\n def set_active_fraction(self, value):\n \"\"\"Sets the active_fraction, set picked row to None, and remove selection.\n\n TODO: we may be able to keep the selection, if we keep the expression, and also the picked row\n \"\"\"\n if value != self._active_fraction:\n self._active_fraction = value\n # self._fraction_length = int(self._length * self._active_fraction)\n self.select(None)\n self.set_current_row(None)\n self._length_unfiltered = int(round(self._length_original * self._active_fraction))\n self._cached_filtered_length = None\n self._filter_filled = False\n self._index_start = 0\n self._index_end = self._length_unfiltered\n self.signal_active_fraction_changed.emit(self, value)\n\n def get_active_range(self):\n return self._index_start, self._index_end\n\n def set_active_range(self, i1, i2):\n \"\"\"Sets the active_fraction, set picked row to None, and remove selection.\n\n TODO: we may be able to keep the selection, if we keep the expression, and also the picked row\n \"\"\"\n # logger.debug(\"set active range to: %r\", (i1, i2))\n self._active_fraction = (i2 - i1) / float(self.length_original())\n # self._fraction_length = int(self._length * self._active_fraction)\n self._index_start = i1\n self._index_end = i2\n self.select(None)\n self.set_current_row(None)\n self._length_unfiltered = i2 - i1\n if self.filtered:\n mask = self._selection_masks[FILTER_SELECTION_NAME]\n if not mask.view(i1, i2).is_dirty():\n self._cached_filtered_length = mask.view(i1, i2).count()\n else:\n self._cached_filtered_length = None\n self._filter_filled = False\n self.signal_active_fraction_changed.emit(self, self._active_fraction)\n\n @docsubst\n def trim(self, inplace=False):\n '''Return a DataFrame, where all columns are 'trimmed' by the active range.\n\n For the returned DataFrame, df.get_active_range() returns (0, df.length_original()).\n\n {note_copy}\n\n :param inplace: {inplace}\n :rtype: DataFrame\n '''\n df = self if inplace else self.copy()\n if self._index_start == 0 and self._index_end == self._length_original:\n return df\n df.dataset = self.dataset[self._index_start:self._index_end]\n if df.filtered:\n # we're gonna copy the mask from our parent\n parent_mask = self._selection_masks[FILTER_SELECTION_NAME].view(self._index_start, self._index_end)\n mask = df._selection_masks[FILTER_SELECTION_NAME]\n np.copyto(np.asarray(mask), np.asarray(parent_mask))\n selection = df.get_selection(FILTER_SELECTION_NAME)\n if not mask.is_dirty():\n df._cached_filtered_length = mask.count()\n cache = df._selection_mask_caches[FILTER_SELECTION_NAME]\n assert not cache\n chunk_size = self.executor.chunk_size_for(mask.length)\n for i in range(vaex.utils.div_ceil(mask.length, chunk_size)):\n i1 = i * chunk_size\n i2 = min(mask.length, (i + 1) * chunk_size)\n key = (i1, i2)\n sub_mask = mask.view(i1, i2)\n sub_mask_array = np.asarray(sub_mask)\n cache[key] = selection, sub_mask_array\n else:\n df._cached_filtered_length = None\n df._filter_filled = False\n return df\n\n @docsubst\n def take(self, indices, filtered=True, dropfilter=True):\n 
'''Returns a DataFrame containing only rows indexed by indices\n\n {note_copy}\n\n Example:\n\n >>> import vaex, numpy as np\n >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))\n >>> df.take([0,2])\n # s x\n 0 a 1\n 1 c 3\n\n :param indices: sequence (list or numpy array) with row numbers\n :param filtered: (for internal use) The indices refer to the filtered data.\n :param dropfilter: (for internal use) Drop the filter, set to False when\n indices refer to unfiltered, but may contain rows that still need to be filtered out.\n :return: DataFrame which is a shallow copy of the original data.\n :rtype: DataFrame\n '''\n df_trimmed = self.trim()\n df = df_trimmed.copy()\n indices = np.asarray(indices)\n if df.filtered and filtered:\n # we translate the indices that refer to filters row indices to\n # indices of the unfiltered row indices\n df._fill_filter_mask()\n max_index = indices.max()\n mask = df._selection_masks[FILTER_SELECTION_NAME]\n filtered_indices = mask.first(max_index+1)\n indices = filtered_indices[indices]\n df.dataset = df.dataset.take(indices)\n if dropfilter:\n # if the indices refer to the filtered rows, we can discard the\n # filter in the final dataframe\n df.set_selection(None, name=FILTER_SELECTION_NAME)\n return df\n\n @docsubst\n def extract(self):\n '''Return a DataFrame containing only the filtered rows.\n\n {note_copy}\n\n The resulting DataFrame may be more efficient to work with when the original DataFrame is\n heavily filtered (contains just a small number of rows).\n\n If no filtering is applied, it returns a trimmed view.\n For the returned df, len(df) == df.length_original() == df.length_unfiltered()\n\n :rtype: DataFrame\n '''\n df = self.trim()\n if df.filtered:\n df._push_down_filter()\n df._invalidate_caches()\n return df\n\n def _push_down_filter(self):\n '''Push the filter down the dataset layer'''\n self._fill_filter_mask() # make sure the mask is filled\n mask = self._selection_masks[FILTER_SELECTION_NAME]\n mask = np.asarray(mask)\n # indices = mask.first(len(self))\n # assert len(indices) == len(self)\n selection = self.get_selection(FILTER_SELECTION_NAME)\n from .dataset import DatasetFiltered\n self.set_selection(None, name=FILTER_SELECTION_NAME)\n self.dataset = DatasetFiltered(self.dataset, mask, state=self.state_get(skip=[self.dataset]), selection=selection)\n\n @docsubst\n def shuffle(self, random_state=None):\n '''Shuffle order of rows (equivalent to df.sample(frac=1))\n\n {note_copy}\n\n Example:\n\n >>> import vaex, numpy as np\n >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c']), x=np.arange(1,4))\n >>> df\n # s x\n 0 a 1\n 1 b 2\n 2 c 3\n >>> df.shuffle(random_state=42)\n # s x\n 0 a 1\n 1 b 2\n 2 c 3\n\n :param int or RandomState: {random_state}\n :return: {return_shallow_copy}\n :rtype: DataFrame\n '''\n\n return self.sample(frac=1, random_state=random_state)\n\n @docsubst\n def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None):\n '''Returns a DataFrame with a random set of rows\n\n {note_copy}\n\n Provide either n or frac.\n\n Example:\n\n >>> import vaex, numpy as np\n >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))\n >>> df\n # s x\n 0 a 1\n 1 b 2\n 2 c 3\n 3 d 4\n >>> df.sample(n=2, random_state=42) # 2 random rows, fixed seed\n # s x\n 0 b 2\n 1 d 4\n >>> df.sample(frac=1, random_state=42) # 'shuffling'\n # s x\n 0 c 3\n 1 a 1\n 2 d 4\n 3 b 2\n >>> df.sample(frac=1, replace=True, random_state=42) # useful for bootstrap (may contain repeated 
samples)\n # s x\n 0 d 4\n 1 a 1\n 2 a 1\n 3 d 4\n\n :param int n: number of samples to take (default 1 if frac is None)\n :param float frac: fraction of rows to take\n :param bool replace: If true, a row may be drawn multiple times\n :param str or expression weights: (unnormalized) probability that a row can be drawn\n :param int or RandomState random_state: {random_state}\n :return: {return_shallow_copy}\n :rtype: DataFrame\n '''\n self = self.extract()\n if type(random_state) == int or random_state is None:\n random_state = np.random.RandomState(seed=random_state)\n if n is None and frac is None:\n n = 1\n elif frac is not None:\n n = int(round(frac * len(self)))\n weights_values = None\n if weights is not None:\n weights_values = self.evaluate(weights)\n weights_values = weights_values / self.sum(weights)\n indices = random_state.choice(len(self), n, replace=replace, p=weights_values)\n return self.take(indices)\n\n @docsubst\n @vaex.utils.gen_to_list\n def split_random(self, into, random_state=None):\n '''Returns a list containing random portions of the DataFrame.\n\n {note_copy}\n\n Example:\n\n >>> import vaex, numpy as np\n >>> np.random.seed(111)\n >>> df = vaex.from_arrays(x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n >>> for dfs in df.split_random(into=0.3, random_state=42):\n ... print(dfs.x.values)\n ...\n [8 1 5]\n [0 7 2 9 4 3 6]\n >>> for dfs in df.split_random(into=[0.2, 0.3, 0.5], random_state=42):\n ... print(dfs.x.values)\n [8 1]\n [5 0 7]\n [2 9 4 3 6]\n\n :param int/float/list into: If float will split the DataFrame in two, the first of which will have a relative length as specified by this parameter.\n When a list, will split into as many portions as elements in the list, where each element defines the relative length of that portion. Note that such a list of fractions will always be re-normalized to 1.\n When an int, split DataFrame into n dataframes of equal length (last one may deviate), if len(df) < n, it will return len(df) DataFrames.\n :param int or RandomState random_state: {random_state}\n :return: A list of DataFrames.\n :rtype: list\n '''\n self = self.extract()\n if type(random_state) == int or random_state is None:\n random_state = np.random.RandomState(seed=random_state)\n indices = random_state.choice(len(self), len(self), replace=False)\n return self.take(indices).split(into)\n\n @docsubst\n @vaex.utils.gen_to_list\n def split(self, into=None):\n '''Returns a list containing ordered subsets of the DataFrame.\n\n {note_copy}\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_arrays(x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n >>> for dfs in df.split(into=0.3):\n ... print(dfs.x.values)\n ...\n [0 1 2]\n [3 4 5 6 7 8 9]\n >>> for dfs in df.split(into=[0.2, 0.3, 0.5]):\n ... print(dfs.x.values)\n [0 1]\n [2 3 4]\n [5 6 7 8 9]\n\n :param int/float/list into: If float will split the DataFrame in two, the first of which will have a relative length as specified by this parameter.\n When a list, will split into as many portions as elements in the list, where each element defines the relative length of that portion. 
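For example, into=[1, 1, 2] would split the DataFrame into three consecutive portions of roughly 25%, 25% and 50% of the rows (an illustrative sketch of the list form). 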
Note that such a list of fractions will always be re-normalized to 1.\n When an int, split DataFrame into n dataframes of equal length (last one may deviate), if len(df) < n, it will return len(df) DataFrames.\n '''\n self = self.extract()\n if isinstance(into, numbers.Integral):\n step = max(1, vaex.utils.div_ceil(len(self), into))\n i1 = 0\n i2 = step\n while i1 < len(self):\n i2 = min(len(self), i2)\n yield self[i1:i2]\n i1, i2 = i2, i2 + step\n return\n\n if _issequence(into):\n # make sure it is normalized\n total = sum(into)\n into = [k / total for k in into]\n else:\n assert into <= 1, \"when float, `into` should be <= 1\"\n assert into > 0, \"`into` must be > 0.\"\n into = [into, 1 - into]\n offsets = np.round(np.cumsum(into) * len(self)).astype(np.int64)\n start = 0\n for offset in offsets:\n yield self[start:offset]\n start = offset\n\n @docsubst\n def sort(self, by, ascending=True):\n '''Return a sorted DataFrame, sorted by the expression 'by'.\n\n Both 'by' and 'ascending' arguments can be lists.\n Note that missing/nan/NA values will always be pushed to the end, no matter the sorting order.\n\n {note_copy}\n\n {note_filter}\n\n Example:\n\n >>> import vaex, numpy as np\n >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))\n >>> df['y'] = (df.x-1.8)**2\n >>> df\n # s x y\n 0 a 1 0.64\n 1 b 2 0.04\n 2 c 3 1.44\n 3 d 4 4.84\n >>> df.sort('y', ascending=False) # Note: passing '(x-1.8)**2' gives the same result\n # s x y\n 0 d 4 4.84\n 1 c 3 1.44\n 2 a 1 0.64\n 3 b 2 0.04\n\n :param str or expression or list of str/expressions by: expression to sort by.\n :param bool or list of bools ascending: ascending (default, True) or descending (False).\n '''\n self = self.trim()\n # Ensure \"by\" is in the proper format\n by = vaex.utils._ensure_list(by)\n by = vaex.utils._ensure_strings_from_expressions(by)\n\n # Ensure \"ascending is in the proper format\"\n if isinstance(ascending, list):\n assert len(ascending) == len(by), 'If \"ascending\" is a list, it must have the same number of elements as \"by\".'\n else:\n ascending = vaex.utils._ensure_list(ascending) * len(by)\n\n sort_keys = [(key, 'ascending') if order is True else (key, 'descending') for key, order in zip(by, ascending)]\n pa_table = self[by].to_arrow_table()\n indices = pa.compute.sort_indices(pa_table, sort_keys=sort_keys)\n\n # if we don't cast to int64, we get uint64 scalars, which when adding numbers to will auto case to float (numpy)\n indices = vaex.array_types.to_numpy(indices).astype('int64')\n return self.take(indices)\n\n @docsubst\n def diff(self, periods=1, column=None, fill_value=None, trim=False, inplace=False, reverse=False):\n \"\"\"Calculate the difference between the current row and the row offset by periods\n\n :param int periods: Which row to take the difference with\n :param str or list[str] column: Column or list of columns to use (default is all).\n :param fill_value: Value to use instead of missing values.\n :param bool trim: Do not include rows that would otherwise have missing values\n :param bool reverse: When true, calculate `row[periods] - row[current]`\n :param inplace: {inplace}\n \"\"\"\n df = self.trim(inplace=inplace)\n if column is None:\n columns = self.get_column_names()\n else:\n if isinstance(column, (list, tuple)):\n columns = column\n else:\n columns = [column]\n originals = {}\n for column in columns:\n new_name = df._find_valid_name(f'__{column}_original')\n df[new_name] = df[column]\n originals[column] = new_name\n df = df.shift(periods, columns, 
fill_value=fill_value, trim=trim, inplace=inplace)\n for column in columns:\n if reverse:\n df[column] = df[column] - df[originals[column]]\n else:\n df[column] = df[originals[column]] - df[column]\n return df\n\n @docsubst\n def shift(self, periods, column=None, fill_value=None, trim=False, inplace=False):\n \"\"\"Shift a column or multiple columns by `periods` amounts of rows.\n\n :param int periods: Shift column forward (when positive) or backwards (when negative)\n :param str or list[str] column: Column or list of columns to shift (default is all).\n :param fill_value: Value to use instead of missing values.\n :param bool trim: Do not include rows that would otherwise have missing values\n :param inplace: {inplace}\n \"\"\"\n df = self.trim(inplace=inplace)\n if df.filtered:\n df._push_down_filter()\n from .shift import DatasetShifted\n # we want to shows these shifted\n if column is not None:\n columns = set(column) if _issequence(column) else {column}\n else:\n columns = set(df.get_column_names())\n columns_all = set(df.get_column_names(hidden=True))\n\n # these columns we do NOT want to shift, because we didn't ask it\n # or because we depend on them (virtual column)\n columns_keep = columns_all - columns\n columns_keep |= df._depending_columns(columns_keep, check_filter=False) # TODO: remove filter check\n\n columns_shift = columns.copy()\n columns_shift |= df._depending_columns(columns)\n virtual_columns = df.virtual_columns.copy()\n # these are the columns we want to shift, but *also* want to keep the original\n columns_conflict = columns_keep & columns_shift\n\n column_shift_mapping = {}\n # we use this dataframe for tracking virtual columns when renaming\n df_shifted = df.copy()\n shifted_names = {}\n unshifted_names = {}\n for name in columns_shift:\n if name in columns_conflict:\n # we want to have two columns, an unshifted and shifted\n\n # rename the current to unshifted\n unshifted_name = df.rename(name, f'__{name}_unshifted', unique=True)\n unshifted_names[name] = unshifted_name\n\n # now make a shifted one\n shifted_name = f'__{name}_shifted'\n shifted_name = vaex.utils.find_valid_name(shifted_name, used=df.get_column_names(hidden=True))\n shifted_names[name] = shifted_name\n\n if name not in virtual_columns:\n # if not virtual, we let the dataset layer handle it\n column_shift_mapping[unshifted_name] = shifted_name\n df.column_names.append(shifted_name)\n # otherwise we can later on copy the virtual columns from this df\n df_shifted.rename(name, shifted_name)\n else:\n if name not in virtual_columns:\n # easy case, just shift\n column_shift_mapping[name] = name\n\n # now that we renamed columns into _shifted/_unshifted we\n # restore the dataframe with the real column names\n for name in columns_shift:\n if name in columns_conflict:\n if name in virtual_columns:\n if name in columns:\n df.add_virtual_column(name, df_shifted.virtual_columns[shifted_names[name]])\n else:\n df.add_virtual_column(name, unshifted_names[name])\n else:\n if name in columns:\n df.add_virtual_column(name, shifted_names[name])\n else:\n df.add_virtual_column(name, unshifted_names[name])\n else:\n if name in virtual_columns:\n df.virtual_columns[name] = df_shifted.virtual_columns[name]\n df._virtual_expressions[name] = Expression(df, df.virtual_columns[name])\n if _issequence(periods):\n if len(periods) != 2:\n raise ValueError(f'periods should be a int or a tuple of ints, not {periods}')\n start, end = periods\n else:\n start = end = periods\n dataset = DatasetShifted(original=df.dataset, 
start=start, end=end, column_mapping=column_shift_mapping, fill_value=fill_value)\n if trim:\n # assert start == end\n slice_start = 0\n slice_end = dataset.row_count\n if start > 0:\n slice_start = start\n elif start < 0:\n slice_end = dataset.row_count + start\n if end != start:\n if end > start:\n slice_end -= end -1\n dataset = dataset.slice(slice_start, slice_end)\n\n df.dataset = dataset\n for name in df.dataset:\n assert name in df.column_names, f\"oops, {name} in dataset, but not in column_names\"\n for name in df.column_names:\n if name not in df.dataset:\n assert name in df.virtual_columns\n return df\n\n @docsubst\n def fillna(self, value, column_names=None, prefix='__original_', inplace=False):\n '''Return a DataFrame, where missing values/NaN are filled with 'value'.\n\n The original columns will be renamed, and by default they will be hidden columns. No data is lost.\n\n {note_copy}\n\n {note_filter}\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> x = np.array([3, 1, np.nan, 10, np.nan])\n >>> df = vaex.from_arrays(x=x)\n >>> df_filled = df.fillna(value=-1, column_names=['x'])\n >>> df_filled\n # x\n 0 3\n 1 1\n 2 -1\n 3 10\n 4 -1\n\n :param float value: The value to use for filling nan or masked values.\n :param bool fill_na: If True, fill np.nan values with `value`.\n :param bool fill_masked: If True, fill masked values with `values`.\n :param list column_names: List of column names in which to fill missing values.\n :param str prefix: The prefix to give the original columns.\n :param inplace: {inplace}\n '''\n df = self.trim(inplace=inplace)\n column_names = column_names or list(self)\n for name in column_names:\n column = df.columns.get(name)\n df[name] = df.func.fillna(df[name], value)\n return df\n\n def materialize(self, column=None, inplace=False, virtual_column=None):\n '''Turn columns into native CPU format for optimal performance at cost of memory.\n\n .. warning:: This may use of lot of memory, be mindfull.\n\n Virtual columns will be evaluated immediately, and all real columns will be\n cached in memory when used for the first time.\n\n Example for virtual column:\n\n >>> x = np.arange(1,4)\n >>> y = np.arange(2,5)\n >>> df = vaex.from_arrays(x=x, y=y)\n >>> df['r'] = (df.x**2 + df.y**2)**0.5 # 'r' is a virtual column (computed on the fly)\n >>> df = df.materialize('r') # now 'r' is a 'real' column (i.e. 
a numpy array)\n\n Example with parquet file\n >>> df = vaex.open('somewhatslow.parquet')\n >>> df.x.sum() # slow\n >>> df = df.materialize()\n >>> df.x.sum() # slow, but will fill the cache\n >>> df.x.sum() # as fast as possible, will use memory\n\n :param column: string or list of strings with column names to materialize, all columns when None\n :param virtual_column: for backward compatibility\n :param inplace: {inplace}\n '''\n if virtual_column is not None:\n warnings.warn(\"virtual_column argument is deprecated, please use column\")\n column = virtual_column\n df = self.trim(inplace=inplace)\n if column is None:\n columns = df.get_column_names(hidden=True)\n else:\n columns = _ensure_strings_from_expressions(column)\n virtual = []\n cache = []\n for column in columns:\n if column in self.dataset:\n cache.append(column)\n elif column in self.virtual_columns:\n virtual.append(column)\n else:\n raise NameError(f'{column} is not a column or virtual column')\n dataset = df._dataset\n if cache:\n dataset = vaex.dataset.DatasetCached(dataset, cache)\n if virtual:\n arrays = df.evaluate(virtual, filtered=False)\n materialized = vaex.dataset.DatasetArrays(dict(zip(virtual, arrays)))\n dataset = dataset.merged(materialized)\n df.dataset = dataset\n for name in virtual:\n del df.virtual_columns[name]\n else:\n # in this case we don't need to invalidate caches,\n # also the fingerprint will be the same\n df._dataset = dataset\n return df\n\n def _lazy_materialize(self, *virtual_columns):\n '''Returns a new DataFrame where the virtual column is turned into an lazily evaluated column.'''\n df = self.trim()\n virtual_columns = _ensure_strings_from_expressions(virtual_columns)\n for name in virtual_columns:\n if name not in df.virtual_columns:\n raise KeyError('Virtual column not found: %r' % name)\n column = ColumnConcatenatedLazy([self[name]])\n del df[name]\n df.add_column(name, column)\n return df\n\n def get_selection(self, name=\"default\"):\n \"\"\"Get the current selection object (mostly for internal use atm).\"\"\"\n name = _normalize_selection_name(name)\n selection_history = self.selection_histories[name]\n index = self.selection_history_indices[name]\n if index == -1:\n return None\n else:\n return selection_history[index]\n\n def selection_undo(self, name=\"default\", executor=None):\n \"\"\"Undo selection, for the name.\"\"\"\n logger.debug(\"undo\")\n executor = executor or self.executor\n assert self.selection_can_undo(name=name)\n selection_history = self.selection_histories[name]\n index = self.selection_history_indices[name]\n self.selection_history_indices[name] -= 1\n self.signal_selection_changed.emit(self, name)\n logger.debug(\"undo: selection history is %r, index is %r\", selection_history, self.selection_history_indices[name])\n\n def selection_redo(self, name=\"default\", executor=None):\n \"\"\"Redo selection, for the name.\"\"\"\n logger.debug(\"redo\")\n executor = executor or self.executor\n assert self.selection_can_redo(name=name)\n selection_history = self.selection_histories[name]\n index = self.selection_history_indices[name]\n next = selection_history[index + 1]\n self.selection_history_indices[name] += 1\n self.signal_selection_changed.emit(self, name)\n logger.debug(\"redo: selection history is %r, index is %r\", selection_history, index)\n\n def selection_can_undo(self, name=\"default\"):\n \"\"\"Can selection name be undone?\"\"\"\n return self.selection_history_indices[name] > -1\n\n def selection_can_redo(self, name=\"default\"):\n \"\"\"Can selection name 
be redone?\"\"\"\n return (self.selection_history_indices[name] + 1) < len(self.selection_histories[name])\n\n def select(self, boolean_expression, mode=\"replace\", name=\"default\", executor=None):\n \"\"\"Perform a selection, defined by the boolean expression, and combined with the previous selection using the given mode.\n\n Selections are recorded in a history tree, per name, undo/redo can be done for them separately.\n\n :param str boolean_expression: Any valid column expression, with comparison operators\n :param str mode: Possible boolean operator: replace/and/or/xor/subtract\n :param str name: history tree or selection 'slot' to use\n :param executor:\n :return:\n \"\"\"\n boolean_expression = _ensure_string_from_expression(boolean_expression)\n if boolean_expression is None and not self.has_selection(name=name):\n pass # we don't want to pollute the history with many None selections\n self.signal_selection_changed.emit(self, name) # TODO: unittest want to know, does this make sense?\n else:\n def create(current):\n return selections.SelectionExpression(boolean_expression, current, mode) if boolean_expression else None\n self._selection(create, name)\n\n def select_non_missing(self, drop_nan=True, drop_masked=True, column_names=None, mode=\"replace\", name=\"default\"):\n \"\"\"Create a selection that selects rows having non missing values for all columns in column_names.\n\n The name reflects Pandas, no rows are really dropped, but a mask is kept to keep track of the selection\n\n :param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)\n :param drop_masked: drop rows when there is a masked value in any of the columns\n :param column_names: The columns to consider, default: all (real, non-virtual) columns\n :param str mode: Possible boolean operator: replace/and/or/xor/subtract\n :param str name: history tree or selection 'slot' to use\n :return:\n \"\"\"\n column_names = column_names or self.get_column_names(virtual=False)\n\n def create(current):\n return selections.SelectionDropNa(drop_nan, drop_masked, column_names, current, mode)\n self._selection(create, name)\n\n def dropmissing(self, column_names=None):\n \"\"\"Create a shallow copy of a DataFrame, with filtering set using ismissing.\n\n :param column_names: The columns to consider, default: all (real, non-virtual) columns\n :rtype: DataFrame\n \"\"\"\n return self._filter_all(self.func.ismissing, column_names)\n\n def dropnan(self, column_names=None):\n \"\"\"Create a shallow copy of a DataFrame, with filtering set using isnan.\n\n :param column_names: The columns to consider, default: all (real, non-virtual) columns\n :rtype: DataFrame\n \"\"\"\n return self._filter_all(self.func.isnan, column_names)\n\n def dropna(self, column_names=None):\n \"\"\"Create a shallow copy of a DataFrame, with filtering set using isna.\n\n :param column_names: The columns to consider, default: all (real, non-virtual) columns\n :rtype: DataFrame\n \"\"\"\n return self._filter_all(self.func.isna, column_names)\n\n def dropinf(self, column_names=None):\n \"\"\" Create a shallow copy of a DataFrame, with filtering set using isinf.\n :param column_names: The columns to consider, default: all (real, non-virtual) columns\n :rtype: DataFrame\n \"\"\"\n return self._filter_all(self.func.isinf, column_names)\n\n def _filter_all(self, f, column_names=None):\n column_names = column_names or self.get_column_names(virtual=False)\n expression = f(self[column_names[0]])\n for column in column_names[1:]:\n 
expression = expression | f(self[column])\n return self.filter(~expression, mode='and')\n\n def select_nothing(self, name=\"default\"):\n \"\"\"Select nothing.\"\"\"\n logger.debug(\"selecting nothing\")\n self.select(None, name=name)\n self.signal_selection_changed.emit(self, name)\n\n def select_rectangle(self, x, y, limits, mode=\"replace\", name=\"default\"):\n \"\"\"Select a 2d rectangular box in the space given by x and y, bounded by limits.\n\n Example:\n\n >>> df.select_box('x', 'y', [(0, 10), (0, 1)])\n\n :param x: expression for the x space\n :param y: expression fo the y space\n :param limits: sequence of shape [(x1, x2), (y1, y2)]\n :param mode:\n \"\"\"\n self.select_box([x, y], limits, mode=mode, name=name)\n\n def select_box(self, spaces, limits, mode=\"replace\", name=\"default\"):\n \"\"\"Select a n-dimensional rectangular box bounded by limits.\n\n The following examples are equivalent:\n\n >>> df.select_box(['x', 'y'], [(0, 10), (0, 1)])\n >>> df.select_rectangle('x', 'y', [(0, 10), (0, 1)])\n\n :param spaces: list of expressions\n :param limits: sequence of shape [(x1, x2), (y1, y2)]\n :param mode:\n :param name:\n :return:\n \"\"\"\n sorted_limits = [(min(l), max(l)) for l in limits]\n expressions = [\"((%s) >= %f) & ((%s) <= %f)\" % (expression, lmin, expression, lmax) for\n (expression, (lmin, lmax)) in zip(spaces, sorted_limits)]\n self.select(\"&\".join(expressions), mode=mode, name=name)\n\n def select_circle(self, x, y, xc, yc, r, mode=\"replace\", name=\"default\", inclusive=True):\n \"\"\"\n Select a circular region centred on xc, yc, with a radius of r.\n\n Example:\n\n >>> df.select_circle('x','y',2,3,1)\n\n :param x: expression for the x space\n :param y: expression for the y space\n :param xc: location of the centre of the circle in x\n :param yc: location of the centre of the circle in y\n :param r: the radius of the circle\n :param name: name of the selection\n :param mode:\n :return:\n \"\"\"\n\n # expr = \"({x}-{xc})**2 + ({y}-{yc})**2 <={r}**2\".format(**locals())\n if inclusive:\n expr = (self[x] - xc)**2 + (self[y] - yc)**2 <= r**2\n else:\n expr = (self[x] - xc)**2 + (self[y] - yc)**2 < r**2\n\n self.select(boolean_expression=expr, mode=mode, name=name)\n\n def select_ellipse(self, x, y, xc, yc, width, height, angle=0, mode=\"replace\", name=\"default\", radians=False, inclusive=True):\n \"\"\"\n Select an elliptical region centred on xc, yc, with a certain width, height\n and angle.\n\n Example:\n\n >>> df.select_ellipse('x','y', 2, -1, 5,1, 30, name='my_ellipse')\n\n :param x: expression for the x space\n :param y: expression for the y space\n :param xc: location of the centre of the ellipse in x\n :param yc: location of the centre of the ellipse in y\n :param width: the width of the ellipse (diameter)\n :param height: the width of the ellipse (diameter)\n :param angle: (degrees) orientation of the ellipse, counter-clockwise\n measured from the y axis\n :param name: name of the selection\n :param mode:\n :return:\n\n \"\"\"\n\n # Computing the properties of the ellipse prior to selection\n if radians:\n pass\n else:\n alpha = np.deg2rad(angle)\n xr = width / 2\n yr = height / 2\n r = max(xr, yr)\n a = xr / r\n b = yr / r\n\n expr = \"(({x}-{xc})*cos({alpha})+({y}-{yc})*sin({alpha}))**2/{a}**2 + (({x}-{xc})*sin({alpha})-({y}-{yc})*cos({alpha}))**2/{b}**2 <= {r}**2\".format(**locals())\n\n if inclusive:\n expr = ((self[x] - xc) * np.cos(alpha) + (self[y] - yc) * np.sin(alpha))**2 / a**2 + ((self[x] - xc) * np.sin(alpha) - (self[y] - yc) * 
np.cos(alpha))**2 / b**2 <= r**2\n else:\n expr = ((self[x] - xc) * np.cos(alpha) + (self[y] - yc) * np.sin(alpha))**2 / a**2 + ((self[x] - xc) * np.sin(alpha) - (self[y] - yc) * np.cos(alpha))**2 / b**2 < r**2\n\n self.select(boolean_expression=expr, mode=mode, name=name)\n\n def select_lasso(self, expression_x, expression_y, xsequence, ysequence, mode=\"replace\", name=\"default\", executor=None):\n \"\"\"For performance reasons, a lasso selection is handled differently.\n\n :param str expression_x: Name/expression for the x coordinate\n :param str expression_y: Name/expression for the y coordinate\n :param xsequence: list of x numbers defining the lasso, together with y\n :param ysequence:\n :param str mode: Possible boolean operator: replace/and/or/xor/subtract\n :param str name:\n :param executor:\n :return:\n \"\"\"\n\n def create(current):\n return selections.SelectionLasso(expression_x, expression_y, xsequence, ysequence, current, mode)\n self._selection(create, name, executor=executor)\n\n def select_inverse(self, name=\"default\", executor=None):\n \"\"\"Invert the selection, i.e. what is selected will not be, and vice versa\n\n :param str name:\n :param executor:\n :return:\n \"\"\"\n\n def create(current):\n return selections.SelectionInvert(current)\n self._selection(create, name, executor=executor)\n\n def set_selection(self, selection, name=\"default\", executor=None):\n \"\"\"Sets the selection object\n\n :param selection: Selection object\n :param name: selection 'slot'\n :param executor:\n :return:\n \"\"\"\n def create(current):\n return selection\n self._selection(create, name, executor=executor, execute_fully=True)\n\n def _selection(self, create_selection, name, executor=None, execute_fully=False):\n \"\"\"select_lasso and select almost share the same code\"\"\"\n selection_history = self.selection_histories[name]\n previous_index = self.selection_history_indices[name]\n current = selection_history[previous_index] if selection_history else None\n selection = create_selection(current)\n executor = executor or self.executor\n selection_history.append(selection)\n self.selection_history_indices[name] += 1\n # clip any redo history\n del selection_history[self.selection_history_indices[name]:-1]\n self.signal_selection_changed.emit(self, name)\n result = vaex.promise.Promise.fulfilled(None)\n # logger.debug(\"select selection history is %r, index is %r\", selection_history, self.selection_history_indices[name])\n return result\n\n def has_selection(self, name=\"default\"):\n \"\"\"Returns True if there is a selection with the given name.\"\"\"\n return self.get_selection(name) is not None\n\n def __setitem__(self, name, value):\n '''Convenient way to add a virtual column / expression to this DataFrame.\n\n Example:\n\n >>> import vaex, numpy as np\n >>> df = vaex.example()\n >>> df['r'] = np.sqrt(df.x**2 + df.y**2 + df.z**2)\n >>> df.r\n <vaex.expression.Expression(expressions='r')> instance at 0x121687e80 values=[2.9655450396553587, 5.77829281049018, 6.99079603950256, 9.431842752707537, 0.8825613121347967 ... (total 330000 values) ... 
7.453831761514681, 15.398412491068198, 8.864250273925633, 17.601047186042507, 14.540181524970293]\n '''\n\n if isinstance(name, six.string_types):\n if isinstance(value, supported_column_types):\n self.add_column(name, value)\n else:\n self.add_virtual_column(name, value)\n else:\n raise TypeError('__setitem__ only takes strings as arguments, not {}'.format(type(name)))\n\n def drop_filter(self, inplace=False):\n \"\"\"Removes all filters from the DataFrame\"\"\"\n df = self if inplace else self.copy()\n df.select_nothing(name=FILTER_SELECTION_NAME)\n df._invalidate_caches()\n return df\n\n def filter(self, expression, mode=\"and\"):\n \"\"\"General version of df[<boolean expression>] to modify the filter applied to the DataFrame.\n\n See :func:`DataFrame.select` for usage of selection.\n\n Note that using `df = df[<boolean expression>]`, one can only narrow the filter (i.e. only less rows\n can be selected). Using the filter method, and a different boolean mode (e.g. \"or\") one can actually\n cause more rows to be selected. This differs greatly from numpy and pandas for instance, which can only\n narrow the filter.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> x = np.arange(10)\n >>> df = vaex.from_arrays(x=x, y=x**2)\n >>> df\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n 3 3 9\n 4 4 16\n 5 5 25\n 6 6 36\n 7 7 49\n 8 8 64\n 9 9 81\n >>> dff = df[df.x<=2]\n >>> dff\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n >>> dff = dff.filter(dff.x >=7, mode=\"or\")\n >>> dff\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n 3 7 49\n 4 8 64\n 5 9 81\n \"\"\"\n df = self.copy()\n df.select(expression, name=FILTER_SELECTION_NAME, mode=mode)\n df._cached_filtered_length = None # invalide cached length\n df._filter_filled = False\n # WARNING: this is a special case where we create a new filter\n # the cache mask chunks still hold references to views on the old\n # mask, and this new mask will be filled when required\n df._selection_masks[FILTER_SELECTION_NAME] = vaex.superutils.Mask(int(df._length_unfiltered))\n return df\n\n def __getitem__(self, item):\n \"\"\"Convenient way to get expressions, (shallow) copies of a few columns, or to apply filtering.\n\n Example:\n\n >>> df['Lz'] # the expression 'Lz\n >>> df['Lz/2'] # the expression 'Lz/2'\n >>> df[[\"Lz\", \"E\"]] # a shallow copy with just two columns\n >>> df[df.Lz < 0] # a shallow copy with the filter Lz < 0 applied\n\n \"\"\"\n if isinstance(item, int):\n names = self.get_column_names()\n return [self.evaluate(name, item, item+1, array_type='python')[0] for name in names]\n elif isinstance(item, six.string_types):\n if hasattr(self, item) and isinstance(getattr(self, item), Expression):\n return getattr(self, item)\n # if item in self.virtual_columns:\n # return Expression(self, self.virtual_columns[item])\n # if item in self._virtual_expressions:\n # return self._virtual_expressions[item]\n if item not in self.column_names:\n self.validate_expression(item)\n item = vaex.utils.valid_expression(self.get_column_names(), item)\n return Expression(self, item) # TODO we'd like to return the same expression if possible\n elif isinstance(item, Expression):\n expression = item.expression\n return self.filter(expression)\n elif isinstance(item, (tuple, list)):\n df = self\n if isinstance(item[0], slice):\n df = df[item[0]]\n if len(item) > 1:\n if isinstance(item[1], int):\n name = self.get_column_names()[item[1]]\n return df[name]\n elif isinstance(item[1], slice):\n names = self.get_column_names().__getitem__(item[1])\n return df[names]\n for expression in item:\n if 
expression not in self.column_names:\n self.validate_expression(expression)\n df = self.copy(column_names=item)\n return df\n elif isinstance(item, slice):\n start, stop, step = item.start, item.stop, item.step\n start = start or 0\n stop = stop or len(self)\n if start < 0:\n start = len(self)+start\n if stop < 0:\n stop = len(self)+stop\n stop = min(stop, len(self))\n assert step in [None, 1]\n if self.filtered:\n self._fill_filter_mask()\n mask = self._selection_masks[FILTER_SELECTION_NAME]\n startf, stopf = mask.indices(start, stop-1) # -1 since it is inclusive\n assert startf != -1\n assert stopf != -1\n stopf = stopf+1 # +1 to make it inclusive\n start, stop = startf, stopf\n df = self.trim()\n df.set_active_range(start, stop)\n return df.trim()\n\n def __delitem__(self, item):\n '''Alias of df.drop(item, inplace=True)'''\n if item in self.columns:\n name = item\n if name in self._depending_columns(columns_exclude=[name]):\n raise ValueError(f'Oops, you are trying to remove column {name} while other columns depend on it (use .drop instead)')\n self.drop([item], inplace=True)\n\n def _real_drop(self, item):\n '''Removes a (virtual) column from the DataFrame.\n\n Note: this does not check if the column is used in a virtual expression or in the filter\\\n and may lead to issues. It is safer to use :meth:`drop`.\n '''\n if isinstance(item, Expression):\n name = item.expression\n else:\n name = item\n if name in self.columns:\n del self.columns[name]\n self.column_names.remove(name)\n elif name in self.virtual_columns:\n del self.virtual_columns[name]\n del self._virtual_expressions[name]\n self.column_names.remove(name)\n else:\n matches = difflib.get_close_matches(name, self.get_column_names(hidden=True))\n msg = \"Column or variable %r does not exist.\" % name\n if matches:\n msg += ' Did you mean: ' + \" or \".join(map(repr, matches))\n raise KeyError(msg)\n self.signal_column_changed.emit(self, name, \"delete\")\n if hasattr(self, name):\n try:\n if isinstance(getattr(self, name), Expression):\n delattr(self, name)\n except:\n pass\n\n @docsubst\n def drop(self, columns, inplace=False, check=True):\n \"\"\"Drop columns (or a single column).\n\n :param columns: List of columns or a single column name\n :param inplace: {inplace}\n :param check: When true, it will check if the column is used in virtual columns or the filter, and hide it instead.\n \"\"\"\n columns = _ensure_list(columns)\n columns = _ensure_strings_from_expressions(columns)\n df = self if inplace else self.copy()\n depending_columns = df._depending_columns(columns_exclude=columns)\n for column in columns:\n if check and column in depending_columns:\n df._hide_column(column)\n else:\n df._real_drop(column)\n return df\n\n def _hide_column(self, column):\n '''Hides a column by prefixing the name with \\'__\\''''\n column = _ensure_string_from_expression(column)\n new_name = self._find_valid_name('__' + column)\n self._rename(column, new_name)\n return new_name\n\n def _find_valid_name(self, initial_name):\n '''Finds a non-colliding name by optional postfixing'''\n return vaex.utils.find_valid_name(initial_name, used=self.get_column_names(hidden=True))\n\n def _depending_columns(self, columns=None, columns_exclude=None, check_filter=True):\n '''Find all depending column for a set of column (default all), minus the excluded ones'''\n columns = set(columns or self.get_column_names(hidden=True))\n if columns_exclude:\n columns -= set(columns_exclude)\n depending_columns = set()\n for column in columns:\n expression = 
self[str(column)]\n depending_columns |= expression.variables()\n depending_columns -= set(columns)\n if check_filter:\n if self.filtered:\n selection = self.get_selection(FILTER_SELECTION_NAME)\n depending_columns |= selection._depending_columns(self)\n return depending_columns\n\n def iterrows(self):\n columns = self.get_column_names()\n for i in range(len(self)):\n yield i, {key: self.evaluate(key, i, i+1, array_type='python')[0] for key in columns}\n #return self[i]\n\n def __iter__(self):\n \"\"\"Iterator over the column names.\"\"\"\n return iter(list(self.get_column_names()))\n\n def _root_nodes(self):\n \"\"\"Returns a list of string which are the virtual columns that are not used in any other virtual column.\"\"\"\n # these lists (~used as ordered set) keep track of leafes and root nodes\n # root nodes\n root_nodes = []\n leafes = []\n def walk(node):\n # this function recursively walks the expression graph\n if isinstance(node, six.string_types):\n # we end up at a leaf\n leafes.append(node)\n if node in root_nodes: # so it cannot be a root node\n root_nodes.remove(node)\n else:\n node_repr, fname, fobj, deps = node\n if node_repr in self.virtual_columns:\n # we encountered a virtual column, similar behaviour as leaf\n leafes.append(node_repr)\n if node_repr in root_nodes:\n root_nodes.remove(node_repr)\n # resursive part\n for dep in deps:\n walk(dep)\n for column in self.virtual_columns.keys():\n if column not in leafes:\n root_nodes.append(column)\n node = self[column]._graph()\n # we don't do the virtual column itself, just it's depedencies\n node_repr, fname, fobj, deps = node\n for dep in deps:\n walk(dep)\n return root_nodes\n\n def _graphviz(self, dot=None):\n \"\"\"Return a graphviz.Digraph object with a graph of all virtual columns\"\"\"\n from graphviz import Digraph\n dot = dot or Digraph(comment='whole dataframe')\n root_nodes = self._root_nodes()\n for column in root_nodes:\n self[column]._graphviz(dot=dot)\n return dot\n\n @docsubst\n @stat_1d\n def _agg(self, aggregator, binners=tuple(), delay=False, progress=None):\n \"\"\"\n\n :param delay: {delay}\n :return: {return_stat_scalar}\n \"\"\"\n tasks, result = aggregator.add_tasks(self, binners, progress=progress)\n return self._delay(delay, result)\n\n def _binner(self, expression, limits=None, shape=None, selection=None, progress=None, delay=False):\n expression = str(expression)\n if limits is not None and not isinstance(limits, (tuple, str)):\n limits = tuple(limits)\n if expression in self._categories:\n N = self._categories[expression]['N']\n min_value = self._categories[expression]['min_value']\n binner = self._binner_ordinal(expression, N, min_value)\n binner = vaex.promise.Promise.fulfilled(binner)\n else:\n @delayed\n def create_binner(limits):\n return self._binner_scalar(expression, limits, shape)\n binner = create_binner(self.limits(expression, limits, selection=selection, progress=progress, delay=True))\n return self._delay(delay, binner)\n\n def _binner_scalar(self, expression, limits, shape):\n dtype = self.data_type(expression)\n return BinnerScalar(expression, limits[0], limits[1], shape, dtype)\n\n def _binner_ordinal(self, expression, ordinal_count, min_value=0, invert=False):\n dtype = self.data_type(expression)\n return BinnerOrdinal(expression, min_value, ordinal_count, invert, dtype)\n\n def _binner_hash(self, expression, hash_map_unique):\n dtype = self.data_type(expression)\n return BinnerHash(expression, hash_map_unique, dtype)\n\n def _create_binners(self, binby, limits, shape, 
selection=None, progress=None, delay=False):\n if isinstance(binby, (list, tuple)):\n binbys = binby\n else:\n binbys = [binby]\n binbys = _ensure_strings_from_expressions(binbys)\n for expression in binbys:\n if expression:\n self.validate_expression(expression)\n binners = []\n if len(binbys):\n limits = _expand_limits(limits, len(binbys))\n else:\n limits = []\n shapes = _expand_shape(shape, len(binbys))\n for binby, limits1, shape in zip(binbys, limits, shapes):\n binners.append(self._binner(binby, limits1, shape, selection, progress=progress, delay=True))\n @delayed\n def finish(*binners):\n return binners\n return self._delay(delay, finish(*binners))\n\n @docsubst\n def rolling(self, window, trim=False, column=None, fill_value=None, edge=\"right\"):\n '''Create a :py:data:`vaex.rolling.Rolling` rolling window object\n\n :param int window: Size of the rolling window.\n :param bool trim: {trim}\n :param str or list[str] column: Column name or column names of columns affected (None for all)\n :param any fill_value: Scalar value to use for data outside of existing rows.\n :param str edge: Where the edge of the rolling window is for the current row.\n '''\n columns = self.get_column_names() if column is None else (column if _issequence(column) else [column])\n from .rolling import Rolling\n return Rolling(self, window, trim=trim, columns=columns, fill_value=fill_value, edge=edge)\n\n\nDataFrame.__hidden__ = {}\nhidden = [name for name, func in vars(DataFrame).items() if getattr(func, '__hidden__', False)]\nfor name in hidden:\n DataFrame.__hidden__[name] = getattr(DataFrame, name)\n delattr(DataFrame, name)\ndel hidden\n\n\nclass ColumnProxy(collections.abc.MutableMapping):\n def __init__(self, df):\n self.df = df\n\n @property\n def dataset(self):\n return self.df.dataset\n\n def __delitem__(self, item):\n assert item in self.dataset\n self.df._dataset = self.dataset.dropped(item)\n\n def __len__(self):\n return len(self.dataset)\n\n def __setitem__(self, item, value):\n if isinstance(self.dataset, vaex.dataset.DatasetArrays):\n merged = vaex.dataset.DatasetArrays({**self.dataset._columns, item: value})\n else:\n left = self.dataset\n if item in self.dataset:\n left = left.dropped(item)\n right = vaex.dataset.DatasetArrays({item: value})\n merged = left.merged(right)\n self.df._dataset = merged\n\n self.df._length = len(value)\n if self.df._length_unfiltered is None:\n self.df._length_unfiltered = self.df._length\n self.df._length_original = self.df._length\n self.df._index_end = self.df._length_unfiltered\n\n def __iter__(self):\n return iter(self.dataset)\n\n def __getitem__(self, item):\n return self.dataset[item]\n\n\nclass DataFrameLocal(DataFrame):\n \"\"\"Base class for DataFrames that work with local file/data\"\"\"\n\n def __init__(self, dataset=None, name=None):\n if dataset is None:\n dataset = vaex.dataset.DatasetArrays()\n name = name or \"no-name\"\n else:\n name = name or dataset.name\n super(DataFrameLocal, self).__init__(name)\n self._dataset = dataset\n if hasattr(dataset, 'units'):\n self.units.update(dataset.units)\n if hasattr(dataset, 'ucds'):\n self.ucds.update(dataset.ucds)\n self.column_names = list(self.dataset)\n if len(self.dataset):\n self._length = self.dataset.row_count\n if self._length_unfiltered is None:\n self._length_unfiltered = self._length\n self._length_original = self._length\n self._index_end = self._length_unfiltered\n # self.path = dataset.path\n self.mask = None\n self.columns = ColumnProxy(self)\n for column_name in self.column_names:\n 
self._initialize_column(column_name)\n\n def _fill_filter_mask(self):\n if self.filtered and self._filter_filled is False:\n task = vaex.tasks.TaskFilterFill(self)\n # we also get the count, which is almost for free\n @delayed\n def set_length(count):\n self._cached_filtered_length = int(count)\n self._filter_filled = True\n set_length(self.count(delay=True))\n task = self.executor.schedule(task)\n self.execute()\n\n def __getstate__(self):\n state = self.state_get(skip=[self.dataset])\n return {\n 'state': state,\n 'dataset': self.dataset,\n '_future_behaviour': self. _future_behaviour,\n }\n\n def __setstate__(self, state):\n self._init()\n self.executor = get_main_executor()\n self.columns = ColumnProxy(self)\n dataset = state['dataset']\n self._dataset = dataset\n assert dataset.row_count is not None\n self._length_original = dataset.row_count\n self._length_unfiltered = self._length_original\n self._cached_filtered_length = None\n self._filter_filled = False\n self._index_start = 0\n self._index_end = self._length_original\n self._future_behaviour = state['_future_behaviour']\n self.state_set(state['state'], use_active_range=True, trusted=True)\n\n @property\n def dataset(self):\n return self._dataset\n\n @dataset.setter\n def dataset(self, dataset):\n if self._dataset.row_count != dataset.row_count:\n self._length_original = dataset.row_count\n self._length_unfiltered = self._length_original\n self._cached_filtered_length = None\n self._filter_filled = False\n self._index_start = 0\n self._index_end = self._length_original\n self._dataset = dataset\n self._invalidate_caches()\n\n def hashed(self, inplace=False) -> DataFrame:\n '''Return a DataFrame with a hashed dataset'''\n df = self.copy() if not inplace else self\n df.dataset = df.dataset.hashed()\n return df\n\n def _readonly(self, inplace=False):\n # make arrays read only if possible\n df = self if inplace else self.copy()\n assert isinstance(self.dataset, vaex.dataset.DatasetArrays)\n columns = {}\n for key, ar in self.columns.items():\n columns[key] = ar\n if isinstance(ar, np.ndarray):\n columns[key] = ar = ar.view() # make new object so we don't modify others\n ar.flags['WRITEABLE'] = False\n df._dataset = vaex.dataset.DatasetArrays(columns)\n return df\n\n _dict_mapping = {\n pa.uint8(): pa.int16(),\n pa.uint16(): pa.int32(),\n pa.uint32(): pa.int64(),\n pa.uint64(): pa.int64(),\n }\n\n def _auto_encode_type(self, expression, type):\n if not self._future_behaviour:\n return type\n if self.is_category(expression):\n if vaex.dtype(type).is_encoded:\n return type # already encoded\n value_type = vaex.array_types.to_arrow(self.category_labels(expression)).type\n type = vaex.array_types.to_arrow_type(type)\n type = self._dict_mapping.get(type, type)\n type = pa.dictionary(type, value_type)\n return type\n\n def _auto_encode_data(self, expression, values):\n if not self._future_behaviour:\n return values\n if vaex.array_types.is_arrow_array(values) and pa.types.is_dictionary(values.type):\n return values\n if self.is_category(expression):\n dictionary = vaex.array_types.to_arrow(self.category_labels(expression))\n offset = self.category_offset(expression)\n if offset != 0:\n values = values - offset\n values = vaex.array_types.to_arrow(values)\n to_type = None\n if values.type in self._dict_mapping:\n values = values.cast(self._dict_mapping[values.type])\n if isinstance(values, pa.ChunkedArray):\n chunks = [pa.DictionaryArray.from_arrays(k, dictionary) for k in values.chunks]\n values = pa.chunked_array(chunks)\n else:\n values = 
pa.DictionaryArray.from_arrays(values, dictionary)\n return values\n\n\n @docsubst\n def categorize(self, column, min_value=0, max_value=None, labels=None, inplace=False):\n \"\"\"Mark column as categorical.\n\n This may help speed up calculations using integer columns between a range of [min_value, max_value].\n\n If max_value is not given, the [min_value and max_value] are calcuated from the data.\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_arrays(year=[2012, 2015, 2019], weekday=[0, 4, 6])\n >>> df = df.categorize('year', min_value=2020, max_value=2019)\n >>> df = df.categorize('weekday', labels=['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'])\n >>> df\n # year weekday\n 0 2012 0\n 1 2015 4\n 2 2019 6\n >>> df.is_category('year')\n True\n\n :param column: column to assume is categorical.\n :param labels: labels to associate to the values between min_value and max_value\n :param min_value: minimum integer value (if max_value is not given, this is calculated)\n :param max_value: maximum integer value (if max_value is not given, this is calculated)\n :param labels: Labels to associate to each value, list(range(min_value, max_value+1)) by default\n :param inplace: {inplace}\n \"\"\"\n df = self if inplace else self.copy()\n column = _ensure_string_from_expression(column)\n if df[column].dtype != int:\n raise TypeError(f'Only integer columns can be marked as categorical, {column} is {df[column].dtype}')\n if max_value is not None:\n labels = list(range(min_value, max_value+1))\n N = len(labels)\n else:\n vmin, vmax = df.minmax(column)\n if labels is None:\n N = int(vmax + 1)\n labels = list(range(vmin, vmax+1))\n min_value = vmin\n else:\n min_value = vmin\n if (vmax - vmin) >= len(labels):\n raise ValueError('value of {} found, which is larger than number of labels {}'.format(vmax, len(labels)))\n df._categories[column] = dict(labels=labels, N=len(labels), min_value=min_value)\n return df\n\n def ordinal_encode(self, column, values=None, inplace=False, lazy=False):\n \"\"\"Encode column as ordinal values and mark it as categorical.\n\n The existing column is renamed to a hidden column and replaced by a numerical columns\n with values between [0, len(values)-1].\n\n :param lazy: When False, it will materialize the ordinal codes.\n \"\"\"\n column = _ensure_string_from_expression(column)\n df = self if inplace else self.copy()\n # for the codes, we need to work on the unfiltered dataset, since the filter\n # may change, and we also cannot add an array that is smaller in length\n df_unfiltered = df.copy()\n # maybe we need some filter manipulation methods\n df_unfiltered.select_nothing(name=FILTER_SELECTION_NAME)\n df_unfiltered._length_unfiltered = df._length_original\n df_unfiltered.set_active_range(0, df._length_original)\n expression = df_unfiltered[column]\n if lazy or values is not None:\n if values is None:\n found_values = df_unfiltered.unique(column, array_type='numpy-arrow')\n minimal_type = vaex.utils.required_dtype_for_max(len(found_values), signed=True)\n dtype = vaex.dtype_of(found_values)\n if dtype == int:\n min_value = found_values.min()\n max_value = found_values.max()\n if (max_value - min_value +1) == len(found_values):\n warnings.warn(f'It seems your column {column} is already ordinal encoded (values between {min_value} and {max_value}), automatically switching to use df.categorize')\n return df.categorize(column, min_value=min_value, max_value=max_value, inplace=inplace)\n values = found_values\n else:\n values = expression.dtype.create_array(values)\n fp = 
f'hash-map-unique-{expression.fingerprint()}'\n hash_map_unique_name = fp.replace('-', '_')\n hash_map_unique = vaex.hash.HashMapUnique.from_keys(values, fingerprint=fp)\n if lazy:\n df.add_variable(hash_map_unique_name, hash_map_unique)\n expr = df._expr('hashmap_apply({}, {}, check_missing=True)'.format(column, hash_map_unique_name))\n df[column] = expr\n df._categories[column] = dict(labels=values, N=len(values), min_value=0)\n return df # no else but return to avoid large diff\n else:\n dfc = df.copy()\n dfc.add_variable(hash_map_unique_name, hash_map_unique)\n expr = dfc._expr('hashmap_apply({}, {}, check_missing=True)'.format(column, hash_map_unique_name))\n codes = dfc.evaluate(expr)\n else:\n found_values, codes = df_unfiltered.unique(column, return_inverse=True, array_type='numpy-arrow')\n if isinstance(found_values, array_types.supported_arrow_array_types):\n # elements of arrow arrays are not in arrow arrays, e.g. ar[0] in ar is False\n # see tests/arrow/assumptions_test.py::test_in_pylist\n found_values = found_values.to_pylist()\n values = found_values\n max_code = codes.max()\n minimal_type = vaex.utils.required_dtype_for_max(max_code, signed=True)\n codes = codes.astype(minimal_type)\n if isinstance(values, (list, tuple)):\n values = pa.array(values)\n dtype = vaex.dtype_of(values)\n if dtype == int:\n min_value = values.min()\n max_value = values.max()\n if (max_value - min_value +1) == len(values):\n warnings.warn(f'It seems your column {column} is already ordinal encoded (values between {min_value} and {max_value}), automatically switching to use df.categorize')\n return df.categorize(column, min_value=min_value, max_value=max_value, inplace=inplace)\n df.rename(column, '__original_' + column, unique=True)\n df.add_column(column, codes)\n values = vaex.array_types.tolist(values)\n df._categories[column] = dict(labels=values, N=len(values), min_value=0)\n return df\n\n # for backward compatibility\n label_encode = _hidden(vaex.utils.deprecated('use ordinal_encode')(ordinal_encode))\n\n @property\n def data(self):\n \"\"\"Gives direct access to the data as numpy arrays.\n\n Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion.\n Only real columns (i.e. no virtual) columns can be accessed, for getting the data from virtual columns, use\n DataFrame.evaluate(...).\n\n Columns can be accessed by their names, which are attributes. The attributes are of type numpy.ndarray.\n\n Example:\n\n >>> df = vaex.example()\n >>> r = np.sqrt(df.data.x**2 + df.data.y**2)\n\n \"\"\"\n class Datas(object):\n pass\n\n datas = Datas()\n for name, array in self.columns.items():\n setattr(datas, name, array[:])\n return datas\n\n def copy(self, column_names=None, treeshake=False):\n '''Make a shallow copy of a DataFrame. One can also specify a subset of columns.\n\n This is a fairly cheap operation, since no memory copies of the underlying data are made.\n\n {note_copy}\n\n :param list column_names: A subset of columns to use for the DataFrame copy. 
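For example, df.copy(column_names=['x', 'y']) would return a shallow copy exposing only x and y, while any columns they depend on (e.g. through virtual columns or the filter) are kept as hidden columns. 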
If None, all the columns are copied.\n :param bool treeshake: Get rid of variables not used.\n :rtype: DataFrame\n '''\n copy_all = column_names is None\n if copy_all and not treeshake: # fast path\n df = vaex.from_dataset(self.dataset)\n df.column_names = list(self.column_names)\n df.virtual_columns = self.virtual_columns.copy()\n virtuals = set(df.virtual_columns)\n for name in df.column_names:\n if name in virtuals:\n df._virtual_expressions[name] = Expression(df, df.virtual_columns[name])\n df._initialize_column(name)\n hide = set()\n else:\n\n all_column_names = self.get_column_names(hidden=True)\n if column_names is None:\n column_names = all_column_names.copy()\n else:\n for name in column_names:\n self.validate_expression(name)\n\n # the columns that we require for a copy (superset of column_names)\n required = set()\n # expression like 'x/2' that are not (virtual) columns\n expression_columns = set()\n\n def track(name):\n if name in self.dataset:\n required.add(name)\n else:\n if name in self.variables:\n if treeshake:\n required.add(name)\n return\n elif name in self.virtual_columns:\n required.add(name)\n expr = self._virtual_expressions[name]\n else:\n # this might be an expression, create a valid name\n expression_columns.add(name)\n expr = self[name]\n # we expand it ourselves\n deps = expr.variables(ourself=True, expand_virtual=False)\n deps -= {name}\n # the columns we didn't know we required yet\n missing = deps - required\n required.update(deps)\n for name in missing:\n track(name)\n\n for name in column_names:\n track(name)\n\n # track all selection dependencies, this includes the filters\n for key, value in list(self.selection_histories.items()):\n selection = self.get_selection(key)\n if selection:\n for name in selection._depending_columns(self):\n track(name)\n\n # first create the DataFrame with real data (dataset)\n dataset_columns = {k for k in required if k in self.dataset}\n # we want a deterministic order for fingerprinting\n dataset_columns = list(dataset_columns)\n dataset_columns.sort()\n dataset = self.dataset.project(*dataset_columns)\n df = vaex.from_dataset(dataset)\n\n # and reconstruct the rest (virtual columns and variables)\n other = {k for k in required if k not in self.dataset}\n for name in other:\n if name in self.virtual_columns:\n valid_name = vaex.utils.find_valid_name(name)\n df.add_virtual_column(valid_name, self.virtual_columns[name])\n elif name in self.variables:\n # if we treeshake, we copy only what we require\n if treeshake:\n df.variables[name] = self.variables[name]\n pass\n else:\n raise RuntimeError(f'Oops {name} is not a virtual column or variable??')\n\n # and extra expressions like 'x/2'\n for expr in expression_columns:\n df.add_virtual_column(expr, expr)\n hide = required - set(column_names) - set(self.variables)\n\n # restore some metadata\n df._length_unfiltered = self._length_unfiltered\n df._length_original = self._length_original\n df._cached_filtered_length = self._cached_filtered_length\n df._filter_filled = self._filter_filled\n df._index_end = self._index_end\n df._index_start = self._index_start\n df._active_fraction = self._active_fraction\n df._renamed_columns = list(self._renamed_columns)\n df.units.update(self.units)\n if not treeshake:\n df.variables.update(self.variables)\n df._categories.update(self._categories)\n df._future_behaviour = self._future_behaviour\n\n # put in the selections (thus filters) in place\n # so drop moves instead of really dropping it\n df.functions.update(self.functions)\n for key, 
value in self.selection_histories.items():\n # TODO: selection_histories begin a defaultdict always gives\n # us the filtered selection, so check if we really have a\n # selection\n if self.get_selection(key):\n df.selection_histories[key] = list(value)\n # the filter should never be modified, so we can share a reference\n # except when we add filter on filter using\n # df = df[df.x>0]\n # df = df[df.x < 10]\n # in that case we make a copy in __getitem__\n if key == FILTER_SELECTION_NAME:\n df._selection_masks[key] = self._selection_masks[key]\n else:\n df._selection_masks[key] = vaex.superutils.Mask(int(df._length_original))\n # and make sure the mask is consistent with the cache chunks\n np.asarray(df._selection_masks[key])[:] = np.asarray(self._selection_masks[key])\n for key, value in self.selection_history_indices.items():\n if self.get_selection(key):\n df.selection_history_indices[key] = value\n # we can also copy the caches, which prevents recomputations of selections\n df._selection_mask_caches[key] = collections.defaultdict(dict)\n df._selection_mask_caches[key].update(self._selection_mask_caches[key])\n\n\n for name in hide:\n df._hide_column(name)\n if column_names is not None:\n # make the the column order is as requested by the column_names argument\n extra = set(df.column_names) - set(column_names)\n df.column_names = list(column_names) + list(extra)\n\n df.copy_metadata(self)\n return df\n\n def shallow_copy(self, virtual=True, variables=True):\n \"\"\"Creates a (shallow) copy of the DataFrame.\n\n It will link to the same data, but will have its own state, e.g. virtual columns, variables, selection etc.\n\n \"\"\"\n df = DataFrameLocal(self.name, self.path, self.column_names)\n df.columns.update(self.columns)\n df._length_unfiltered = self._length_unfiltered\n df._length_original = self._length_original\n df._index_end = self._index_end\n df._index_start = self._index_start\n df._active_fraction = self._active_fraction\n if virtual:\n df.virtual_columns.update(self.virtual_columns)\n if variables:\n df.variables.update(self.variables)\n # half shallow/deep copy\n # for key, value in self.selection_histories.items():\n # df.selection_histories[key] = list(value)\n # for key, value in self.selection_history_indices.items():\n # df.selection_history_indices[key] = value\n return df\n\n def is_local(self):\n \"\"\"The local implementation of :func:`DataFrame.evaluate`, always returns True.\"\"\"\n return True\n\n def length(self, selection=False):\n \"\"\"Get the length of the DataFrames, for the selection of the whole DataFrame.\n\n If selection is False, it returns len(df).\n\n TODO: Implement this in DataFrameRemote, and move the method up in :func:`DataFrame.length`\n\n :param selection: When True, will return the number of selected rows\n :return:\n \"\"\"\n if selection:\n return 0 if self.mask is None else np.sum(self.mask)\n else:\n return len(self)\n\n @_hidden\n def __call__(self, *expressions, **kwargs):\n \"\"\"The local implementation of :func:`DataFrame.__call__`\"\"\"\n import vaex.legacy\n return vaex.legacy.SubspaceLocal(self, expressions, kwargs.get(\"executor\") or self.executor, delay=kwargs.get(\"delay\", False))\n\n def echo(self, arg): return arg\n\n @property\n def _dtype(self):\n dtypes = [self[k].dtype for k in self.get_column_names()]\n if not all([dtypes[0] == dtype for dtype in dtypes]):\n return ValueError(\"Not all dtypes are equal: %r\" % dtypes)\n return dtypes[0]\n\n @property\n def shape(self):\n return (len(self), 
len(self.get_column_names()))\n\n def __array__(self, dtype=None, parallel=True):\n \"\"\"Gives a full memory copy of the DataFrame into a 2d numpy array of shape (n_rows, n_columns).\n Note that the memory order is fortran, so all values of 1 column are contiguous in memory for performance reasons.\n\n Note this returns the same result as:\n\n >>> np.array(ds)\n\n If any of the columns contain masked arrays, the masks are ignored (i.e. the masked elements are returned as well).\n \"\"\"\n if dtype is None:\n dtype = np.float64\n chunks = []\n column_names = self.get_column_names(strings=False)\n for name in column_names:\n column_type = self.data_type(name).numpy\n if not np.can_cast(column_type, dtype):\n if column_type != dtype:\n raise ValueError(\"Cannot cast %r (of type %r) to %r\" % (name, self.data_type(name), dtype))\n chunks = self.evaluate(column_names, parallel=parallel, array_type='numpy')\n if any(np.ma.isMaskedArray(chunk) for chunk in chunks):\n return np.ma.array(chunks, dtype=dtype).T\n else:\n return np.array(chunks, dtype=dtype).T\n\n def as_arrow(self):\n \"\"\"Lazily cast all columns to arrow, except object types.\"\"\"\n df = self.copy()\n for name in self.get_column_names():\n df[name] = df[name].as_arrow()\n return df\n\n def as_numpy(self, strict=False):\n \"\"\"Lazily cast all numerical columns to numpy.\n\n If strict is True, it will also cast non-numerical types.\n \"\"\"\n df = self.copy()\n for name in self.get_column_names():\n df[name] = df[name].as_numpy(strict=strict)\n return df\n\n @vaex.utils.deprecated('use DataFrame.join(other)')\n def _hstack(self, other, prefix=None):\n \"\"\"Join the columns of the other DataFrame to this one, assuming the ordering is the same\"\"\"\n assert len(self) == len(other), \"does not make sense to horizontally stack DataFrames with different lengths\"\n for name in other.get_column_names():\n if prefix:\n new_name = prefix + name\n else:\n new_name = name\n self.add_column(new_name, other.columns[name])\n\n def concat(self, *others, resolver='flexible') -> DataFrame:\n \"\"\"Concatenates multiple DataFrames, adding the rows of the other DataFrame to the current, returned in a new DataFrame.\n\n In the case of resolver='flexible', when not all columns has the same names, the missing data is filled with missing values.\n\n In the case of resolver='strict' all datasets need to have matching column names.\n\n :param others: The other DataFrames that are concatenated with this DataFrame\n :param str resolver: How to resolve schema conflicts, 'flexible' or 'strict'.\n :return: New DataFrame with the rows concatenated\n \"\"\"\n # to reduce complexity, we 'extract' the dataframes (i.e. 
remove filter)\n dfs = [self, *others]\n dfs = [df.extract() for df in dfs]\n common = []\n dfs_real_column_names = [df.get_column_names(virtual=False, hidden=True) for df in dfs] # for performance\n dfs_all_column_names = [df.get_column_names(virtual=True, hidden=True) for df in dfs] # for performance\n # because set does not preserve order, we use a list\n all_column_names = []\n for column_names in dfs_all_column_names:\n for name in column_names:\n if name not in all_column_names:\n all_column_names.append(name)\n real_column_names = []\n for column_names in dfs_real_column_names:\n for name in column_names:\n if name not in real_column_names:\n real_column_names.append(name)\n for name in all_column_names:\n if name in real_column_names:\n # first we look for virtual colums, that are real columns in other dataframes\n for df, df_real_column_names, df_all_column_names in zip(dfs, dfs_real_column_names, dfs_all_column_names):\n if name in df_all_column_names and name not in df_real_column_names:\n # upgrade to a column, so Dataset's concat works\n dfs[dfs.index(df)] = df._lazy_materialize(name)\n else:\n # check virtual column\n expressions = [df.virtual_columns.get(name, None) for df in dfs]\n test_expression = [k for k in expressions if k][0]\n if any([test_expression != k for k in expressions]):\n # we have a mismatching virtual column, materialize it\n for df in dfs:\n # upgrade to a column, so Dataset's concat can concat\n if name in df.get_column_names(virtual=True, hidden=True):\n dfs[dfs.index(df)] = df._lazy_materialize(name)\n\n first, *tail = dfs\n # concatenate all datasets\n dataset = first.dataset.concat(*[df.dataset for df in tail], resolver=resolver)\n df_concat = vaex.dataframe.DataFrameLocal(dataset)\n\n for name in list(first.virtual_columns.keys()):\n assert all([first.virtual_columns[name] == df.virtual_columns.get(name, None) for df in tail]), 'Virtual column expression mismatch for column {name}'\n df_concat.add_virtual_column(name, first.virtual_columns[name])\n\n for df in dfs:\n for name, value in list(df.variables.items()):\n if name not in df_concat.variables:\n df_concat.set_variable(name, value, write=False)\n for df in dfs:\n for name, value in list(df.functions.items()):\n if name not in df_concat.functions:\n if isinstance(value, vaex.expression.Function):\n value = value.f\n if isinstance(value, vaex.expression.FunctionSerializablePickle):\n value = value.f\n df_concat.add_function(name, value)\n else:\n if df_concat.functions[name].f != df.functions[name].f:\n raise ValueError(f'Unequal function {name} in concatenated dataframes are not supported yet')\n return df_concat\n\n def _invalidate_caches(self):\n self._invalidate_selection_cache()\n self._cached_filtered_length = None\n self._filter_filled = False\n\n def _invalidate_selection_cache(self):\n self._selection_mask_caches.clear()\n for key in self._selection_masks.keys():\n self._selection_masks[key] = vaex.superutils.Mask(int(self._length_original))\n\n def _filtered_range_to_unfiltered_indices(self, i1, i2):\n assert self.filtered\n self._fill_filter_mask()\n count = len(self)\n assert i2 <= count\n cache = self._selection_mask_caches[FILTER_SELECTION_NAME]\n mask_blocks = iter(sorted(\n [(k1, k2, block) for (k1, k2), (selection, block) in cache.items()],\n key=lambda item: item[0]))\n done = False\n\n offset_unfiltered = 0 # points to the unfiltered arrays\n offset_filtered = 0 # points to the filtered array\n indices = []\n while not done:\n unfiltered_i1, unfiltered_i2, block = 
next(mask_blocks)\n count = block.sum()\n if (offset_filtered + count) < i1: # i1 does not start in this block\n assert unfiltered_i2 == offset_unfiltered + len(block)\n offset_unfiltered = unfiltered_i2\n offset_filtered += count\n else:\n for block_index in range(len(block)):\n if block[block_index]: # if not filtered, we go to the next index\n if i1 <= offset_filtered < i2: # if this is in the range we want...\n indices.append(offset_unfiltered)\n offset_filtered += 1\n offset_unfiltered += 1\n done = offset_filtered >= i2\n return np.array(indices, dtype=np.int64)\n\n def _evaluate(self, expression, i1, i2, out=None, selection=None, internal=False, filter_mask=None):\n scope = scopes._BlockScope(self, i1, i2, mask=filter_mask, **self.variables)\n if out is not None:\n scope.buffers[expression] = out\n value = scope.evaluate(expression)\n if isinstance(value, ColumnString) and not internal:\n value = value.to_numpy()\n return value\n\n def _unfiltered_chunk_slices(self, chunk_size):\n logical_length = len(self)\n if self.filtered:\n full_mask = self._selection_masks[FILTER_SELECTION_NAME]\n # TODO: python 3, use yield from\n for item in vaex.utils.subdivide_mask(full_mask, max_length=chunk_size, logical_length=logical_length):\n yield item\n else:\n for i1, i2 in vaex.utils.subdivide(logical_length, max_length=chunk_size):\n yield i1, i2, i1, i2\n\n def _evaluate_implementation(self, expression, i1=None, i2=None, out=None, selection=None, filtered=True, array_type=None, parallel=True, chunk_size=None, raw=False, progress=None):\n \"\"\"The real implementation of :func:`DataFrame.evaluate` (not returning a generator).\n\n :param raw: Whether indices i1 and i2 refer to unfiltered (raw=True) or 'logical' offsets (raw=False)\n \"\"\"\n # expression = _ensure_string_from_expression(expression)\n was_list, [expressions] = vaex.utils.listify(expression)\n expressions = vaex.utils._ensure_strings_from_expressions(expressions)\n column_names = self.get_column_names(hidden=True)\n expressions = [vaex.utils.valid_expression(column_names, k) for k in expressions]\n selection = _normalize_selection(selection)\n\n\n selection = _ensure_strings_from_expressions(selection)\n max_stop = (len(self) if (self.filtered and filtered) else self.length_unfiltered())\n i1 = i1 or 0\n i2 = i2 or max_stop\n if parallel:\n df = self\n # first, reduce complexity for the parallel path\n if self.filtered and not filtered:\n df = df.drop_filter()\n if i1 != 0 or i2 != max_stop:\n if not raw and self.filtered and filtered:\n self._fill_filter_mask()\n mask = self._selection_masks[FILTER_SELECTION_NAME]\n i1, i2 = mask.indices(i1, i2-1)\n assert i1 != -1\n i2 += 1\n # TODO: performance: can we collapse the two trims in one?\n df = df.trim()\n df.set_active_range(i1, i2)\n df = df.trim()\n else:\n df = self\n # print(df.columns['x'], i1, i2)\n expression = expressions[0]\n # here things are simpler or we don't go parallel\n mask = None\n\n if parallel:\n use_filter = df.filtered and filtered\n\n length = df.length_unfiltered()\n arrays = {}\n # maps to a dict of start_index -> apache arrow array (a chunk)\n chunks_map = {}\n dtypes = {}\n shapes = {}\n virtual = set()\n # TODO: For NEP branch: dtype -> dtype_evaluate\n\n expression_to_evaluate = list(set(expressions)) # lets assume we have to do them all\n\n for expression in set(expressions):\n expression_obj = expression\n expression = self._expr(expression)._label\n dtypes[expression] = dtype = df.data_type(expression).internal\n if expression not in df.columns:\n 
virtual.add(expression)\n # since we will use pre_filter=True, we'll get chunks of the data at unknown offset\n # so we'll also have to stitch those back together\n if use_filter or selection:# or not isinstance(dtype, np.dtype):\n chunks_map[expression] = {}\n else:\n # we know exactly where to place the chunks, so we pre allocate the arrays\n if expression in virtual:\n if isinstance(dtype, np.dtype):\n shape = (length, ) + df._shape_of(expression, filtered=False)[1:]\n shapes[expression] = shape\n # numpy arrays are fixed length, so we can pre allocate them\n if df.is_masked(expression):\n arrays[expression] = np.ma.empty(shapes.get(expression, length), dtype=dtypes[expression])\n else:\n arrays[expression] = np.zeros(shapes.get(expression, length), dtype=dtypes[expression])\n else:\n # TODO: find a way to modify an arrow array inplace, e.g. float64 array\n # probably by making an ndarray, and have an Arrow array view that\n # fixed_width = False\n # try:\n # ts.bit_width\n # fixed_width = True\n # except ValueError:\n # pass\n # if fixed_width:\n chunks_map[expression] = {}\n else:\n # quick path, we can just copy the column\n arrays[expression] = df.columns[expression]\n start, end = df._index_start, df._index_end\n if start != 0 or end != len(arrays[expression]):\n arrays[expression] = arrays[expression][start:end]\n if isinstance(arrays[expression], vaex.column.Column):\n arrays[expression] = arrays[expression][0:end-start] # materialize fancy columns (lazy, indexed)\n expression_to_evaluate.remove(expression_obj)\n def assign(thread_index, i1, i2, selection_masks, blocks):\n for i, expression in enumerate(expression_to_evaluate):\n expression_obj = expression\n expression = self._expr(expression)._label\n if expression in chunks_map:\n # for non-primitive arrays we simply keep a reference to the chunk\n chunks_map[expression][i1] = blocks[i]\n else:\n # for primitive arrays (and no filter/selection) we directly add it to the right place in contiguous numpy array\n arrays[expression][i1:i2] = blocks[i]\n if expression_to_evaluate:\n df.map_reduce(assign, lambda *_: None, expression_to_evaluate, progress=progress, ignore_filter=False, selection=selection, pre_filter=use_filter, info=True, to_numpy=False, name=\"evaluate\")\n def finalize_result(expression):\n expression_obj = expression\n expression = self._expr(expression)._label\n if expression in chunks_map:\n # put all chunks in order\n chunks = [chunk for (i1, chunk) in sorted(chunks_map[expression].items(), key=lambda i1_and_chunk: i1_and_chunk[0])]\n assert len(chunks) > 0\n if len(chunks) == 1:\n values = array_types.convert(chunks[0], array_type)\n else:\n values = array_types.convert(chunks, array_type)\n else:\n values = array_types.convert(arrays[expression], array_type)\n values = self._auto_encode_data(expression, values)\n return values\n result = [finalize_result(k) for k in expressions]\n if not was_list:\n result = result[0]\n return result\n else:\n assert df is self\n if i1 == i2: # empty arrays\n values = [array_types.convert(self.data_type(e).create_array([]), array_type) for e in expressions]\n if not was_list:\n return values[0]\n return values\n if not raw and self.filtered and filtered:\n\n\n self._fill_filter_mask() # fill caches and masks\n mask = self._selection_masks[FILTER_SELECTION_NAME]\n # if _DEBUG:\n # if i1 == 0 and i2 == count_check:\n # # we cannot check it if we just evaluate a portion\n # assert not mask.view(self._index_start, self._index_end).is_dirty()\n # # assert mask.count() == 
count_check\n ni1, ni2 = mask.indices(i1, i2-1) # -1 since it is inclusive\n assert ni1 != -1\n assert ni2 != -1\n i1, i2 = ni1, ni2\n i2 = i2+1 # +1 to make it inclusive\n values = []\n\n dataset = self.dataset\n if i1 != 0 or i2 != self.dataset.row_count:\n dataset = dataset[i1:i2]\n\n deps = set()\n for expression in expressions:\n deps |= self._expr(expression).dependencies()\n deps = {k for k in deps if k in dataset}\n if self.filtered:\n filter_deps = df.get_selection(vaex.dataframe.FILTER_SELECTION_NAME).dependencies(df)\n deps |= filter_deps\n columns = {k: dataset[k][:] for k in deps if k in dataset}\n\n if self.filtered and filtered:\n filter_scope = scopes._BlockScope(df, i1, i2, None, selection=True, values={**df.variables, **{k: columns[k] for k in filter_deps if k in columns}})\n filter_scope.filter_mask = None\n filter_mask = filter_scope.evaluate(vaex.dataframe.FILTER_SELECTION_NAME)\n columns = {k:vaex.array_types.filter(v, filter_mask) for k, v, in columns.items()}\n else:\n filter_mask = None\n block_scope = scopes._BlockScope(self, i1, i2, mask=mask, values={**self.variables, **columns})\n block_scope.mask = filter_mask\n\n for expression in expressions:\n value = block_scope.evaluate(expression)\n value = array_types.convert(value, array_type)\n values.append(value)\n if not was_list:\n return values[0]\n return values\n\n def _equals(self, other):\n values = self.compare(other)\n return values == ([], [], [], [])\n\n def compare(self, other, report_missing=True, report_difference=False, show=10, orderby=None, column_names=None):\n \"\"\"Compare two DataFrames and report their difference, use with care for large DataFrames\"\"\"\n if column_names is None:\n column_names = self.get_column_names(virtual=False)\n for other_column_name in other.get_column_names(virtual=False):\n if other_column_name not in column_names:\n column_names.append(other_column_name)\n different_values = []\n missing = []\n type_mismatch = []\n meta_mismatch = []\n assert len(self) == len(other)\n if orderby:\n index1 = np.argsort(self.columns[orderby])\n index2 = np.argsort(other.columns[orderby])\n for column_name in column_names:\n if column_name not in self.get_column_names(virtual=False):\n missing.append(column_name)\n if report_missing:\n print(\"%s missing from this DataFrame\" % column_name)\n elif column_name not in other.get_column_names(virtual=False):\n missing.append(column_name)\n if report_missing:\n print(\"%s missing from other DataFrame\" % column_name)\n else:\n ucd1 = self.ucds.get(column_name)\n ucd2 = other.ucds.get(column_name)\n if ucd1 != ucd2:\n print(\"ucd mismatch : %r vs %r for %s\" % (ucd1, ucd2, column_name))\n meta_mismatch.append(column_name)\n unit1 = self.units.get(column_name)\n unit2 = other.units.get(column_name)\n if unit1 != unit2:\n print(\"unit mismatch : %r vs %r for %s\" % (unit1, unit2, column_name))\n meta_mismatch.append(column_name)\n type1 = self.data_type(column_name)\n type2 = other.data_type(column_name)\n if not vaex.array_types.same_type(type1, type2):\n print(\"different data types: %s vs %s for %s\" % (self.data_type(column_name), other.data_type(column_name), column_name))\n type_mismatch.append(column_name)\n else:\n # a = self.columns[column_name]\n # b = other.columns[column_name]\n # if self.filtered:\n # a = a[self.evaluate_selection_mask(None)]\n # if other.filtered:\n # b = b[other.evaluate_selection_mask(None)]\n a = self.evaluate(column_name, array_type=\"numpy\")\n b = other.evaluate(column_name, array_type=\"numpy\")\n if 
orderby:\n a = a[index1]\n b = b[index2]\n\n def normalize(ar):\n if isinstance(ar, pa.Array):\n ar = ar.to_pandas().values\n # if ar.dtype == str_type:\n # return ar\n if ar.dtype.kind == \"f\" and hasattr(ar, \"mask\"):\n mask = ar.mask\n ar = ar.copy()\n ar[mask] = np.nan\n if ar.dtype.kind in \"SU\":\n if hasattr(ar, \"mask\"):\n data = ar.data\n else:\n data = ar\n values = [value.strip() for value in data.tolist()]\n if hasattr(ar, \"mask\"):\n ar = np.ma.masked_array(values, ar.mask)\n else:\n ar = np.array(values)\n return ar\n\n def equal_mask(a, b):\n a = normalize(a)\n b = normalize(b)\n boolean_mask = (a == b)\n if not self.is_string(column_name) and self.data_type(column_name).kind == 'f': # floats with nan won't equal itself, i.e. NaN != NaN\n boolean_mask |= (np.isnan(a) & np.isnan(b))\n return boolean_mask\n boolean_mask = equal_mask(a, b)\n all_equal = np.all(boolean_mask)\n if not all_equal:\n count = np.sum(~boolean_mask)\n print(\"%s does not match for both DataFrames, %d rows are diffent out of %d\" % (column_name, count, len(self)))\n different_values.append(column_name)\n if report_difference:\n indices = np.arange(len(self))[~boolean_mask]\n values1 = self.columns[column_name][:][~boolean_mask]\n values2 = other.columns[column_name][:][~boolean_mask]\n print(\"\\tshowing difference for the first 10\")\n for i in range(min(len(values1), show)):\n try:\n diff = values1[i] - values2[i]\n except:\n diff = \"does not exists\"\n print(\"%s[%d] == %s != %s other.%s[%d] (diff = %s)\" % (column_name, indices[i], values1[i], values2[i], column_name, indices[i], diff))\n return different_values, missing, type_mismatch, meta_mismatch\n\n @docsubst\n def join(self, other, on=None, left_on=None, right_on=None, lprefix='', rprefix='', lsuffix='', rsuffix='', how='left', allow_duplication=False, prime_growth=False, cardinality_other=None, inplace=False):\n \"\"\"Return a DataFrame joined with other DataFrames, matched by columns/expression on/left_on/right_on\n\n If neither on/left_on/right_on is given, the join is done by simply adding the columns (i.e. on the implicit\n row index).\n\n Note: The filters will be ignored when joining, the full DataFrame will be joined (since filters may\n change). If either DataFrame is heavily filtered (contains just a small number of rows) consider running\n :func:`DataFrame.extract` first.\n\n Example:\n\n >>> a = np.array(['a', 'b', 'c'])\n >>> x = np.arange(1,4)\n >>> ds1 = vaex.from_arrays(a=a, x=x)\n >>> b = np.array(['a', 'b', 'd'])\n >>> y = x**2\n >>> ds2 = vaex.from_arrays(b=b, y=y)\n >>> ds1.join(ds2, left_on='a', right_on='b')\n\n :param other: Other DataFrame to join with (the right side)\n :param on: default key for the left table (self)\n :param left_on: key for the left table (self), overrides on\n :param right_on: default key for the right table (other), overrides on\n :param lprefix: prefix to add to the left column names in case of a name collision\n :param rprefix: similar for the right\n :param lsuffix: suffix to add to the left column names in case of a name collision\n :param rsuffix: similar for the right\n :param how: how to join, 'left' keeps all rows on the left, and adds columns (with possible missing values)\n 'right' is similar with self and other swapped. 
'inner' will only return rows which overlap.\n :param bool allow_duplication: Allow duplication of rows when the joined column contains non-unique values.\n :param int cardinality_other: Number of unique elements (or estimate of) for the other table.\n :param bool prime_growth: Growth strategy for the hashmaps used internally, can improve performance in some case (e.g. integers with low bits unused).\n :param inplace: {inplace}\n :return:\n \"\"\"\n import vaex.join\n kwargs = dict(**locals())\n kwargs['df'] = kwargs.pop('self')\n del kwargs['vaex']\n return vaex.join.join(**kwargs)\n\n @docsubst\n def export(self, path, progress=None, chunk_size=default_chunk_size, parallel=True, fs_options=None, fs=None):\n \"\"\"Exports the DataFrame to a file depending on the file extension.\n\n E.g if the filename ends on .hdf5, `df.export_hdf5` is called.\n\n :param str path: path for file\n :param progress: {progress}\n :param int chunk_size: {chunk_size_export}, if supported.\n :param bool parallel: {evaluate_parallel}\n :param dict fs_options: {fs_options}\n :return:\n \"\"\"\n naked_path, options = vaex.file.split_options(path)\n fs_options = fs_options or {}\n if naked_path.endswith('.arrow'):\n self.export_arrow(path, progress=progress, chunk_size=chunk_size, parallel=parallel, fs_options=fs_options, fs=fs)\n elif naked_path.endswith('.feather'):\n self.export_feather(path, parallel=parallel, fs_options=fs_options)\n elif naked_path.endswith('.hdf5'):\n self.export_hdf5(path, progress=progress, parallel=parallel)\n elif naked_path.endswith('.fits'):\n self.export_fits(path, progress=progress)\n elif naked_path.endswith('.parquet'):\n self.export_parquet(path, progress=progress, parallel=parallel, chunk_size=chunk_size, fs_options=fs_options, fs=fs)\n elif naked_path.endswith('.csv'):\n self.export_csv(path, progress=progress, parallel=parallel, chunk_size=chunk_size)\n elif naked_path.endswith('.json'):\n self.export_json(path, progress=progress, chunk_size=chunk_size, parallel=parallel, fs_options=fs_options, fs=fs)\n else:\n raise ValueError('''Unrecognized file extension. 
Please use .arrow, .hdf5, .parquet, .fits, or .csv to export to the particular file format.''')\n\n @docsubst\n def export_arrow(self, to, progress=None, chunk_size=default_chunk_size, parallel=True, reduce_large=True, fs_options=None, fs=None, as_stream=True):\n \"\"\"Exports the DataFrame to a file of stream written with arrow\n\n :param to: filename, file object, or :py:data:`pyarrow.RecordBatchStreamWriter`, py:data:`pyarrow.RecordBatchFileWriter` or :py:data:`pyarrow.parquet.ParquetWriter`\n :param progress: {progress}\n :param int chunk_size: {chunk_size_export}\n :param bool parallel: {evaluate_parallel}\n :param bool reduce_large: If True, convert arrow large_string type to string type\n :param bool as_stream: Write as an Arrow stream if true, else a file.\n see also https://arrow.apache.org/docs/format/Columnar.html?highlight=arrow1#ipc-file-format\n :param dict fs_options: {fs_options}\n :return:\n \"\"\"\n def write(writer):\n N = len(self)\n if chunk_size:\n with vaex.progress.tree(progress, title=\"export(arrow)\") as progressbar:\n for i1, i2, table in self.to_arrow_table(chunk_size=chunk_size, parallel=parallel, reduce_large=reduce_large):\n writer.write_table(table)\n progressbar(i2/N)\n progressbar(1.)\n else:\n table = self.to_arrow_table(chunk_size=chunk_size, parallel=parallel, reduce_large=reduce_large)\n writer.write_table(table)\n\n if vaex.file.is_path_like(to) or vaex.file.is_file_object(to):\n schema = self.schema_arrow(reduce_large=reduce_large)\n with vaex.file.open(path=to, mode='wb', fs_options=fs_options, fs=fs) as sink:\n if as_stream:\n with pa.RecordBatchStreamWriter(sink, schema) as writer:\n write(writer)\n else:\n with pa.RecordBatchFileWriter(sink, schema) as writer:\n write(writer)\n else:\n write(to)\n\n @docsubst\n def export_feather(self, to, parallel=True, reduce_large=True, compression='lz4', fs_options=None, fs=None):\n \"\"\"Exports the DataFrame to an arrow file using the feather file format version 2\n\n Feather is exactly represented as the Arrow IPC file format on disk, but also support compression.\n see also https://arrow.apache.org/docs/python/feather.html\n\n :param to: filename or file object\n :param bool parallel: {evaluate_parallel}\n :param bool reduce_large: If True, convert arrow large_string type to string type\n :param compression: Can be one of 'zstd', 'lz4' or 'uncompressed'\n :param fs_options: {fs_options}\n :param fs: {fs}\n :return:\n \"\"\"\n import pyarrow.feather as feather\n table = self.to_arrow_table(parallel=False, reduce_large=reduce_large)\n fs_options = fs_options or {}\n with vaex.file.open(path=to, mode='wb', fs_options=fs_options, fs=fs) as sink:\n feather.write_feather(table, sink, compression=compression)\n\n @docsubst\n def export_parquet(self, path, progress=None, chunk_size=default_chunk_size, parallel=True, fs_options=None, fs=None, **kwargs):\n \"\"\"Exports the DataFrame to a parquet file.\n\n Note: This may require that all of the data fits into memory (memory mapped data is an exception).\n Use :py:`DataFrame.export_chunks` to write to multiple files in parallel.\n\n :param str path: path for file\n :param progress: {progress}\n :param int chunk_size: {chunk_size_export}\n :param bool parallel: {evaluate_parallel}\n :param dict fs_options: {fs_options}\n :param fs: {fs}\n :param **kwargs: Extra keyword arguments to be passed on to py:data:`pyarrow.parquet.ParquetWriter`.\n :return:\n \"\"\"\n import pyarrow.parquet as pq\n schema = self.schema_arrow(reduce_large=True)\n with 
vaex.file.open(path=path, mode='wb', fs_options=fs_options, fs=fs) as sink:\n with pq.ParquetWriter(sink, schema, **kwargs) as writer:\n self.export_arrow(writer, progress=progress, chunk_size=chunk_size, parallel=parallel, reduce_large=True)\n\n @docsubst\n def export_partitioned(self, path, by, directory_format='{key}={value}', progress=None, chunk_size=default_chunk_size, parallel=True, fs_options={}, fs=None):\n '''Expertimental: export files using hive partitioning.\n\n If no extension is found in the path, we assume parquet files. Otherwise you can specify the\n format like an format-string. Where {{i}} is a zero based index, {{uuid}} a unique id, and {{subdir}}\n the Hive key=value directory.\n\n Example paths:\n * '/some/dir/{{subdir}}/{{i}}.parquet'\n * '/some/dir/{{subdir}}/fixed_name.parquet'\n * '/some/dir/{{subdir}}/{{uuid}}.parquet'\n * '/some/dir/{{subdir}}/{{uuid}}.parquet'\n\n :param path: directory where to write the files to.\n :param str or list of str: Which column to partition by.\n :param str directory_format: format string for directories, default '{{key}}={{value}}' for Hive layout.\n :param progress: {progress}\n :param int chunk_size: {chunk_size_export}\n :param bool parallel: {evaluate_parallel}\n :param dict fs_options: {fs_options}\n '''\n from uuid import uuid4\n if not _issequence(by):\n by = [by]\n by = _ensure_strings_from_expressions(by)\n\n # we don't store the partitioned columns\n columns = self.get_column_names()\n for name in by:\n columns.remove(name)\n\n progressbar = vaex.utils.progressbars(progress, title=\"export(partitioned)\")\n progressbar(0)\n groups = self.groupby(by)\n _, ext, _ = vaex.file.split_ext(path)\n if not ext:\n path = vaex.file.stringyfy(path) + '/{subdir}/{uuid}.parquet'\n else:\n path = vaex.file.stringyfy(path)\n for i, (values, df) in enumerate(groups):\n parts = [directory_format.format(key=key, value=value) for key, value in dict(zip(by, values)).items()]\n subdir = '/'.join(parts)\n uuid = uuid4()\n fullpath = path.format(uuid=uuid, subdir=subdir, i=i)\n dirpath = os.path.dirname(fullpath)\n vaex.file.create_dir(dirpath, fs_options=fs_options, fs=fs)\n progressbar((i)/len(groups))\n df[columns].export(fullpath, chunk_size=chunk_size, parallel=parallel, fs_options=fs_options, fs=fs)\n progressbar(1)\n\n @docsubst\n def export_many(self, path, progress=None, chunk_size=default_chunk_size, parallel=True, max_workers=None, fs_options=None, fs=None):\n \"\"\"Export the DataFrame to multiple files of the same type in parallel.\n\n The path will be formatted using the i parameter (which is the chunk index).\n\n Example:\n\n >>> import vaex\n >>> df = vaex.open('my_big_dataset.hdf5')\n >>> print(f'number of rows: {{len(df):,}}')\n number of rows: 193,938,982\n >>> df.export_many(path='my/destination/folder/chunk-{{i:03}}.arrow')\n >>> df_single_chunk = vaex.open('my/destination/folder/chunk-00001.arrow')\n >>> print(f'number of rows: {{len(df_single_chunk):,}}')\n number of rows: 1,048,576\n >>> df_all_chunks = vaex.open('my/destination/folder/chunk-*.arrow')\n >>> print(f'number of rows: {{len(df_all_chunks):,}}')\n number of rows: 193,938,982\n\n\n :param str path: Path for file, formatted by chunk index i (e.g. 
'chunk-{{i:05}}.parquet')\n :param progress: {progress}\n :param int chunk_size: {chunk_size_export}\n :param bool parallel: {evaluate_parallel}\n :param int max_workers: Number of workers/threads to use for writing in parallel\n :param dict fs_options: {fs_options}\n \"\"\"\n from .itertools import pmap, pwait, buffer, consume\n path1 = str(path).format(i=0, i1=1, i2=2)\n path2 = str(path).format(i=1, i1=2, i2=3)\n if path1 == path2:\n name, ext = os.path.splitext(path)\n path = f'{name}-{{i:05}}{ext}'\n input = self.to_dict(chunk_size=chunk_size, parallel=True)\n column_names = self.get_column_names()\n def write(i, item):\n i1, i2, chunks = item\n p = str(path).format(i=i, i1=i2, i2=i2)\n df = vaex.from_dict(chunks)\n df.export(p, chunk_size=None, parallel=False, fs_options=fs_options, fs=fs)\n return i2\n progressbar = vaex.utils.progressbars(progress, title=\"export(many)\")\n progressbar(0)\n length = len(self)\n def update_progress(offset):\n progressbar(offset / length)\n pool = concurrent.futures.ThreadPoolExecutor(max_workers)\n workers = pool._max_workers\n consume(map(update_progress, pwait(buffer(pmap(write, enumerate(input), pool=pool), workers+3))))\n progressbar(1)\n\n @docsubst\n def export_hdf5(self, path, byteorder=\"=\", progress=None, chunk_size=default_chunk_size, parallel=True, column_count=1, writer_threads=0, group='/table', mode='w'):\n \"\"\"Exports the DataFrame to a vaex hdf5 file\n\n :param str path: path for file\n :param str byteorder: = for native, < for little endian and > for big endian\n :param progress: {progress}\n :param bool parallel: {evaluate_parallel}\n :param int column_count: How many columns to evaluate and export in parallel (>1 requires fast random access, like and SSD drive).\n :param int writer_threads: Use threads for writing or not, only useful when column_count > 1.\n :param str group: Write the data into a custom group in the hdf5 file.\n :param str mode: If set to \"w\" (write), an existing file will be overwritten. 
If set to \"a\", one can append additional data to the hdf5 file, but it needs to be in a different group.\n :return:\n \"\"\"\n from vaex.hdf5.writer import Writer\n with vaex.utils.progressbars(progress, title=\"export(hdf5)\") as progressbar:\n progressbar_layout = progressbar.add(\"layout file structure\")\n progressbar_write = progressbar.add(\"write data\")\n with Writer(path=path, group=group, mode=mode, byteorder=byteorder) as writer:\n writer.layout(self, progress=progressbar_layout)\n writer.write(\n self,\n chunk_size=chunk_size,\n progress=progressbar_write,\n column_count=column_count,\n parallel=parallel,\n export_threads=writer_threads)\n\n @docsubst\n def export_fits(self, path, progress=None):\n \"\"\"Exports the DataFrame to a fits file that is compatible with TOPCAT colfits format\n\n :param str path: path for file\n :param progress: {progress}\n :return:\n \"\"\"\n from vaex.astro.fits import export_fits\n export_fits(self, path, progress=progress)\n\n @docsubst\n def export_csv(self, path, progress=None, chunk_size=default_chunk_size, parallel=True, **kwargs):\n \"\"\" Exports the DataFrame to a CSV file.\n\n :param str path: Path for file\n :param progress: {progress}\n :param int chunk_size: {chunk_size_export}\n :param parallel: {evaluate_parallel}\n :param **kwargs: Extra keyword arguments to be passed on pandas.DataFrame.to_csv()\n :return:\n \"\"\"\n import pandas as pd\n expressions = self.get_column_names()\n progressbar = vaex.utils.progressbars(progress, title=\"export(csv)\")\n dtypes = self[expressions].dtypes\n n_samples = len(self)\n if chunk_size is None:\n chunk_size = len(self)\n\n # By default vaex does not expect a csv file to have index like column so this is turned of by default\n if 'index' not in kwargs:\n kwargs['index'] = False\n\n for i1, i2, chunks in self.evaluate_iterator(expressions, chunk_size=chunk_size, parallel=parallel):\n progressbar( i1 / n_samples)\n chunk_dict = {col: values for col, values in zip(expressions, chunks)}\n chunk_pdf = pd.DataFrame(chunk_dict)\n\n if i1 == 0: # Only the 1st chunk should have a header and the rest will be appended\n kwargs['mode'] = 'w'\n else:\n kwargs['mode'] = 'a'\n kwargs['header'] = False\n\n chunk_pdf.to_csv(path_or_buf=path, **kwargs)\n progressbar(1.0)\n return\n\n @docsubst\n def export_json(self, to, progress=None, chunk_size=default_chunk_size, parallel=True, fs_options=None, fs=None):\n \"\"\" Exports the DataFrame to a CSV file.\n\n :param to: filename or file object\n :param progress: {progress}\n :param int chunk_size: {chunk_size_export}\n :param parallel: {evaluate_parallel}\n :param fs_options: {fs_options}\n :param fs: {fs}\n :return:\n \"\"\"\n json = None # we may want to pass the module as parameter to use a faster library\n import json as json_std\n json = json or json_std\n\n # not sure if we want to use pandas, it will treat datetime for us, but will convert null to nan\n use_pandas = True\n\n # we take on the '[' and ']' from each chunk, and insert it back ourselves\n # and we also need to but ',' between each chunk\n with vaex.progress.tree(progress, title=\"export(json)\"), vaex.file.open(path=to, mode='wb', fs_options=fs_options, fs=fs) as f:\n f.write(b\"[\")\n first = True\n if use_pandas:\n for _i1, _i2, df in self.to_pandas_df(chunk_size=chunk_size, parallel=parallel):\n if not first:\n f.write(b\", \")\n first = False\n f_temp = io.StringIO()\n df.to_json(f_temp, orient='records')\n f.write(f_temp.getvalue()[1:-1].encode('utf8'))\n else:\n for _i1, _i2, records in 
self.to_records(chunk_size=chunk_size, parallel=parallel):\n if not first:\n f.write(b\", \")\n first = False\n raw = json.dumps(records)[1:-1]\n f.write(raw.encode(\"utf8\"))\n f.write(b\"]\")\n\n def _needs_copy(self, column_name):\n import vaex.file.other\n return not \\\n ((column_name in self.column_names and not\n isinstance(self.columns[column_name], Column) and not\n isinstance(self.columns[column_name], vaex.file.other.DatasetTap.TapColumn) and\n self.columns[column_name].dtype.type == np.float64 and\n self.columns[column_name].strides[0] == 8 and\n column_name not in\n self.virtual_columns) or self.data_type(column_name) == str_type or self.data_type(column_name).kind == 'S')\n # and False:\n\n def selected_length(self, selection=\"default\"):\n \"\"\"The local implementation of :func:`DataFrame.selected_length`\"\"\"\n return int(self.count(selection=selection).item())\n # np.sum(self.mask) if self.has_selection() else None\n\n # def _set_mask(self, mask):\n # self.mask = mask\n # self._has_selection = mask is not None\n # # self.signal_selection_changed.emit(self)\n\n @docsubst\n def groupby(self, by=None, agg=None, sort=False, ascending=True, assume_sparse='auto', row_limit=None, copy=True, progress=None, delay=False):\n \"\"\"Return a :class:`GroupBy` or :class:`DataFrame` object when agg is not None\n\n Examples:\n\n >>> import vaex\n >>> import numpy as np\n >>> np.random.seed(42)\n >>> x = np.random.randint(1, 5, 10)\n >>> y = x**2\n >>> df = vaex.from_arrays(x=x, y=y)\n >>> df.groupby(df.x, agg='count')\n # x y_count\n 0 3 4\n 1 4 2\n 2 1 3\n 3 2 1\n >>> df.groupby(df.x, agg=[vaex.agg.count('y'), vaex.agg.mean('y')])\n # x y_count y_mean\n 0 3 4 9\n 1 4 2 16\n 2 1 3 1\n 3 2 1 4\n >>> df.groupby(df.x, agg={{'z': [vaex.agg.count('y'), vaex.agg.mean('y')]}})\n # x z_count z_mean\n 0 3 4 9\n 1 4 2 16\n 2 1 3 1\n 3 2 1 4\n\n Example using datetime:\n\n >>> import vaex\n >>> import numpy as np\n >>> t = np.arange('2015-01-01', '2015-02-01', dtype=np.datetime64)\n >>> y = np.arange(len(t))\n >>> df = vaex.from_arrays(t=t, y=y)\n >>> df.groupby(vaex.BinnerTime.per_week(df.t)).agg({{'y' : 'sum'}})\n # t y\n 0 2015-01-01 00:00:00 21\n 1 2015-01-08 00:00:00 70\n 2 2015-01-15 00:00:00 119\n 3 2015-01-22 00:00:00 168\n 4 2015-01-29 00:00:00 87\n\n\n :param dict, list or agg agg: Aggregate operation in the form of a string, vaex.agg object, a dictionary\n where the keys indicate the target column names, and the values the operations, or the a list of aggregates.\n When not given, it will return the groupby object.\n :param bool sort: Sort columns for which we group by.\n :param bool or list of bools ascending: ascending (default, True) or descending (False).\n :param bool or str assume_sparse: Assume that when grouping by multiple keys, that the existing pairs are sparse compared to the cartesian product.\n If 'auto', let vaex decide (e.g. 
a groupby with 10_000 rows but only 4*3=12 combinations does not matter much to compress into say 8 existing\n combinations, and will save another pass over the data)\n :param int row_limit: Limits the resulting dataframe to the number of rows (default is not to check, only works when assume_sparse is True).\n Throws a :py:`vaex.RowLimitException` when the condition is not met.\n :param bool copy: Copy the dataframe (shallow, does not cost memory) so that the fingerprint of the original dataframe is not modified.\n :param bool delay: {delay}\n :param progress: {progress}\n :return: :class:`DataFrame` or :class:`GroupBy` object.\n \"\"\"\n from .groupby import GroupBy\n progressbar = vaex.utils.progressbars(progress, title=\"groupby\")\n groupby = GroupBy(self, by=by, sort=sort, ascending=ascending, combine=assume_sparse, row_limit=row_limit, copy=copy, progress=progressbar)\n if agg:\n progressbar_agg = progressbar.add('aggregators')\n @vaex.delayed\n def next(_ignore):\n if agg is None:\n return groupby\n else:\n return groupby.agg(agg, delay=delay, progress=progressbar_agg)\n return self._delay(delay, progressbar.exit_on(next(groupby._promise_by)))\n\n @docsubst\n def binby(self, by=None, agg=None, sort=False, copy=True, delay=False, progress=None):\n \"\"\"Return a :class:`BinBy` or :class:`DataArray` object when agg is not None\n\n The binby operation does not return a 'flat' DataFrame, instead it returns an N-d grid\n in the form of an xarray.\n\n\n :param dict, list or agg agg: Aggregate operation in the form of a string, vaex.agg object, a dictionary\n where the keys indicate the target column names, and the values the operations, or the a list of aggregates.\n When not given, it will return the binby object.\n :param bool copy: Copy the dataframe (shallow, does not cost memory) so that the fingerprint of the original dataframe is not modified.\n :param bool delay: {delay}\n :param progress: {progress}\n :return: :class:`DataArray` or :class:`BinBy` object.\n \"\"\"\n from .groupby import BinBy\n progressbar = vaex.utils.progressbars(progress, title=\"binby\")\n binby = BinBy(self, by=by, sort=sort, progress=progressbar, copy=copy)\n if agg:\n progressbar_agg = progressbar.add('aggregators')\n @vaex.delayed\n def next(_ignore):\n if agg is None:\n return binby\n else:\n return binby.agg(agg, delay=delay, progress=progressbar_agg)\n return self._delay(delay, progressbar.exit_on(next(binby._promise_by)))\n\n def _selection(self, create_selection, name, executor=None, execute_fully=False):\n def create_wrapper(current):\n selection = create_selection(current)\n # only create a mask when we have a selection, so we do not waste memory\n if selection is not None and name not in self._selection_masks:\n self._selection_masks[name] = vaex.superutils.Mask(int(self._length_unfiltered))\n return selection\n return super()._selection(create_wrapper, name, executor, execute_fully)\n\n @property\n def values(self):\n \"\"\"Gives a full memory copy of the DataFrame into a 2d numpy array of shape (n_rows, n_columns).\n Note that the memory order is fortran, so all values of 1 column are contiguous in memory for performance reasons.\n\n Note this returns the same result as:\n\n >>> np.array(ds)\n\n If any of the columns contain masked arrays, the masks are ignored (i.e. 
the masked elements are returned as well).\n \"\"\"\n return self.__array__()\n\n\ndef _is_dtype_ok(dtype):\n return dtype.type in [np.bool_, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16,\n np.uint32, np.uint64, np.float32, np.float64, np.datetime64] or\\\n dtype.type == np.string_ or dtype.type == np.unicode_\n\n\ndef _is_array_type_ok(array):\n return _is_dtype_ok(array.dtype)\n\n\n# there represent the spec version of the cpu based vaex.superagg.BinnerScalar/Ordinal_<dtype>\nregister_binner = vaex.encoding.make_class_registery('binner')\n\n\nclass BinnerBase:\n @classmethod\n def decode(cls, encoding, spec):\n spec = spec.copy()\n spec['dtype'] = encoding.decode('dtype', spec['dtype'])\n return cls(**spec)\n\n\n@register_binner\nclass BinnerScalar(BinnerBase):\n snake_name = 'scalar'\n def __init__(self, expression, minimum, maximum, count, dtype):\n self.expression = str(expression)\n self.minimum = minimum\n self.maximum = maximum\n self.count = count\n self.dtype = dtype\n\n def __repr__(self):\n return f'binner_scalar({self.expression}, {self.minimum}, {self.maximum}, count={self.count})'\n\n def encode(self, encoding):\n dtype = encoding.encode('dtype', self.dtype)\n return {'expression': self.expression, 'dtype': dtype, 'count': self.count, 'minimum': self.minimum, 'maximum': self.maximum}\n\n def __hash__(self) -> int:\n return hash((self.__class__.__name__, self.expression, self.minimum, self.maximum, self.count, self.dtype))\n\n def __eq__(self, rhs):\n if not isinstance(rhs, BinnerScalar):\n return False\n return \\\n self.expression == rhs.expression and \\\n self.minimum == rhs.minimum and \\\n self.maximum == rhs.maximum and \\\n self.count == rhs.count and \\\n self.dtype == rhs.dtype\n\n\n@register_binner\nclass BinnerOrdinal(BinnerBase):\n snake_name = 'ordinal'\n def __init__(self, expression, minimum, count, invert, dtype):\n self.expression = str(expression)\n self.minimum = minimum\n self.count = count\n self.invert = invert\n self.dtype = dtype\n\n def __repr__(self):\n return f'binner_ordinal({self.expression}, {self.minimum}, {self.count}, {self.invert})'\n\n def encode(self, encoding):\n datatype = encoding.encode(\"dtype\", self.dtype)\n return {\"type\": \"ordinal\", \"expression\": self.expression, \"dtype\": datatype, \"count\": self.count, \"minimum\": self.minimum, \"invert\": self.invert}\n\n def __hash__(self) -> int:\n return hash((self.__class__.__name__, self.expression, self.minimum, self.count, self.invert, self.dtype))\n\n def __eq__(self, rhs):\n if not isinstance(rhs, BinnerOrdinal):\n return False\n return \\\n self.expression == rhs.expression and \\\n self.minimum == rhs.minimum and \\\n self.count == rhs.count and \\\n self.invert == rhs.invert and \\\n self.dtype == rhs.dtype\n\n\n@register_binner\nclass BinnerHash(BinnerBase):\n snake_name = 'hash'\n def __init__(self, expression, hash_map_unique, dtype):\n self.expression = str(expression)\n self.hash_map_unique = hash_map_unique\n self.dtype = dtype\n\n def __repr__(self):\n return f'binner_hash({self.expression}, {self.hash_map_unique})'\n\n def encode(self, encoding):\n datatype = encoding.encode('dtype', self.dtype)\n hash_map_spec = encoding.encode('hash-map-unique', self.hash_map_unique)\n assert self.hash_map_unique.fingerprint\n hash_map_id = vaex.cache.fingerprint('binner-hash', self.hash_map_unique.fingerprint)\n encoding.set_object_spec(hash_map_id, hash_map_spec)\n return {'expression': self.expression, 'hash_map_unique': hash_map_id, 'dtype': datatype}\n\n def 
__hash__(self) -> int:\n return hash((self.__class__.__name__, self.expression, self.hash_map_unique))\n\n def __eq__(self, rhs):\n if not isinstance(rhs, BinnerHash):\n return False\n return \\\n self.expression == rhs.expression and \\\n self.hash_map_unique == rhs.hash_map_unique and\\\n self.dtype == rhs.dtype\n" ]
[ [ "numpy.can_cast", "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.cumsum", "pandas.DataFrame", "numpy.all", "numpy.ma.array", "numpy.ma.getmaskarray", "numpy.unique", "numpy.sin", "numpy.ceil", "numpy.argmax", "numpy.asanyarray", "numpy.interp", "numpy.zeros", "numpy.ma.isMaskedArray", "numpy.isnan", "numpy.deg2rad", "numpy.delete", "numpy.floor", "numpy.errstate", "numpy.argsort", "numpy.array", "numpy.random.RandomState", "numpy.sum", "numpy.diagonal", "numpy.abs", "numpy.cos", "numpy.float64", "numpy.ma.masked_array", "numpy.ndindex" ] ]
zhigenzhao/stable-baselines3
[ "a69a4f0497849d9c20f6b77870ca77ae92f5c7bb" ]
[ "Kuka_examples/plot_monitor.py" ]
[ "from matplotlib import pyplot as plt\nimport numpy as np\nimport csv\n\n\ndef main():\n filename = \"/home/zhigen/code/stable-baselines3/Kuka_examples/saved_models/monitor (copy).csv\"\n with open(filename) as csvfile:\n training_monitor = csv.reader(csvfile)\n \n reward = []\n for (i, row) in enumerate(training_monitor):\n if i > 1:\n reward.append(float(row[0]))\n reward = np.array(reward)\n\n reward_vec = []\n reward_std = []\n i = 0\n n = 5000\n while i < len(reward):\n if i+n < len(reward):\n temp = reward[i:i+n]\n else:\n temp = reward[i:]\n \n m = np.mean(temp)\n s = np.std(temp)\n\n reward_vec.append(m)\n reward_std.append(s)\n\n i += n\n\n\n plt.plot(np.arange(len(reward_vec))*n, -np.array(reward_vec), linewidth=3)\n plt.plot(np.arange(len(reward_vec))*n, np.ones_like(reward_vec)*1750, linestyle=\"dashed\", linewidth=3)\n plt.fill_between(np.arange(len(reward_vec))*n, \n -np.array(reward_vec)+np.array(reward_std),\n -np.array(reward_vec)-np.array(reward_std),\n alpha=0.5)\n plt.yscale(\"log\")\n plt.xlabel(\"Timestep\", fontsize=24)\n plt.xticks(fontsize=14)\n plt.ylabel(\"Cost\", fontsize=24)\n plt.yticks(fontsize=16)\n plt.legend([\"PPO\", \"VERONICA baseline\"], fontsize=16)\n plt.xlim([0, 300000])\n plt.show()\n\nif __name__==\"__main__\":\n main()" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.yticks", "numpy.ones_like", "matplotlib.pyplot.yscale", "matplotlib.pyplot.xlim", "numpy.std", "numpy.mean", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
craigmayhew/aws_transcribe_to_docx
[ "1aded9a7c0f14b8242991e564aec3275fc9f83f9" ]
[ "tscribe/__init__.py" ]
[ "\"\"\" Produce Word Document transcriptions using the automatic speech recognition from AWS Transcribe. \"\"\"\n\nfrom docx import Document\nfrom docx.shared import Cm, Mm, Inches, RGBColor\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\nimport json, datetime\nimport matplotlib.pyplot as plt\nimport statistics\n\n\ndef convert_time_stamp(n):\n \"\"\" Function to help convert timestamps from s to H:M:S \"\"\"\n ts = datetime.timedelta(seconds=float(n))\n ts = ts - datetime.timedelta(microseconds=ts.microseconds)\n return str(ts)\n\n\ndef write(file, **kwargs):\n \"\"\" Write a transcript from the .json transcription file. \"\"\"\n\n # Initiate Document\n document = Document()\n # A4 Size\n document.sections[0].page_width = Mm(210)\n document.sections[0].page_height = Mm(297)\n # Font\n font = document.styles['Normal'].font\n font.name = 'Calibri'\n\n # Load Transcription output\n data = json.load(open(file, 'r', encoding='utf-8'))\n\n # Document title and intro\n title = f\"Transcription of {data['jobName']}\"\n document.add_heading(title, level=1)\n # Set thresholds for formatting later\n threshold_for_grey = 0.98\n # Intro\n document.add_paragraph('Transcription using AWS Transcribe automatic speech recognition.')\n document.add_paragraph(datetime.datetime.now().strftime('Document produced on %A %d %B %Y at %X.'))\n document.add_paragraph() # Spacing\n document.add_paragraph(f\"Grey text has less than {int(threshold_for_grey * 100)}% confidence.\")\n\n # Stats dictionary\n stats = {\n 'timestamps': [],\n 'accuracy': [],\n '9.8': 0, '9': 0, '8': 0, '7': 0, '6': 0, '5': 0, '4': 0, '3': 0, '2': 0, '1': 0, '0': 0,\n 'total': len(data['results']['items'])}\n\n # Confidence count\n for item in data['results']['items']:\n if item['type'] == 'pronunciation':\n stats['timestamps'].append(float(item['start_time']))\n stats['accuracy'].append(int(float(item['alternatives'][0]['confidence']) * 100))\n if float(item['alternatives'][0]['confidence']) >= 0.98: stats['9.8'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.9: stats['9'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.8: stats['8'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.7: stats['7'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.6: stats['6'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.5: stats['5'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.4: stats['4'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.3: stats['3'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.2: stats['2'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.1: stats['1'] += 1\n else: stats['0'] += 1\n # Display confidence count table\n table = document.add_table(rows=1, cols=3)\n table.style = document.styles['Light List Accent 1']\n table.alignment = WD_ALIGN_PARAGRAPH.CENTER\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Confidence'\n hdr_cells[1].text = 'Count'\n hdr_cells[2].text = 'Percentage'\n row_cells = table.add_row().cells\n row_cells[0].text = str('98% - 100%')\n row_cells[1].text = str(stats['9.8'])\n row_cells[2].text = str(round(stats['9.8'] / stats['total'] * 100, 2)) + '%'\n row_cells = table.add_row().cells\n row_cells[0].text = str('90% - 97%')\n row_cells[1].text = str(stats['9'])\n row_cells[2].text = str(round(stats['9'] / stats['total'] * 100, 2)) + '%'\n row_cells = table.add_row().cells\n row_cells[0].text = str('80% - 89%')\n row_cells[1].text = str(stats['8'])\n row_cells[2].text = 
str(round(stats['8'] / stats['total'] * 100, 2)) + '%'\n row_cells = table.add_row().cells\n row_cells[0].text = str('70% - 79%')\n row_cells[1].text = str(stats['7'])\n row_cells[2].text = str(round(stats['7'] / stats['total'] * 100, 2)) + '%'\n row_cells = table.add_row().cells\n row_cells[0].text = str('60% - 69%')\n row_cells[1].text = str(stats['6'])\n row_cells[2].text = str(round(stats['6'] / stats['total'] * 100, 2)) + '%'\n row_cells = table.add_row().cells\n row_cells[0].text = str('50% - 59%')\n row_cells[1].text = str(stats['5'])\n row_cells[2].text = str(round(stats['5'] / stats['total'] * 100, 2)) + '%'\n row_cells = table.add_row().cells\n row_cells[0].text = str('40% - 49%')\n row_cells[1].text = str(stats['4'])\n row_cells[2].text = str(round(stats['4'] / stats['total'] * 100, 2)) + '%'\n row_cells = table.add_row().cells\n row_cells[0].text = str('30% - 39%')\n row_cells[1].text = str(stats['3'])\n row_cells[2].text = str(round(stats['3'] / stats['total'] * 100, 2)) + '%'\n row_cells = table.add_row().cells\n row_cells[0].text = str('20% - 29%')\n row_cells[1].text = str(stats['2'])\n row_cells[2].text = str(round(stats['2'] / stats['total'] * 100, 2)) + '%'\n row_cells = table.add_row().cells\n row_cells[0].text = str('10% - 19%')\n row_cells[1].text = str(stats['1'])\n row_cells[2].text = str(round(stats['1'] / stats['total'] * 100, 2)) + '%'\n row_cells = table.add_row().cells\n row_cells[0].text = str('0% - 9%')\n row_cells[1].text = str(stats['0'])\n row_cells[2].text = str(round(stats['0'] / stats['total'] * 100, 2)) + '%'\n # Add paragraph for spacing\n document.add_paragraph()\n # Display scatter graph of confidence\n # Confidence of each word as scatter graph\n plt.scatter(stats['timestamps'], stats['accuracy'])\n # Mean average as line across graph\n plt.plot([stats['timestamps'][0], stats['timestamps'][-1]],\n [statistics.mean(stats['accuracy']), statistics.mean(stats['accuracy'])], 'r')\n # Formatting\n plt.xlabel('Time (seconds)')\n # plt.xticks(range(0, int(stats['timestamps'][-1]), 60))\n plt.ylabel('Accuracy (percent)')\n plt.yticks(range(0, 101, 10))\n plt.title('Accuracy during video')\n plt.legend(['Accuracy average (mean)', 'Individual words'], loc='lower center')\n\n # not all file systems are writable, so we allow specifying a writable tmp directory\n # alternatively if it is not set, we use ./\n tmp_dir = kwargs.get('tmp_dir', \"./\")\n chart_file_name = tmp_dir+'chart.png'\n\n plt.savefig(chart_file_name)\n document.add_picture(chart_file_name, width=Cm(14.64))\n document.paragraphs[-1].alignment = WD_ALIGN_PARAGRAPH.CENTER\n document.add_page_break()\n\n # Process and display transcript by speaker segments\n table = document.add_table(rows=1, cols=3)\n table.style = document.styles['Light List Accent 1']\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Time'\n hdr_cells[1].text = 'Speaker'\n hdr_cells[2].text = 'Content'\n\n # If speaker identification\n if 'speaker_labels' in data['results'].keys():\n for segment in data['results']['speaker_labels']['segments']:\n # If there is content in the segment\n if len(segment['items']) > 0:\n # Add a row, write the time and speaker\n row_cells = table.add_row().cells\n row_cells[0].text = convert_time_stamp(segment['start_time'])\n row_cells[1].text = str(segment['speaker_label'])\n\n # Segments group individual word results by speaker. 
They are cross-referenced by time.\n # For each word in the segment...\n for word in segment['items']:\n # Run through the word results and get the corresponding result\n for result in data['results']['items']:\n if result['type'] == 'pronunciation':\n if result['start_time'] == word['start_time']:\n\n # Get the word with the highest confidence\n if len(result['alternatives']) > 0:\n current_word = dict()\n confidence_scores = []\n for score in result['alternatives']:\n confidence_scores.append(score['confidence'])\n for alternative in result['alternatives']:\n if alternative['confidence'] == max(confidence_scores):\n current_word = alternative.copy()\n\n # Write and format the word\n run = row_cells[2].paragraphs[0].add_run(' ' + current_word['content'])\n if float(current_word['confidence']) < threshold_for_grey:\n font = run.font\n font.color.rgb = RGBColor(204, 204, 204)\n\n # If the next item is punctuation, add it\n try:\n if data['results']['items'][data['results']['items'].index(result) + 1]['type'] == 'punctuation':\n run = row_cells[2].paragraphs[0].add_run(data['results']['items'][data['results']['items'].index(result) + 1]['alternatives'][0]['content'])\n # Occasional IndexErrors encountered\n except:\n pass\n # Else no speaker identification\n else:\n # Run through the word results\n\n # Start the first row\n row_cells = table.add_row().cells\n row_cells[0].text = convert_time_stamp(data['results']['items'][0]['start_time'])\n\n # Add words\n for result in data['results']['items']:\n if result['type'] == 'pronunciation':\n # Write the time if it's not yet there\n if table.cell(-1, 0).text == \"\":\n table.cell(-1, 0).text = convert_time_stamp(result[\"start_time\"])\n\n # Get the word with the highest confidence\n if len(result['alternatives']) > 0:\n current_word = dict()\n confidence_scores = []\n for score in result['alternatives']:\n confidence_scores.append(score['confidence'])\n for alternative in result['alternatives']:\n if alternative['confidence'] == max(confidence_scores):\n current_word = alternative.copy()\n\n # Write and format the word\n run = table.cell(-1, 2).paragraphs[0].add_run(' ' + current_word['content'])\n if float(current_word['confidence']) < threshold_for_grey:\n font = run.font\n font.color.rgb = RGBColor(204, 204, 204)\n\n # If the next item is punctuation, add it and start a new row\n elif result['type'] == 'punctuation':\n\n # Get the punctuation with the highest confidence\n if len(result['alternatives']) > 0:\n current_word = dict()\n confidence_scores = []\n for score in result['alternatives']:\n confidence_scores.append(score['confidence'])\n for alternative in result['alternatives']:\n if alternative['confidence'] == max(confidence_scores):\n current_word = alternative.copy()\n\n # Write and format the word\n run = table.cell(-1, 2).paragraphs[0].add_run(current_word['content'])\n table.add_row().cells\n\n # Formatting transcript table widthds\n widths = (Inches(0.6), Inches(1), Inches(4.5))\n for row in table.rows:\n for idx, width in enumerate(widths):\n row.cells[idx].width = width\n\n # Save\n filename = kwargs.get('save_as', f\"{data['jobName']}.docx\")\n document.save(filename)\n print(f\"Transcript {filename} writen.\")\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.scatter", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
FloWuenne/semi-auto-image-annotation-tool
[ "f48f521c480a436bafb148b4ecaf9f2c4a898211" ]
[ "main.py" ]
[ "\"\"\"\nCopyright {2018} {Viraj Mavani}\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\"\"\"\n\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk\n\nimport keras\n\nfrom keras_retinanet import models\nfrom keras_retinanet.utils.image import preprocess_image\n\n# import miscellaneous modules\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport config\nimport math\n\n\ndef get_session():\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n return tf.compat.v1.Session(config=config)\n\n\ntf.compat.v1.keras.backend.set_session(get_session())\n\nmodel_path = os.path.join('.', 'snapshots', 'resnet50_coco_best_v2.1.0.h5')\n\nmodel = models.load_model(model_path, backbone_name='resnet50')\n\n\nclass MainGUI:\n def __init__(self, master):\n self.parent = master\n self.parent.title(\"Semi Automatic Image Annotation Tool\")\n self.frame = Frame(self.parent)\n self.frame.pack(fill=BOTH, expand=1)\n self.parent.resizable(width=False, height=False)\n\n # Initialize class variables\n self.img = None\n self.tkimg = None\n self.imageDir = ''\n self.imageDirPathBuffer = ''\n self.imageList = []\n self.imageTotal = 0\n self.imageCur = 0\n self.cur = 0\n self.bboxIdList = []\n self.bboxList = []\n self.bboxPointList = []\n self.o1 = None\n self.o2 = None\n self.o3 = None\n self.o4 = None\n self.bboxId = None\n self.currLabel = None\n self.editbboxId = None\n self.currBboxColor = None\n self.zoomImgId = None\n self.zoomImg = None\n self.zoomImgCrop = None\n self.tkZoomImg = None\n self.hl = None\n self.vl = None\n self.editPointId = None\n self.filename = None\n self.filenameBuffer = None\n self.objectLabelList = []\n self.EDIT = False\n\n # initialize mouse state\n self.STATE = {'x': 0, 'y': 0}\n self.STATE_COCO = {'click': 0}\n\n # initialize annotation file\n self.anno_filename = 'annotations.csv'\n self.annotation_file = open('annotations/' + self.anno_filename, 'w+')\n self.annotation_file.write(\"\")\n self.annotation_file.close()\n\n # ------------------ GUI ---------------------\n\n # Control Panel\n self.ctrlPanel = Frame(self.frame)\n self.ctrlPanel.grid(row=0, column=0, sticky=W + N)\n self.openBtn = Button(self.ctrlPanel, text='Open', command=self.open_image)\n self.openBtn.pack(fill=X, side=TOP)\n self.openDirBtn = Button(self.ctrlPanel, text='Open Dir', command=self.open_image_dir)\n self.openDirBtn.pack(fill=X, side=TOP)\n self.nextBtn = Button(self.ctrlPanel, text='Next -->', command=self.open_next)\n self.nextBtn.pack(fill=X, side=TOP)\n self.previousBtn = Button(self.ctrlPanel, text='<-- Previous', command=self.open_previous)\n self.previousBtn.pack(fill=X, side=TOP)\n self.saveBtn = Button(self.ctrlPanel, text='Save', command=self.save)\n self.saveBtn.pack(fill=X, side=TOP)\n self.semiAutoBtn = Button(self.ctrlPanel, text=\"Show Suggestions\", command=self.automate)\n self.semiAutoBtn.pack(fill=X, side=TOP)\n self.disp = Label(self.ctrlPanel, text='Coordinates:')\n self.disp.pack(fill=X, side=TOP)\n self.mb = Menubutton(self.ctrlPanel, text=\"COCO Classes for Suggestions\", relief=RAISED)\n self.mb.pack(fill=X, side=TOP)\n self.mb.menu = Menu(self.mb, tearoff=0)\n self.mb[\"menu\"] = self.mb.menu\n self.addCocoBtn = Button(self.ctrlPanel, text=\"+\", command=self.add_labels_coco)\n self.addCocoBtn.pack(fill=X, side=TOP)\n 
self.zoomPanelLabel = Label(self.ctrlPanel, text=\"Precision View Panel\")\n self.zoomPanelLabel.pack(fill=X, side=TOP)\n self.zoomcanvas = Canvas(self.ctrlPanel, width=150, height=150)\n self.zoomcanvas.pack(fill=X, side=TOP, anchor='center')\n\n # Image Editing Region\n self.canvas = Canvas(self.frame, width=500, height=500)\n self.canvas.grid(row=0, column=1, sticky=W + N)\n self.canvas.bind(\"<Button-1>\", self.mouse_click)\n self.canvas.bind(\"<Motion>\", self.mouse_move, \"+\")\n self.canvas.bind(\"<B1-Motion>\", self.mouse_drag)\n self.canvas.bind(\"<ButtonRelease-1>\", self.mouse_release)\n self.parent.bind(\"<Key-Left>\", self.open_previous)\n self.parent.bind(\"<Key-Right>\", self.open_next)\n self.parent.bind(\"Escape\", self.cancel_bbox)\n\n # Labels and Bounding Box Lists Panel\n self.listPanel = Frame(self.frame)\n self.listPanel.grid(row=0, column=2, sticky=W + N)\n self.listBoxNameLabel = Label(self.listPanel, text=\"List of Objects\").pack(fill=X, side=TOP)\n self.objectListBox = Listbox(self.listPanel, width=40)\n self.objectListBox.pack(fill=X, side=TOP)\n self.delObjectBtn = Button(self.listPanel, text=\"Delete\", command=self.del_bbox)\n self.delObjectBtn.pack(fill=X, side=TOP)\n self.clearAllBtn = Button(self.listPanel, text=\"Clear All\", command=self.clear_bbox)\n self.clearAllBtn.pack(fill=X, side=TOP)\n self.classesNameLabel = Label(self.listPanel, text=\"Classes\").pack(fill=X, side=TOP)\n self.textBox = Entry(self.listPanel, text=\"Enter label\")\n self.textBox.pack(fill=X, side=TOP)\n\n self.addLabelBtn = Button(self.listPanel, text=\"+\", command=self.add_label).pack(fill=X, side=TOP)\n self.delLabelBtn = Button(self.listPanel, text=\"-\", command=self.del_label).pack(fill=X, side=TOP)\n\n self.labelListBox = Listbox(self.listPanel)\n self.labelListBox.pack(fill=X, side=TOP)\n\n self.cocoLabels = config.labels_to_names.values()\n self.cocoIntVars = []\n\n for idxcoco, label_coco in enumerate(self.cocoLabels):\n self.cocoIntVars.append(IntVar())\n self.mb.menu.add_checkbutton(label=label_coco, variable=self.cocoIntVars[idxcoco])\n # print(self.cocoIntVars)\n\n # STATUS BAR\n self.statusBar = Frame(self.frame, width=500)\n self.statusBar.grid(row=1, column=1, sticky=W + N)\n self.processingLabel = Label(self.statusBar, text=\" \")\n self.processingLabel.pack(side=\"left\", fill=X)\n self.imageIdxLabel = Label(self.statusBar, text=\" \")\n self.imageIdxLabel.pack(side=\"right\", fill=X)\n\n def open_image(self):\n self.filename = filedialog.askopenfilename(title=\"Select Image\", filetypes=((\"jpeg files\", \"*.jpg\"),\n (\"all files\", \"*.*\")))\n if not self.filename:\n return None\n self.filenameBuffer = self.filename\n self.load_image(self.filenameBuffer)\n\n def open_image_dir(self):\n self.imageDir = filedialog.askdirectory(title=\"Select Dataset Directory\")\n if not self.imageDir:\n return None\n self.imageList = os.listdir(self.imageDir)\n self.imageList = sorted(self.imageList)\n self.imageTotal = len(self.imageList)\n self.filename = None\n self.imageDirPathBuffer = self.imageDir\n self.load_image(self.imageDirPathBuffer + '/' + self.imageList[self.cur])\n\n def load_image(self, file):\n self.img = Image.open(file)\n self.imageCur = self.cur + 1\n self.imageIdxLabel.config(text=' || Image Number: %d / %d' % (self.imageCur, self.imageTotal))\n # Resize to Pascal VOC format\n w, h = self.img.size\n if w >= h:\n baseW = 500\n wpercent = (baseW / float(w))\n hsize = int((float(h) * float(wpercent)))\n self.img = self.img.resize((baseW, hsize), 
Image.BICUBIC)\n else:\n baseH = 500\n wpercent = (baseH / float(h))\n wsize = int((float(w) * float(wpercent)))\n self.img = self.img.resize((wsize, baseH), Image.BICUBIC)\n\n self.tkimg = ImageTk.PhotoImage(self.img)\n self.canvas.create_image(0, 0, image=self.tkimg, anchor=NW)\n self.clear_bbox()\n\n def open_next(self, event=None):\n self.save()\n if self.cur < len(self.imageList):\n self.cur += 1\n self.load_image(self.imageDirPathBuffer + '/' + self.imageList[self.cur])\n self.processingLabel.config(text=\" \")\n self.processingLabel.update_idletasks()\n\n def open_previous(self, event=None):\n self.save()\n if self.cur > 0:\n self.cur -= 1\n self.load_image(self.imageDirPathBuffer + '/' + self.imageList[self.cur])\n self.processingLabel.config(text=\" \")\n self.processingLabel.update_idletasks()\n\n def save(self):\n if self.filenameBuffer is None:\n self.annotation_file = open('annotations/' + self.anno_filename, 'a')\n for idx, item in enumerate(self.bboxList):\n self.annotation_file.write(self.imageDirPathBuffer + '/' + self.imageList[self.cur] + ',' +\n ','.join(map(str, self.bboxList[idx])) + ',' + str(self.objectLabelList[idx])\n + '\\n')\n self.annotation_file.close()\n else:\n self.annotation_file = open('annotations/' + self.anno_filename, 'a')\n for idx, item in enumerate(self.bboxList):\n self.annotation_file.write(self.filenameBuffer + ',' + ','.join(map(str, self.bboxList[idx])) + ','\n + str(self.objectLabelList[idx]) + '\\n')\n self.annotation_file.close()\n\n def mouse_click(self, event):\n # Check if Updating BBox\n if self.canvas.find_enclosed(event.x - 5, event.y - 5, event.x + 5, event.y + 5):\n self.EDIT = True\n self.editPointId = int(self.canvas.find_enclosed(event.x - 5, event.y - 5, event.x + 5, event.y + 5)[0])\n else:\n self.EDIT = False\n\n # Set the initial point\n if self.EDIT:\n idx = self.bboxPointList.index(self.editPointId)\n self.editbboxId = self.bboxIdList[math.floor(idx/4.0)]\n self.bboxId = self.editbboxId\n pidx = self.bboxIdList.index(self.editbboxId)\n pidx = pidx * 4\n self.o1 = self.bboxPointList[pidx]\n self.o2 = self.bboxPointList[pidx + 1]\n self.o3 = self.bboxPointList[pidx + 2]\n self.o4 = self.bboxPointList[pidx + 3]\n if self.editPointId == self.o1:\n a, b, c, d = self.canvas.coords(self.o3)\n elif self.editPointId == self.o2:\n a, b, c, d = self.canvas.coords(self.o4)\n elif self.editPointId == self.o3:\n a, b, c, d = self.canvas.coords(self.o1)\n elif self.editPointId == self.o4:\n a, b, c, d = self.canvas.coords(self.o2)\n self.STATE['x'], self.STATE['y'] = int((a+c)/2), int((b+d)/2)\n else:\n self.STATE['x'], self.STATE['y'] = event.x, event.y\n\n def mouse_drag(self, event):\n self.mouse_move(event)\n if self.bboxId:\n self.currBboxColor = self.canvas.itemcget(self.bboxId, \"outline\")\n self.canvas.delete(self.bboxId)\n self.canvas.delete(self.o1)\n self.canvas.delete(self.o2)\n self.canvas.delete(self.o3)\n self.canvas.delete(self.o4)\n if self.EDIT:\n self.bboxId = self.canvas.create_rectangle(self.STATE['x'], self.STATE['y'],\n event.x, event.y,\n width=2,\n outline=self.currBboxColor)\n else:\n self.currBboxColor = config.COLORS[len(self.bboxList) % len(config.COLORS)]\n self.bboxId = self.canvas.create_rectangle(self.STATE['x'], self.STATE['y'],\n event.x, event.y,\n width=2,\n outline=self.currBboxColor)\n\n def mouse_move(self, event):\n self.disp.config(text='x: %d, y: %d' % (event.x, event.y))\n self.zoom_view(event)\n if self.tkimg:\n # Horizontal and Vertical Line for precision\n if self.hl:\n 
self.canvas.delete(self.hl)\n self.hl = self.canvas.create_line(0, event.y, self.tkimg.width(), event.y, width=2)\n if self.vl:\n self.canvas.delete(self.vl)\n self.vl = self.canvas.create_line(event.x, 0, event.x, self.tkimg.height(), width=2)\n # elif (event.x, event.y) in self.bboxBRPointList:\n # pass\n\n def mouse_release(self, event):\n try:\n labelidx = self.labelListBox.curselection()\n self.currLabel = self.labelListBox.get(labelidx)\n except:\n pass\n if self.EDIT:\n self.update_bbox()\n self.EDIT = False\n x1, x2 = min(self.STATE['x'], event.x), max(self.STATE['x'], event.x)\n y1, y2 = min(self.STATE['y'], event.y), max(self.STATE['y'], event.y)\n self.bboxList.append((x1, y1, x2, y2))\n o1 = self.canvas.create_oval(x1 - 3, y1 - 3, x1 + 3, y1 + 3, fill=\"red\")\n o2 = self.canvas.create_oval(x2 - 3, y1 - 3, x2 + 3, y1 + 3, fill=\"red\")\n o3 = self.canvas.create_oval(x2 - 3, y2 - 3, x2 + 3, y2 + 3, fill=\"red\")\n o4 = self.canvas.create_oval(x1 - 3, y2 - 3, x1 + 3, y2 + 3, fill=\"red\")\n self.bboxPointList.append(o1)\n self.bboxPointList.append(o2)\n self.bboxPointList.append(o3)\n self.bboxPointList.append(o4)\n self.bboxIdList.append(self.bboxId)\n self.bboxId = None\n self.objectLabelList.append(str(self.currLabel))\n self.objectListBox.insert(END, '(%d, %d) -> (%d, %d)' % (x1, y1, x2, y2) + ': ' + str(self.currLabel))\n self.objectListBox.itemconfig(len(self.bboxIdList) - 1,\n fg=self.currBboxColor)\n self.currLabel = None\n\n def zoom_view(self, event):\n try:\n if self.zoomImgId:\n self.zoomcanvas.delete(self.zoomImgId)\n self.zoomImg = self.img.copy()\n self.zoomImgCrop = self.zoomImg.crop(((event.x - 25), (event.y - 25), (event.x + 25), (event.y + 25)))\n self.zoomImgCrop = self.zoomImgCrop.resize((150, 150))\n self.tkZoomImg = ImageTk.PhotoImage(self.zoomImgCrop)\n self.zoomImgId = self.zoomcanvas.create_image(0, 0, image=self.tkZoomImg, anchor=NW)\n hl = self.zoomcanvas.create_line(0, 75, 150, 75, width=2)\n vl = self.zoomcanvas.create_line(75, 0, 75, 150, width=2)\n except:\n pass\n\n def update_bbox(self):\n idx = self.bboxIdList.index(self.editbboxId)\n self.bboxIdList.pop(idx)\n self.bboxList.pop(idx)\n self.objectListBox.delete(idx)\n self.currLabel = self.objectLabelList[idx]\n self.objectLabelList.pop(idx)\n idx = idx*4\n self.canvas.delete(self.bboxPointList[idx])\n self.canvas.delete(self.bboxPointList[idx+1])\n self.canvas.delete(self.bboxPointList[idx+2])\n self.canvas.delete(self.bboxPointList[idx+3])\n self.bboxPointList.pop(idx)\n self.bboxPointList.pop(idx)\n self.bboxPointList.pop(idx)\n self.bboxPointList.pop(idx)\n\n def cancel_bbox(self, event):\n if self.STATE['click'] == 1:\n if self.bboxId:\n self.canvas.delete(self.bboxId)\n self.bboxId = None\n self.STATE['click'] = 0\n\n def del_bbox(self):\n sel = self.objectListBox.curselection()\n if len(sel) != 1:\n return\n idx = int(sel[0])\n self.canvas.delete(self.bboxIdList[idx])\n self.canvas.delete(self.bboxPointList[idx * 4])\n self.canvas.delete(self.bboxPointList[(idx * 4) + 1])\n self.canvas.delete(self.bboxPointList[(idx * 4) + 2])\n self.canvas.delete(self.bboxPointList[(idx * 4) + 3])\n self.bboxPointList.pop(idx * 4)\n self.bboxPointList.pop(idx * 4)\n self.bboxPointList.pop(idx * 4)\n self.bboxPointList.pop(idx * 4)\n self.bboxIdList.pop(idx)\n self.bboxList.pop(idx)\n self.objectLabelList.pop(idx)\n self.objectListBox.delete(idx)\n\n def clear_bbox(self):\n for idx in range(len(self.bboxIdList)):\n self.canvas.delete(self.bboxIdList[idx])\n for idx in range(len(self.bboxPointList)):\n 
self.canvas.delete(self.bboxPointList[idx])\n self.objectListBox.delete(0, len(self.bboxList))\n self.bboxIdList = []\n self.bboxList = []\n self.objectLabelList = []\n self.bboxPointList = []\n\n def add_label(self):\n if self.textBox.get() is not '':\n curr_label_list = self.labelListBox.get(0, END)\n curr_label_list = list(curr_label_list)\n if self.textBox.get() not in curr_label_list:\n self.labelListBox.insert(END, str(self.textBox.get()))\n self.textBox.delete(0, 'end')\n\n def del_label(self):\n labelidx = self.labelListBox.curselection()\n self.labelListBox.delete(labelidx)\n\n def add_labels_coco(self):\n for listidxcoco, list_label_coco in enumerate(self.cocoLabels):\n if self.cocoIntVars[listidxcoco].get():\n curr_label_list = self.labelListBox.get(0, END)\n curr_label_list = list(curr_label_list)\n if list_label_coco not in curr_label_list:\n self.labelListBox.insert(END, str(list_label_coco))\n\n def automate(self):\n self.processingLabel.config(text=\"Processing \")\n self.processingLabel.update_idletasks()\n open_cv_image = np.array(self.img)\n # Convert RGB to BGR\n opencvImage= open_cv_image[:, :, ::-1].copy()\n # opencvImage = cv2.cvtColor(np.array(self.img), cv2.COLOR_RGB2BGR)\n image = preprocess_image(opencvImage)\n boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))\n for idx, (box, label, score) in enumerate(zip(boxes[0], labels[0], scores[0])):\n curr_label_list = self.labelListBox.get(0, END)\n curr_label_list = list(curr_label_list)\n if score < 0.5:\n continue\n\n if config.labels_to_names[label] not in curr_label_list:\n continue\n\n b = box.astype(int)\n\n self.bboxId = self.canvas.create_rectangle(b[0], b[1],\n b[2], b[3],\n width=2,\n outline=config.COLORS[len(self.bboxList) % len(config.COLORS)])\n self.bboxList.append((b[0], b[1], b[2], b[3]))\n o1 = self.canvas.create_oval(b[0] - 3, b[1] - 3, b[0] + 3, b[1] + 3, fill=\"red\")\n o2 = self.canvas.create_oval(b[2] - 3, b[1] - 3, b[2] + 3, b[1] + 3, fill=\"red\")\n o3 = self.canvas.create_oval(b[2] - 3, b[3] - 3, b[2] + 3, b[3] + 3, fill=\"red\")\n o4 = self.canvas.create_oval(b[0] - 3, b[3] - 3, b[0] + 3, b[3] + 3, fill=\"red\")\n self.bboxPointList.append(o1)\n self.bboxPointList.append(o2)\n self.bboxPointList.append(o3)\n self.bboxPointList.append(o4)\n self.bboxIdList.append(self.bboxId)\n self.bboxId = None\n self.objectLabelList.append(str(config.labels_to_names[label]))\n self.objectListBox.insert(END, '(%d, %d) -> (%d, %d)' % (b[0], b[1], b[2], b[3]) + ': ' +\n str(config.labels_to_names[label]))\n self.objectListBox.itemconfig(len(self.bboxIdList) - 1,\n fg=config.COLORS[(len(self.bboxIdList) - 1) % len(config.COLORS)])\n self.processingLabel.config(text=\"Done \")\n\n\nif __name__ == '__main__':\n root = Tk()\n imgicon = PhotoImage(file='icon.gif')\n root.tk.call('wm', 'iconphoto', root._w, imgicon)\n tool = MainGUI(root)\n root.mainloop()\n" ]
[ [ "tensorflow.compat.v1.Session", "numpy.array", "tensorflow.compat.v1.ConfigProto", "numpy.expand_dims" ] ]
czy779509408/text-detection-ctpn
[ "b94c3af3d5105b5a9ff4d4a00edf92b2d55ee4cf" ]
[ "lib/utils/blob.py" ]
[ "\"\"\"Blob helper functions.\"\"\"\nimport numpy as np\nimport cv2\nfrom ..fast_rcnn.config import cfg\n\ndef im_list_to_blob(ims):\n \"\"\"Convert a list of images into a network input.\n\n Assumes images are already prepared (means subtracted, BGR order, ...).\n \"\"\"\n max_shape = np.array([im.shape for im in ims]).max(axis=0)\n num_images = len(ims)\n blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),\n dtype=np.float32)\n for i in range(num_images):\n im = ims[i]\n blob[i, 0:im.shape[0], 0:im.shape[1], :] = im\n\n return blob\n\ndef prep_im_for_blob(im, pixel_means, target_size, max_size):\n \"\"\"Mean subtract and scale an image for use in a blob.\"\"\"\n im = im.astype(np.float32, copy=False)\n im -= pixel_means\n im_shape = im.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n if cfg.TRAIN.RANDOM_DOWNSAMPLE:\n r = 0.6 + np.random.rand() * 0.4\n im_scale *= r\n im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n\n return im, im_scale\n" ]
[ [ "numpy.min", "numpy.round", "numpy.max", "numpy.random.rand", "numpy.array", "numpy.zeros" ] ]
qhjqhj00/ConV2020
[ "680c4b8eb9e9568471e414f6e763e838bca0025e" ]
[ "to_images.py" ]
[ "import matplotlib\nmatplotlib.use('Agg')\nimport pandas as pd\nfrom matplotlib import pyplot as plt \nimport os\nfrom pypinyin import lazy_pinyin\nfrom matplotlib.ticker import MaxNLocator\nimport re\n\ntarget_dir = './res/'\npicture = './images/'\n\nfunc = lambda z:dict([(x, y) for y, x in z.items()])\n \ndef plot(x,y,t,a):\n ax = plt.figure().gca()\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n plt.title(a) \n plt.xlabel(\"time (month.day.hour)\") \n plt.ylabel(t) \n plt.plot(x,y) \n if not os.path.exists(picture+f'{a}/'):\n os.mkdir(picture+f'{a}/')\n plt.savefig(picture+f'{a}/{t}.png')\n plt.close()\n\ndef time(t):\n year = t[:4]\n month = t[4:6]\n day = t[6:8]\n hour = t[8:10]\n minute = t[10:]\n return f'{month}.{day}.{hour}'\n\n\ndef to_images(data, t):\n for l in data:\n d = data[l].to_dict()\n d = func(d)\n y = list(d.keys())\n x = list(d.values())\n x = [time(t) for t in x]\n x.insert(0,'')\n y.insert(0,0)\n a = ''.join(lazy_pinyin(l))\n plot(x,y,t,a)\n \nfields = '|'.join(['confirmedCount', 'deadCount', 'curedCount'])\n \nfor table in list(os.walk(target_dir))[0][2]:\n t = re.findall(fields, table)\n if len(t) > 0:\n data = pd.read_csv(target_dir+table, '\\t',header=0,index_col=0).T\n to_images(data, t[0])\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.use", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
cbrnr/mne-qt-browser
[ "8ed661d2317d0bfc4c25fdbcabcdf2ea581d2f1c" ]
[ "mne_qt_browser/_pg_figure.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Base classes and functions for 2D browser backends.\"\"\"\n\n# Author: Martin Schulz <[email protected]>\n#\n# License: BSD-3-Clause\n\nimport datetime\nimport functools\nimport gc\nimport math\nimport platform\nimport sys\nfrom ast import literal_eval\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom copy import copy\nfrom functools import partial\nfrom os.path import getsize\n\nimport numpy as np\nfrom PyQt5.QtCore import (QEvent, QThread, Qt, pyqtSignal, QRectF, QLineF,\n QPoint, QSettings)\nfrom PyQt5.QtGui import (QFont, QIcon, QPixmap, QTransform,\n QMouseEvent, QImage, QPainter, QPainterPath)\nfrom PyQt5.QtTest import QTest\nfrom PyQt5.QtWidgets import (QAction, QColorDialog, QComboBox, QDialog,\n QDockWidget, QDoubleSpinBox, QFormLayout,\n QGridLayout, QHBoxLayout, QInputDialog,\n QLabel, QMainWindow, QMessageBox,\n QPushButton, QScrollBar, QToolTip, QWidget,\n QStyleOptionSlider, QStyle,\n QApplication, QGraphicsView, QProgressBar,\n QVBoxLayout, QLineEdit, QCheckBox, QScrollArea,\n QGraphicsLineItem, QGraphicsScene, QTextEdit,\n QSizePolicy, QSpinBox, QDesktopWidget, QSlider)\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\nfrom matplotlib.colors import to_rgba_array\nfrom pyqtgraph import (AxisItem, GraphicsView, InfLineLabel, InfiniteLine,\n LinearRegionItem, PlotCurveItem, PlotItem,\n Point, TextItem, ViewBox, mkBrush,\n mkPen, setConfigOption, mkColor)\nfrom scipy.stats import zscore\n\nfrom mne.viz import plot_sensors\nfrom mne.viz._figure import BrowserBase\nfrom mne.viz.utils import _simplify_float, _merge_annotations, _figure_agg\nfrom mne.annotations import _sync_onset\nfrom mne.io.pick import (_DATA_CH_TYPES_ORDER_DEFAULT,\n channel_indices_by_type, _DATA_CH_TYPES_SPLIT)\nfrom mne.utils import _to_rgb, logger, sizeof_fmt, warn, get_config\n\nfrom . import _browser_instances\nfrom .icons import resources # noqa: F401\n\ntry:\n from pytestqt.exceptions import capture_exceptions\nexcept ImportError:\n logger.debug('If pytest-qt is not installed, the errors from inside '\n 'the Qt-loop will be occluded and it will be harder '\n 'to trace back the cause.')\n\n @contextmanager\n def capture_exceptions():\n yield []\n\nname = 'pyqtgraph'\n\n# This can be removed when mne==1.0 is released.\ntry:\n from mne.viz.backends._utils import _init_mne_qtapp\nexcept ImportError:\n from mne.viz.backends._utils import _init_qt_resources\n\n def _init_mne_qtapp(enable_icon=True, pg_app=False):\n \"\"\"Get QApplication-instance for MNE-Python.\n\n Parameter\n ---------\n enable_icon: bool\n If to set an MNE-icon for the app.\n pg_app: bool\n If to create the QApplication with pyqtgraph. 
For an until know\n undiscovered reason the pyqtgraph-browser won't show without\n mkQApp from pyqtgraph.\n\n Returns\n -------\n app: ``PyQt5.QtWidgets.QApplication``\n Instance of QApplication.\n \"\"\"\n from PyQt5.QtWidgets import QApplication\n from PyQt5.QtGui import QIcon\n\n app_name = 'MNE-Python'\n organization_name = 'MNE'\n\n # Fix from cbrnr/mnelab for app name in menu bar\n if sys.platform.startswith(\"darwin\"):\n try:\n # set bundle name on macOS (app name shown in the menu bar)\n from Foundation import NSBundle\n bundle = NSBundle.mainBundle()\n info = (bundle.localizedInfoDictionary()\n or bundle.infoDictionary())\n info[\"CFBundleName\"] = app_name\n except ModuleNotFoundError:\n pass\n\n if pg_app:\n from pyqtgraph import mkQApp\n app = mkQApp(app_name)\n else:\n app = (QApplication.instance()\n or QApplication(sys.argv or [app_name]))\n app.setApplicationName(app_name)\n app.setOrganizationName(organization_name)\n\n if enable_icon:\n # Set icon\n _init_qt_resources()\n kind = 'bigsur-' if platform.mac_ver()[0] >= '10.16' else ''\n app.setWindowIcon(QIcon(f\":/mne-{kind}icon.png\"))\n\n return app\n\n\ndef _get_color(color_spec):\n \"\"\"Wraps mkColor to accept all possible matplotlib color-specifiers.\"\"\"\n try:\n # Convert matplotlib color-names if possible\n color_spec = _to_rgb(color_spec, alpha=True)\n except ValueError:\n pass\n\n # Convert tuples of floats from 0-1 to 0-255 for pyqtgraph\n if (isinstance(color_spec, tuple) and\n all([i <= 1 for i in color_spec])):\n color_spec = tuple([int(i * 255) for i in color_spec])\n\n try:\n color = mkColor(color_spec)\n except ValueError:\n raise ValueError(f'\"{color_spec}\" is not a valid matplotlib '\n f'color-specifier!') from None\n\n return color\n\n\ndef propagate_to_children(method):\n @functools.wraps(method)\n def wrapper(*args, **kwargs):\n propagate = kwargs.pop('propagate', True)\n result = method(*args, **kwargs)\n if args[0].mne.is_epochs and propagate:\n # parent always goes first\n if hasattr(args[0], 'child_traces'):\n for child_trace in args[0].child_traces:\n getattr(child_trace, method.__name__)(*args[1:], **kwargs)\n return result\n\n return wrapper\n\n\nclass DataTrace(PlotCurveItem):\n \"\"\"Graphics-Object for single data trace.\"\"\"\n\n def __init__(self, main, ch_idx, child_idx=None, parent_trace=None):\n super().__init__()\n self.main = main\n self.mne = main.mne\n\n # Set clickable with small area around trace to make clicking easier.\n self.setClickable(True, 12)\n\n # Set default z-value to 1 to be before other items in scene\n self.setZValue(1)\n\n # General attributes\n # The ch_idx is the index of the channel represented by this trace\n # in the channel-order from the unchanged instance (which also picks\n # refer to).\n self.ch_idx = None\n # The range_idx is the index of the channel represented by this trace\n # in the shown range.\n self.range_idx = None\n # The order_idx is the index of the channel represented by this trace\n # in the channel-order (defined e.g. 
by group_by).\n self.order_idx = None\n # Name of the channel the trace represents.\n self.ch_name = None\n # Indicates if trace is bad.\n self.isbad = None\n # Channel-type of trace.\n self.ch_type = None\n # Color-specifier (all possible matplotlib color formats)\n self.color = None\n\n # Attributes for epochs-mode\n # Index of child if child.\n self.child_idx = child_idx\n # Reference to parent if child.\n self.parent_trace = parent_trace\n\n # Only for parent traces\n if self.parent_trace is None:\n # Add to main trace list\n self.mne.traces.append(self)\n # References to children\n self.child_traces = list()\n # Colors of trace in viewrange\n self.trace_colors = None\n\n # set attributes\n self.set_ch_idx(ch_idx)\n self.update_color()\n self.update_scale()\n # Avoid calling self.update_data() twice on initialization\n # (because of update_scale()).\n if self.mne.clipping is None:\n self.update_data()\n\n # Add to main plot\n self.mne.plt.addItem(self)\n\n @propagate_to_children\n def remove(self):\n self.mne.plt.removeItem(self)\n # Only for parent trace\n if self.parent_trace is None:\n self.mne.traces.remove(self)\n self.deleteLater()\n\n @propagate_to_children\n def update_color(self):\n \"\"\"Update the color of the trace.\"\"\"\n\n # Epochs\n if self.mne.is_epochs:\n # Add child traces if shown trace needs to have multiple colors\n # (PlotCurveItem only supports one color per object).\n # There always as many color-specific traces added depending\n # on the whole time range of the instance regardless of the\n # currently visible time range (to avoid checking for new colors\n # while scrolling horizontally).\n\n # Only for parent trace\n if hasattr(self, 'child_traces'):\n self.trace_colors = np.unique(\n self.mne.epoch_color_ref[self.ch_idx], axis=0)\n n_childs = len(self.child_traces)\n trace_diff = len(self.trace_colors) - n_childs - 1\n # Add child traces if necessary\n if trace_diff > 0:\n for cix in range(n_childs, n_childs + trace_diff):\n child = DataTrace(self.main, self.ch_idx,\n child_idx=cix, parent_trace=self)\n self.child_traces.append(child)\n elif trace_diff < 0:\n for _ in range(abs(trace_diff)):\n rm_trace = self.child_traces.pop()\n rm_trace.remove()\n\n # Set parent color\n self.color = self.trace_colors[0]\n\n # Only for child trace\n else:\n self.color = self.parent_trace.trace_colors[\n self.child_idx + 1]\n\n # Raw/ICA\n else:\n if self.isbad:\n self.color = self.mne.ch_color_bad\n else:\n self.color = self.mne.ch_color_ref[self.ch_name]\n\n self.setPen(_get_color(self.color))\n\n @propagate_to_children\n def update_range_idx(self):\n \"\"\"Should be updated when view-range or ch_idx changes.\"\"\"\n self.range_idx = np.argwhere(self.mne.picks == self.ch_idx)[0][0]\n\n @propagate_to_children\n def update_ypos(self):\n \"\"\"Should be updated when butterfly is toggled or ch_idx changes.\"\"\"\n if self.mne.butterfly and self.mne.fig_selection is not None:\n self.ypos = self.mne.selection_ypos_dict[self.ch_idx]\n elif self.mne.fig_selection is not None and \\\n self.mne.old_selection == 'Custom':\n self.ypos = self.range_idx + 1\n elif self.mne.butterfly:\n self.ypos = self.mne.butterfly_type_order.index(self.ch_type) + 1\n else:\n self.ypos = self.range_idx + self.mne.ch_start + 1\n\n @propagate_to_children\n def update_scale(self):\n transform = QTransform()\n transform.scale(1., self.mne.scale_factor)\n self.setTransform(transform)\n\n if self.mne.clipping is not None:\n self.update_data(propagate=False)\n\n @propagate_to_children\n def set_ch_idx(self, 
ch_idx):\n \"\"\"Sets the channel index and all deriving indices.\"\"\"\n # The ch_idx is the index of the channel represented by this trace\n # in the channel-order from the unchanged instance (which also picks\n # refer to).\n self.ch_idx = ch_idx\n # The range_idx is the index of the channel represented by this trace\n # in the shown range.\n self.update_range_idx(propagate=False)\n # The order_idx is the index of the channel represented by this trace\n # in the channel-order (defined e.g. by group_by).\n self.order_idx = np.argwhere(self.mne.ch_order == self.ch_idx)[0][0]\n self.ch_name = self.mne.inst.ch_names[ch_idx]\n self.isbad = self.ch_name in self.mne.info['bads']\n self.ch_type = self.mne.ch_types[ch_idx]\n self.update_ypos(propagate=False)\n\n @propagate_to_children\n def update_data(self):\n \"\"\"Update data (fetch data from self.mne according to self.ch_idx).\"\"\"\n if self.mne.is_epochs or (self.mne.clipping is not None and\n self.mne.clipping != 'clamp'):\n connect = 'finite'\n skip = False\n else:\n connect = 'all'\n skip = True\n\n if self.mne.data_precomputed:\n data = self.mne.data[self.order_idx]\n else:\n data = self.mne.data[self.range_idx]\n\n # Get decim-specific time if enabled\n if self.mne.decim != 1:\n times = self.mne.decim_times[self.mne.decim_data[self.range_idx]]\n data = data[..., ::self.mne.decim_data[self.range_idx]]\n else:\n times = self.mne.times\n\n # For multiple color traces with epochs\n # replace values from other colors with NaN.\n if self.mne.is_epochs:\n data = np.copy(data)\n check_color = self.mne.epoch_color_ref[self.ch_idx,\n self.mne.epoch_idx]\n bool_ixs = np.invert(np.equal(self.color, check_color).all(axis=1))\n starts = self.mne.boundary_times[self.mne.epoch_idx][bool_ixs]\n stops = self.mne.boundary_times[self.mne.epoch_idx + 1][bool_ixs]\n\n for start, stop in zip(starts, stops):\n data[np.logical_and(start <= times, times <= stop)] = np.nan\n\n self.setData(times, data, connect=connect, skipFiniteCheck=skip,\n antialias=self.mne.antialiasing)\n\n self.setPos(0, self.ypos)\n\n def toggle_bad(self, x=None):\n \"\"\"Toggle bad status.\"\"\"\n # Toggle bad epoch\n if self.mne.is_epochs and x is not None:\n epoch_idx, color = self.main._toggle_bad_epoch(x)\n\n # Update epoch color\n if color != 'none':\n new_epo_color = np.repeat(to_rgba_array(color),\n len(self.mne.inst.ch_names), axis=0)\n elif self.mne.epoch_colors is None:\n new_epo_color = np.concatenate(\n [to_rgba_array(c) for c\n in self.mne.ch_color_ref.values()])\n else:\n new_epo_color = \\\n np.concatenate([to_rgba_array(c) for c in\n self.mne.epoch_colors[epoch_idx]])\n\n # Update bad channel colors\n bad_idxs = np.in1d(self.mne.ch_names, self.mne.info['bads'])\n new_epo_color[bad_idxs] = to_rgba_array(self.mne.ch_color_bad)\n\n self.mne.epoch_color_ref[:, epoch_idx] = new_epo_color\n\n # Update overview-bar\n self.mne.overview_bar.update_bad_epochs()\n\n # Update other traces inlcuding self\n for trace in self.mne.traces:\n trace.update_color()\n # Update data is necessary because colored segments will vary\n trace.update_data()\n\n # Toggle bad channel\n else:\n bad_color, pick, marked_bad = self.main._toggle_bad_channel(\n self.range_idx)\n\n # Update line color status\n self.isbad = not self.isbad\n\n # Update colors for epochs\n if self.mne.is_epochs:\n if marked_bad:\n new_ch_color = np.repeat(to_rgba_array(bad_color),\n len(self.mne.inst), axis=0)\n elif self.mne.epoch_colors is None:\n ch_color = self.mne.ch_color_ref[self.ch_name]\n new_ch_color = 
np.repeat(to_rgba_array(ch_color),\n len(self.mne.inst), axis=0)\n else:\n new_ch_color = np.concatenate([to_rgba_array(c[pick]) for\n c in self.mne.epoch_colors])\n\n self.mne.epoch_color_ref[pick, :] = new_ch_color\n\n # Update trace color\n self.update_color()\n if self.mne.is_epochs:\n self.update_data()\n\n # Update channel-axis\n self.main._update_yaxis_labels()\n\n # Update overview-bar\n self.mne.overview_bar.update_bad_channels()\n\n # Update sensor color (if in selection mode)\n if self.mne.fig_selection is not None:\n self.mne.fig_selection._update_bad_sensors(pick, marked_bad)\n\n def mouseClickEvent(self, ev):\n \"\"\"Customize mouse click events.\"\"\"\n if (not self.clickable or ev.button() != Qt.MouseButton.LeftButton\n or self.mne.annotation_mode):\n # Explicitly ignore events in annotation-mode\n ev.ignore()\n return\n if self.mouseShape().contains(ev.pos()):\n ev.accept()\n self.toggle_bad(ev.pos().x())\n\n def get_xdata(self):\n \"\"\"Get xdata for testing.\"\"\"\n return self.xData\n\n def get_ydata(self):\n \"\"\"Get ydata for testing.\"\"\"\n return self.yData + self.ypos\n\n\nclass TimeAxis(AxisItem):\n \"\"\"The X-Axis displaying the time.\"\"\"\n\n def __init__(self, mne):\n self.mne = mne\n self._spacing = None\n super().__init__(orientation='bottom')\n\n def tickValues(self, minVal, maxVal, size):\n \"\"\"Customize creation of axis values from visible axis range.\"\"\"\n if self.mne.is_epochs:\n value_idxs = np.searchsorted(self.mne.midpoints, [minVal, maxVal])\n values = self.mne.midpoints[slice(*value_idxs)]\n spacing = len(self.mne.inst.times) / self.mne.info['sfreq']\n tick_values = [(spacing, values)]\n return tick_values\n else:\n # Save _spacing for later use\n self._spacing = self.tickSpacing(minVal, maxVal, size)\n return super().tickValues(minVal, maxVal, size)\n\n def tickStrings(self, values, scale, spacing):\n \"\"\"Customize strings of axis values.\"\"\"\n if self.mne.is_epochs:\n epoch_nums = self.mne.inst.selection\n ts = epoch_nums[np.searchsorted(self.mne.midpoints, values)]\n tick_strings = [str(v) for v in ts]\n\n elif self.mne.time_format == 'clock':\n meas_date = self.mne.info['meas_date']\n first_time = datetime.timedelta(seconds=self.mne.inst.first_time)\n\n digits = np.ceil(-np.log10(min(v[0] for v in self._spacing)\n ) + 1).astype(int)\n tick_strings = list()\n for val in values:\n val_time = datetime.timedelta(seconds=val) + \\\n first_time + meas_date\n val_str = val_time.strftime('%H:%M:%S')\n if int(val_time.microsecond):\n val_str += \\\n f'{round(val_time.microsecond * 1e-6, digits)}'[1:]\n tick_strings.append(val_str)\n else:\n tick_strings = super().tickStrings(values, scale, spacing)\n\n return tick_strings\n\n def repaint(self):\n \"\"\"Repaint Time Axis.\"\"\"\n self.picture = None\n self.update()\n\n def get_labels(self):\n \"\"\"Get labels for testing.\"\"\"\n values = self.tickValues(*self.mne.viewbox.viewRange()[0],\n self.mne.xmax)\n labels = list()\n for spacing, vals in values:\n labels += self.tickStrings(vals, 1, spacing)\n\n return labels\n\n\nclass ChannelAxis(AxisItem):\n \"\"\"The Y-Axis displaying the channel-names.\"\"\"\n\n def __init__(self, main):\n self.main = main\n self.mne = main.mne\n self.ch_texts = OrderedDict()\n super().__init__(orientation='left')\n self.style['autoReduceTextSpace'] = False\n\n def tickValues(self, minVal, maxVal, size):\n \"\"\"Customize creation of axis values from visible axis range.\"\"\"\n minVal, maxVal = sorted((minVal, maxVal))\n values = list(range(round(minVal) + 1, 
round(maxVal)))\n tick_values = [(1, values)]\n return tick_values\n\n def tickStrings(self, values, scale, spacing):\n \"\"\"Customize strings of axis values.\"\"\"\n # Get channel-names\n if self.mne.butterfly and self.mne.fig_selection is not None:\n tick_strings = list(self.main._make_butterfly_selections_dict())\n elif self.mne.butterfly:\n _, ixs, _ = np.intersect1d(_DATA_CH_TYPES_ORDER_DEFAULT,\n self.mne.ch_types, return_indices=True)\n ixs.sort()\n tick_strings = np.array(_DATA_CH_TYPES_ORDER_DEFAULT)[ixs]\n else:\n # Get channel-names and by substracting 1 from tick-values\n # since the first channel starts at y=1.\n tick_strings = self.mne.ch_names[\n self.mne.ch_order[[v - 1 for v in values]]]\n\n return tick_strings\n\n def drawPicture(self, p, axisSpec, tickSpecs, textSpecs):\n \"\"\"Customize drawing of axis items.\"\"\"\n super().drawPicture(p, axisSpec, tickSpecs, textSpecs)\n for rect, flags, text in textSpecs:\n if self.mne.butterfly and self.mne.fig_selection is not None:\n p.setPen(_get_color('black'))\n elif self.mne.butterfly:\n p.setPen(_get_color(self.mne.ch_color_dict[text]))\n elif text in self.mne.info['bads']:\n p.setPen(_get_color(self.mne.ch_color_bad))\n else:\n p.setPen(_get_color(self.mne.ch_color_ref[text]))\n self.ch_texts[text] = ((rect.left(), rect.left() + rect.width()),\n (rect.top(), rect.top() + rect.height()))\n p.drawText(rect, int(flags), text)\n\n def repaint(self):\n \"\"\"Repaint Channel Axis.\"\"\"\n self.picture = None\n self.update()\n\n def mouseClickEvent(self, event):\n \"\"\"Customize mouse click events.\"\"\"\n # Clean up channel-texts\n if not self.mne.butterfly:\n self.ch_texts = {k: v for k, v in self.ch_texts.items()\n if k in [tr.ch_name for tr in self.mne.traces]}\n # Get channel-name from position of channel-description\n ypos = event.scenePos().y()\n y_values = np.asarray(list(self.ch_texts.values()))[:, 1, :]\n y_diff = np.abs(y_values - ypos)\n ch_idx = int(np.argmin(y_diff, axis=0)[0])\n ch_name = list(self.ch_texts.keys())[ch_idx]\n trace = [tr for tr in self.mne.traces\n if tr.ch_name == ch_name][0]\n if event.button() == Qt.LeftButton:\n trace.toggle_bad()\n elif event.button() == Qt.RightButton:\n self.main._create_ch_context_fig(trace.range_idx)\n\n def get_labels(self):\n \"\"\"Get labels for testing.\"\"\"\n values = self.tickValues(*self.mne.viewbox.viewRange()[1], None)\n labels = self.tickStrings(values[0][1], None, None)\n\n return labels\n\n\nclass BaseScrollBar(QScrollBar):\n \"\"\"Base Class for scrolling directly to the clicked position.\"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n def mousePressEvent(self, event):\n \"\"\"Customize mouse click events.\n\n Taken from: https://stackoverflow.com/questions/29710327/\n how-to-override-qscrollbar-onclick-default-behaviour\n \"\"\"\n if event.button() == Qt.LeftButton:\n opt = QStyleOptionSlider()\n self.initStyleOption(opt)\n control = self.style().hitTestComplexControl(\n QStyle.CC_ScrollBar, opt,\n event.pos(), self)\n if (control == QStyle.SC_ScrollBarAddPage or\n control == QStyle.SC_ScrollBarSubPage):\n # scroll here\n gr = self.style().subControlRect(QStyle.CC_ScrollBar,\n opt,\n QStyle.SC_ScrollBarGroove,\n self)\n sr = self.style().subControlRect(QStyle.CC_ScrollBar,\n opt,\n QStyle.SC_ScrollBarSlider,\n self)\n if self.orientation() == Qt.Horizontal:\n pos = event.pos().x()\n sliderLength = sr.width()\n sliderMin = gr.x()\n sliderMax = gr.right() - sliderLength + 1\n if (self.layoutDirection() == Qt.RightToLeft):\n 
opt.upsideDown = not opt.upsideDown\n else:\n pos = event.pos().y()\n sliderLength = sr.height()\n sliderMin = gr.y()\n sliderMax = gr.bottom() - sliderLength + 1\n self.setValue(QStyle.sliderValueFromPosition(\n self.minimum(), self.maximum(),\n pos - sliderMin, sliderMax - sliderMin,\n opt.upsideDown))\n return\n\n return super().mousePressEvent(event)\n\n\nclass TimeScrollBar(BaseScrollBar):\n \"\"\"Scrolls through time.\"\"\"\n\n def __init__(self, mne):\n super().__init__(Qt.Horizontal)\n self.mne = mne\n self.step_factor = 1\n self.setMinimum(0)\n self.setSingleStep(1)\n self.update_duration()\n self.setFocusPolicy(Qt.WheelFocus)\n # Because valueChanged is needed (captures every input to scrollbar,\n # not just sliderMoved), there has to be made a differentiation\n # between internal and external changes.\n self.external_change = False\n self.valueChanged.connect(self._time_changed)\n\n def _time_changed(self, value):\n if not self.external_change:\n if self.mne.is_epochs:\n # Convert Epoch index to time\n value = self.mne.boundary_times[int(value)]\n else:\n value /= self.step_factor\n self.mne.plt.setXRange(value, value + self.mne.duration,\n padding=0)\n\n def update_value(self, value):\n \"\"\"Update value of the ScrollBar.\"\"\"\n # Mark change as external to avoid setting\n # XRange again in _time_changed.\n self.external_change = True\n if self.mne.is_epochs:\n set_value = np.searchsorted(self.mne.midpoints, value)\n else:\n set_value = int(value * self.step_factor)\n self.setValue(set_value)\n self.external_change = False\n\n def update_duration(self):\n \"\"\"Update bar size.\"\"\"\n if self.mne.is_epochs:\n self.setPageStep(self.mne.n_epochs)\n self.setMaximum(len(self.mne.inst) - self.mne.n_epochs)\n else:\n self.setPageStep(int(self.mne.duration))\n self.step_factor = self.mne.scroll_sensitivity / self.mne.duration\n self.setMaximum(int((self.mne.xmax - self.mne.duration)\n * self.step_factor))\n\n def _update_scroll_sensitivity(self):\n self.update_duration()\n self.update_value(self.value() / self.step_factor)\n\n def keyPressEvent(self, event):\n \"\"\"Customize key press events.\"\"\"\n # Let main handle the keypress\n event.ignore()\n\n\nclass ChannelScrollBar(BaseScrollBar):\n \"\"\"Scrolls through channels.\"\"\"\n\n def __init__(self, mne):\n super().__init__(Qt.Vertical)\n self.mne = mne\n\n self.setMinimum(0)\n self.setSingleStep(1)\n self.update_nchan()\n self.setFocusPolicy(Qt.WheelFocus)\n # Because valueChanged is needed (captures every input to scrollbar,\n # not just sliderMoved), there has to be made a differentiation\n # between internal and external changes.\n self.external_change = False\n self.valueChanged.connect(self._channel_changed)\n\n def _channel_changed(self, value):\n if not self.external_change:\n if self.mne.fig_selection:\n label = list(self.mne.ch_selections.keys())[value]\n self.mne.fig_selection._chkbx_changed(label)\n elif not self.mne.butterfly:\n value = min(value, self.mne.ymax - self.mne.n_channels)\n self.mne.plt.setYRange(value, value + self.mne.n_channels + 1,\n padding=0)\n\n def update_value(self, value):\n \"\"\"Update value of the ScrollBar.\"\"\"\n # Mark change as external to avoid setting YRange again in\n # _channel_changed.\n self.external_change = True\n self.setValue(value)\n self.external_change = False\n\n def update_nchan(self):\n \"\"\"Update bar size.\"\"\"\n if getattr(self.mne, 'group_by', None) in ['position', 'selection']:\n self.setPageStep(1)\n self.setMaximum(len(self.mne.ch_selections) - 1)\n else:\n 
self.setPageStep(self.mne.n_channels)\n self.setMaximum(self.mne.ymax - self.mne.n_channels - 1)\n\n def keyPressEvent(self, event):\n \"\"\"Customize key press events.\"\"\"\n # Let main handle the keypress\n event.ignore()\n\n\nclass OverviewBar(QGraphicsView):\n \"\"\"\n Provides overview over channels and current visible range.\n\n Has different modes:\n - channels: Display channel-types\n - zscore: Display channel-wise zscore across time\n \"\"\"\n\n def __init__(self, main):\n super().__init__(QGraphicsScene())\n self.main = main\n self.mne = main.mne\n self.bg_img = None\n self.bg_pxmp = None\n self.bg_pxmp_item = None\n # Set minimum Size to 1/10 of display size\n min_h = int(QApplication.desktop().screenGeometry().height() / 10)\n self.setMinimumSize(1, 1)\n self.setFixedHeight(min_h)\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n self.set_background()\n\n # Initialize Graphics-Items\n # Bad channels\n self.bad_line_dict = dict()\n self.update_bad_channels()\n\n # Events\n self.event_line_dict = dict()\n self.update_events()\n\n if self.mne.is_epochs:\n # Epochs Lines\n self.epoch_line_dict = dict()\n self.update_epoch_lines()\n self.bad_epoch_rect_dict = dict()\n self.update_bad_epochs()\n else:\n # Annotations\n self.annotations_rect_dict = dict()\n self.update_annotations()\n\n # VLine\n self.v_line = None\n self.update_vline()\n\n # View Range\n self.viewrange_rect = None\n self.update_viewrange()\n\n def update_epoch_lines(self):\n \"\"\"Update representation of epoch lines.\"\"\"\n epoch_line_pen = mkPen(color='k', width=1)\n for t in self.mne.boundary_times[1:-1]:\n top_left = self._mapFromData(t, 0)\n bottom_right = self._mapFromData(t, len(self.mne.ch_order))\n line = self.scene().addLine(QLineF(top_left, bottom_right),\n epoch_line_pen)\n line.setZValue(1)\n self.epoch_line_dict[t] = line\n\n def update_bad_channels(self):\n \"\"\"Update representation of bad channels.\"\"\"\n bad_set = set(self.mne.info['bads'])\n line_set = set(self.bad_line_dict.keys())\n\n add_chs = bad_set.difference(line_set)\n rm_chs = line_set.difference(bad_set)\n\n for line_idx, ch_idx in enumerate(self.mne.ch_order):\n ch_name = self.mne.ch_names[ch_idx]\n if ch_name in add_chs:\n start = self._mapFromData(0, line_idx)\n stop = self._mapFromData(self.mne.inst.times[-1], line_idx)\n pen = _get_color(self.mne.ch_color_bad)\n line = self.scene().addLine(QLineF(start, stop), pen)\n line.setZValue(2)\n self.bad_line_dict[ch_name] = line\n elif ch_name in rm_chs:\n self.scene().removeItem(self.bad_line_dict[ch_name])\n self.bad_line_dict.pop(ch_name)\n\n def update_bad_epochs(self):\n bad_set = set(self.mne.bad_epochs)\n rect_set = set(self.bad_epoch_rect_dict.keys())\n\n add_epos = bad_set.difference(rect_set)\n rm_epos = rect_set.difference(bad_set)\n\n for epo_num in self.mne.inst.selection:\n if epo_num in add_epos:\n epo_idx = self.mne.inst.selection.tolist().index(epo_num)\n start, stop = self.mne.boundary_times[epo_idx:epo_idx + 2]\n top_left = self._mapFromData(start, 0)\n bottom_right = self._mapFromData(stop, len(self.mne.ch_order))\n pen = _get_color(self.mne.epoch_color_bad)\n rect = self.scene().addRect(QRectF(top_left, bottom_right),\n pen=pen, brush=pen)\n rect.setZValue(3)\n self.bad_epoch_rect_dict[epo_num] = rect\n elif epo_num in rm_epos:\n self.scene().removeItem(self.bad_epoch_rect_dict[epo_num])\n self.bad_epoch_rect_dict.pop(epo_num)\n\n def update_events(self):\n \"\"\"Update representation of 
events.\"\"\"\n if getattr(self.mne, 'event_nums', None) is not None \\\n and self.mne.events_visible:\n for ev_t, ev_id in zip(self.mne.event_times, self.mne.event_nums):\n color_name = self.mne.event_color_dict[ev_id]\n color = _get_color(color_name)\n color.setAlpha(100)\n pen = mkPen(color)\n top_left = self._mapFromData(ev_t, 0)\n bottom_right = self._mapFromData(ev_t, len(self.mne.ch_order))\n line = self.scene().addLine(QLineF(top_left, bottom_right),\n pen)\n line.setZValue(1)\n self.event_line_dict[ev_t] = line\n else:\n for event_line in self.event_line_dict.values():\n self.scene().removeItem(event_line)\n self.event_line_dict.clear()\n\n def update_annotations(self):\n \"\"\"Update representation of annotations.\"\"\"\n annotations = self.mne.inst.annotations\n # Exclude non-visible annotations\n annot_set = set([annot['onset'] for annot in annotations if\n self.mne.visible_annotations[annot['description']]])\n rect_set = set(self.annotations_rect_dict.keys())\n\n add_onsets = annot_set.difference(rect_set)\n rm_onsets = rect_set.difference(annot_set)\n\n # Add missing onsets\n for add_onset in add_onsets:\n plot_onset = _sync_onset(self.mne.inst, add_onset)\n annot_idx = np.argwhere(self.mne.inst.annotations.onset\n == add_onset)[0][0]\n duration = annotations.duration[annot_idx]\n description = annotations.description[annot_idx]\n color_name = self.mne.annotation_segment_colors[description]\n color = _get_color(color_name)\n color.setAlpha(150)\n pen = mkPen(color)\n brush = mkBrush(color)\n top_left = self._mapFromData(plot_onset, 0)\n bottom_right = self._mapFromData(plot_onset + duration,\n len(self.mne.ch_order))\n rect = self.scene().addRect(QRectF(top_left, bottom_right),\n pen, brush)\n rect.setZValue(3)\n self.annotations_rect_dict[add_onset] = {'rect': rect,\n 'plot_onset': plot_onset,\n 'duration': duration,\n 'color': color_name}\n\n # Remove onsets\n for rm_onset in rm_onsets:\n self.scene().removeItem(self.annotations_rect_dict[rm_onset]\n ['rect'])\n self.annotations_rect_dict.pop(rm_onset)\n\n # Changes\n for edit_onset in self.annotations_rect_dict:\n plot_onset = _sync_onset(self.mne.inst, edit_onset)\n annot_idx = np.where(annotations.onset == edit_onset)[0][0]\n duration = annotations.duration[annot_idx]\n rect_duration = self.annotations_rect_dict[edit_onset]['duration']\n rect = self.annotations_rect_dict[edit_onset]['rect']\n # Update changed duration\n if duration != rect_duration:\n self.annotations_rect_dict[edit_onset]['duration'] = duration\n top_left = self._mapFromData(plot_onset, 0)\n bottom_right = self._mapFromData(plot_onset + duration,\n len(self.mne.ch_order))\n rect.setRect(QRectF(top_left, bottom_right))\n # Update changed color\n description = annotations.description[annot_idx]\n color_name = self.mne.annotation_segment_colors[description]\n rect_color = self.annotations_rect_dict[edit_onset]['color']\n if color_name != rect_color:\n color = _get_color(color_name)\n color.setAlpha(150)\n pen = mkPen(color)\n brush = mkBrush(color)\n rect.setPen(pen)\n rect.setBrush(brush)\n\n def update_vline(self):\n \"\"\"Update representation of vline.\"\"\"\n if self.mne.is_epochs:\n # VLine representation not useful in epochs-mode\n pass\n # Add VLine-Representation\n elif self.mne.vline is not None:\n value = self.mne.vline.value()\n top_left = self._mapFromData(value, 0)\n bottom_right = self._mapFromData(value, len(self.mne.ch_order))\n line = QLineF(top_left, bottom_right)\n if self.v_line is None:\n pen = mkPen('g')\n self.v_line = 
self.scene().addLine(line, pen)\n self.v_line.setZValue(1)\n else:\n self.v_line.setLine(line)\n # Remove VLine-Representation\n elif self.v_line is not None:\n self.scene().removeItem(self.v_line)\n self.v_line = None\n\n def update_viewrange(self):\n \"\"\"Update representation of viewrange.\"\"\"\n if self.mne.butterfly:\n top_left = self._mapFromData(self.mne.t_start, 0)\n bottom_right = self._mapFromData(self.mne.t_start +\n self.mne.duration, self.mne.ymax)\n else:\n top_left = self._mapFromData(self.mne.t_start, self.mne.ch_start)\n bottom_right = self._mapFromData(self.mne.t_start\n + self.mne.duration,\n self.mne.ch_start\n + self.mne.n_channels)\n rect = QRectF(top_left, bottom_right)\n if self.viewrange_rect is None:\n pen = mkPen(color='g')\n brush = mkBrush(color=(0, 0, 0, 100))\n self.viewrange_rect = self.scene().addRect(rect, pen, brush)\n self.viewrange_rect.setZValue(4)\n else:\n self.viewrange_rect.setRect(rect)\n\n def _set_range_from_pos(self, pos):\n x, y = self._mapToData(pos)\n\n # Set X\n # Check boundaries\n if self.mne.is_epochs:\n if x == '-offbounds':\n epo_idx = 0\n elif x == '+offbounds':\n epo_idx = len(self.mne.inst) - self.mne.n_epochs\n else:\n epo_idx = max(x - self.mne.n_epochs // 2, 0)\n x = self.mne.boundary_times[epo_idx]\n elif x == '-offbounds':\n x = 0\n elif x == '+offbounds':\n x = self.mne.xmax - self.mne.duration\n else:\n # Move click position to middle of view range\n x -= self.mne.duration / 2\n xmin = np.clip(x, 0, self.mne.xmax - self.mne.duration)\n xmax = np.clip(xmin + self.mne.duration,\n self.mne.duration, self.mne.xmax)\n\n self.mne.plt.setXRange(xmin, xmax, padding=0)\n\n # Set Y\n if y == '-offbounds':\n y = 0\n elif y == '+offbounds':\n y = self.mne.ymax - (self.mne.n_channels + 1)\n else:\n # Move click position to middle of view range\n y -= self.mne.n_channels / 2\n ymin = np.clip(y, 0, self.mne.ymax - (self.mne.n_channels + 1))\n ymax = np.clip(ymin + self.mne.n_channels + 1,\n self.mne.n_channels, self.mne.ymax)\n # Check boundaries\n if self.mne.fig_selection:\n self.mne.fig_selection._scroll_to_idx(int(ymin))\n else:\n self.mne.plt.setYRange(ymin, ymax, padding=0)\n\n def mousePressEvent(self, event):\n \"\"\"Customize mouse press events.\"\"\"\n self._set_range_from_pos(event.pos())\n\n def mouseMoveEvent(self, event):\n \"\"\"Customize mouse move events.\"\"\"\n self._set_range_from_pos(event.pos())\n\n def _fit_bg_img(self):\n # Remove previous item from scene\n if (self.bg_pxmp_item is not None and\n self.bg_pxmp_item in self.scene().items()):\n self.scene().removeItem(self.bg_pxmp_item)\n # Resize Pixmap\n if self.bg_pxmp is not None:\n cnt_rect = self.contentsRect()\n self.bg_pxmp = self.bg_pxmp.scaled(cnt_rect.width(),\n cnt_rect.height(),\n Qt.IgnoreAspectRatio)\n self.bg_pxmp_item = self.scene().addPixmap(self.bg_pxmp)\n\n def resizeEvent(self, event):\n \"\"\"Customize resize event.\"\"\"\n super().resizeEvent(event)\n cnt_rect = self.contentsRect()\n self.setSceneRect(QRectF(QPoint(0, 0),\n QPoint(cnt_rect.width(),\n cnt_rect.height())))\n # Resize backgounrd\n self._fit_bg_img()\n\n # Resize Graphics Items (assuming height never changes)\n # Resize bad_channels\n for bad_ch_line in self.bad_line_dict.values():\n current_line = bad_ch_line.line()\n bad_ch_line.setLine(QLineF(current_line.p1(),\n Point(cnt_rect.width(),\n current_line.y2())))\n\n # Resize event-lines\n for ev_t, event_line in self.event_line_dict.items():\n top_left = self._mapFromData(ev_t, 0)\n bottom_right = self._mapFromData(ev_t, 
len(self.mne.ch_order))\n event_line.setLine(QLineF(top_left, bottom_right))\n\n if self.mne.is_epochs:\n # Resize epoch lines\n for epo_t, epoch_line in self.epoch_line_dict.items():\n top_left = self._mapFromData(epo_t, 0)\n bottom_right = self._mapFromData(epo_t,\n len(self.mne.ch_order))\n epoch_line.setLine(QLineF(top_left, bottom_right))\n # Resize bad rects\n for epo_idx, epoch_rect in self.bad_epoch_rect_dict.items():\n start, stop = self.mne.boundary_times[epo_idx:epo_idx + 2]\n top_left = self._mapFromData(start, 0)\n bottom_right = self._mapFromData(stop, len(self.mne.ch_order))\n epoch_rect.setRect(QRectF(top_left, bottom_right))\n else:\n # Resize annotation-rects\n for annot_dict in self.annotations_rect_dict.values():\n annot_rect = annot_dict['rect']\n plot_onset = annot_dict['plot_onset']\n duration = annot_dict['duration']\n\n top_left = self._mapFromData(plot_onset, 0)\n bottom_right = self._mapFromData(plot_onset + duration,\n len(self.mne.ch_order))\n annot_rect.setRect(QRectF(top_left, bottom_right))\n\n # Update vline\n if all([i is not None for i in [self.v_line, self.mne.vline]]):\n value = self.mne.vline.value()\n top_left = self._mapFromData(value, 0)\n bottom_right = self._mapFromData(value, len(self.mne.ch_order))\n self.v_line.setLine(QLineF(top_left, bottom_right))\n\n # Update viewrange-rect\n top_left = self._mapFromData(self.mne.t_start, self.mne.ch_start)\n bottom_right = self._mapFromData(self.mne.t_start\n + self.mne.duration,\n self.mne.ch_start\n + self.mne.n_channels)\n self.viewrange_rect.setRect(QRectF(top_left, bottom_right))\n\n def set_background(self):\n \"\"\"Set the background-image for the selected overview-mode.\"\"\"\n # Add Overview-Pixmap\n if self.mne.overview_mode == 'empty':\n self.bg_pxmp = None\n elif self.mne.overview_mode == 'channels':\n channel_rgba = np.empty((len(self.mne.ch_order),\n 2, 4))\n for line_idx, ch_idx in enumerate(self.mne.ch_order):\n ch_type = self.mne.ch_types[ch_idx]\n color = _get_color(self.mne.ch_color_dict[ch_type])\n channel_rgba[line_idx, :] = color.getRgb()\n\n channel_rgba = np.require(channel_rgba, np.uint8, 'C')\n self.bg_img = QImage(channel_rgba,\n channel_rgba.shape[1],\n channel_rgba.shape[0],\n QImage.Format_RGBA8888)\n self.bg_pxmp = QPixmap.fromImage(self.bg_img)\n\n elif self.mne.overview_mode == 'zscore':\n self.bg_img = QImage(self.mne.zscore_rgba,\n self.mne.zscore_rgba.shape[1],\n self.mne.zscore_rgba.shape[0],\n QImage.Format_RGBA8888)\n self.bg_pxmp = QPixmap.fromImage(self.bg_img)\n\n self._fit_bg_img()\n\n def _mapFromData(self, x, y):\n # Include padding from black frame\n point_x = self.width() * x / self.mne.xmax\n point_y = self.height() * y / len(self.mne.ch_order)\n\n return Point(point_x, point_y)\n\n def _mapToData(self, point):\n # Include padding from black frame\n xnorm = point.x() / self.width()\n if xnorm < 0:\n x = '-offbounds'\n elif xnorm > 1:\n x = '+offbounds'\n else:\n if self.mne.is_epochs:\n # Return epoch index for epochs\n x = int(len(self.mne.inst) * xnorm)\n else:\n time_idx = int((len(self.mne.inst.times) - 1) * xnorm)\n x = self.mne.inst.times[time_idx]\n\n ynorm = point.y() / self.height()\n if ynorm < 0:\n y = '-offbounds'\n elif ynorm > 1:\n y = '+offbounds'\n else:\n y = len(self.mne.ch_order) * ynorm\n\n return x, y\n\n def keyPressEvent(self, event):\n self.main.keyPressEvent(event)\n\n\nclass RawViewBox(ViewBox):\n \"\"\"PyQtGraph-Wrapper for interaction with the View.\"\"\"\n\n def __init__(self, main):\n super().__init__(invertY=True)\n 
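# The browser manages the visible range itself (scrollbars, keyboard shortcuts, overview bar), so pyqtgraph's auto-ranging is disabled here.\n        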
self.enableAutoRange(enable=False, x=False, y=False)\n self.main = main\n self.mne = main.mne\n self._drag_start = None\n self._drag_region = None\n\n def mouseDragEvent(self, event, axis=None):\n \"\"\"Customize mouse drag events.\"\"\"\n event.accept()\n\n if event.button() == Qt.LeftButton \\\n and self.mne.annotation_mode:\n if self.mne.current_description:\n description = self.mne.current_description\n if event.isStart():\n self._drag_start = self.mapSceneToView(\n event.lastScenePos()).x()\n drag_stop = self.mapSceneToView(event.scenePos()).x()\n self._drag_region = AnnotRegion(self.mne,\n description=description,\n values=(self._drag_start,\n drag_stop))\n self.mne.plt.addItem(self._drag_region)\n self.mne.plt.addItem(self._drag_region.label_item)\n elif event.isFinish():\n drag_stop = self.mapSceneToView(event.scenePos()).x()\n self._drag_region.setRegion((self._drag_start, drag_stop))\n plot_onset = min(self._drag_start, drag_stop)\n plot_offset = max(self._drag_start, drag_stop)\n duration = abs(self._drag_start - drag_stop)\n\n # Add to annotations\n onset = _sync_onset(self.mne.inst, plot_onset,\n inverse=True)\n _merge_annotations(onset, onset + duration,\n self.mne.current_description,\n self.mne.inst.annotations)\n\n # Add to regions/merge regions\n merge_values = [plot_onset, plot_offset]\n rm_regions = list()\n for region in [r for r in self.mne.regions\n if r.description ==\n self.mne.current_description]:\n values = region.getRegion()\n if any([plot_onset < val < plot_offset for val in\n values]):\n merge_values += values\n rm_regions.append(region)\n if len(merge_values) > 2:\n self._drag_region.setRegion((min(merge_values),\n max(merge_values)))\n for rm_region in rm_regions:\n self.main._remove_region(rm_region, from_annot=False)\n self.main._add_region(plot_onset, duration,\n self.mne.current_description,\n self._drag_region)\n self._drag_region.select(True)\n\n # Update Overview-Bar\n self.mne.overview_bar.update_annotations()\n else:\n x_to = self.mapSceneToView(event.scenePos()).x()\n self._drag_region.setRegion((self._drag_start, x_to))\n\n elif event.isFinish():\n self.main.message_box(text='No description!',\n info_text='No description is given, '\n 'add one!',\n icon=QMessageBox.Warning)\n\n def mouseClickEvent(self, event):\n \"\"\"Customize mouse click events.\"\"\"\n # If we want the context-menu back, uncomment following line\n # super().mouseClickEvent(event)\n if not self.mne.annotation_mode:\n if event.button() == Qt.LeftButton:\n self.main._add_vline(self.mapSceneToView(\n event.scenePos()).x())\n elif event.button() == Qt.RightButton:\n self.main._remove_vline()\n\n def wheelEvent(self, ev, axis=None):\n \"\"\"Customize mouse wheel/trackpad-scroll events.\"\"\"\n ev.accept()\n scroll = -1 * ev.delta() / 120\n if ev.orientation() == Qt.Horizontal:\n self.main.hscroll(scroll * 10)\n elif ev.orientation() == Qt.Vertical:\n self.main.vscroll(scroll)\n\n def keyPressEvent(self, event):\n self.main.keyPressEvent(event)\n\n\nclass VLineLabel(InfLineLabel):\n \"\"\"Label of the vline displaying the time.\"\"\"\n\n def __init__(self, vline):\n super().__init__(vline, text='{value:.3f} s', position=0.98,\n fill='g', color='b', movable=True)\n self.cursorOffset = None\n\n def mouseDragEvent(self, ev):\n \"\"\"Customize mouse drag events.\"\"\"\n if self.movable and ev.button() == Qt.LeftButton:\n if ev.isStart():\n self.line.moving = True\n self.cursorOffset = (self.line.pos() -\n self.mapToView(ev.buttonDownPos()))\n ev.accept()\n\n if not self.line.moving:\n 
return\n\n self.line.setPos(self.cursorOffset + self.mapToView(ev.pos()))\n self.line.sigDragged.emit(self)\n if ev.isFinish():\n self.line.moving = False\n self.line.sigPositionChangeFinished.emit(self.line)\n\n def valueChanged(self):\n \"\"\"Customize what happens on value change.\"\"\"\n if not self.isVisible():\n return\n value = self.line.value()\n if self.line.mne.is_epochs:\n # Show epoch-time\n t_vals_abs = np.linspace(0, self.line.mne.epoch_dur,\n len(self.line.mne.inst.times))\n search_val = value % self.line.mne.epoch_dur\n t_idx = np.searchsorted(t_vals_abs, search_val)\n value = self.line.mne.inst.times[t_idx]\n self.setText(self.format.format(value=value))\n self.updatePosition()\n\n\nclass VLine(InfiniteLine):\n \"\"\"Marker to be placed inside the Trace-Plot.\"\"\"\n\n def __init__(self, mne, pos, bounds):\n super().__init__(pos, pen='g', hoverPen='y',\n movable=True, bounds=bounds)\n self.mne = mne\n self.label = VLineLabel(self)\n\n\nclass EventLine(InfiniteLine):\n \"\"\"Displays Events inside Trace-Plot\"\"\"\n\n def __init__(self, pos, id, color):\n super().__init__(pos, pen=color, movable=False,\n label=str(id), labelOpts={'position': 0.98,\n 'color': color,\n 'anchors': [(0, 0.5),\n (0, 0.5)]})\n self.label.setFont(QFont('AnyStyle', 10, QFont.Bold))\n self.setZValue(0)\n\n\nclass Crosshair(InfiniteLine):\n \"\"\"Continously updating marker inside the Trace-Plot.\"\"\"\n\n def __init__(self):\n super().__init__(angle=90, movable=False, pen='g')\n self.y = 1\n\n def set_data(self, x, y):\n \"\"\"Set x and y data for crosshair point.\"\"\"\n self.setPos(x)\n self.y = y\n\n def paint(self, p, *args):\n super().paint(p, *args)\n\n p.setPen(mkPen('r', width=4))\n p.drawPoint(Point(self.y, 0))\n\n\nclass BaseScaleBar:\n def __init__(self, mne, ch_type):\n self.mne = mne\n self.ch_type = ch_type\n self.ypos = None\n\n def _set_position(self, x, y):\n pass\n\n def _is_visible(self):\n return self.ch_type in self.mne.ch_types[self.mne.picks]\n\n def _get_ypos(self):\n if self.mne.butterfly:\n self.ypos = self.mne.butterfly_type_order.index(self.ch_type) + 1\n else:\n ch_type_idxs = np.where(self.mne.ch_types[self.mne.picks]\n == self.ch_type)[0]\n\n for idx in ch_type_idxs:\n ch_name = self.mne.ch_names[self.mne.picks[idx]]\n if ch_name not in self.mne.info['bads'] and \\\n ch_name not in self.mne.whitened_ch_names:\n self.ypos = self.mne.ch_start + idx + 1\n break\n # Consider all indices bad\n if self.ypos is None:\n self.ypos = self.mne.ch_start + ch_type_idxs[0] + 1\n\n def update_x_position(self):\n \"\"\"Update x-position of Scalebar.\"\"\"\n if self._is_visible():\n if self.ypos is None:\n self._get_ypos()\n self._set_position(self.mne.t_start, self.ypos)\n\n def update_y_position(self):\n \"\"\"Update y-position of Scalebar.\"\"\"\n if self._is_visible():\n self.setVisible(True)\n self._get_ypos()\n self._set_position(self.mne.t_start, self.ypos)\n else:\n self.setVisible(False)\n\n\nclass ScaleBarText(BaseScaleBar, TextItem):\n def __init__(self, mne, ch_type):\n BaseScaleBar.__init__(self, mne, ch_type)\n TextItem.__init__(self, color='#AA3377')\n\n self.setFont(QFont('AnyStyle', 10))\n self.setZValue(2) # To draw over RawTraceItems\n\n self.update_value()\n self.update_y_position()\n\n def update_value(self):\n \"\"\"Update value of ScaleBarText.\"\"\"\n scaler = 1 if self.mne.butterfly else 2\n inv_norm = (scaler *\n self.mne.scalings[self.ch_type] *\n self.mne.unit_scalings[self.ch_type] /\n self.mne.scale_factor)\n self.setText(f'{_simplify_float(inv_norm)} '\n 
f'{self.mne.units[self.ch_type]}')\n\n def _set_position(self, x, y):\n self.setPos(x, y)\n\n\nclass ScaleBar(BaseScaleBar, QGraphicsLineItem):\n def __init__(self, mne, ch_type):\n BaseScaleBar.__init__(self, mne, ch_type)\n QGraphicsLineItem.__init__(self)\n\n self.setZValue(1)\n self.setPen(mkPen(color='#AA3377', width=5))\n self.update_y_position()\n\n def _set_position(self, x, y):\n self.setLine(QLineF(x, y - 0.5, x, y + 0.5))\n\n def get_ydata(self):\n \"\"\"Get y-data for tests.\"\"\"\n line = self.line()\n return line.y1(), line.y2()\n\n\nclass _BaseDialog(QDialog):\n def __init__(self, main, widget=None,\n modal=False, name=None, title=None):\n super().__init__(main)\n self.main = main\n self.widget = widget\n self.mne = main.mne\n self.name = name\n self.modal = modal\n\n self.setAttribute(Qt.WA_DeleteOnClose, True)\n\n self.mne.child_figs.append(self)\n\n if self.name is not None:\n setattr(self.mne, self.name, self)\n\n if title is not None:\n self.setWindowTitle(title)\n\n if self.widget is not None:\n layout = QVBoxLayout()\n layout.addWidget(self.widget)\n self.setLayout(layout)\n\n def show(self, center=True):\n if self.modal:\n self.open()\n else:\n super().show()\n\n if center:\n # center dialog\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Escape:\n self.close()\n else:\n self.parent().keyPressEvent(event)\n\n def closeEvent(self, event):\n if hasattr(self, 'name') and hasattr(self, 'mne'):\n if self.name is not None and hasattr(self.mne, self.name):\n setattr(self.mne, self.name, None)\n if self in self.mne.child_figs:\n self.mne.child_figs.remove(self)\n event.accept()\n\n\nclass SettingsDialog(_BaseDialog):\n \"\"\"Shows additional settings.\"\"\"\n\n def __init__(self, main, **kwargs):\n super().__init__(main, **kwargs)\n\n layout = QFormLayout()\n\n self.downsampling_box = QSpinBox()\n self.downsampling_box.setToolTip('Set an integer as the downsampling'\n ' factor or \"Auto\" to get the factor'\n ' from the visible range.\\n'\n ' Setting the factor 1 means no '\n 'downsampling.\\n'\n ' Default is 1.')\n self.downsampling_box.setMinimum(0)\n self.downsampling_box.setSpecialValueText('Auto')\n self.downsampling_box.valueChanged.connect(partial(\n self._value_changed, value_name='downsampling'))\n self.downsampling_box.setValue(0 if self.mne.downsampling == 'auto'\n else self.mne.downsampling)\n layout.addRow('downsampling', self.downsampling_box)\n\n self.ds_method_cmbx = QComboBox()\n self.ds_method_cmbx.setToolTip(\n '<h2>Downsampling Method</h2>'\n '<ul>'\n '<li>subsample:<br>'\n 'Only take every n-th sample.</li>'\n '<li>mean:<br>'\n 'Take the mean of n samples.</li>'\n '<li>peak:<br>'\n 'Draws a saw wave from the minimum to the maximum from a '\n 'collection of n samples.</li>'\n '</ul>'\n '<i>(Those methods are adapted from '\n 'pyqtgraph)</i><br>'\n 'Default is \"peak\".')\n self.ds_method_cmbx.addItems(['subsample', 'mean', 'peak'])\n self.ds_method_cmbx.currentTextChanged.connect(partial(\n self._value_changed, value_name='ds_method'))\n self.ds_method_cmbx.setCurrentText(\n self.mne.ds_method)\n layout.addRow('ds_method', self.ds_method_cmbx)\n\n self.scroll_sensitivity_slider = QSlider(Qt.Horizontal)\n self.scroll_sensitivity_slider.setMinimum(10)\n self.scroll_sensitivity_slider.setMaximum(1000)\n self.scroll_sensitivity_slider.setToolTip('Set the sensitivity of '\n 'the scrolling in '\n 'horizontal 
direction.')\n        self.scroll_sensitivity_slider.valueChanged.connect(partial(\n            self._value_changed, value_name='scroll_sensitivity'))\n        # Set default\n        self.scroll_sensitivity_slider.setValue(self.mne.scroll_sensitivity)\n        layout.addRow('horizontal scroll sensitivity',\n                      self.scroll_sensitivity_slider)\n        self.setLayout(layout)\n        self.show()\n\n    def closeEvent(self, event):\n        _disconnect(self.ds_method_cmbx.currentTextChanged)\n        _disconnect(self.scroll_sensitivity_slider.valueChanged)\n        super().closeEvent(event)\n\n    def _value_changed(self, new_value, value_name):\n        if value_name == 'downsampling' and new_value == 0:\n            new_value = 'auto'\n\n        setattr(self.mne, value_name, new_value)\n\n        if value_name == 'scroll_sensitivity':\n            self.mne.ax_hscroll._update_scroll_sensitivity()\n        else:\n            self.main._redraw()\n\n\nclass HelpDialog(_BaseDialog):\n    \"\"\"Shows all keyboard-shortcuts.\"\"\"\n\n    def __init__(self, main, **kwargs):\n        super().__init__(main, **kwargs)\n\n        # Show all keyboard-shortcuts in a Scroll-Area\n        layout = QVBoxLayout()\n        keyboard_label = QLabel('Keyboard Shortcuts')\n        keyboard_label.setFont(QFont('AnyStyle', 16, QFont.Bold))\n        layout.addWidget(keyboard_label)\n\n        scroll_area = QScrollArea()\n        scroll_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n        scroll_area.setSizePolicy(QSizePolicy.MinimumExpanding,\n                                  QSizePolicy.MinimumExpanding)\n        scroll_widget = QWidget()\n        form_layout = QFormLayout()\n        for key in main.mne.keyboard_shortcuts:\n            key_dict = main.mne.keyboard_shortcuts[key]\n            if 'description' in key_dict:\n                if 'alias' in key_dict:\n                    key = key_dict['alias']\n                for idx, key_des in enumerate(key_dict['description']):\n                    key_name = key\n                    if 'modifier' in key_dict:\n                        mod = key_dict['modifier'][idx]\n                        if mod is not None:\n                            key_name = mod + ' + ' + key_name\n                    form_layout.addRow(key_name, QLabel(key_des))\n        scroll_widget.setLayout(form_layout)\n        scroll_area.setWidget(scroll_widget)\n        layout.addWidget(scroll_area)\n\n        # Additional help for mouse interaction\n        inst = self.main.mne.instance_type\n        is_raw = inst == 'raw'\n        is_epo = inst == 'epochs'\n        is_ica = inst == 'ica'\n        ch_cmp = 'component' if is_ica else 'channel'\n        ch_epo = 'epoch' if is_epo else 'channel'\n        ica_bad = 'Mark/unmark component for exclusion'\n        lclick_data = ica_bad if is_ica else f'Mark/unmark bad {ch_epo}'\n        lclick_name = (ica_bad if is_ica else 'Mark/unmark bad channel')\n        ldrag = 'add annotation (in annotation mode)' if is_raw else None\n        rclick_name = dict(ica='Show diagnostics for component',\n                           epochs='Show imageplot for channel',\n                           raw='Show channel location')[inst]\n        mouse_help = [(f'Left-click {ch_cmp} name', lclick_name),\n                      (f'Left-click {ch_cmp} data', lclick_data),\n                      ('Left-click-and-drag on plot', ldrag),\n                      ('Left-click on plot background',\n                       'Place vertical guide'),\n                      ('Right-click on plot background',\n                       'Clear vertical guide'),\n                      ('Right-click on channel name', rclick_name)]\n\n        mouse_label = QLabel('Mouse Interaction')\n        mouse_label.setFont(QFont('AnyStyle', 16, QFont.Bold))\n        layout.addWidget(mouse_label)\n        mouse_widget = QWidget()\n        mouse_layout = QFormLayout()\n        for interaction, description in mouse_help:\n            if description is not None:\n                mouse_layout.addRow(f'{interaction}:', QLabel(description))\n        mouse_widget.setLayout(mouse_layout)\n        layout.addWidget(mouse_widget)\n\n        self.setLayout(layout)\n        self.show()\n\n        # Set minimum width to avoid horizontal scrolling\n        scroll_area.setMinimumWidth(scroll_widget.minimumSizeHint().width() +\n                                    scroll_area.verticalScrollBar().width())\n        self.update()\n\n\nclass 
ProjDialog(_BaseDialog):\n \"\"\"A dialog to toggle projections.\"\"\"\n\n def __init__(self, main, **kwargs):\n self.external_change = True\n # Create projection-layout\n super().__init__(main, **kwargs)\n\n layout = QVBoxLayout()\n labels = [p['desc'] for p in self.mne.projs]\n for ix, active in enumerate(self.mne.projs_active):\n if active:\n labels[ix] += ' (already applied)'\n\n # make title\n layout.addWidget(QLabel('Mark projectors applied on the plot.\\n'\n '(Applied projectors are dimmed).'))\n\n # Add checkboxes\n self.checkboxes = list()\n for idx, label in enumerate(labels):\n chkbx = QCheckBox(label)\n chkbx.setChecked(bool(self.mne.projs_on[idx]))\n chkbx.clicked.connect(partial(self._proj_changed, idx=idx))\n if self.mne.projs_active[idx]:\n chkbx.setEnabled(False)\n self.checkboxes.append(chkbx)\n layout.addWidget(chkbx)\n\n self.toggle_all_bt = QPushButton('Toggle All')\n self.toggle_all_bt.clicked.connect(self.toggle_all)\n layout.addWidget(self.toggle_all_bt)\n self.setLayout(layout)\n self.show()\n\n def _proj_changed(self, state, idx):\n # Only change if proj wasn't already applied.\n if not self.mne.projs_active[idx]:\n self.mne.projs_on[idx] = state\n self.main._apply_update_projectors()\n\n def toggle_all(self):\n \"\"\"Toggle all projectors.\"\"\"\n self.main._apply_update_projectors(toggle_all=True)\n\n # Update all checkboxes\n for idx, chkbx in enumerate(self.checkboxes):\n chkbx.setChecked(bool(self.mne.projs_on[idx]))\n\n\nclass _ChannelFig(FigureCanvasQTAgg):\n def __init__(self, figure):\n self.figure = figure\n super().__init__(figure)\n self.setFocusPolicy(Qt.StrongFocus | Qt.WheelFocus)\n self.setFocus()\n self._lasso_path = None\n # Only update when mouse is pressed\n self.setMouseTracking(False)\n\n def paintEvent(self, event):\n super().paintEvent(event)\n # Lasso-Drawing doesn't seem to work with mpl, thus it is replicated\n # in Qt.\n if self._lasso_path is not None:\n painter = QPainter(self)\n painter.setPen(mkPen('red', width=2))\n painter.drawPath(self._lasso_path)\n painter.end()\n\n def mouseMoveEvent(self, event):\n super().mouseMoveEvent(event)\n\n if self._lasso_path is None:\n self._lasso_path = QPainterPath()\n self._lasso_path.moveTo(event.pos())\n else:\n self._lasso_path.lineTo(event.pos())\n\n self.update()\n\n def mouseReleaseEvent(self, event):\n super().mouseReleaseEvent(event)\n self._lasso_path = None\n self.update()\n\n def keyPressEvent(self, event):\n event.ignore()\n\n\nclass SelectionDialog(_BaseDialog):\n def __init__(self, main):\n # Create widget\n super().__init__(main, name='fig_selection',\n title='Channel selection')\n xpos = QApplication.desktop().screenGeometry().width() - 400\n self.setGeometry(xpos, 100, 400, 800)\n\n layout = QVBoxLayout()\n\n # Add channel plot\n fig = _figure_agg(figsize=(6, 6), dpi=96)\n ax = fig.add_axes([0, 0, 1, 1])\n self.channel_fig = plot_sensors(self.mne.info, kind='select',\n ch_type='all', title='',\n ch_groups=self.mne.group_by, axes=ax,\n show=False)[0]\n if hasattr(self.channel_fig.lasso, 'callbacks'):\n # MNE >= 1.0\n self.channel_fig.lasso.callbacks.append(self._set_custom_selection)\n else:\n # MNE <= 0.24\n self.channel_fig.canvas.mpl_connect(\n 'lasso_event', self._set_custom_selection)\n self.channel_widget = _ChannelFig(self.channel_fig)\n layout.addWidget(self.channel_widget)\n\n selections_dict = self.mne.ch_selections\n selections_dict.update(Custom=np.array([], dtype=int)) # for lasso\n\n self.chkbxs = OrderedDict()\n for label in selections_dict:\n chkbx = 
QCheckBox(label)\n chkbx.clicked.connect(partial(self._chkbx_changed, label))\n self.chkbxs[label] = chkbx\n layout.addWidget(chkbx)\n\n self.mne.old_selection = list(selections_dict.keys())[0]\n self.chkbxs[self.mne.old_selection].setChecked(True)\n\n self._update_highlighted_sensors()\n\n # add instructions at bottom\n instructions = (\n 'To use a custom selection, first click-drag on the sensor plot '\n 'to \"lasso\" the sensors you want to select, or hold Ctrl while '\n 'clicking individual sensors. Holding Ctrl while click-dragging '\n 'allows a lasso selection adding to (rather than replacing) the '\n 'existing selection.')\n help_widget = QTextEdit(instructions)\n help_widget.setReadOnly(True)\n layout.addWidget(help_widget)\n\n self.setLayout(layout)\n self.show(center=False)\n\n def _chkbx_changed(self, label):\n # Disable butterfly if checkbox is clicked\n if self.mne.butterfly:\n self.main._set_butterfly(False)\n # Disable other checkboxes\n for chkbx in self.chkbxs.values():\n chkbx.setChecked(False)\n if (label == 'Custom' and\n not len(self.mne.ch_selections['Custom'])):\n label = self.mne.old_selection\n # Select the checkbox no matter if clicked on when active or not\n self.chkbxs[label].setChecked(True)\n # Update selections\n self.mne.old_selection = label\n self.mne.picks = np.asarray(self.mne.ch_selections[label])\n self.mne.n_channels = len(self.mne.picks)\n # Update highlighted sensors\n self._update_highlighted_sensors()\n # if \"Vertex\" is defined, some channels appear twice, so if\n # \"Vertex\" is selected, ch_start should be the *first* match;\n # otherwise it should be the *last* match (since \"Vertex\" is\n # always the first selection group, if it exists).\n if label == 'Custom':\n self.mne.ch_start = 0\n else:\n all_values = list()\n for key, chs in self.mne.ch_selections.items():\n if np.array_equal(chs, self.mne.picks):\n self.mne.ch_start = len(all_values)\n break\n else:\n all_values = np.concatenate([all_values, chs])\n\n # Apply changes on view\n self.mne.plt.setYRange(self.mne.ch_start,\n self.mne.ch_start + self.mne.n_channels + 1,\n padding=0)\n\n # Update scrollbar\n label_idx = list(self.mne.ch_selections.keys()).index(label)\n self.mne.ax_vscroll.update_value(label_idx)\n\n # Update all y-positions, because channels can appear in multiple\n # selections on different y-positions\n for trace in self.mne.traces:\n trace.update_ypos()\n trace.update_data()\n\n def _set_custom_selection(self):\n chs = self.channel_fig.lasso.selection\n inds = np.in1d(self.mne.ch_names, chs)\n self.mne.ch_selections['Custom'] = inds.nonzero()[0]\n if any(inds):\n self._chkbx_changed('Custom')\n\n def _update_highlighted_sensors(self):\n inds = np.in1d(self.mne.fig_selection.channel_fig.lasso.ch_names,\n self.mne.ch_names[self.mne.picks]).nonzero()[0]\n self.channel_fig.lasso.select_many(inds)\n self.channel_widget.draw()\n\n def _update_bad_sensors(self, pick, mark_bad):\n sensor_picks = list()\n ch_indices = channel_indices_by_type(self.mne.info)\n for this_type in _DATA_CH_TYPES_SPLIT:\n if this_type in self.mne.ch_types:\n sensor_picks.extend(ch_indices[this_type])\n sensor_idx = np.in1d(sensor_picks, pick).nonzero()[0]\n # change the sensor color\n fig = self.channel_fig\n fig.lasso.ec[sensor_idx, 0] = float(mark_bad) # change R of RGBA array\n fig.lasso.collection.set_edgecolors(fig.lasso.ec)\n fig.canvas.draw_idle()\n self.channel_widget.draw()\n\n def _style_butterfly(self):\n for key, chkbx in self.chkbxs.items():\n if self.mne.butterfly:\n 
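# In butterfly mode no selection group applies, so all checkboxes are unchecked.\n                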
chkbx.setChecked(False)\n else:\n if key == self.mne.old_selection:\n chkbx.setChecked(True)\n self._update_highlighted_sensors()\n\n def _scroll_selection(self, step):\n name_idx = list(self.mne.ch_selections.keys()).index(\n self.mne.old_selection)\n new_idx = np.clip(name_idx + step,\n 0, len(self.mne.ch_selections) - 1)\n new_label = list(self.mne.ch_selections.keys())[new_idx]\n self._chkbx_changed(new_label)\n\n def _scroll_to_idx(self, idx):\n all_values = list()\n label = list(self.mne.ch_selections.keys())[0]\n for key, values in self.mne.ch_selections.items():\n all_values = np.concatenate([all_values, values])\n if idx < len(all_values):\n label = key\n break\n self._chkbx_changed(label)\n\n def closeEvent(self, event):\n super().closeEvent(event)\n if hasattr(self.channel_fig.lasso, 'callbacks'):\n # MNE >= 1.0\n self.channel_fig.lasso.callbacks.clear()\n for chkbx in self.chkbxs.values():\n _disconnect(chkbx.clicked)\n if hasattr(self, 'main'):\n self.main.close()\n\n\nclass AnnotRegion(LinearRegionItem):\n \"\"\"Graphics-Oobject for Annotations.\"\"\"\n\n regionChangeFinished = pyqtSignal(object)\n gotSelected = pyqtSignal(object)\n removeRequested = pyqtSignal(object)\n\n def __init__(self, mne, description, values):\n super().__init__(values=values, orientation='vertical',\n movable=True, swapMode='sort',\n bounds=(0, mne.xmax))\n # Set default z-value to 0 to be behind other items in scene\n self.setZValue(0)\n\n self.sigRegionChangeFinished.connect(self._region_changed)\n self.mne = mne\n self.description = description\n self.old_onset = values[0]\n self.selected = False\n\n self.label_item = TextItem(text=description, anchor=(0.5, 0.5))\n self.label_item.setFont(QFont('AnyStyle', 10, QFont.Bold))\n self.sigRegionChanged.connect(self.update_label_pos)\n\n self.update_color()\n\n def _region_changed(self):\n self.regionChangeFinished.emit(self)\n self.old_onset = self.getRegion()[0]\n\n def update_color(self):\n \"\"\"Update color of annotation-region.\"\"\"\n color_string = self.mne.annotation_segment_colors[self.description]\n self.base_color = _get_color(color_string)\n self.hover_color = _get_color(color_string)\n self.text_color = _get_color(color_string)\n self.base_color.setAlpha(75)\n self.hover_color.setAlpha(150)\n self.text_color.setAlpha(255)\n self.line_pen = mkPen(color=self.hover_color, width=2)\n self.hover_pen = mkPen(color=self.text_color, width=2)\n self.setBrush(self.base_color)\n self.setHoverBrush(self.hover_color)\n self.label_item.setColor(self.text_color)\n for line in self.lines:\n line.setPen(self.line_pen)\n line.setHoverPen(self.hover_pen)\n self.update()\n\n def update_description(self, description):\n \"\"\"Update description of annoation-region.\"\"\"\n self.description = description\n self.label_item.setText(description)\n self.label_item.update()\n\n def update_visible(self, visible):\n \"\"\"Update if annotation-region is visible.\"\"\"\n self.setVisible(visible)\n self.label_item.setVisible(visible)\n\n def remove(self):\n \"\"\"Remove annotation-region.\"\"\"\n self.removeRequested.emit(self)\n vb = self.mne.viewbox\n if vb and self.label_item in vb.addedItems:\n vb.removeItem(self.label_item)\n\n def select(self, selected):\n \"\"\"Update select-state of annotation-region.\"\"\"\n self.selected = selected\n if selected:\n self.label_item.setColor('w')\n self.label_item.fill = mkBrush(self.hover_color)\n self.gotSelected.emit(self)\n else:\n self.label_item.setColor(self.text_color)\n self.label_item.fill = mkBrush(None)\n 
self.label_item.update()\n\n def mouseClickEvent(self, event):\n \"\"\"Customize mouse click events.\"\"\"\n if self.mne.annotation_mode:\n if event.button() == Qt.LeftButton and self.movable:\n self.select(True)\n event.accept()\n elif event.button() == Qt.RightButton and self.movable:\n self.remove()\n # Propagate remove request to lower annotations if overlapping\n event.ignore()\n else:\n event.ignore()\n\n def update_label_pos(self):\n \"\"\"Update position of description-label from annotation-region.\"\"\"\n rgn = self.getRegion()\n vb = self.mne.viewbox\n if vb:\n ymax = vb.viewRange()[1][1]\n self.label_item.setPos(sum(rgn) / 2, ymax - 0.3)\n\n\nclass _AnnotEditDialog(_BaseDialog):\n def __init__(self, annot_dock):\n super().__init__(annot_dock.main, title='Edit Annotations')\n self.ad = annot_dock\n\n self.current_mode = None\n\n layout = QVBoxLayout()\n self.descr_label = QLabel()\n if self.mne.selected_region:\n self.mode_cmbx = QComboBox()\n self.mode_cmbx.addItems(['all', 'selected'])\n self.mode_cmbx.currentTextChanged.connect(self._mode_changed)\n layout.addWidget(QLabel('Edit Scope:'))\n layout.addWidget(self.mode_cmbx)\n # Set group as default\n self._mode_changed('all')\n\n layout.addWidget(self.descr_label)\n self.input_w = QLineEdit()\n layout.addWidget(self.input_w)\n bt_layout = QHBoxLayout()\n ok_bt = QPushButton('Ok')\n ok_bt.clicked.connect(self._edit)\n bt_layout.addWidget(ok_bt)\n cancel_bt = QPushButton('Cancel')\n cancel_bt.clicked.connect(self.close)\n bt_layout.addWidget(cancel_bt)\n layout.addLayout(bt_layout)\n self.setLayout(layout)\n self.show()\n\n def _mode_changed(self, mode):\n self.current_mode = mode\n if mode == 'all':\n curr_des = self.ad.description_cmbx.currentText()\n else:\n curr_des = self.mne.selected_region.description\n self.descr_label.setText(f'Change \"{curr_des}\" to:')\n\n def _edit(self):\n new_des = self.input_w.text()\n if new_des:\n if self.current_mode == 'all' or self.mne.selected_region is None:\n self.ad._edit_description_all(new_des)\n else:\n self.ad._edit_description_selected(new_des)\n self.close()\n\n\nclass AnnotationDock(QDockWidget):\n \"\"\"Dock-Window for Management of annotations.\"\"\"\n\n def __init__(self, main):\n super().__init__('Annotations')\n self.main = main\n self.mne = main.mne\n self._init_ui()\n\n self.setFeatures(QDockWidget.DockWidgetMovable |\n QDockWidget.DockWidgetFloatable)\n\n def _init_ui(self):\n widget = QWidget()\n layout = QHBoxLayout()\n layout.setAlignment(Qt.AlignLeft)\n\n self.description_cmbx = QComboBox()\n self.description_cmbx.setSizeAdjustPolicy(QComboBox.AdjustToContents)\n self.description_cmbx.activated.connect(self._description_changed)\n self._update_description_cmbx()\n layout.addWidget(self.description_cmbx)\n\n add_bt = QPushButton('Add Description')\n add_bt.clicked.connect(self._add_description_dlg)\n layout.addWidget(add_bt)\n\n rm_bt = QPushButton('Remove Description')\n rm_bt.clicked.connect(self._remove_description_dlg)\n layout.addWidget(rm_bt)\n\n edit_bt = QPushButton('Edit Description')\n edit_bt.clicked.connect(self._edit_description_dlg)\n layout.addWidget(edit_bt)\n\n # Uncomment when custom colors for annotations are implemented in\n # MNE-Python.\n # color_bt = QPushButton('Edit Color')\n # color_bt.clicked.connect(self._set_color)\n # layout.addWidget(color_bt)\n\n select_bt = QPushButton('Select Visible')\n select_bt.clicked.connect(self._select_annotations)\n layout.addWidget(select_bt)\n\n # Determine reasonable time decimals from sampling frequency.\n 
time_decimals = int(np.ceil(np.log10(self.mne.info['sfreq'])))\n\n layout.addWidget(QLabel('Start:'))\n self.start_bx = QDoubleSpinBox()\n self.start_bx.setDecimals(time_decimals)\n self.start_bx.editingFinished.connect(self._start_changed)\n layout.addWidget(self.start_bx)\n\n layout.addWidget(QLabel('Stop:'))\n self.stop_bx = QDoubleSpinBox()\n self.stop_bx.setDecimals(time_decimals)\n self.stop_bx.editingFinished.connect(self._stop_changed)\n layout.addWidget(self.stop_bx)\n\n help_bt = QPushButton(QIcon(\":/help.svg\"), 'Help')\n help_bt.clicked.connect(self._show_help)\n layout.addWidget(help_bt)\n\n widget.setLayout(layout)\n self.setWidget(widget)\n\n def _add_description_to_cmbx(self, description):\n color_pixmap = QPixmap(25, 25)\n color = _get_color(self.mne.annotation_segment_colors[description])\n color.setAlpha(75)\n color_pixmap.fill(color)\n color_icon = QIcon(color_pixmap)\n self.description_cmbx.addItem(color_icon, description)\n\n def _add_description(self, new_description):\n self.mne.new_annotation_labels.append(new_description)\n self.mne.visible_annotations[new_description] = True\n self.main._setup_annotation_colors()\n self._add_description_to_cmbx(new_description)\n self.mne.current_description = new_description\n self.description_cmbx.setCurrentText(new_description)\n\n def _add_description_dlg(self):\n new_description, ok = QInputDialog.getText(self,\n 'Set new description!',\n 'New description: ')\n if ok and new_description \\\n and new_description not in self.mne.new_annotation_labels:\n self._add_description(new_description)\n\n def _edit_description_all(self, new_des):\n \"\"\"Update descriptions of all annotations with the same description.\"\"\"\n old_des = self.description_cmbx.currentText()\n edit_regions = [r for r in self.mne.regions\n if r.description == old_des]\n # Update regions & annotations\n for ed_region in edit_regions:\n idx = self.main._get_onset_idx(ed_region.getRegion()[0])\n self.mne.inst.annotations.description[idx] = new_des\n ed_region.update_description(new_des)\n # Update containers with annotation-attributes\n self.mne.new_annotation_labels.remove(old_des)\n self.mne.new_annotation_labels = self.main._get_annotation_labels()\n self.mne.visible_annotations[new_des] = \\\n self.mne.visible_annotations.pop(old_des)\n self.mne.annotation_segment_colors[new_des] = \\\n self.mne.annotation_segment_colors.pop(old_des)\n\n # Update related widgets\n self.main._setup_annotation_colors()\n self._update_regions_colors()\n self._update_description_cmbx()\n self.mne.overview_bar.update_annotations()\n\n def _edit_description_selected(self, new_des):\n \"\"\"Update description only of selected region.\"\"\"\n old_des = self.mne.selected_region.description\n idx = self.main._get_onset_idx(self.mne.selected_region.getRegion()[0])\n # Update regions & annotations\n self.mne.inst.annotations.description[idx] = new_des\n self.mne.selected_region.update_description(new_des)\n # Update containers with annotation-attributes\n if new_des not in self.mne.new_annotation_labels:\n self.mne.new_annotation_labels.append(new_des)\n self.mne.visible_annotations[new_des] = \\\n copy(self.mne.visible_annotations[old_des])\n if old_des not in self.mne.inst.annotations.description:\n self.mne.new_annotation_labels.remove(old_des)\n self.mne.visible_annotations.pop(old_des)\n self.mne.annotation_segment_colors[new_des] = \\\n self.mne.annotation_segment_colors.pop(old_des)\n\n # Update related widgets\n self.main._setup_annotation_colors()\n 
self._update_regions_colors()\n self._update_description_cmbx()\n self.mne.overview_bar.update_annotations()\n\n def _edit_description_dlg(self):\n if len(self.mne.inst.annotations.description) > 0:\n _AnnotEditDialog(self)\n else:\n self.main.message_box(text='No Annotations!',\n info_text='There are no annotations '\n 'yet to edit!',\n icon=QMessageBox.Information)\n\n def _remove_description(self, rm_description):\n # Remove regions\n for rm_region in [r for r in self.mne.regions\n if r.description == rm_description]:\n rm_region.remove()\n\n # Remove from descriptions\n self.mne.new_annotation_labels.remove(rm_description)\n self._update_description_cmbx()\n\n # Remove from visible annotations\n self.mne.visible_annotations.pop(rm_description)\n\n # Remove from color-mapping\n if rm_description in self.mne.annotation_segment_colors:\n self.mne.annotation_segment_colors.pop(rm_description)\n\n # Set first description in Combo-Box to current description\n if self.description_cmbx.count() > 0:\n self.description_cmbx.setCurrentIndex(0)\n self.mne.current_description = \\\n self.description_cmbx.currentText()\n\n def _remove_description_dlg(self):\n rm_description = self.description_cmbx.currentText()\n existing_annot = list(self.mne.inst.annotations.description).count(\n rm_description)\n if existing_annot > 0:\n text = f'Remove annotations with {rm_description}?'\n info_text = f'There exist {existing_annot} annotations with ' \\\n f'\"{rm_description}\".\\n' \\\n f'Do you really want to remove them?'\n buttons = QMessageBox.Yes | QMessageBox.No\n ans = self.main.message_box(text=text, info_text=info_text,\n buttons=buttons,\n default_button=QMessageBox.Yes,\n icon=QMessageBox.Question)\n else:\n ans = QMessageBox.Yes\n\n if ans == QMessageBox.Yes:\n self._remove_description(rm_description)\n\n def _select_annotations(self):\n def _set_visible_region(state, description):\n self.mne.visible_annotations[description] = bool(state)\n\n def _select_all():\n for chkbx in chkbxs:\n chkbx.setChecked(True)\n\n def _clear_all():\n for chkbx in chkbxs:\n chkbx.setChecked(False)\n\n select_dlg = QDialog(self)\n chkbxs = list()\n layout = QVBoxLayout()\n layout.addWidget(QLabel('Select visible labels:'))\n\n # Add descriptions to scroll-area to be scalable.\n scroll_area = QScrollArea()\n scroll_widget = QWidget()\n scroll_layout = QVBoxLayout()\n\n for des in self.mne.visible_annotations:\n chkbx = QCheckBox(des)\n chkbx.setChecked(self.mne.visible_annotations[des])\n chkbx.stateChanged.connect(partial(_set_visible_region,\n description=des))\n chkbxs.append(chkbx)\n scroll_layout.addWidget(chkbx)\n\n scroll_widget.setLayout(scroll_layout)\n scroll_area.setWidget(scroll_widget)\n layout.addWidget(scroll_area)\n\n bt_layout = QGridLayout()\n\n all_bt = QPushButton('All')\n all_bt.clicked.connect(_select_all)\n bt_layout.addWidget(all_bt, 0, 0)\n\n clear_bt = QPushButton('Clear')\n clear_bt.clicked.connect(_clear_all)\n bt_layout.addWidget(clear_bt, 0, 1)\n\n ok_bt = QPushButton('Ok')\n ok_bt.clicked.connect(select_dlg.close)\n bt_layout.addWidget(ok_bt, 1, 0, 1, 2)\n\n layout.addLayout(bt_layout)\n\n select_dlg.setLayout(layout)\n select_dlg.exec()\n\n self.main._update_regions_visible()\n\n def _description_changed(self, descr_idx):\n new_descr = self.description_cmbx.itemText(descr_idx)\n self.mne.current_description = new_descr\n\n def _start_changed(self):\n start = self.start_bx.value()\n sel_region = self.mne.selected_region\n if sel_region:\n stop = sel_region.getRegion()[1]\n if start < 
stop:\n sel_region.setRegion((start, stop))\n else:\n self.main.message_box(text='Invalid value!',\n info_text='Start can\\'t be bigger or '\n 'equal to Stop!',\n icon=QMessageBox.Critical,\n modal=False)\n self.start_bx.setValue(sel_region.getRegion()[0])\n\n def _stop_changed(self):\n stop = self.stop_bx.value()\n sel_region = self.mne.selected_region\n if sel_region:\n start = sel_region.getRegion()[0]\n if start < stop:\n sel_region.setRegion((start, stop))\n else:\n self.main.message_box(text='Invalid value!',\n info_text='Stop can\\'t be smaller or '\n 'equal to Start!',\n icon=QMessageBox.Critical)\n self.stop_bx.setValue(sel_region.getRegion()[1])\n\n def _set_color(self):\n curr_descr = self.description_cmbx.currentText()\n if curr_descr in self.mne.annotation_segment_colors:\n curr_col = self.mne.annotation_segment_colors[curr_descr]\n else:\n curr_col = None\n color = QColorDialog.getColor(_get_color(curr_col), self,\n f'Choose color for {curr_descr}!')\n if color.isValid():\n self.mne.annotation_segment_colors[curr_descr] = color\n self._update_regions_colors()\n self._update_description_cmbx()\n self.mne.overview_bar.update_annotations()\n\n def update_values(self, region):\n \"\"\"Update spinbox-values from region.\"\"\"\n rgn = region.getRegion()\n self.start_bx.setValue(rgn[0])\n self.stop_bx.setValue(rgn[1])\n\n def _update_description_cmbx(self):\n self.description_cmbx.clear()\n descriptions = self.main._get_annotation_labels()\n for description in descriptions:\n self._add_description_to_cmbx(description)\n self.description_cmbx.setCurrentText(self.mne.current_description)\n\n def _update_regions_colors(self):\n for region in self.mne.regions:\n region.update_color()\n\n def reset(self):\n \"\"\"Reset to default state.\"\"\"\n if self.description_cmbx.count() > 0:\n self.description_cmbx.setCurrentIndex(0)\n self.mne.current_description = self.description_cmbx.currentText()\n self.start_bx.setValue(0)\n self.stop_bx.setValue(0)\n\n def _show_help(self):\n info_text = '<h1>Help</h1>' \\\n '<h2>Annotations</h2>' \\\n '<h3>Add Annotations</h3>' \\\n 'Drag inside the data-view to create annotations with '\\\n 'the description currently selected (leftmost item of '\\\n 'the toolbar).If there is no description yet, add one ' \\\n 'with the button \"Add description\".' \\\n '<h3>Remove Annotations</h3>' \\\n 'You can remove single annotations by right-clicking on '\\\n 'them.' \\\n '<h3>Edit Annotations</h3>' \\\n 'You can edit annotations by dragging them or their '\\\n 'boundaries. Or you can use the dials in the toolbar to '\\\n 'adjust the boundaries for the current selected '\\\n 'annotation.' \\\n '<h2>Descriptions</h2>' \\\n '<h3>Add Description</h3>' \\\n 'Add a new description with ' \\\n 'the button \"Add description\".' \\\n '<h3>Edit Description</h3>' \\\n 'You can edit the description of one single annotation '\\\n 'or all annotations of the currently selected kind with '\\\n 'the button \"Edit description\".' 
\\\n '<h3>Remove Description</h3>' \\\n 'You can remove all annotations of the currently '\\\n 'selected kind with the button \"Remove description\".'\n self.main.message_box(text='Annotations-Help',\n info_text=info_text,\n icon=QMessageBox.Information)\n\n\nclass BrowserView(GraphicsView):\n \"\"\"Customized View as part of GraphicsView-Framework.\"\"\"\n\n def __init__(self, plot, **kwargs):\n super().__init__(**kwargs)\n self.setCentralItem(plot)\n self.viewport().setAttribute(Qt.WA_AcceptTouchEvents, True)\n\n self.viewport().grabGesture(Qt.PinchGesture)\n self.viewport().grabGesture(Qt.SwipeGesture)\n\n # def viewportEvent(self, event):\n # \"\"\"Customize viewportEvent for touch-gestures (WIP).\"\"\"\n # if event.type() in [QEvent.TouchBegin, QEvent.TouchUpdate,\n # QEvent.TouchEnd]:\n # if event.touchPoints() == 2:\n # pass\n # elif event.type() == QEvent.Gesture:\n # print('Gesture')\n # return super().viewportEvent(event)\n\n def mouseMoveEvent(self, ev):\n \"\"\"Customize MouseMoveEvent.\"\"\"\n # Don't set GraphicsView.mouseEnabled to True,\n # we only want part of the functionality pyqtgraph offers here.\n super().mouseMoveEvent(ev)\n self.sigSceneMouseMoved.emit(ev.pos())\n\n\nclass LoadThread(QThread):\n \"\"\"A worker object for precomputing in a separate QThread.\"\"\"\n loadProgress = pyqtSignal(int)\n processText = pyqtSignal(str)\n loadingFinished = pyqtSignal()\n\n def __init__(self, browser):\n super().__init__()\n self.browser = browser\n self.mne = browser.mne\n self.loadProgress.connect(self.mne.load_progressbar.setValue)\n self.processText.connect(self.browser._show_process)\n self.loadingFinished.connect(self.browser._precompute_finished)\n\n def run(self):\n \"\"\"Load and process data in a separate QThread.\"\"\"\n # Split data loading into 10 chunks to show user progress.\n # Testing showed that e.g. n_chunks=100 extends loading time\n # (at least for the sample dataset)\n # because of the frequent gui-update-calls.\n # Thus n_chunks = 10 should suffice.\n data = None\n if self.mne.is_epochs:\n times = np.arange(len(self.mne.inst) * len(self.mne.inst.times)) \\\n / self.mne.info['sfreq']\n else:\n times = None\n n_chunks = min(10, len(self.mne.inst))\n chunk_size = len(self.mne.inst) // n_chunks\n for n in range(n_chunks):\n start = n * chunk_size\n if n == n_chunks - 1:\n # Get last chunk which may be larger due to rounding above\n stop = None\n else:\n stop = start + chunk_size\n # Load epochs\n if self.mne.is_epochs:\n item = slice(start, stop)\n with self.mne.inst.info._unlock():\n data_chunk = np.concatenate(\n self.mne.inst.get_data(item=item), axis=-1)\n # Load raw\n else:\n data_chunk, times_chunk = self.browser._load_data(start, stop)\n if times is None:\n times = times_chunk\n else:\n times = np.concatenate((times, times_chunk), axis=0)\n\n if data is None:\n data = data_chunk\n else:\n data = np.concatenate((data, data_chunk), axis=1)\n\n self.loadProgress.emit(n + 1)\n\n picks = self.mne.ch_order\n # Deactive remove dc because it will be removed for visible range\n stashed_remove_dc = self.mne.remove_dc\n self.mne.remove_dc = False\n data = self.browser._process_data(data, 0, len(data), picks, self)\n self.mne.remove_dc = stashed_remove_dc\n\n self.mne.global_data = data\n self.mne.global_times = times\n\n # Calculate Z-Scores\n self.processText.emit('Calculating Z-Scores...')\n self.browser._get_zscore(data)\n\n self.loadingFinished.emit()\n\n def clean(self):\n if self.isRunning():\n wait_time = 10 # max. 
waiting time in seconds\n logger.info('Waiting for Loading-Thread to finish... '\n f'(max. {wait_time} sec)')\n self.wait(int(wait_time * 1e3))\n _disconnect(self.loadProgress)\n _disconnect(self.processText)\n _disconnect(self.loadingFinished)\n del self.mne\n del self.browser\n\n\nclass _FastToolTipComboBox(QComboBox):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setMouseTracking(True)\n\n def setToolTip(self, tooltip):\n self.tooltip = tooltip\n\n def enterEvent(self, event):\n QToolTip.showText(event.globalPos(), self.tooltip)\n super().enterEvent(event)\n\n\nclass _PGMetaClass(type(BrowserBase), type(QMainWindow)):\n \"\"\"Class is necessary to prevent a metaclass conflict.\n\n The conflict arises due to the different types of QMainWindow and\n BrowserBase.\n \"\"\"\n\n pass\n\n\n# Those are the settings which are stored on each device\n# depending on its operating system with QSettings.\n\nqsettings_params = {\n # Antialiasing (works with/without OpenGL, integer because QSettings\n # can't handle booleans)\n 'antialiasing': False,\n # Steps per view (relative to time)\n 'scroll_sensitivity': 100,\n # Downsampling-Factor (or 'auto', see SettingsDialog for details)\n 'downsampling': 1,\n # Downsampling-Method (set SettingsDialog for details)\n 'ds_method': 'peak'\n}\n\n\ndef _disconnect(sig):\n try:\n sig.disconnect()\n except TypeError: # if there are no connections, ignore it\n pass\n\n\nclass PyQtGraphBrowser(BrowserBase, QMainWindow, metaclass=_PGMetaClass):\n \"\"\"A PyQtGraph-backend for 2D data browsing.\"\"\"\n\n gotClosed = pyqtSignal()\n\n def __init__(self, **kwargs):\n self.backend_name = 'pyqtgraph'\n\n BrowserBase.__init__(self, **kwargs)\n QMainWindow.__init__(self)\n\n # Add to list to keep a reference and avoid premature\n # garbage-collection.\n _browser_instances.append(self)\n\n if self.mne.window_title is not None:\n self.setWindowTitle(self.mne.window_title)\n\n # Initialize attributes which are only used by pyqtgraph, not by\n # matplotlib and add them to MNEBrowseParams.\n\n # Exactly one MessageBox for messages to facilitate testing/debugging\n self.msg_box = QMessageBox(self)\n # MessageBox modality needs to be adapted for tests\n # (otherwise test execution blocks)\n self.test_mode = False\n # A Settings-Dialog\n self.mne.fig_settings = None\n # Stores decimated data\n self.mne.decim_data = None\n self.mne.decim_times = None\n # Stores ypos for selection-mode\n self.mne.selection_ypos_dict = dict()\n # Parameters for precomputing\n self.mne.enable_precompute = False\n self.mne.data_precomputed = False\n self._rerun_load_thread = False\n # Parameters for overviewbar\n self.mne.show_overview_bar = True\n self.mne.overview_mode = 'channels'\n self.mne.zscore_rgba = None\n # Container for traces\n self.mne.traces = list()\n # Scale-Factor\n self.mne.scale_factor = 1\n # Stores channel-types for butterfly-mode\n self.mne.butterfly_type_order = [tp for tp in\n _DATA_CH_TYPES_ORDER_DEFAULT\n if tp in self.mne.ch_types]\n if self.mne.is_epochs:\n # Stores parameters for epochs\n self.mne.epoch_dur = np.diff(self.mne.boundary_times[:2])[0]\n epoch_idx = np.searchsorted(self.mne.midpoints,\n (self.mne.t_start,\n self.mne.t_start + self.mne.duration))\n self.mne.epoch_idx = np.arange(epoch_idx[0], epoch_idx[1])\n\n # Load from QSettings if available\n for qparam in qsettings_params:\n default = qsettings_params[qparam]\n qvalue = QSettings().value(qparam, defaultValue=default)\n # QSettings may alter types depending on OS\n if not 
isinstance(qvalue, type(default)):\n try:\n qvalue = literal_eval(qvalue)\n except (SyntaxError, ValueError):\n if qvalue in ['true', 'false']:\n qvalue = bool(qvalue)\n else:\n qvalue = default\n setattr(self.mne, qparam, qvalue)\n\n # Initialize channel-colors for faster indexing later\n self.mne.ch_color_ref = dict()\n for idx, ch_name in enumerate(self.mne.ch_names):\n ch_type = self.mne.ch_types[idx]\n self.mne.ch_color_ref[ch_name] = self.mne.ch_color_dict[ch_type]\n\n # Initialize epoch colors for faster indexing later\n if self.mne.is_epochs:\n if self.mne.epoch_colors is None:\n self.mne.epoch_color_ref = \\\n np.repeat([to_rgba_array(c) for c\n in self.mne.ch_color_ref.values()],\n len(self.mne.inst), axis=1)\n else:\n self.mne.epoch_color_ref = np.empty((len(self.mne.ch_names),\n len(self.mne.inst), 4))\n for epo_idx, epo in enumerate(self.mne.epoch_colors):\n for ch_idx, color in enumerate(epo):\n self.mne.epoch_color_ref[ch_idx, epo_idx] = \\\n to_rgba_array(color)\n\n # Mark bad epochs\n self.mne.epoch_color_ref[:, self.mne.bad_epochs] = \\\n to_rgba_array(self.mne.epoch_color_bad)\n\n # Mark bad channels\n bad_idxs = np.in1d(self.mne.ch_names, self.mne.info['bads'])\n self.mne.epoch_color_ref[bad_idxs, :] = \\\n to_rgba_array(self.mne.ch_color_bad)\n\n # Add Load-Progressbar for loading in a thread\n self.mne.load_prog_label = QLabel('Loading...')\n self.statusBar().addWidget(self.mne.load_prog_label)\n self.mne.load_prog_label.hide()\n self.mne.load_progressbar = QProgressBar()\n # Set to n_chunks of LoadRunner\n self.mne.load_progressbar.setMaximum(10)\n self.statusBar().addWidget(self.mne.load_progressbar, stretch=1)\n self.mne.load_progressbar.hide()\n\n # A QThread for preloading\n self.load_thread = LoadThread(self)\n\n # Create centralWidget and layout\n widget = QWidget()\n layout = QGridLayout()\n\n # Initialize Axis-Items\n self.mne.time_axis = TimeAxis(self.mne)\n self.mne.time_axis.setLabel(text='Time', units='s')\n self.mne.channel_axis = ChannelAxis(self)\n self.mne.viewbox = RawViewBox(self)\n\n # Start precomputing if enabled\n self._init_precompute()\n\n # Initialize data (needed in DataTrace.update_data).\n self._update_data()\n\n # Initialize Trace-Plot\n self.mne.plt = PlotItem(viewBox=self.mne.viewbox,\n axisItems={'bottom': self.mne.time_axis,\n 'left': self.mne.channel_axis})\n # Hide AutoRange-Button\n self.mne.plt.hideButtons()\n # Configure XY-Range\n if self.mne.is_epochs:\n self.mne.xmax = len(self.mne.inst.times) * len(self.mne.inst) \\\n / self.mne.info['sfreq']\n else:\n self.mne.xmax = self.mne.inst.times[-1]\n # Add one empty line as padding at top (y=0).\n # Negative Y-Axis to display channels from top.\n self.mne.ymax = len(self.mne.ch_order) + 1\n self.mne.plt.setLimits(xMin=0, xMax=self.mne.xmax,\n yMin=0, yMax=self.mne.ymax)\n # Connect Signals from PlotItem\n self.mne.plt.sigXRangeChanged.connect(self._xrange_changed)\n self.mne.plt.sigYRangeChanged.connect(self._yrange_changed)\n\n # Add traces\n for ch_idx in self.mne.picks:\n DataTrace(self, ch_idx)\n\n # Initialize Epochs Grid\n if self.mne.is_epochs:\n grid_pen = mkPen(color='k', width=2, style=Qt.DashLine)\n for x_grid in self.mne.boundary_times[1:-1]:\n grid_line = InfiniteLine(pos=x_grid,\n pen=grid_pen,\n movable=False)\n self.mne.plt.addItem(grid_line)\n\n # Add events\n if getattr(self.mne, 'event_nums', None) is not None:\n self.mne.events_visible = True\n for ev_time, ev_id in zip(self.mne.event_times,\n self.mne.event_nums):\n color = self.mne.event_color_dict[ev_id]\n 
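# Keep a reference to every event line; only events inside the initial time window (0 to duration) are added to the plot right away.\n                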
event_line = EventLine(ev_time, ev_id, color)\n                self.mne.event_lines.append(event_line)\n\n                if 0 < ev_time < self.mne.duration:\n                    self.mne.plt.addItem(event_line)\n        else:\n            self.mne.events_visible = False\n\n        # Add Scale-Bars\n        self._add_scalebars()\n\n        # Check for OpenGL\n        if self.mne.use_opengl is None:  # default: opt-in\n            self.mne.use_opengl = (\n                get_config('MNE_BROWSE_USE_OPENGL', '').lower() == 'true')\n\n        # Epochs currently only work with OpenGL enabled\n        # (https://github.com/mne-tools/mne-qt-browser/issues/53)\n        mac_epochs = self.mne.is_epochs and sys.platform == 'darwin'\n        if mac_epochs:\n            self.mne.use_opengl = True\n\n        if self.mne.use_opengl:\n            try:\n                import OpenGL\n            except (ModuleNotFoundError, ImportError):\n                warn('PyOpenGL was not found and OpenGL can\'t be used!\n'\n                     'Consider installing pyopengl with pip or conda '\n                     'or set \"use_opengl\" to False to avoid this warning.')\n                if mac_epochs:\n                    warn('Plotting epochs on MacOS without OpenGL '\n                         'may be unstable!')\n                self.mne.use_opengl = False\n            else:\n                logger.info(\n                    f'Using pyopengl with version {OpenGL.__version__}')\n        # Initialize BrowserView (inherits QGraphicsView)\n        self.mne.view = BrowserView(self.mne.plt,\n                                    useOpenGL=self.mne.use_opengl,\n                                    background='w')\n        if hasattr(self.mne, 'bgcolor'):\n            bgcolor = self.mne.bgcolor\n        else:\n            bgcolor = 'w'\n        self.mne.view.setBackground(_get_color(bgcolor))\n        layout.addWidget(self.mne.view, 0, 0)\n\n        # Initialize Scroll-Bars\n        self.mne.ax_hscroll = TimeScrollBar(self.mne)\n        layout.addWidget(self.mne.ax_hscroll, 1, 0, 1, 2)\n\n        self.mne.ax_vscroll = ChannelScrollBar(self.mne)\n        layout.addWidget(self.mne.ax_vscroll, 0, 1)\n\n        # Initialize VLine\n        self.mne.vline = None\n        self.mne.vline_visible = False\n\n        # Initialize crosshair (as in pyqtgraph example)\n        self.mne.crosshair_enabled = False\n        self.mne.crosshair_h = None\n        self.mne.crosshair = None\n        self.mne.view.sigSceneMouseMoved.connect(self._mouse_moved)\n\n        # Initialize Annotation-Widgets\n        self.mne.annotation_mode = False\n        if not self.mne.is_epochs:\n            self._init_annot_mode()\n\n        # OverviewBar\n        self.mne.overview_bar = OverviewBar(self)\n        layout.addWidget(self.mne.overview_bar, 2, 0, 1, 2)\n\n        # Add Combobox to select Overview-Mode\n        self.overview_mode_chkbx = _FastToolTipComboBox()\n        self.overview_mode_chkbx.addItems(['empty', 'channels'])\n        tooltip = (\n            '<h2>Overview-Modes</h2>'\n            '<ul>'\n            '<li>empty:<br>'\n            'Display no background.</li>'\n            '<li>channels:<br>'\n            'Display each channel with its channel-type color.</li>'\n            '<li>zscore:<br>'\n            'Display the zscore for the data from each channel across time. 
'\n 'Red indicates high zscores, blue indicates low zscores, '\n 'and the boundaries of the color gradient are defined by the '\n 'minimum/maximum zscore.'\n 'This only works if precompute is set to \"True\", or if it is '\n 'enabled with \"auto\" and enough free RAM is available.</li>'\n '</ul>')\n self.overview_mode_chkbx.setToolTip(tooltip)\n if self.mne.enable_precompute:\n self.overview_mode_chkbx.addItems(['zscore'])\n self.overview_mode_chkbx.setCurrentText(self.mne.overview_mode)\n self.overview_mode_chkbx.currentTextChanged.connect(\n self._overview_mode_changed)\n # Avoid taking keyboard-focus\n self.overview_mode_chkbx.setFocusPolicy(Qt.NoFocus)\n overview_mode_layout = QHBoxLayout()\n overview_mode_layout.addWidget(QLabel('Overview-Mode:'))\n overview_mode_layout.addWidget(self.overview_mode_chkbx)\n overview_mode_widget = QWidget()\n overview_mode_widget.setLayout(overview_mode_layout)\n self.statusBar().addPermanentWidget(overview_mode_widget)\n\n widget.setLayout(layout)\n self.setCentralWidget(widget)\n\n # Initialize Selection-Dialog\n if getattr(self.mne, 'group_by', None) in ['position', 'selection']:\n self._create_selection_fig()\n\n # Initialize Projectors-Dialog if show_options=True\n if getattr(self.mne, 'show_options', False):\n self._toggle_proj_fig()\n\n # Initialize Toolbar\n self.mne.toolbar = self.addToolBar('Tools')\n self.mne.toolbar.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)\n\n adecr_time = QAction(QIcon(\":/less_time.svg\"), '- Time', parent=self)\n adecr_time.triggered.connect(partial(self.change_duration, -0.2))\n self.mne.toolbar.addAction(adecr_time)\n\n aincr_time = QAction(QIcon(\":/more_time.svg\"), '+ Time', parent=self)\n aincr_time.triggered.connect(partial(self.change_duration, 0.25))\n self.mne.toolbar.addAction(aincr_time)\n\n adecr_nchan = QAction(QIcon(\":/less_channels.svg\"), '- Channels',\n parent=self)\n adecr_nchan.triggered.connect(partial(self.change_nchan, -10))\n self.mne.toolbar.addAction(adecr_nchan)\n\n aincr_nchan = QAction(QIcon(\":/more_channels.svg\"), '+ Channels',\n parent=self)\n aincr_nchan.triggered.connect(partial(self.change_nchan, 10))\n self.mne.toolbar.addAction(aincr_nchan)\n\n adecr_nchan = QAction(QIcon(\":/zoom_out.svg\"), 'Zoom Out', parent=self)\n adecr_nchan.triggered.connect(partial(self.scale_all, 4 / 5))\n self.mne.toolbar.addAction(adecr_nchan)\n\n aincr_nchan = QAction(QIcon(\":/zoom_in.svg\"), 'Zoom In', parent=self)\n aincr_nchan.triggered.connect(partial(self.scale_all, 5 / 4))\n self.mne.toolbar.addAction(aincr_nchan)\n\n if not self.mne.is_epochs:\n atoggle_annot = QAction(QIcon(\":/annotations.svg\"), 'Annotations',\n parent=self)\n atoggle_annot.triggered.connect(self._toggle_annotation_fig)\n self.mne.toolbar.addAction(atoggle_annot)\n\n atoggle_proj = QAction(QIcon(\":/ssp.svg\"), 'SSP', parent=self)\n atoggle_proj.triggered.connect(self._toggle_proj_fig)\n self.mne.toolbar.addAction(atoggle_proj)\n\n atoggle_fullscreen = QAction(QIcon(\":/fullscreen.svg\"), 'Fullscreen',\n parent=self)\n atoggle_fullscreen.triggered.connect(self._toggle_fullscreen)\n self.mne.toolbar.addAction(atoggle_fullscreen)\n\n asettings = QAction(QIcon(\":/settings.svg\"), 'Settings',\n parent=self)\n asettings.triggered.connect(self._toggle_settings_fig)\n self.mne.toolbar.addAction(asettings)\n\n ahelp = QAction(QIcon(\":/help.svg\"), 'Help', parent=self)\n ahelp.triggered.connect(self._toggle_help_fig)\n self.mne.toolbar.addAction(ahelp)\n\n # Set Start-Range (after all necessary elements are initialized)\n 
self.mne.plt.setXRange(self.mne.t_start,\n self.mne.t_start + self.mne.duration,\n padding=0)\n if self.mne.butterfly:\n self._set_butterfly(True)\n else:\n self.mne.plt.setYRange(0, self.mne.n_channels + 1, padding=0)\n\n # Set Size\n width = int(self.mne.figsize[0] * self.logicalDpiX())\n height = int(self.mne.figsize[1] * self.logicalDpiY())\n self.resize(width, height)\n\n # Initialize Keyboard-Shortcuts\n is_mac = platform.system() == 'Darwin'\n dur_keys = ('fn + ←', 'fn + →') if is_mac else ('Home', 'End')\n ch_keys = ('fn + ↑', 'fn + ↓') if is_mac else ('Page up', 'Page down')\n hscroll_type = '1 epoch' if self.mne.is_epochs else '¼ page'\n self.mne.keyboard_shortcuts = {\n 'left': {\n 'alias': '←',\n 'qt_key': Qt.Key_Left,\n 'modifier': [None, 'Shift'],\n 'slot': [self.hscroll],\n 'parameter': [-40, '-full'],\n 'description': [f'Scroll left ({hscroll_type})',\n 'Scroll left (full page)']\n },\n 'right': {\n 'alias': '→',\n 'qt_key': Qt.Key_Right,\n 'modifier': [None, 'Shift'],\n 'slot': [self.hscroll],\n 'parameter': [40, '+full'],\n 'description': [f'Scroll right ({hscroll_type})',\n 'Scroll right (full page)']\n },\n 'up': {\n 'alias': '↑',\n 'qt_key': Qt.Key_Up,\n 'slot': [self.vscroll],\n 'parameter': ['-full'],\n 'description': ['Scroll up (full page)']\n },\n 'down': {\n 'alias': '↓',\n 'qt_key': Qt.Key_Down,\n 'slot': [self.vscroll],\n 'parameter': ['+full'],\n 'description': ['Scroll down (full page)']\n },\n 'home': {\n 'alias': dur_keys[0],\n 'qt_key': Qt.Key_Home,\n 'slot': [self.change_duration],\n 'parameter': [-0.2],\n 'description': [f'Decrease duration ({hscroll_type})']\n },\n 'end': {\n 'alias': dur_keys[1],\n 'qt_key': Qt.Key_End,\n 'slot': [self.change_duration],\n 'parameter': [0.25],\n 'description': [f'Increase duration ({hscroll_type})']\n },\n 'pagedown': {\n 'alias': ch_keys[0],\n 'qt_key': Qt.Key_PageDown,\n 'modifier': [None, 'Shift'],\n 'slot': [self.change_nchan],\n 'parameter': [-1, -10],\n 'description': ['Decrease shown channels (1)',\n 'Decrease shown channels (10)']\n },\n 'pageup': {\n 'alias': ch_keys[1],\n 'qt_key': Qt.Key_PageUp,\n 'modifier': [None, 'Shift'],\n 'slot': [self.change_nchan],\n 'parameter': [1, 10],\n 'description': ['Increase shown channels (1)',\n 'Increase shown channels (10)']\n },\n '-': {\n 'qt_key': Qt.Key_Minus,\n 'slot': [self.scale_all],\n 'parameter': [4 / 5],\n 'description': ['Decrease Scale']\n },\n '+': {\n 'qt_key': Qt.Key_Plus,\n 'slot': [self.scale_all],\n 'parameter': [5 / 4],\n 'description': ['Increase Scale']\n },\n '=': {\n 'qt_key': Qt.Key_Equal,\n 'slot': [self.scale_all],\n 'parameter': [5 / 4],\n 'description': ['Increase Scale']\n },\n 'a': {\n 'qt_key': Qt.Key_A,\n 'slot': [self._toggle_annotation_fig,\n self._toggle_annotations],\n 'modifier': [None, 'Shift'],\n 'description': ['Toggle Annotation-Tool',\n 'Toggle Annotations visible']\n },\n 'b': {\n 'qt_key': Qt.Key_B,\n 'slot': [self._toggle_butterfly],\n 'description': ['Toggle Butterfly']\n },\n 'd': {\n 'qt_key': Qt.Key_D,\n 'slot': [self._toggle_dc],\n 'description': ['Toggle DC-Correction']\n },\n 'e': {\n 'qt_key': Qt.Key_E,\n 'slot': [self._toggle_events],\n 'description': ['Toggle Events visible']\n },\n 'h': {\n 'qt_key': Qt.Key_H,\n 'slot': [self._toggle_epoch_histogram],\n 'description': ['Toggle Epoch-Histogram']\n },\n 'j': {\n 'qt_key': Qt.Key_J,\n 'slot': [self._toggle_proj_fig,\n self._toggle_all_projs],\n 'modifier': [None, 'Shift'],\n 'description': ['Toggle Projection Figure',\n 'Toggle all projections']\n },\n 'l': {\n 
'qt_key': Qt.Key_L,\n 'slot': [self._toggle_antialiasing],\n 'description': ['Toggle Antialiasing']\n },\n 'o': {\n 'qt_key': Qt.Key_O,\n 'slot': [self._toggle_overview_bar],\n 'description': ['Toggle Overview-Bar']\n },\n 't': {\n 'qt_key': Qt.Key_T,\n 'slot': [self._toggle_time_format],\n 'description': ['Toggle Time-Format']\n },\n 's': {\n 'qt_key': Qt.Key_S,\n 'slot': [self._toggle_scalebars],\n 'description': ['Toggle Scalebars']\n },\n 'w': {\n 'qt_key': Qt.Key_W,\n 'slot': [self._toggle_whitening],\n 'description': ['Toggle Whitening']\n },\n 'x': {\n 'qt_key': Qt.Key_X,\n 'slot': [self._toggle_crosshair],\n 'description': ['Toggle Crosshair']\n },\n 'z': {\n 'qt_key': Qt.Key_Z,\n 'slot': [self._toggle_zenmode],\n 'description': ['Toggle Zen-Mode']\n },\n '?': {\n 'qt_key': Qt.Key_Question,\n 'slot': [self._toggle_help_fig],\n 'description': ['Show Help']\n },\n 'f11': {\n 'qt_key': Qt.Key_F11,\n 'slot': [self._toggle_fullscreen],\n 'description': ['Toggle Full-Screen']\n },\n 'escape': {\n 'qt_key': Qt.Key_Escape,\n 'slot': [self.close],\n 'description': ['Close']\n },\n # Just for testing\n 'enter': {\n 'qt_key': Qt.Key_Enter\n },\n ' ': {\n 'qt_key': Qt.Key_Space\n }\n }\n\n def _update_yaxis_labels(self):\n self.mne.channel_axis.repaint()\n\n def _add_scalebars(self):\n \"\"\"Add scalebars for all channel-types.\n (scene handles showing them in when in view\n range)\n \"\"\"\n self.mne.scalebars.clear()\n # To keep order (np.unique sorts)\n ordered_types = self.mne.ch_types[self.mne.ch_order]\n unique_type_idxs = np.unique(ordered_types,\n return_index=True)[1]\n ch_types_ordered = [ordered_types[idx] for idx\n in sorted(unique_type_idxs)]\n for ch_type in [ct for ct in ch_types_ordered\n if ct != 'stim' and\n ct in self.mne.scalings and\n ct in getattr(self.mne, 'units', {}) and\n ct in getattr(self.mne, 'unit_scalings', {})]:\n scale_bar = ScaleBar(self.mne, ch_type)\n self.mne.scalebars[ch_type] = scale_bar\n self.mne.plt.addItem(scale_bar)\n\n scale_bar_text = ScaleBarText(self.mne, ch_type)\n self.mne.scalebar_texts[ch_type] = scale_bar_text\n self.mne.plt.addItem(scale_bar_text)\n\n self._set_scalebars_visible(self.mne.scalebars_visible)\n\n def _update_scalebar_x_positions(self):\n if self.mne.scalebars_visible:\n for scalebar in self.mne.scalebars.values():\n scalebar.update_x_position()\n\n for scalebar_text in self.mne.scalebar_texts.values():\n scalebar_text.update_x_position()\n\n def _update_scalebar_y_positions(self):\n if self.mne.scalebars_visible:\n for scalebar in self.mne.scalebars.values():\n scalebar.update_y_position()\n\n for scalebar_text in self.mne.scalebar_texts.values():\n scalebar_text.update_y_position()\n\n def _update_scalebar_values(self):\n for scalebar_text in self.mne.scalebar_texts.values():\n scalebar_text.update_value()\n\n def _set_scalebars_visible(self, visible):\n for scalebar in self.mne.scalebars.values():\n scalebar.setVisible(visible)\n\n for scalebar_text in self.mne.scalebar_texts.values():\n scalebar_text.setVisible(visible)\n\n self._update_scalebar_y_positions()\n\n def _toggle_scalebars(self):\n self.mne.scalebars_visible = not self.mne.scalebars_visible\n self._set_scalebars_visible(self.mne.scalebars_visible)\n\n def _overview_mode_changed(self, new_mode):\n self.mne.overview_mode = new_mode\n if self.mne.overview_mode == 'zscore':\n while self.mne.zscore_rgba is None:\n QApplication.processEvents()\n self.mne.overview_bar.set_background()\n\n def scale_all(self, step):\n \"\"\"Scale all traces by multiplying with 
step.\"\"\"\n self.mne.scale_factor *= step\n\n # Reapply clipping if necessary\n if self.mne.clipping is not None:\n self._update_data()\n\n # Scale Traces (by scaling the Item, not the data)\n for line in self.mne.traces:\n line.update_scale()\n\n # Update Scalebars\n self._update_scalebar_values()\n\n def hscroll(self, step):\n \"\"\"Scroll horizontally by step.\"\"\"\n if step == '+full':\n rel_step = self.mne.duration\n elif step == '-full':\n rel_step = - self.mne.duration\n elif self.mne.is_epochs:\n direction = 1 if step > 0 else -1\n rel_step = direction * self.mne.duration / self.mne.n_epochs\n else:\n rel_step = step * self.mne.duration / self.mne.scroll_sensitivity\n # Get current range and add step to it\n xmin, xmax = [i + rel_step for i in self.mne.viewbox.viewRange()[0]]\n\n if xmin < 0:\n xmin = 0\n xmax = xmin + self.mne.duration\n elif xmax > self.mne.xmax:\n xmax = self.mne.xmax\n xmin = xmax - self.mne.duration\n\n self.mne.plt.setXRange(xmin, xmax, padding=0)\n\n def vscroll(self, step):\n \"\"\"Scroll vertically by step.\"\"\"\n if self.mne.fig_selection is not None:\n if step == '+full':\n step = 1\n elif step == '-full':\n step = -1\n else:\n step = int(step)\n self.mne.fig_selection._scroll_selection(step)\n elif self.mne.butterfly:\n return\n else:\n # Get current range and add step to it\n if step == '+full':\n step = self.mne.n_channels\n elif step == '-full':\n step = - self.mne.n_channels\n ymin, ymax = [i + step for i in self.mne.viewbox.viewRange()[1]]\n\n if ymin < 0:\n ymin = 0\n ymax = self.mne.n_channels + 1\n elif ymax > self.mne.ymax:\n ymax = self.mne.ymax\n ymin = ymax - self.mne.n_channels - 1\n\n self.mne.plt.setYRange(ymin, ymax, padding=0)\n\n def change_duration(self, step):\n \"\"\"Change duration by step.\"\"\"\n xmin, xmax = self.mne.viewbox.viewRange()[0]\n\n if self.mne.is_epochs:\n # use the length of one epoch as duration change\n min_dur = len(self.mne.inst.times) / self.mne.info['sfreq']\n step_dir = (1 if step > 0 else -1)\n rel_step = min_dur * step_dir\n self.mne.n_epochs = np.clip(self.mne.n_epochs + step_dir,\n 1, len(self.mne.inst))\n else:\n # never show fewer than 3 samples\n min_dur = 3 * np.diff(self.mne.inst.times[:2])[0]\n rel_step = self.mne.duration * step\n\n xmax += rel_step\n\n if xmax - xmin < min_dur:\n xmax = xmin + min_dur\n\n if xmax > self.mne.xmax:\n diff = xmax - self.mne.xmax\n xmax = self.mne.xmax\n xmin -= diff\n\n if xmin < 0:\n xmin = 0\n\n self.mne.ax_hscroll.update_duration()\n self.mne.plt.setXRange(xmin, xmax, padding=0)\n\n def change_nchan(self, step):\n \"\"\"Change number of channels by step.\"\"\"\n if not self.mne.butterfly:\n if step == '+full':\n step = self.mne.n_channels\n elif step == '-full':\n step = - self.mne.n_channels\n ymin, ymax = self.mne.viewbox.viewRange()[1]\n ymax += step\n if ymax > self.mne.ymax:\n ymax = self.mne.ymax\n ymin -= step\n\n if ymin < 0:\n ymin = 0\n\n if ymax - ymin <= 2:\n ymax = ymin + 2\n\n self.mne.ax_vscroll.update_nchan()\n self.mne.plt.setYRange(ymin, ymax, padding=0)\n\n def _remove_vline(self):\n if self.mne.vline is not None:\n if self.mne.is_epochs:\n for vline in self.mne.vline:\n self.mne.plt.removeItem(vline)\n else:\n self.mne.plt.removeItem(self.mne.vline)\n\n self.mne.vline = None\n self.mne.vline_visible = False\n self.mne.overview_bar.update_vline()\n\n def _get_vline_times(self, t):\n rel_time = t % self.mne.epoch_dur\n abs_time = self.mne.times[0]\n ts = np.arange(\n self.mne.n_epochs) * self.mne.epoch_dur + abs_time + rel_time\n\n return 
ts\n\n def _vline_slot(self, orig_vline):\n if self.mne.is_epochs:\n ts = self._get_vline_times(orig_vline.value())\n for vl, xt in zip(self.mne.vline, ts):\n if vl != orig_vline:\n vl.setPos(xt)\n self.mne.overview_bar.update_vline()\n\n def _add_vline(self, t):\n if self.mne.is_epochs:\n ts = self._get_vline_times(t)\n\n # Add vline if None\n if self.mne.vline is None:\n self.mne.vline = list()\n for xt in ts:\n epo_idx = np.clip(\n np.searchsorted(self.mne.boundary_times, xt) - 1,\n 0, len(self.mne.inst))\n bmin, bmax = self.mne.boundary_times[epo_idx:epo_idx + 2]\n # Avoid off-by-one-error at bmax for VlineLabel\n bmax -= 1 / self.mne.info['sfreq']\n vl = VLine(self.mne, xt, bounds=(bmin, bmax))\n # Should only be emitted when dragged\n vl.sigPositionChangeFinished.connect(self._vline_slot)\n self.mne.vline.append(vl)\n self.mne.plt.addItem(vl)\n else:\n for vl, xt in zip(self.mne.vline, ts):\n vl.setPos(xt)\n else:\n if self.mne.vline is None:\n self.mne.vline = VLine(self.mne, t, bounds=(0, self.mne.xmax))\n self.mne.vline.sigPositionChangeFinished.connect(\n self._vline_slot)\n self.mne.plt.addItem(self.mne.vline)\n else:\n self.mne.vline.setPos(t)\n\n self.mne.vline_visible = True\n self.mne.overview_bar.update_vline()\n\n def _mouse_moved(self, pos):\n \"\"\"Show Crosshair if enabled at mouse move.\"\"\"\n if self.mne.crosshair_enabled:\n if self.mne.plt.sceneBoundingRect().contains(pos):\n mousePoint = self.mne.viewbox.mapSceneToView(pos)\n x, y = mousePoint.x(), mousePoint.y()\n if (0 <= x <= self.mne.xmax and\n 0 <= y <= self.mne.ymax):\n if not self.mne.crosshair:\n self.mne.crosshair = Crosshair()\n self.mne.plt.addItem(self.mne.crosshair,\n ignoreBounds=True)\n\n # Get ypos from trace\n trace = [tr for tr in self.mne.traces if\n tr.ypos - 0.5 < y < tr.ypos + 0.5]\n if len(trace) == 1:\n trace = trace[0]\n idx = np.searchsorted(self.mne.times, x)\n if self.mne.data_precomputed:\n data = self.mne.data[trace.order_idx]\n else:\n data = self.mne.data[trace.range_idx]\n yvalue = data[idx]\n yshown = yvalue + trace.ypos\n self.mne.crosshair.set_data(x, yshown)\n\n # relative x for epochs\n if self.mne.is_epochs:\n rel_idx = idx % len(self.mne.inst.times)\n x = self.mne.inst.times[rel_idx]\n\n # negative because plot is inverted for Y\n scaler = -1 if self.mne.butterfly else -2\n inv_norm = (scaler *\n self.mne.scalings[trace.ch_type] *\n self.mne.unit_scalings[trace.ch_type] /\n self.mne.scale_factor)\n label = f'{_simplify_float(yvalue * inv_norm)} ' \\\n f'{self.mne.units[trace.ch_type]}'\n self.statusBar().showMessage(f'x={x:.3f} s, '\n f'y={label}')\n\n def _toggle_crosshair(self):\n self.mne.crosshair_enabled = not self.mne.crosshair_enabled\n if self.mne.crosshair:\n self.mne.plt.removeItem(self.mne.crosshair)\n self.mne.crosshair = None\n\n def _xrange_changed(self, _, xrange):\n # Update data\n if self.mne.is_epochs:\n if self.mne.vline is not None:\n rel_vl_t = self.mne.vline[0].value() \\\n - self.mne.boundary_times[self.mne.epoch_idx][0]\n\n # Depends on only allowing xrange showing full epochs\n boundary_idxs = np.searchsorted(self.mne.midpoints, xrange)\n self.mne.epoch_idx = np.arange(*boundary_idxs)\n\n # Update colors\n for trace in self.mne.traces:\n trace.update_color()\n\n # Update vlines\n if self.mne.vline is not None:\n for bmin, bmax, vl in zip(self.mne.boundary_times[\n self.mne.epoch_idx],\n self.mne.boundary_times[\n self.mne.epoch_idx + 1],\n self.mne.vline):\n # Avoid off-by-one-error at bmax for VlineLabel\n bmax -= 1 / self.mne.info['sfreq']\n 
vl.setBounds((bmin, bmax))\n vl.setValue(bmin + rel_vl_t)\n\n self.mne.t_start = xrange[0]\n self.mne.duration = xrange[1] - xrange[0]\n\n self._redraw(update_data=True)\n\n # Update annotations\n if not self.mne.is_epochs:\n self._update_annotations_xrange(xrange)\n\n # Update Events\n self._update_events_xrange(xrange)\n\n # Update Time-Bar\n self.mne.ax_hscroll.update_value(xrange[0])\n\n # Update Overview-Bar\n self.mne.overview_bar.update_viewrange()\n\n # Update Scalebars\n self._update_scalebar_x_positions()\n\n def _update_events_xrange(self, xrange):\n \"\"\"Add or remove event-lines depending on view-range.\n\n This has proven to be more performant (and scalable)\n than adding all event-lines to plt(the Scene)\n and letting pyqtgraph/Qt handle it.\n \"\"\"\n if self.mne.events_visible:\n for ev_line in self.mne.event_lines:\n if xrange[0] < ev_line.pos().x() < xrange[1]:\n if ev_line not in self.mne.plt.items:\n self.mne.plt.addItem(ev_line)\n else:\n if ev_line in self.mne.plt.items:\n self.mne.plt.removeItem(ev_line)\n\n def _update_annotations_xrange(self, xrange):\n \"\"\"Add or remove annotation-regions depending on view-range.\n\n This has proven to be more performant (and scalable)\n than adding all annotations to plt(the Scene)\n and letting pyqtgraph/Qt handle it.\n \"\"\"\n if self.mne.annotations_visible:\n for region in self.mne.regions:\n if self.mne.visible_annotations[region.description]:\n rmin, rmax = region.getRegion()\n xmin, xmax = xrange\n comparisons = [rmin < xmin,\n rmin < xmax,\n rmax < xmin,\n rmax < xmax]\n if all(comparisons) or not any(comparisons):\n if region in self.mne.plt.items:\n self.mne.plt.removeItem(region)\n self.mne.plt.removeItem(region.label_item)\n else:\n if region not in self.mne.plt.items:\n self.mne.plt.addItem(region)\n self.mne.plt.addItem(region.label_item)\n\n def _yrange_changed(self, _, yrange):\n if not self.mne.butterfly:\n if not self.mne.fig_selection:\n # Update picks and data\n self.mne.ch_start = np.clip(round(yrange[0]), 0,\n len(self.mne.ch_order)\n - self.mne.n_channels)\n self.mne.n_channels = round(yrange[1] - yrange[0] - 1)\n self._update_picks()\n # Update Channel-Bar\n self.mne.ax_vscroll.update_value(self.mne.ch_start)\n self._update_data()\n\n # Update Overview-Bar\n self.mne.overview_bar.update_viewrange()\n\n # Update Scalebars\n self._update_scalebar_y_positions()\n\n off_traces = [tr for tr in self.mne.traces\n if tr.ch_idx not in self.mne.picks]\n add_idxs = [p for p in self.mne.picks\n if p not in [tr.ch_idx for tr in self.mne.traces]]\n\n # Update range_idx for traces which just shifted in y-position\n for trace in [tr for tr in self.mne.traces if tr not in off_traces]:\n trace.update_range_idx()\n\n # Update number of traces.\n trace_diff = len(self.mne.picks) - len(self.mne.traces)\n\n # Remove unnecessary traces.\n if trace_diff < 0:\n # Only remove from traces not in picks.\n remove_traces = off_traces[:abs(trace_diff)]\n for trace in remove_traces:\n trace.remove()\n off_traces.remove(trace)\n\n # Add new traces if necessary.\n if trace_diff > 0:\n # Make copy to avoid skipping iteration.\n idxs_copy = add_idxs.copy()\n for aidx in idxs_copy[:trace_diff]:\n DataTrace(self, aidx)\n add_idxs.remove(aidx)\n\n # Update data of traces outside of yrange (reuse remaining trace-items)\n for trace, ch_idx in zip(off_traces, add_idxs):\n trace.set_ch_idx(ch_idx)\n trace.update_color()\n trace.update_data()\n\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # DATA HANDLING\n # # 
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n def _apply_downsampling(self):\n \"\"\"\n Get ds-factor and apply ds with one of multiple methods.\n\n The methods are taken from PlotDataItem in pyqtgraph\n and adjusted to multi-channel data.\n \"\"\"\n # Get Downsampling-Factor\n # Auto-Downsampling from pyqtgraph\n if self.mne.downsampling == 'auto':\n ds = 1\n if all([hasattr(self.mne, a) for a in ['viewbox', 'times']]):\n vb = self.mne.viewbox\n if vb is not None:\n view_range = vb.viewRect()\n else:\n view_range = None\n if view_range is not None and len(self.mne.times) > 1:\n dx = float(self.mne.times[-1] - self.mne.times[0]) / (\n len(self.mne.times) - 1)\n if dx != 0.0:\n x0 = view_range.left() / dx\n x1 = view_range.right() / dx\n width = vb.width()\n if width != 0.0:\n # Auto-Downsampling with 5 samples per pixel\n ds = int(max(1, (x1 - x0) / (width * 5)))\n else:\n ds = self.mne.downsampling\n\n # Apply Downsampling\n if ds not in [None, 1]:\n times = self.mne.times\n data = self.mne.data\n n_ch = data.shape[0]\n\n if self.mne.ds_method == 'subsample':\n times = times[::ds]\n data = data[:, ::ds]\n\n elif self.mne.ds_method == 'mean':\n n = len(times) // ds\n # start of x-values\n # try to select a somewhat centered point\n stx = ds // 2\n times = times[stx:stx + n * ds:ds]\n rs_data = data[:, :n * ds].reshape(n_ch, n, ds)\n data = rs_data.mean(axis=2)\n\n elif self.mne.ds_method == 'peak':\n n = len(times) // ds\n # start of x-values\n # try to select a somewhat centered point\n stx = ds // 2\n\n x1 = np.empty((n, 2))\n x1[:] = times[stx:stx + n * ds:ds, np.newaxis]\n times = x1.reshape(n * 2)\n\n y1 = np.empty((n_ch, n, 2))\n y2 = data[:, :n * ds].reshape((n_ch, n, ds))\n y1[:, :, 0] = y2.max(axis=2)\n y1[:, :, 1] = y2.min(axis=2)\n data = y1.reshape((n_ch, n * 2))\n\n self.mne.times, self.mne.data = times, data\n\n def _show_process(self, message):\n if self.mne.load_progressbar.isVisible():\n self.mne.load_progressbar.hide()\n self.mne.load_prog_label.hide()\n self.statusBar().showMessage(message)\n\n def _precompute_finished(self):\n self.statusBar().showMessage('Loading Finished', 5)\n self.mne.data_precomputed = True\n\n if self.mne.overview_mode == 'zscore':\n # Show loaded overview image\n self.mne.overview_bar.set_background()\n\n if self._rerun_load_thread:\n self._rerun_load_thread = False\n self._init_precompute()\n\n def _init_precompute(self):\n # Remove previously loaded data\n self.mne.data_precomputed = False\n if all([hasattr(self.mne, st)\n for st in ['global_data', 'global_times']]):\n del self.mne.global_data, self.mne.global_times\n gc.collect()\n\n if self.mne.precompute == 'auto':\n self.mne.enable_precompute = self._check_space_for_precompute()\n elif isinstance(self.mne.precompute, bool):\n self.mne.enable_precompute = self.mne.precompute\n\n if self.mne.enable_precompute:\n # Start precompute thread\n self.mne.load_progressbar.show()\n self.mne.load_prog_label.show()\n self.load_thread.start()\n\n def _rerun_precompute(self):\n if self.load_thread.isRunning():\n self._rerun_load_thread = True\n else:\n self._init_precompute()\n\n def _check_space_for_precompute(self):\n try:\n import psutil\n except ImportError:\n logger.info('Free RAM space could not be determined because'\n '\"psutil\" is not installed. 
'\n 'Setting precompute to False.')\n return False\n else:\n if self.mne.is_epochs:\n files = [self.mne.inst.filename]\n else:\n files = self.mne.inst.filenames\n if files[0] is not None:\n # Get disk-space of raw-file(s)\n disk_space = 0\n for fn in files:\n disk_space += getsize(fn)\n\n # Determine expected RAM space based on orig_format\n fmt_multipliers = {'double': 1,\n 'single': 2,\n 'int': 2,\n 'short': 4}\n\n # Epochs and ICA don't have this attribute, assume single\n # on disk\n fmt = getattr(self.mne.inst, 'orig_format', 'single')\n # Apply size change to 64-bit float in memory\n # (* 2 because when loading data will be loaded into a copy\n # of self.mne.inst._data to apply processing.\n expected_ram = disk_space * fmt_multipliers[fmt] * 2\n else:\n expected_ram = sys.getsizeof(self.mne.inst._data)\n\n # Get available RAM\n free_ram = psutil.virtual_memory().free\n\n expected_ram_str = sizeof_fmt(expected_ram)\n free_ram_str = sizeof_fmt(free_ram)\n left_ram_str = sizeof_fmt(free_ram - expected_ram)\n\n if expected_ram < free_ram:\n logger.debug('The data precomputed for visualization takes '\n f'{expected_ram_str} with {left_ram_str} of '\n f'RAM left.')\n return True\n else:\n logger.debug(f'The precomputed data with {expected_ram_str} '\n f'will surpass your current {free_ram_str} '\n f'of free RAM.\\n'\n 'Thus precompute will be set to False.\\n'\n '(If you want to precompute nevertheless, '\n 'then set precompute to True instead of \"auto\")')\n return False\n\n def _process_data(self, data, start, stop, picks,\n signals=None):\n data = super()._process_data(data, start, stop, picks, signals)\n\n # Invert Data to be displayed from top on inverted Y-Axis\n data *= -1\n\n return data\n\n def _update_data(self):\n if self.mne.data_precomputed:\n # get start/stop-samples\n start, stop = self._get_start_stop()\n self.mne.times = self.mne.global_times[start:stop]\n self.mne.data = self.mne.global_data[:, start:stop]\n\n # remove DC locally\n if self.mne.remove_dc:\n self.mne.data = self.mne.data - \\\n self.mne.data.mean(axis=1, keepdims=True)\n\n else:\n # While data is not precomputed get data only from shown range and\n # process only those.\n super()._update_data()\n\n # Initialize decim\n self.mne.decim_data = np.ones_like(self.mne.picks)\n data_picks_mask = np.in1d(self.mne.picks, self.mne.picks_data)\n self.mne.decim_data[data_picks_mask] = self.mne.decim\n\n # Get decim_times\n if self.mne.decim != 1:\n # decim can vary by channel type,\n # so compute different `times` vectors.\n self.mne.decim_times = {decim_value: self.mne.times[::decim_value]\n + self.mne.first_time for decim_value\n in set(self.mne.decim_data)}\n\n # Apply clipping\n if self.mne.clipping == 'clamp':\n self.mne.data = np.clip(self.mne.data, -0.5, 0.5)\n elif self.mne.clipping is not None:\n self.mne.data = self.mne.data.copy()\n self.mne.data[abs(self.mne.data * self.mne.scale_factor)\n > self.mne.clipping] = np.nan\n\n # Apply Downsampling (if enabled)\n self._apply_downsampling()\n\n def _get_zscore(self, data):\n # Reshape data to reasonable size for display\n if QApplication.desktop() is None:\n max_pixel_width = 3840 # default=UHD\n else:\n max_pixel_width = QApplication.desktop().screenGeometry().width()\n collapse_by = data.shape[1] // max_pixel_width\n data = data[:, :max_pixel_width * collapse_by]\n if collapse_by > 0:\n data = data.reshape(data.shape[0], max_pixel_width, collapse_by)\n data = data.mean(axis=2)\n z = zscore(data, axis=1)\n if z.size > 0:\n zmin = np.min(z, axis=1)\n zmax = 
np.max(z, axis=1)\n\n # Convert into RGBA\n zrgba = np.empty((*z.shape, 4))\n for row_idx, row in enumerate(z):\n for col_idx, value in enumerate(row):\n if math.isnan(value):\n value = 0\n if value == 0:\n rgba = [0, 0, 0, 0]\n elif value < 0:\n alpha = int(255 * value / abs(zmin[row_idx]))\n rgba = [0, 0, 255, alpha]\n else:\n alpha = int(255 * value / zmax[row_idx])\n rgba = [255, 0, 0, alpha]\n\n zrgba[row_idx, col_idx] = rgba\n\n zrgba = np.require(zrgba, np.uint8, 'C')\n\n self.mne.zscore_rgba = zrgba\n\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n # ANNOTATIONS\n # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n def _add_region(self, plot_onset, duration, description, region=None):\n if not region:\n region = AnnotRegion(self.mne, description=description,\n values=(plot_onset, plot_onset + duration))\n if (any([self.mne.t_start < v < self.mne.t_start + self.mne.duration\n for v in [plot_onset, plot_onset + duration]]) and\n region not in self.mne.plt.items):\n self.mne.plt.addItem(region)\n self.mne.plt.addItem(region.label_item)\n region.regionChangeFinished.connect(self._region_changed)\n region.gotSelected.connect(self._region_selected)\n region.removeRequested.connect(self._remove_region)\n self.mne.viewbox.sigYRangeChanged.connect(region.update_label_pos)\n self.mne.regions.append(region)\n\n region.update_label_pos()\n\n def _remove_region(self, region, from_annot=True):\n # Remove from shown regions\n if region.label_item in self.mne.viewbox.addedItems:\n self.mne.viewbox.removeItem(region.label_item)\n if region in self.mne.plt.items:\n self.mne.plt.removeItem(region)\n\n # Remove from all regions\n if region in self.mne.regions:\n self.mne.regions.remove(region)\n\n # Reset selected region\n if region == self.mne.selected_region:\n self.mne.selected_region = None\n\n # Remove from annotations\n if from_annot:\n idx = self._get_onset_idx(region.getRegion()[0])\n self.mne.inst.annotations.delete(idx)\n\n # Update Overview-Bar\n self.mne.overview_bar.update_annotations()\n\n def _region_selected(self, region):\n old_region = self.mne.selected_region\n # Remove selected-status from old region\n if old_region and old_region != region:\n old_region.select(False)\n self.mne.selected_region = region\n self.mne.fig_annotation.update_values(region)\n\n def _get_onset_idx(self, plot_onset):\n onset = _sync_onset(self.mne.inst, plot_onset, inverse=True)\n idx = np.where(self.mne.inst.annotations.onset == onset)[0][0]\n return idx\n\n def _region_changed(self, region):\n rgn = region.getRegion()\n region.select(True)\n idx = self._get_onset_idx(region.old_onset)\n\n # Update Spinboxes of Annot-Dock\n self.mne.fig_annotation.update_values(region)\n\n # Change annotations\n self.mne.inst.annotations.onset[idx] = _sync_onset(self.mne.inst,\n rgn[0],\n inverse=True)\n self.mne.inst.annotations.duration[idx] = rgn[1] - rgn[0]\n\n # Update overview-bar\n self.mne.overview_bar.update_annotations()\n\n def _draw_annotations(self):\n # All regions are constantly added to the Scene and handled by Qt\n # which is faster than handling adding/removing in Python.\n pass\n\n def _init_annot_mode(self):\n self.mne.annotations_visible = True\n self.mne.new_annotation_labels = self._get_annotation_labels()\n if len(self.mne.new_annotation_labels) > 0:\n self.mne.current_description = self.mne.new_annotation_labels[0]\n else:\n self.mne.current_description = None\n self._setup_annotation_colors()\n self.mne.regions = list()\n 
self.mne.selected_region = None\n\n # Initialize Annotation-Dock\n existing_dock = getattr(self.mne, 'fig_annotation', None)\n if existing_dock is None:\n self.mne.fig_annotation = AnnotationDock(self)\n self.addDockWidget(Qt.TopDockWidgetArea, self.mne.fig_annotation)\n self.mne.fig_annotation.setVisible(False)\n\n # Add annotations as regions\n for annot in self.mne.inst.annotations:\n plot_onset = _sync_onset(self.mne.inst, annot['onset'])\n duration = annot['duration']\n description = annot['description']\n self._add_region(plot_onset, duration, description)\n\n # Initialize showing annotation widgets\n self._change_annot_mode()\n\n def _change_annot_mode(self):\n if not self.mne.annotation_mode:\n # Reset Widgets in Annotation-Figure\n self.mne.fig_annotation.reset()\n\n # Show Annotation-Dock if activated.\n self.mne.fig_annotation.setVisible(self.mne.annotation_mode)\n\n # Make Regions movable if activated and move into foreground\n for region in self.mne.regions:\n region.setMovable(self.mne.annotation_mode)\n if self.mne.annotation_mode:\n region.setZValue(2)\n else:\n region.setZValue(0)\n\n # Add/Remove selection-rectangle.\n if self.mne.selected_region:\n self.mne.selected_region.select(self.mne.annotation_mode)\n\n def _toggle_annotation_fig(self):\n if not self.mne.is_epochs:\n self.mne.annotation_mode = not self.mne.annotation_mode\n self._change_annot_mode()\n\n def _update_regions_visible(self):\n for region in self.mne.regions:\n region.update_visible(\n self.mne.visible_annotations[region.description])\n self.mne.overview_bar.update_annotations()\n\n def _set_annotations_visible(self, visible):\n for descr in self.mne.visible_annotations:\n self.mne.visible_annotations[descr] = visible\n self._update_regions_visible()\n\n # Update Plot\n if visible:\n self._update_annotations_xrange((self.mne.t_start,\n self.mne.t_start +\n self.mne.duration))\n else:\n for region in [r for r in self.mne.regions\n if r in self.mne.plt.items]:\n self.mne.plt.removeItem(region)\n self.mne.plt.removeItem(region.label_item)\n\n def _toggle_annotations(self):\n self.mne.annotations_visible = not self.mne.annotations_visible\n self._set_annotations_visible(self.mne.annotations_visible)\n\n def _apply_update_projectors(self, toggle_all=False):\n if toggle_all:\n on = self.mne.projs_on\n applied = self.mne.projs_active\n value = False if all(on) else True\n new_state = np.full_like(on, value)\n # Always activate applied projections\n new_state[applied] = True\n self.mne.projs_on = new_state\n self._update_projector()\n # If data was precomputed it needs to be precomputed again.\n self._rerun_precompute()\n self._redraw()\n\n def _toggle_proj_fig(self):\n if self.mne.fig_proj is None:\n ProjDialog(self, name='fig_proj')\n else:\n self.mne.fig_proj.close()\n\n def _toggle_all_projs(self):\n if self.mne.fig_proj is None:\n self._apply_update_projectors(toggle_all=True)\n else:\n self.mne.fig_proj.toggle_all()\n\n def _toggle_whitening(self):\n super()._toggle_whitening()\n # If data was precomputed it needs to be precomputed again.\n self._rerun_precompute()\n self._redraw()\n\n def _toggle_settings_fig(self):\n if self.mne.fig_settings is None:\n SettingsDialog(self, name='fig_settings')\n else:\n self.mne.fig_help.close()\n self.mne.fig_help = None\n\n def _toggle_help_fig(self):\n if self.mne.fig_help is None:\n HelpDialog(self, name='fig_help')\n else:\n self.mne.fig_help.close()\n self.mne.fig_help = None\n\n def _set_butterfly(self, butterfly):\n self.mne.butterfly = butterfly\n 
self._update_picks()\n self._update_data()\n\n if butterfly and self.mne.fig_selection is not None:\n self.mne.selection_ypos_dict.clear()\n selections_dict = self._make_butterfly_selections_dict()\n for idx, picks in enumerate(selections_dict.values()):\n for pick in picks:\n self.mne.selection_ypos_dict[pick] = idx + 1\n ymax = len(selections_dict) + 1\n self.mne.ymax = ymax\n self.mne.plt.setLimits(yMax=ymax)\n self.mne.plt.setYRange(0, ymax, padding=0)\n elif butterfly:\n ymax = len(self.mne.butterfly_type_order) + 1\n self.mne.ymax = ymax\n self.mne.plt.setLimits(yMax=ymax)\n self.mne.plt.setYRange(0, ymax, padding=0)\n else:\n self.mne.ymax = len(self.mne.ch_order) + 1\n self.mne.plt.setLimits(yMax=self.mne.ymax)\n self.mne.plt.setYRange(self.mne.ch_start,\n self.mne.ch_start + self.mne.n_channels + 1,\n padding=0)\n\n if self.mne.fig_selection is not None:\n # Update Selection-Dialog\n self.mne.fig_selection._style_butterfly()\n\n # Set vertical scrollbar visible\n self.mne.ax_vscroll.setVisible(not butterfly or\n self.mne.fig_selection is not None)\n\n # update overview-bar\n self.mne.overview_bar.update_viewrange()\n\n # update ypos and color for butterfly-mode\n for trace in self.mne.traces:\n trace.update_color()\n trace.update_ypos()\n\n self._draw_traces()\n\n def _toggle_butterfly(self):\n if self.mne.instance_type != 'ica':\n self._set_butterfly(not self.mne.butterfly)\n\n def _toggle_dc(self):\n self.mne.remove_dc = not self.mne.remove_dc\n self._redraw()\n\n def _toggle_epoch_histogram(self):\n fig = self._create_epoch_histogram()\n self._get_dlg_from_mpl(fig)\n\n def _set_events_visible(self, visible):\n for event_line in self.mne.event_lines:\n event_line.setVisible(visible)\n\n # Update Plot\n if visible:\n self._update_events_xrange((self.mne.t_start,\n self.mne.t_start +\n self.mne.duration))\n else:\n for event_line in [evl for evl in self.mne.event_lines\n if evl in self.mne.plt.items]:\n self.mne.plt.removeItem(event_line)\n self.mne.overview_bar.update_events()\n\n def _toggle_events(self):\n if self.mne.event_nums is not None:\n self.mne.events_visible = not self.mne.events_visible\n self._set_events_visible(self.mne.events_visible)\n\n def _toggle_time_format(self):\n if self.mne.time_format == 'float':\n self.mne.time_format = 'clock'\n self.mne.time_axis.setLabel(text='Time')\n else:\n self.mne.time_format = 'float'\n self.mne.time_axis.setLabel(text='Time', units='s')\n self._update_yaxis_labels()\n\n def _toggle_fullscreen(self):\n if self.isFullScreen():\n self.showNormal()\n else:\n self.showFullScreen()\n\n def _toggle_antialiasing(self):\n self.mne.antialiasing = not self.mne.antialiasing\n self._redraw()\n\n def _toggle_overview_bar(self):\n self.mne.show_overview_bar = not self.mne.show_overview_bar\n self.mne.overview_bar.setVisible(self.mne.show_overview_bar)\n\n def _toggle_zenmode(self):\n self.mne.scrollbars_visible = not self.mne.scrollbars_visible\n for bar in [self.mne.ax_hscroll, self.mne.ax_vscroll]:\n bar.setVisible(self.mne.scrollbars_visible)\n self.mne.toolbar.setVisible(self.mne.scrollbars_visible)\n\n def _new_child_figure(self, fig_name, window_title, **kwargs):\n from matplotlib.figure import Figure\n fig = Figure(**kwargs)\n # Pass window title and fig_name on\n if fig_name is not None:\n fig.fig_name = fig_name\n if window_title is not None:\n fig.title = window_title\n return fig\n\n def _get_widget_from_mpl(self, fig):\n canvas = FigureCanvasQTAgg(fig)\n canvas.setFocusPolicy(Qt.StrongFocus | Qt.WheelFocus)\n canvas.setFocus()\n # 
Pass window title and fig_name on\n if hasattr(fig, 'fig_name'):\n canvas.fig_name = fig.fig_name\n if hasattr(fig, 'title'):\n canvas.title = fig.title\n\n return canvas\n\n def _get_dlg_from_mpl(self, fig):\n canvas = self._get_widget_from_mpl(fig)\n # Pass window title and fig_name on\n if hasattr(canvas, 'fig_name'):\n name = canvas.fig_name\n else:\n name = None\n if hasattr(canvas, 'title'):\n title = canvas.title\n else:\n title = None\n dlg = _BaseDialog(self, widget=canvas, title=title, name=name)\n dlg.show()\n\n def _create_ch_context_fig(self, idx):\n fig = super()._create_ch_context_fig(idx)\n if fig is not None:\n self._get_dlg_from_mpl(fig)\n\n def _toggle_epoch_histogramm(self):\n if self.mne.is_epochs:\n fig = self._create_epoch_histogram()\n if fig is not None:\n self._get_dlg_from_mpl(fig)\n\n def _create_selection_fig(self):\n if not any([isinstance(fig, SelectionDialog) for\n fig in self.mne.child_figs]):\n SelectionDialog(self)\n\n def message_box(self, text, info_text=None, buttons=None,\n default_button=None, icon=None, modal=True):\n self.msg_box.setText(f'<font size=\"+2\"><b>{text}</b></font>')\n if info_text is not None:\n self.msg_box.setInformativeText(info_text)\n if buttons is not None:\n self.msg_box.setStandardButtons(buttons)\n if default_button is not None:\n self.msg_box.setDefaultButton(default_button)\n if icon is not None:\n self.msg_box.setIcon(icon)\n\n # Allow interacting with message_box in test-mode.\n # Set modal=False only if no return value is expected.\n self.msg_box.setModal(False if self.test_mode else modal)\n if self.test_mode or not modal:\n self.msg_box.show()\n else:\n return self.msg_box.exec()\n\n def keyPressEvent(self, event):\n \"\"\"Customize key press events.\"\"\"\n # On MacOs additionally KeypadModifier is set when arrow-keys\n # are pressed.\n # On Unix GroupSwitchModifier is set when ctrl is pressed.\n # To preserve cross-platform consistency the following comparison\n # of the modifier-values is done.\n # modifiers need to be exclusive\n modifiers = {\n 'Ctrl': '4' in hex(int(event.modifiers())),\n 'Shift': int(event.modifiers()) == 33554432\n }\n for key_name in self.mne.keyboard_shortcuts:\n key_dict = self.mne.keyboard_shortcuts[key_name]\n if key_dict['qt_key'] == event.key() and 'slot' in key_dict:\n\n mod_idx = 0\n # Get modifier\n if 'modifier' in key_dict:\n mods = [modifiers[mod] for mod in modifiers]\n if any(mods):\n # No multiple modifiers supported yet\n mod = [mod for mod in modifiers if modifiers[mod]][0]\n if mod in key_dict['modifier']:\n mod_idx = key_dict['modifier'].index(mod)\n\n slot_idx = mod_idx if mod_idx < len(key_dict['slot']) else 0\n slot = key_dict['slot'][slot_idx]\n\n if 'parameter' in key_dict:\n param_idx = (mod_idx if mod_idx <\n len(key_dict['parameter']) else 0)\n slot(key_dict['parameter'][param_idx])\n else:\n slot()\n\n break\n\n def _draw_traces(self):\n # Update data in traces (=drawing traces)\n for trace in self.mne.traces:\n # Update data\n trace.update_data()\n\n def _get_size(self):\n inch_width = self.width() / self.logicalDpiX()\n inch_height = self.height() / self.logicalDpiY()\n\n return inch_width, inch_height\n\n def _fake_keypress(self, key, fig=None):\n fig = fig or self\n\n if key.isupper():\n key = key.lower()\n modifier = Qt.ShiftModifier\n elif key.startswith('shift+'):\n key = key[6:]\n modifier = Qt.ShiftModifier\n else:\n modifier = Qt.NoModifier\n\n # Use pytest-qt's exception-hook\n with capture_exceptions() as exceptions:\n QTest.keyPress(fig, 
self.mne.keyboard_shortcuts[key]['qt_key'],\n modifier)\n\n for exc in exceptions:\n raise RuntimeError(f'There as been an {exc[0]} inside the Qt '\n f'event loop (look above for traceback).')\n\n def _fake_click(self, point, add_points=None, fig=None, ax=None,\n xform='ax', button=1, kind='press'):\n add_points = add_points or list()\n # Wait until Window is fully shown.\n QTest.qWaitForWindowExposed(self)\n # Scene-Dimensions still seem to change to final state when waiting\n # for a short time.\n QTest.qWait(10)\n\n # Qt: right-button=2, matplotlib: right-button=3\n if button == 1:\n button = Qt.LeftButton\n else:\n button = Qt.RightButton\n\n # For Qt, fig or ax both would be the widget to test interaction on.\n # If View\n fig = ax or fig or self.mne.view\n\n if xform == 'ax':\n # For Qt, the equivalent of matplotlibs transAxes\n # would be a transformation to View Coordinates.\n # But for the View top-left is (0, 0) and bottom-right is\n # (view-width, view-height).\n view_width = fig.width()\n view_height = fig.height()\n x = view_width * point[0]\n y = view_height * (1 - point[1])\n point = Point(x, y)\n for idx, apoint in enumerate(add_points):\n x2 = view_width * apoint[0]\n y2 = view_height * (1 - apoint[1])\n add_points[idx] = Point(x2, y2)\n\n elif xform == 'data':\n # For Qt, the equivalent of matplotlibs transData\n # would be a transformation to\n # the coordinate system of the ViewBox.\n # This only works on the View (self.mne.view)\n fig = self.mne.view\n point = self.mne.viewbox.mapViewToScene(Point(*point))\n for idx, apoint in enumerate(add_points):\n add_points[idx] = self.mne.viewbox.mapViewToScene(\n Point(*apoint))\n\n elif xform == 'none' or xform is None:\n if isinstance(point, (tuple, list)):\n point = Point(*point)\n else:\n point = Point(point)\n for idx, apoint in enumerate(add_points):\n if isinstance(apoint, (tuple, list)):\n add_points[idx] = Point(*apoint)\n else:\n add_points[idx] = Point(apoint)\n\n # Use pytest-qt's exception-hook\n with capture_exceptions() as exceptions:\n widget = fig.viewport() if isinstance(fig, QGraphicsView) else fig\n if kind == 'press':\n # always click because most interactivity comes form\n # mouseClickEvent from pyqtgraph (just press doesn't suffice\n # here).\n _mouseClick(widget=widget, pos=point, button=button)\n elif kind == 'release':\n _mouseRelease(widget=widget, pos=point, button=button)\n elif kind == 'motion':\n _mouseMove(widget=widget, pos=point, buttons=button)\n elif kind == 'drag':\n _mouseDrag(widget=widget, positions=[point] + add_points,\n button=button)\n\n for exc in exceptions:\n raise RuntimeError(f'There as been an {exc[0]} inside the Qt '\n f'event loop (look above for traceback).')\n\n # Waiting some time for events to be processed.\n QTest.qWait(50)\n\n def _fake_scroll(self, x, y, step, fig=None):\n # QTest doesn't support simulating scrolling-wheel\n self.vscroll(step)\n\n def _click_ch_name(self, ch_index, button):\n self.mne.channel_axis.repaint()\n # Wait because channel-axis may need time\n # (came up with test_epochs::test_plot_epochs_clicks)\n QTest.qWait(100)\n if not self.mne.butterfly:\n ch_name = self.mne.ch_names[self.mne.picks[ch_index]]\n xrange, yrange = self.mne.channel_axis.ch_texts[ch_name]\n x = np.mean(xrange)\n y = np.mean(yrange)\n\n self._fake_click((x, y), fig=self.mne.view, button=button,\n xform='none')\n\n def _update_trace_offsets(self):\n \"\"\"legacy method for mne<1.0\"\"\"\n pass\n\n def _resize_by_factor(self, factor):\n pass\n\n def _get_ticklabels(self, 
orientation):\n if orientation == 'x':\n ax = self.mne.time_axis\n else:\n ax = self.mne.channel_axis\n\n return list(ax.get_labels())\n\n def _get_scale_bar_texts(self):\n return tuple(t.toPlainText() for t in self.mne.scalebar_texts.values())\n\n def show(self):\n # Set raise_window like matplotlib if possible\n super().show()\n try:\n from matplotlib import rcParams\n raise_window = rcParams['figure.raise_window']\n except ImportError:\n raise_window = True\n if raise_window:\n self.activateWindow()\n self.raise_()\n\n def _close_event(self, fig=None):\n \"\"\"Force calling of the MPL figure close event.\"\"\"\n fig = fig or self\n if hasattr(fig, 'canvas'):\n try:\n fig.canvas.close_event()\n except ValueError: # old mpl with Qt\n pass # pragma: no cover\n else:\n fig.close()\n\n def closeEvent(self, event):\n \"\"\"Customize close event.\"\"\"\n event.accept()\n if hasattr(self, 'mne'):\n # Explicit disconnects to avoid reference cycles that gc can't\n # properly resolve ()\n if hasattr(self.mne, 'plt'):\n _disconnect(self.mne.plt.sigXRangeChanged)\n _disconnect(self.mne.plt.sigYRangeChanged)\n if hasattr(self.mne, 'toolbar'):\n for action in self.mne.toolbar.actions():\n _disconnect(action.triggered)\n # Save settings going into QSettings.\n for qsetting in qsettings_params:\n value = getattr(self.mne, qsetting)\n QSettings().setValue(qsetting, value)\n for attr in ('keyboard_shortcuts', 'traces', 'plt', 'toolbar'):\n if hasattr(self.mne, attr):\n delattr(self.mne, attr)\n if hasattr(self.mne, 'child_figs'):\n for fig in self.mne.child_figs:\n fig.close()\n self.mne.child_figs.clear()\n for attr in ('traces', 'event_lines', 'regions'):\n getattr(self.mne, attr, []).clear()\n if getattr(self.mne, 'vline', None) is not None:\n if self.mne.is_epochs:\n for vl in self.mne.vline:\n _disconnect(vl.sigPositionChangeFinished)\n self.mne.vline.clear()\n else:\n _disconnect(self.mne.vline.sigPositionChangeFinished)\n if getattr(self, 'load_thread', None) is not None:\n self.load_thread.clean()\n self.load_thread = None\n\n # Remove self from browser_instances in globals\n if self in _browser_instances:\n _browser_instances.remove(self)\n self._close(event)\n self.gotClosed.emit()\n # Make sure PyQtBrowser gets deleted after it was closed.\n self.deleteLater()\n\n\ndef _get_n_figs():\n # Wait for a short time to let the Qt-loop clean up\n QTest.qWait(100)\n return len([window for window in QApplication.topLevelWindows()\n if window.isVisible()])\n\n\ndef _close_all():\n if len(QApplication.topLevelWindows()) > 0:\n QApplication.closeAllWindows()\n\n\n# mouse testing functions adapted from pyqtgraph\n# (pyqtgraph.tests.ui_testing.py)\ndef _mousePress(widget, pos, button, modifier=None):\n if modifier is None:\n modifier = Qt.KeyboardModifier.NoModifier\n event = QMouseEvent(QEvent.Type.MouseButtonPress, pos, button,\n Qt.MouseButton.NoButton, modifier)\n QApplication.sendEvent(widget, event)\n\n\ndef _mouseRelease(widget, pos, button, modifier=None):\n if modifier is None:\n modifier = Qt.KeyboardModifier.NoModifier\n event = QMouseEvent(QEvent.Type.MouseButtonRelease, pos,\n button, Qt.MouseButton.NoButton, modifier)\n QApplication.sendEvent(widget, event)\n\n\ndef _mouseMove(widget, pos, buttons=None, modifier=None):\n if buttons is None:\n buttons = Qt.MouseButton.NoButton\n if modifier is None:\n modifier = Qt.KeyboardModifier.NoModifier\n event = QMouseEvent(QEvent.Type.MouseMove, pos,\n Qt.MouseButton.NoButton, buttons, modifier)\n QApplication.sendEvent(widget, event)\n\n\ndef 
_mouseClick(widget, pos, button, modifier=None):\n _mouseMove(widget, pos)\n _mousePress(widget, pos, button, modifier)\n _mouseRelease(widget, pos, button, modifier)\n\n\ndef _mouseDrag(widget, positions, button, modifier=None):\n _mouseMove(widget, positions[0])\n _mousePress(widget, positions[0], button, modifier)\n # Delay for 10 ms for drag to be recognized.\n QTest.qWait(10)\n for pos in positions[1:]:\n _mouseMove(widget, pos, button, modifier)\n _mouseRelease(widget, positions[-1], button, modifier)\n\n\ndef _init_browser(**kwargs):\n setConfigOption('enableExperimental', True)\n\n _init_mne_qtapp(pg_app=True)\n browser = PyQtGraphBrowser(**kwargs)\n\n return browser\n" ]
[ [ "numpy.asarray", "numpy.in1d", "scipy.stats.zscore", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.argmin", "numpy.searchsorted", "numpy.where", "numpy.ones_like", "numpy.clip", "numpy.unique", "numpy.arange", "numpy.intersect1d", "numpy.copy", "numpy.diff", "numpy.min", "numpy.full_like", "numpy.log10", "numpy.equal", "numpy.require", "numpy.array", "numpy.logical_and", "numpy.abs", "numpy.array_equal", "matplotlib.figure.Figure", "numpy.argwhere", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "numpy.empty", "matplotlib.colors.to_rgba_array" ] ]
k-fillmore/pandas
[ "67d4cae17bec45e84b9cf51bcf4fb5bbe293b26f" ]
[ "pandas/tests/frame/test_reductions.py" ]
[ "from datetime import timedelta\nfrom decimal import Decimal\n\nfrom dateutil.tz import tzlocal\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import is_platform_windows\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n isna,\n notna,\n to_datetime,\n to_timedelta,\n)\nimport pandas._testing as tm\nimport pandas.core.algorithms as algorithms\nimport pandas.core.nanops as nanops\n\n\ndef assert_stat_op_calc(\n opname,\n alternative,\n frame,\n has_skipna=True,\n check_dtype=True,\n check_dates=False,\n rtol=1e-5,\n atol=1e-8,\n skipna_alternative=None,\n):\n \"\"\"\n Check that operator opname works as advertised on frame\n\n Parameters\n ----------\n opname : string\n Name of the operator to test on frame\n alternative : function\n Function that opname is tested against; i.e. \"frame.opname()\" should\n equal \"alternative(frame)\".\n frame : DataFrame\n The object that the tests are executed on\n has_skipna : bool, default True\n Whether the method \"opname\" has the kwarg \"skip_na\"\n check_dtype : bool, default True\n Whether the dtypes of the result of \"frame.opname()\" and\n \"alternative(frame)\" should be checked.\n check_dates : bool, default false\n Whether opname should be tested on a Datetime Series\n rtol : float, default 1e-5\n Relative tolerance.\n atol : float, default 1e-8\n Absolute tolerance.\n skipna_alternative : function, default None\n NaN-safe version of alternative\n \"\"\"\n f = getattr(frame, opname)\n\n if check_dates:\n expected_warning = FutureWarning if opname in [\"mean\", \"median\"] else None\n df = DataFrame({\"b\": date_range(\"1/1/2001\", periods=2)})\n with tm.assert_produces_warning(expected_warning):\n result = getattr(df, opname)()\n assert isinstance(result, Series)\n\n df[\"a\"] = range(len(df))\n with tm.assert_produces_warning(expected_warning):\n result = getattr(df, opname)()\n assert isinstance(result, Series)\n assert len(result)\n\n if has_skipna:\n\n def wrapper(x):\n return alternative(x.values)\n\n skipna_wrapper = tm._make_skipna_wrapper(alternative, skipna_alternative)\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n tm.assert_series_equal(\n result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol\n )\n # HACK: win32\n tm.assert_series_equal(\n result1,\n frame.apply(wrapper, axis=1),\n check_dtype=False,\n rtol=rtol,\n atol=atol,\n )\n else:\n skipna_wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n tm.assert_series_equal(\n result0,\n frame.apply(skipna_wrapper),\n check_dtype=check_dtype,\n rtol=rtol,\n atol=atol,\n )\n\n if opname in [\"sum\", \"prod\"]:\n expected = frame.apply(skipna_wrapper, axis=1)\n tm.assert_series_equal(\n result1, expected, check_dtype=False, rtol=rtol, atol=atol\n )\n\n # check dtypes\n if check_dtype:\n lcd_dtype = frame.values.dtype\n assert lcd_dtype == result0.dtype\n assert lcd_dtype == result1.dtype\n\n # bad axis\n with pytest.raises(ValueError, match=\"No axis named 2\"):\n f(axis=2)\n\n # all NA case\n if has_skipna:\n all_na = frame * np.NaN\n r0 = getattr(all_na, opname)(axis=0)\n r1 = getattr(all_na, opname)(axis=1)\n if opname in [\"sum\", \"prod\"]:\n unit = 1 if opname == \"prod\" else 0 # result for empty sum/prod\n expected = Series(unit, index=r0.index, dtype=r0.dtype)\n tm.assert_series_equal(r0, expected)\n expected = Series(unit, index=r1.index, dtype=r1.dtype)\n 
tm.assert_series_equal(r1, expected)\n\n\ndef assert_stat_op_api(opname, float_frame, float_string_frame, has_numeric_only=False):\n \"\"\"\n Check that API for operator opname works as advertised on frame\n\n Parameters\n ----------\n opname : string\n Name of the operator to test on frame\n float_frame : DataFrame\n DataFrame with columns of type float\n float_string_frame : DataFrame\n DataFrame with both float and string columns\n has_numeric_only : bool, default False\n Whether the method \"opname\" has the kwarg \"numeric_only\"\n \"\"\"\n # make sure works on mixed-type frame\n getattr(float_string_frame, opname)(axis=0)\n getattr(float_string_frame, opname)(axis=1)\n\n if has_numeric_only:\n getattr(float_string_frame, opname)(axis=0, numeric_only=True)\n getattr(float_string_frame, opname)(axis=1, numeric_only=True)\n getattr(float_frame, opname)(axis=0, numeric_only=False)\n getattr(float_frame, opname)(axis=1, numeric_only=False)\n\n\ndef assert_bool_op_calc(opname, alternative, frame, has_skipna=True):\n \"\"\"\n Check that bool operator opname works as advertised on frame\n\n Parameters\n ----------\n opname : string\n Name of the operator to test on frame\n alternative : function\n Function that opname is tested against; i.e. \"frame.opname()\" should\n equal \"alternative(frame)\".\n frame : DataFrame\n The object that the tests are executed on\n has_skipna : bool, default True\n Whether the method \"opname\" has the kwarg \"skip_na\"\n \"\"\"\n f = getattr(frame, opname)\n\n if has_skipna:\n\n def skipna_wrapper(x):\n nona = x.dropna().values\n return alternative(nona)\n\n def wrapper(x):\n return alternative(x.values)\n\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n\n tm.assert_series_equal(result0, frame.apply(wrapper))\n tm.assert_series_equal(\n result1, frame.apply(wrapper, axis=1), check_dtype=False\n ) # HACK: win32\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n\n tm.assert_series_equal(result0, frame.apply(skipna_wrapper))\n tm.assert_series_equal(\n result1, frame.apply(skipna_wrapper, axis=1), check_dtype=False\n )\n\n # bad axis\n with pytest.raises(ValueError, match=\"No axis named 2\"):\n f(axis=2)\n\n # all NA case\n if has_skipna:\n all_na = frame * np.NaN\n r0 = getattr(all_na, opname)(axis=0)\n r1 = getattr(all_na, opname)(axis=1)\n if opname == \"any\":\n assert not r0.any()\n assert not r1.any()\n else:\n assert r0.all()\n assert r1.all()\n\n\ndef assert_bool_op_api(\n opname, bool_frame_with_na, float_string_frame, has_bool_only=False\n):\n \"\"\"\n Check that API for boolean operator opname works as advertised on frame\n\n Parameters\n ----------\n opname : string\n Name of the operator to test on frame\n float_frame : DataFrame\n DataFrame with columns of type float\n float_string_frame : DataFrame\n DataFrame with both float and string columns\n has_bool_only : bool, default False\n Whether the method \"opname\" has the kwarg \"bool_only\"\n \"\"\"\n # make sure op works on mixed-type frame\n mixed = float_string_frame\n mixed[\"_bool_\"] = np.random.randn(len(mixed)) > 0.5\n getattr(mixed, opname)(axis=0)\n getattr(mixed, opname)(axis=1)\n\n if has_bool_only:\n getattr(mixed, opname)(axis=0, bool_only=True)\n getattr(mixed, opname)(axis=1, bool_only=True)\n getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)\n getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)\n\n\nclass TestDataFrameAnalytics:\n\n # 
---------------------------------------------------------------------\n # Reductions\n\n def test_stat_op_api(self, float_frame, float_string_frame):\n assert_stat_op_api(\n \"count\", float_frame, float_string_frame, has_numeric_only=True\n )\n assert_stat_op_api(\n \"sum\", float_frame, float_string_frame, has_numeric_only=True\n )\n\n assert_stat_op_api(\"nunique\", float_frame, float_string_frame)\n assert_stat_op_api(\"mean\", float_frame, float_string_frame)\n assert_stat_op_api(\"product\", float_frame, float_string_frame)\n assert_stat_op_api(\"median\", float_frame, float_string_frame)\n assert_stat_op_api(\"min\", float_frame, float_string_frame)\n assert_stat_op_api(\"max\", float_frame, float_string_frame)\n assert_stat_op_api(\"mad\", float_frame, float_string_frame)\n assert_stat_op_api(\"var\", float_frame, float_string_frame)\n assert_stat_op_api(\"std\", float_frame, float_string_frame)\n assert_stat_op_api(\"sem\", float_frame, float_string_frame)\n assert_stat_op_api(\"median\", float_frame, float_string_frame)\n\n try:\n from scipy.stats import kurtosis, skew # noqa:F401\n\n assert_stat_op_api(\"skew\", float_frame, float_string_frame)\n assert_stat_op_api(\"kurt\", float_frame, float_string_frame)\n except ImportError:\n pass\n\n def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):\n def count(s):\n return notna(s).sum()\n\n def nunique(s):\n return len(algorithms.unique1d(s.dropna()))\n\n def mad(x):\n return np.abs(x - x.mean()).mean()\n\n def var(x):\n return np.var(x, ddof=1)\n\n def std(x):\n return np.std(x, ddof=1)\n\n def sem(x):\n return np.std(x, ddof=1) / np.sqrt(len(x))\n\n def skewness(x):\n from scipy.stats import skew # noqa:F811\n\n if len(x) < 3:\n return np.nan\n return skew(x, bias=False)\n\n def kurt(x):\n from scipy.stats import kurtosis # noqa:F811\n\n if len(x) < 4:\n return np.nan\n return kurtosis(x, bias=False)\n\n assert_stat_op_calc(\n \"nunique\",\n nunique,\n float_frame_with_na,\n has_skipna=False,\n check_dtype=False,\n check_dates=True,\n )\n\n # GH#32571 check_less_precise is needed on apparently-random\n # py37-npdev builds and OSX-PY36-min_version builds\n # mixed types (with upcasting happening)\n assert_stat_op_calc(\n \"sum\",\n np.sum,\n mixed_float_frame.astype(\"float32\"),\n check_dtype=False,\n rtol=1e-3,\n )\n\n assert_stat_op_calc(\n \"sum\", np.sum, float_frame_with_na, skipna_alternative=np.nansum\n )\n assert_stat_op_calc(\"mean\", np.mean, float_frame_with_na, check_dates=True)\n assert_stat_op_calc(\n \"product\", np.prod, float_frame_with_na, skipna_alternative=np.nanprod\n )\n\n assert_stat_op_calc(\"mad\", mad, float_frame_with_na)\n assert_stat_op_calc(\"var\", var, float_frame_with_na)\n assert_stat_op_calc(\"std\", std, float_frame_with_na)\n assert_stat_op_calc(\"sem\", sem, float_frame_with_na)\n\n assert_stat_op_calc(\n \"count\",\n count,\n float_frame_with_na,\n has_skipna=False,\n check_dtype=False,\n check_dates=True,\n )\n\n try:\n from scipy import kurtosis, skew # noqa:F401\n\n assert_stat_op_calc(\"skew\", skewness, float_frame_with_na)\n assert_stat_op_calc(\"kurt\", kurt, float_frame_with_na)\n except ImportError:\n pass\n\n # TODO: Ensure warning isn't emitted in the first place\n @pytest.mark.filterwarnings(\"ignore:All-NaN:RuntimeWarning\")\n def test_median(self, float_frame_with_na, int_frame):\n def wrapper(x):\n if isna(x).any():\n return np.nan\n return np.median(x)\n\n assert_stat_op_calc(\"median\", wrapper, float_frame_with_na, check_dates=True)\n assert_stat_op_calc(\n 
\"median\", wrapper, int_frame, check_dtype=False, check_dates=True\n )\n\n @pytest.mark.parametrize(\n \"method\", [\"sum\", \"mean\", \"prod\", \"var\", \"std\", \"skew\", \"min\", \"max\"]\n )\n def test_stat_operators_attempt_obj_array(self, method):\n # GH#676\n data = {\n \"a\": [\n -0.00049987540199591344,\n -0.0016467257772919831,\n 0.00067695870775883013,\n ],\n \"b\": [-0, -0, 0.0],\n \"c\": [\n 0.00031111847529610595,\n 0.0014902627951905339,\n -0.00094099200035979691,\n ],\n }\n df1 = DataFrame(data, index=[\"foo\", \"bar\", \"baz\"], dtype=\"O\")\n\n df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3], 2: [np.nan, 4]}, dtype=object)\n\n for df in [df1, df2]:\n assert df.values.dtype == np.object_\n result = getattr(df, method)(1)\n expected = getattr(df.astype(\"f8\"), method)(1)\n\n if method in [\"sum\", \"prod\"]:\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"op\", [\"mean\", \"std\", \"var\", \"skew\", \"kurt\", \"sem\"])\n def test_mixed_ops(self, op):\n # GH#16116\n df = DataFrame(\n {\n \"int\": [1, 2, 3, 4],\n \"float\": [1.0, 2.0, 3.0, 4.0],\n \"str\": [\"a\", \"b\", \"c\", \"d\"],\n }\n )\n\n result = getattr(df, op)()\n assert len(result) == 2\n\n with pd.option_context(\"use_bottleneck\", False):\n result = getattr(df, op)()\n assert len(result) == 2\n\n def test_reduce_mixed_frame(self):\n # GH 6806\n df = DataFrame(\n {\n \"bool_data\": [True, True, False, False, False],\n \"int_data\": [10, 20, 30, 40, 50],\n \"string_data\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\n }\n )\n df.reindex(columns=[\"bool_data\", \"int_data\", \"string_data\"])\n test = df.sum(axis=0)\n tm.assert_numpy_array_equal(\n test.values, np.array([2, 150, \"abcde\"], dtype=object)\n )\n tm.assert_series_equal(test, df.T.sum(axis=1))\n\n def test_nunique(self):\n df = DataFrame({\"A\": [1, 1, 1], \"B\": [1, 2, 3], \"C\": [1, np.nan, 3]})\n tm.assert_series_equal(df.nunique(), Series({\"A\": 1, \"B\": 3, \"C\": 2}))\n tm.assert_series_equal(\n df.nunique(dropna=False), Series({\"A\": 1, \"B\": 3, \"C\": 3})\n )\n tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))\n tm.assert_series_equal(\n df.nunique(axis=1, dropna=False), Series({0: 1, 1: 3, 2: 2})\n )\n\n @pytest.mark.parametrize(\"tz\", [None, \"UTC\"])\n def test_mean_mixed_datetime_numeric(self, tz):\n # https://github.com/pandas-dev/pandas/issues/24752\n df = DataFrame({\"A\": [1, 1], \"B\": [Timestamp(\"2000\", tz=tz)] * 2})\n with tm.assert_produces_warning(FutureWarning):\n result = df.mean()\n expected = Series([1.0], index=[\"A\"])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"tz\", [None, \"UTC\"])\n def test_mean_excludes_datetimes(self, tz):\n # https://github.com/pandas-dev/pandas/issues/24752\n # Our long-term desired behavior is unclear, but the behavior in\n # 0.24.0rc1 was buggy.\n df = DataFrame({\"A\": [Timestamp(\"2000\", tz=tz)] * 2})\n with tm.assert_produces_warning(FutureWarning):\n result = df.mean()\n\n expected = Series(dtype=np.float64)\n tm.assert_series_equal(result, expected)\n\n def test_mean_mixed_string_decimal(self):\n # GH 11670\n # possible bug when calculating mean of DataFrame?\n\n d = [\n {\"A\": 2, \"B\": None, \"C\": Decimal(\"628.00\")},\n {\"A\": 1, \"B\": None, \"C\": Decimal(\"383.00\")},\n {\"A\": 3, \"B\": None, \"C\": Decimal(\"651.00\")},\n {\"A\": 2, \"B\": None, \"C\": Decimal(\"575.00\")},\n {\"A\": 4, \"B\": None, \"C\": Decimal(\"1114.00\")},\n {\"A\": 1, \"B\": \"TEST\", \"C\": Decimal(\"241.00\")},\n {\"A\": 2, 
\"B\": None, \"C\": Decimal(\"572.00\")},\n {\"A\": 4, \"B\": None, \"C\": Decimal(\"609.00\")},\n {\"A\": 3, \"B\": None, \"C\": Decimal(\"820.00\")},\n {\"A\": 5, \"B\": None, \"C\": Decimal(\"1223.00\")},\n ]\n\n df = DataFrame(d)\n\n result = df.mean()\n expected = Series([2.7, 681.6], index=[\"A\", \"C\"])\n tm.assert_series_equal(result, expected)\n\n def test_var_std(self, datetime_frame):\n result = datetime_frame.std(ddof=4)\n expected = datetime_frame.apply(lambda x: x.std(ddof=4))\n tm.assert_almost_equal(result, expected)\n\n result = datetime_frame.var(ddof=4)\n expected = datetime_frame.apply(lambda x: x.var(ddof=4))\n tm.assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nanvar(arr, axis=0)\n assert not (result < 0).any()\n\n with pd.option_context(\"use_bottleneck\", False):\n result = nanops.nanvar(arr, axis=0)\n assert not (result < 0).any()\n\n @pytest.mark.parametrize(\"meth\", [\"sem\", \"var\", \"std\"])\n def test_numeric_only_flag(self, meth):\n # GH 9201\n df1 = DataFrame(np.random.randn(5, 3), columns=[\"foo\", \"bar\", \"baz\"])\n # set one entry to a number in str format\n df1.loc[0, \"foo\"] = \"100\"\n\n df2 = DataFrame(np.random.randn(5, 3), columns=[\"foo\", \"bar\", \"baz\"])\n # set one entry to a non-number str\n df2.loc[0, \"foo\"] = \"a\"\n\n result = getattr(df1, meth)(axis=1, numeric_only=True)\n expected = getattr(df1[[\"bar\", \"baz\"]], meth)(axis=1)\n tm.assert_series_equal(expected, result)\n\n result = getattr(df2, meth)(axis=1, numeric_only=True)\n expected = getattr(df2[[\"bar\", \"baz\"]], meth)(axis=1)\n tm.assert_series_equal(expected, result)\n\n # df1 has all numbers, df2 has a letter inside\n msg = r\"unsupported operand type\\(s\\) for -: 'float' and 'str'\"\n with pytest.raises(TypeError, match=msg):\n getattr(df1, meth)(axis=1, numeric_only=False)\n msg = \"could not convert string to float: 'a'\"\n with pytest.raises(TypeError, match=msg):\n getattr(df2, meth)(axis=1, numeric_only=False)\n\n def test_sem(self, datetime_frame):\n result = datetime_frame.sem(ddof=4)\n expected = datetime_frame.apply(lambda x: x.std(ddof=4) / np.sqrt(len(x)))\n tm.assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nansem(arr, axis=0)\n assert not (result < 0).any()\n\n with pd.option_context(\"use_bottleneck\", False):\n result = nanops.nansem(arr, axis=0)\n assert not (result < 0).any()\n\n @td.skip_if_no_scipy\n def test_kurt(self):\n index = MultiIndex(\n levels=[[\"bar\"], [\"one\", \"two\", \"three\"], [0, 1]],\n codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],\n )\n df = DataFrame(np.random.randn(6, 3), index=index)\n\n kurt = df.kurt()\n kurt2 = df.kurt(level=0).xs(\"bar\")\n tm.assert_series_equal(kurt, kurt2, check_names=False)\n assert kurt.name is None\n assert kurt2.name == \"bar\"\n\n @pytest.mark.parametrize(\n \"dropna, expected\",\n [\n (\n True,\n {\n \"A\": [12],\n \"B\": [10.0],\n \"C\": [1.0],\n \"D\": [\"a\"],\n \"E\": Categorical([\"a\"], categories=[\"a\"]),\n \"F\": to_datetime([\"2000-1-2\"]),\n \"G\": to_timedelta([\"1 days\"]),\n },\n ),\n (\n False,\n {\n \"A\": [12],\n \"B\": [10.0],\n \"C\": [np.nan],\n \"D\": np.array([np.nan], dtype=object),\n \"E\": Categorical([np.nan], categories=[\"a\"]),\n \"F\": [pd.NaT],\n \"G\": to_timedelta([pd.NaT]),\n },\n ),\n (\n True,\n {\n \"H\": [8, 9, np.nan, np.nan],\n \"I\": [8, 9, np.nan, np.nan],\n \"J\": [1, np.nan, np.nan, np.nan],\n \"K\": 
Categorical([\"a\", np.nan, np.nan, np.nan], categories=[\"a\"]),\n \"L\": to_datetime([\"2000-1-2\", \"NaT\", \"NaT\", \"NaT\"]),\n \"M\": to_timedelta([\"1 days\", \"nan\", \"nan\", \"nan\"]),\n \"N\": [0, 1, 2, 3],\n },\n ),\n (\n False,\n {\n \"H\": [8, 9, np.nan, np.nan],\n \"I\": [8, 9, np.nan, np.nan],\n \"J\": [1, np.nan, np.nan, np.nan],\n \"K\": Categorical([np.nan, \"a\", np.nan, np.nan], categories=[\"a\"]),\n \"L\": to_datetime([\"NaT\", \"2000-1-2\", \"NaT\", \"NaT\"]),\n \"M\": to_timedelta([\"nan\", \"1 days\", \"nan\", \"nan\"]),\n \"N\": [0, 1, 2, 3],\n },\n ),\n ],\n )\n def test_mode_dropna(self, dropna, expected):\n\n df = DataFrame(\n {\n \"A\": [12, 12, 19, 11],\n \"B\": [10, 10, np.nan, 3],\n \"C\": [1, np.nan, np.nan, np.nan],\n \"D\": [np.nan, np.nan, \"a\", np.nan],\n \"E\": Categorical([np.nan, np.nan, \"a\", np.nan]),\n \"F\": to_datetime([\"NaT\", \"2000-1-2\", \"NaT\", \"NaT\"]),\n \"G\": to_timedelta([\"1 days\", \"nan\", \"nan\", \"nan\"]),\n \"H\": [8, 8, 9, 9],\n \"I\": [9, 9, 8, 8],\n \"J\": [1, 1, np.nan, np.nan],\n \"K\": Categorical([\"a\", np.nan, \"a\", np.nan]),\n \"L\": to_datetime([\"2000-1-2\", \"2000-1-2\", \"NaT\", \"NaT\"]),\n \"M\": to_timedelta([\"1 days\", \"nan\", \"1 days\", \"nan\"]),\n \"N\": np.arange(4, dtype=\"int64\"),\n }\n )\n\n result = df[sorted(expected.keys())].mode(dropna=dropna)\n expected = DataFrame(expected)\n tm.assert_frame_equal(result, expected)\n\n def test_mode_sortwarning(self):\n # Check for the warning that is raised when the mode\n # results cannot be sorted\n\n df = DataFrame({\"A\": [np.nan, np.nan, \"a\", \"a\"]})\n expected = DataFrame({\"A\": [\"a\", np.nan]})\n\n with tm.assert_produces_warning(UserWarning, check_stacklevel=False):\n result = df.mode(dropna=False)\n result = result.sort_values(by=\"A\").reset_index(drop=True)\n\n tm.assert_frame_equal(result, expected)\n\n def test_mode_empty_df(self):\n df = DataFrame([], columns=[\"a\", \"b\"])\n result = df.mode()\n expected = DataFrame([], columns=[\"a\", \"b\"], index=Index([], dtype=int))\n tm.assert_frame_equal(result, expected)\n\n def test_operators_timedelta64(self):\n df = DataFrame(\n {\n \"A\": date_range(\"2012-1-1\", periods=3, freq=\"D\"),\n \"B\": date_range(\"2012-1-2\", periods=3, freq=\"D\"),\n \"C\": Timestamp(\"20120101\") - timedelta(minutes=5, seconds=5),\n }\n )\n\n diffs = DataFrame({\"A\": df[\"A\"] - df[\"C\"], \"B\": df[\"A\"] - df[\"B\"]})\n\n # min\n result = diffs.min()\n assert result[0] == diffs.loc[0, \"A\"]\n assert result[1] == diffs.loc[0, \"B\"]\n\n result = diffs.min(axis=1)\n assert (result == diffs.loc[0, \"B\"]).all()\n\n # max\n result = diffs.max()\n assert result[0] == diffs.loc[2, \"A\"]\n assert result[1] == diffs.loc[2, \"B\"]\n\n result = diffs.max(axis=1)\n assert (result == diffs[\"A\"]).all()\n\n # abs\n result = diffs.abs()\n result2 = abs(diffs)\n expected = DataFrame({\"A\": df[\"A\"] - df[\"C\"], \"B\": df[\"B\"] - df[\"A\"]})\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n # mixed frame\n mixed = diffs.copy()\n mixed[\"C\"] = \"foo\"\n mixed[\"D\"] = 1\n mixed[\"E\"] = 1.0\n mixed[\"F\"] = Timestamp(\"20130101\")\n\n # results in an object array\n result = mixed.min()\n expected = Series(\n [\n pd.Timedelta(timedelta(seconds=5 * 60 + 5)),\n pd.Timedelta(timedelta(days=-1)),\n \"foo\",\n 1,\n 1.0,\n Timestamp(\"20130101\"),\n ],\n index=mixed.columns,\n )\n tm.assert_series_equal(result, expected)\n\n # excludes numeric\n result = mixed.min(axis=1)\n expected 
= Series([1, 1, 1.0], index=[0, 1, 2])\n tm.assert_series_equal(result, expected)\n\n # works when only those columns are selected\n result = mixed[[\"A\", \"B\"]].min(1)\n expected = Series([timedelta(days=-1)] * 3)\n tm.assert_series_equal(result, expected)\n\n result = mixed[[\"A\", \"B\"]].min()\n expected = Series(\n [timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=[\"A\", \"B\"]\n )\n tm.assert_series_equal(result, expected)\n\n # GH 3106\n df = DataFrame(\n {\n \"time\": date_range(\"20130102\", periods=5),\n \"time2\": date_range(\"20130105\", periods=5),\n }\n )\n df[\"off1\"] = df[\"time2\"] - df[\"time\"]\n assert df[\"off1\"].dtype == \"timedelta64[ns]\"\n\n df[\"off2\"] = df[\"time\"] - df[\"time2\"]\n df._consolidate_inplace()\n assert df[\"off1\"].dtype == \"timedelta64[ns]\"\n assert df[\"off2\"].dtype == \"timedelta64[ns]\"\n\n def test_std_timedelta64_skipna_false(self):\n # GH#37392\n tdi = pd.timedelta_range(\"1 Day\", periods=10)\n df = DataFrame({\"A\": tdi, \"B\": tdi})\n df.iloc[-2, -1] = pd.NaT\n\n result = df.std(skipna=False)\n expected = Series(\n [df[\"A\"].std(), pd.NaT], index=[\"A\", \"B\"], dtype=\"timedelta64[ns]\"\n )\n tm.assert_series_equal(result, expected)\n\n result = df.std(axis=1, skipna=False)\n expected = Series([pd.Timedelta(0)] * 8 + [pd.NaT, pd.Timedelta(0)])\n tm.assert_series_equal(result, expected)\n\n def test_sum_corner(self):\n empty_frame = DataFrame()\n\n axis0 = empty_frame.sum(0)\n axis1 = empty_frame.sum(1)\n assert isinstance(axis0, Series)\n assert isinstance(axis1, Series)\n assert len(axis0) == 0\n assert len(axis1) == 0\n\n @pytest.mark.parametrize(\"method, unit\", [(\"sum\", 0), (\"prod\", 1)])\n def test_sum_prod_nanops(self, method, unit):\n idx = [\"a\", \"b\", \"c\"]\n df = DataFrame({\"a\": [unit, unit], \"b\": [unit, np.nan], \"c\": [np.nan, np.nan]})\n # The default\n result = getattr(df, method)\n expected = Series([unit, unit, unit], index=idx, dtype=\"float64\")\n\n # min_count=1\n result = getattr(df, method)(min_count=1)\n expected = Series([unit, unit, np.nan], index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = getattr(df, method)(min_count=0)\n expected = Series([unit, unit, unit], index=idx, dtype=\"float64\")\n tm.assert_series_equal(result, expected)\n\n result = getattr(df.iloc[1:], method)(min_count=1)\n expected = Series([unit, np.nan, np.nan], index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count > 1\n df = DataFrame({\"A\": [unit] * 10, \"B\": [unit] * 5 + [np.nan] * 5})\n result = getattr(df, method)(min_count=5)\n expected = Series(result, index=[\"A\", \"B\"])\n tm.assert_series_equal(result, expected)\n\n result = getattr(df, method)(min_count=6)\n expected = Series(result, index=[\"A\", \"B\"])\n tm.assert_series_equal(result, expected)\n\n def test_sum_nanops_timedelta(self):\n # prod isn't defined on timedeltas\n idx = [\"a\", \"b\", \"c\"]\n df = DataFrame({\"a\": [0, 0], \"b\": [0, np.nan], \"c\": [np.nan, np.nan]})\n\n df2 = df.apply(pd.to_timedelta)\n\n # 0 by default\n result = df2.sum()\n expected = Series([0, 0, 0], dtype=\"m8[ns]\", index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df2.sum(min_count=0)\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df2.sum(min_count=1)\n expected = Series([0, 0, np.nan], dtype=\"m8[ns]\", index=idx)\n tm.assert_series_equal(result, expected)\n\n def test_sum_object(self, float_frame):\n values = float_frame.values.astype(int)\n frame = 
DataFrame(values, index=float_frame.index, columns=float_frame.columns)\n deltas = frame * timedelta(1)\n deltas.sum()\n\n def test_sum_bool(self, float_frame):\n # ensure this works, bug report\n bools = np.isnan(float_frame)\n bools.sum(1)\n bools.sum(0)\n\n def test_sum_mixed_datetime(self):\n # GH#30886\n df = DataFrame(\n {\"A\": pd.date_range(\"2000\", periods=4), \"B\": [1, 2, 3, 4]}\n ).reindex([2, 3, 4])\n result = df.sum()\n\n expected = Series({\"B\": 7.0})\n tm.assert_series_equal(result, expected)\n\n def test_mean_corner(self, float_frame, float_string_frame):\n # unit test when have object data\n the_mean = float_string_frame.mean(axis=0)\n the_sum = float_string_frame.sum(axis=0, numeric_only=True)\n tm.assert_index_equal(the_sum.index, the_mean.index)\n assert len(the_mean.index) < len(float_string_frame.columns)\n\n # xs sum mixed type, just want to know it works...\n the_mean = float_string_frame.mean(axis=1)\n the_sum = float_string_frame.sum(axis=1, numeric_only=True)\n tm.assert_index_equal(the_sum.index, the_mean.index)\n\n # take mean of boolean column\n float_frame[\"bool\"] = float_frame[\"A\"] > 0\n means = float_frame.mean(0)\n assert means[\"bool\"] == float_frame[\"bool\"].values.mean()\n\n def test_mean_datetimelike(self):\n # GH#24757 check that datetimelike are excluded by default, handled\n # correctly with numeric_only=True\n\n df = DataFrame(\n {\n \"A\": np.arange(3),\n \"B\": pd.date_range(\"2016-01-01\", periods=3),\n \"C\": pd.timedelta_range(\"1D\", periods=3),\n \"D\": pd.period_range(\"2016\", periods=3, freq=\"A\"),\n }\n )\n result = df.mean(numeric_only=True)\n expected = Series({\"A\": 1.0})\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning):\n # in the future datetime columns will be included\n result = df.mean()\n expected = Series({\"A\": 1.0, \"C\": df.loc[1, \"C\"]})\n tm.assert_series_equal(result, expected)\n\n def test_mean_datetimelike_numeric_only_false(self):\n df = DataFrame(\n {\n \"A\": np.arange(3),\n \"B\": pd.date_range(\"2016-01-01\", periods=3),\n \"C\": pd.timedelta_range(\"1D\", periods=3),\n }\n )\n\n # datetime(tz) and timedelta work\n result = df.mean(numeric_only=False)\n expected = Series({\"A\": 1, \"B\": df.loc[1, \"B\"], \"C\": df.loc[1, \"C\"]})\n tm.assert_series_equal(result, expected)\n\n # mean of period is not allowed\n df[\"D\"] = pd.period_range(\"2016\", periods=3, freq=\"A\")\n\n with pytest.raises(TypeError, match=\"mean is not implemented for Period\"):\n df.mean(numeric_only=False)\n\n def test_mean_extensionarray_numeric_only_true(self):\n # https://github.com/pandas-dev/pandas/issues/33256\n arr = np.random.randint(1000, size=(10, 5))\n df = DataFrame(arr, dtype=\"Int64\")\n result = df.mean(numeric_only=True)\n expected = DataFrame(arr).mean()\n tm.assert_series_equal(result, expected)\n\n def test_stats_mixed_type(self, float_string_frame):\n # don't blow up\n float_string_frame.std(1)\n float_string_frame.var(1)\n float_string_frame.mean(1)\n float_string_frame.skew(1)\n\n def test_sum_bools(self):\n df = DataFrame(index=range(1), columns=range(10))\n bools = isna(df)\n assert bools.sum(axis=1)[0] == 10\n\n # ----------------------------------------------------------------------\n # Index of max / min\n\n def test_idxmin(self, float_frame, int_frame):\n frame = float_frame\n frame.iloc[5:10] = np.nan\n frame.iloc[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, int_frame]:\n result = df.idxmin(axis=axis, 
skipna=skipna)\n expected = df.apply(Series.idxmin, axis=axis, skipna=skipna)\n tm.assert_series_equal(result, expected)\n\n msg = \"No axis named 2 for object type DataFrame\"\n with pytest.raises(ValueError, match=msg):\n frame.idxmin(axis=2)\n\n def test_idxmax(self, float_frame, int_frame):\n frame = float_frame\n frame.iloc[5:10] = np.nan\n frame.iloc[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, int_frame]:\n result = df.idxmax(axis=axis, skipna=skipna)\n expected = df.apply(Series.idxmax, axis=axis, skipna=skipna)\n tm.assert_series_equal(result, expected)\n\n msg = \"No axis named 2 for object type DataFrame\"\n with pytest.raises(ValueError, match=msg):\n frame.idxmax(axis=2)\n\n def test_idxmax_mixed_dtype(self):\n # don't cast to object, which would raise in nanops\n dti = pd.date_range(\"2016-01-01\", periods=3)\n\n df = DataFrame({1: [0, 2, 1], 2: range(3)[::-1], 3: dti})\n\n result = df.idxmax()\n expected = Series([1, 0, 2], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n result = df.idxmin()\n expected = Series([0, 2, 0], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n # ----------------------------------------------------------------------\n # Logical reductions\n\n @pytest.mark.parametrize(\"opname\", [\"any\", \"all\"])\n def test_any_all(self, opname, bool_frame_with_na, float_string_frame):\n assert_bool_op_calc(\n opname, getattr(np, opname), bool_frame_with_na, has_skipna=True\n )\n assert_bool_op_api(\n opname, bool_frame_with_na, float_string_frame, has_bool_only=True\n )\n\n def test_any_all_extra(self):\n df = DataFrame(\n {\n \"A\": [True, False, False],\n \"B\": [True, True, False],\n \"C\": [True, True, True],\n },\n index=[\"a\", \"b\", \"c\"],\n )\n result = df[[\"A\", \"B\"]].any(1)\n expected = Series([True, True, False], index=[\"a\", \"b\", \"c\"])\n tm.assert_series_equal(result, expected)\n\n result = df[[\"A\", \"B\"]].any(1, bool_only=True)\n tm.assert_series_equal(result, expected)\n\n result = df.all(1)\n expected = Series([True, False, False], index=[\"a\", \"b\", \"c\"])\n tm.assert_series_equal(result, expected)\n\n result = df.all(1, bool_only=True)\n tm.assert_series_equal(result, expected)\n\n # Axis is None\n result = df.all(axis=None).item()\n assert result is False\n\n result = df.any(axis=None).item()\n assert result is True\n\n result = df[[\"C\"]].all(axis=None).item()\n assert result is True\n\n def test_any_datetime(self):\n\n # GH 23070\n float_data = [1, np.nan, 3, np.nan]\n datetime_data = [\n Timestamp(\"1960-02-15\"),\n Timestamp(\"1960-02-16\"),\n pd.NaT,\n pd.NaT,\n ]\n df = DataFrame({\"A\": float_data, \"B\": datetime_data})\n\n result = df.any(1)\n expected = Series([True, True, True, False])\n tm.assert_series_equal(result, expected)\n\n def test_any_all_bool_only(self):\n\n # GH 25101\n df = DataFrame(\n {\"col1\": [1, 2, 3], \"col2\": [4, 5, 6], \"col3\": [None, None, None]}\n )\n\n result = df.all(bool_only=True)\n expected = Series(dtype=np.bool_)\n tm.assert_series_equal(result, expected)\n\n df = DataFrame(\n {\n \"col1\": [1, 2, 3],\n \"col2\": [4, 5, 6],\n \"col3\": [None, None, None],\n \"col4\": [False, False, True],\n }\n )\n\n result = df.all(bool_only=True)\n expected = Series({\"col4\": False})\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"func, data, expected\",\n [\n (np.any, {}, False),\n (np.all, {}, True),\n (np.any, {\"A\": []}, False),\n (np.all, {\"A\": []}, True),\n (np.any, {\"A\": [False, 
False]}, False),\n (np.all, {\"A\": [False, False]}, False),\n (np.any, {\"A\": [True, False]}, True),\n (np.all, {\"A\": [True, False]}, False),\n (np.any, {\"A\": [True, True]}, True),\n (np.all, {\"A\": [True, True]}, True),\n (np.any, {\"A\": [False], \"B\": [False]}, False),\n (np.all, {\"A\": [False], \"B\": [False]}, False),\n (np.any, {\"A\": [False, False], \"B\": [False, True]}, True),\n (np.all, {\"A\": [False, False], \"B\": [False, True]}, False),\n # other types\n (np.all, {\"A\": Series([0.0, 1.0], dtype=\"float\")}, False),\n (np.any, {\"A\": Series([0.0, 1.0], dtype=\"float\")}, True),\n (np.all, {\"A\": Series([0, 1], dtype=int)}, False),\n (np.any, {\"A\": Series([0, 1], dtype=int)}, True),\n pytest.param(np.all, {\"A\": Series([0, 1], dtype=\"M8[ns]\")}, False),\n pytest.param(np.all, {\"A\": Series([0, 1], dtype=\"M8[ns, UTC]\")}, False),\n pytest.param(np.any, {\"A\": Series([0, 1], dtype=\"M8[ns]\")}, True),\n pytest.param(np.any, {\"A\": Series([0, 1], dtype=\"M8[ns, UTC]\")}, True),\n pytest.param(np.all, {\"A\": Series([1, 2], dtype=\"M8[ns]\")}, True),\n pytest.param(np.all, {\"A\": Series([1, 2], dtype=\"M8[ns, UTC]\")}, True),\n pytest.param(np.any, {\"A\": Series([1, 2], dtype=\"M8[ns]\")}, True),\n pytest.param(np.any, {\"A\": Series([1, 2], dtype=\"M8[ns, UTC]\")}, True),\n pytest.param(np.all, {\"A\": Series([0, 1], dtype=\"m8[ns]\")}, False),\n pytest.param(np.any, {\"A\": Series([0, 1], dtype=\"m8[ns]\")}, True),\n pytest.param(np.all, {\"A\": Series([1, 2], dtype=\"m8[ns]\")}, True),\n pytest.param(np.any, {\"A\": Series([1, 2], dtype=\"m8[ns]\")}, True),\n # np.all on Categorical raises, so the reduction drops the\n # column, so all is being done on an empty Series, so is True\n (np.all, {\"A\": Series([0, 1], dtype=\"category\")}, True),\n (np.any, {\"A\": Series([0, 1], dtype=\"category\")}, False),\n (np.all, {\"A\": Series([1, 2], dtype=\"category\")}, True),\n (np.any, {\"A\": Series([1, 2], dtype=\"category\")}, False),\n # Mix GH#21484\n pytest.param(\n np.all,\n {\n \"A\": Series([10, 20], dtype=\"M8[ns]\"),\n \"B\": Series([10, 20], dtype=\"m8[ns]\"),\n },\n True,\n ),\n ],\n )\n def test_any_all_np_func(self, func, data, expected):\n # GH 19976\n data = DataFrame(data)\n result = func(data)\n assert isinstance(result, np.bool_)\n assert result.item() is expected\n\n # method version\n result = getattr(DataFrame(data), func.__name__)(axis=None)\n assert isinstance(result, np.bool_)\n assert result.item() is expected\n\n def test_any_all_object(self):\n # GH 19976\n result = np.all(DataFrame(columns=[\"a\", \"b\"])).item()\n assert result is True\n\n result = np.any(DataFrame(columns=[\"a\", \"b\"])).item()\n assert result is False\n\n def test_any_all_object_bool_only(self):\n df = DataFrame({\"A\": [\"foo\", 2], \"B\": [True, False]}).astype(object)\n df._consolidate_inplace()\n df[\"C\"] = Series([True, True])\n\n # The underlying bug is in DataFrame._get_bool_data, so we check\n # that while we're here\n res = df._get_bool_data()\n expected = df[[\"B\", \"C\"]]\n tm.assert_frame_equal(res, expected)\n\n res = df.all(bool_only=True, axis=0)\n expected = Series([False, True], index=[\"B\", \"C\"])\n tm.assert_series_equal(res, expected)\n\n # operating on a subset of columns should not produce a _larger_ Series\n res = df[[\"B\", \"C\"]].all(bool_only=True, axis=0)\n tm.assert_series_equal(res, expected)\n\n assert not df.all(bool_only=True, axis=None)\n\n res = df.any(bool_only=True, axis=0)\n expected = Series([True, True], index=[\"B\", 
\"C\"])\n tm.assert_series_equal(res, expected)\n\n # operating on a subset of columns should not produce a _larger_ Series\n res = df[[\"B\", \"C\"]].any(bool_only=True, axis=0)\n tm.assert_series_equal(res, expected)\n\n assert df.any(bool_only=True, axis=None)\n\n @pytest.mark.parametrize(\"method\", [\"any\", \"all\"])\n def test_any_all_level_axis_none_raises(self, method):\n df = DataFrame(\n {\"A\": 1},\n index=MultiIndex.from_product(\n [[\"A\", \"B\"], [\"a\", \"b\"]], names=[\"out\", \"in\"]\n ),\n )\n xpr = \"Must specify 'axis' when aggregating by level.\"\n with pytest.raises(ValueError, match=xpr):\n getattr(df, method)(axis=None, level=\"out\")\n\n # ---------------------------------------------------------------------\n # Unsorted\n\n def test_series_broadcasting(self):\n # smoke test for numpy warnings\n # GH 16378, GH 16306\n df = DataFrame([1.0, 1.0, 1.0])\n df_nan = DataFrame({\"A\": [np.nan, 2.0, np.nan]})\n s = Series([1, 1, 1])\n s_nan = Series([np.nan, np.nan, 1])\n\n with tm.assert_produces_warning(None):\n df_nan.clip(lower=s, axis=0)\n for op in [\"lt\", \"le\", \"gt\", \"ge\", \"eq\", \"ne\"]:\n getattr(df, op)(s_nan, axis=0)\n\n\nclass TestDataFrameReductions:\n def test_min_max_dt64_with_NaT(self):\n # Both NaT and Timestamp are in DataFrame.\n df = DataFrame({\"foo\": [pd.NaT, pd.NaT, Timestamp(\"2012-05-01\")]})\n\n res = df.min()\n exp = Series([Timestamp(\"2012-05-01\")], index=[\"foo\"])\n tm.assert_series_equal(res, exp)\n\n res = df.max()\n exp = Series([Timestamp(\"2012-05-01\")], index=[\"foo\"])\n tm.assert_series_equal(res, exp)\n\n # GH12941, only NaTs are in DataFrame.\n df = DataFrame({\"foo\": [pd.NaT, pd.NaT]})\n\n res = df.min()\n exp = Series([pd.NaT], index=[\"foo\"])\n tm.assert_series_equal(res, exp)\n\n res = df.max()\n exp = Series([pd.NaT], index=[\"foo\"])\n tm.assert_series_equal(res, exp)\n\n def test_min_max_dt64_with_NaT_skipna_false(self, request, tz_naive_fixture):\n # GH#36907\n tz = tz_naive_fixture\n if isinstance(tz, tzlocal) and is_platform_windows():\n request.node.add_marker(\n pytest.mark.xfail(\n reason=\"GH#37659 OSError raised within tzlocal bc Windows \"\n \"chokes in times before 1970-01-01\"\n )\n )\n\n df = DataFrame(\n {\n \"a\": [\n Timestamp(\"2020-01-01 08:00:00\", tz=tz),\n Timestamp(\"1920-02-01 09:00:00\", tz=tz),\n ],\n \"b\": [Timestamp(\"2020-02-01 08:00:00\", tz=tz), pd.NaT],\n }\n )\n\n res = df.min(axis=1, skipna=False)\n expected = Series([df.loc[0, \"a\"], pd.NaT])\n assert expected.dtype == df[\"a\"].dtype\n\n tm.assert_series_equal(res, expected)\n\n res = df.max(axis=1, skipna=False)\n expected = Series([df.loc[0, \"b\"], pd.NaT])\n assert expected.dtype == df[\"a\"].dtype\n\n tm.assert_series_equal(res, expected)\n\n def test_min_max_dt64_api_consistency_with_NaT(self):\n # Calling the following sum functions returned an error for dataframes but\n # returned NaT for series. These tests check that the API is consistent in\n # min/max calls on empty Series/DataFrames. 
See GH:33704 for more\n # information\n df = DataFrame({\"x\": pd.to_datetime([])})\n expected_dt_series = Series(pd.to_datetime([]))\n # check axis 0\n assert (df.min(axis=0).x is pd.NaT) == (expected_dt_series.min() is pd.NaT)\n assert (df.max(axis=0).x is pd.NaT) == (expected_dt_series.max() is pd.NaT)\n\n # check axis 1\n tm.assert_series_equal(df.min(axis=1), expected_dt_series)\n tm.assert_series_equal(df.max(axis=1), expected_dt_series)\n\n def test_min_max_dt64_api_consistency_empty_df(self):\n # check DataFrame/Series api consistency when calling min/max on an empty\n # DataFrame/Series.\n df = DataFrame({\"x\": []})\n expected_float_series = Series([], dtype=float)\n # check axis 0\n assert np.isnan(df.min(axis=0).x) == np.isnan(expected_float_series.min())\n assert np.isnan(df.max(axis=0).x) == np.isnan(expected_float_series.max())\n # check axis 1\n tm.assert_series_equal(df.min(axis=1), expected_float_series)\n tm.assert_series_equal(df.min(axis=1), expected_float_series)\n\n @pytest.mark.parametrize(\n \"initial\",\n [\"2018-10-08 13:36:45+00:00\", \"2018-10-08 13:36:45+03:00\"], # Non-UTC timezone\n )\n @pytest.mark.parametrize(\"method\", [\"min\", \"max\"])\n def test_preserve_timezone(self, initial: str, method):\n # GH 28552\n initial_dt = pd.to_datetime(initial)\n expected = Series([initial_dt])\n df = DataFrame([expected])\n result = getattr(df, method)(axis=1)\n tm.assert_series_equal(result, expected)\n\n def test_frame_any_all_with_level(self):\n df = DataFrame(\n {\"data\": [False, False, True, False, True, False, True]},\n index=[\n [\"one\", \"one\", \"two\", \"one\", \"two\", \"two\", \"two\"],\n [0, 1, 0, 2, 1, 2, 3],\n ],\n )\n\n result = df.any(level=0)\n ex = DataFrame({\"data\": [False, True]}, index=[\"one\", \"two\"])\n tm.assert_frame_equal(result, ex)\n\n result = df.all(level=0)\n ex = DataFrame({\"data\": [False, False]}, index=[\"one\", \"two\"])\n tm.assert_frame_equal(result, ex)\n\n def test_frame_any_with_timedelta(self):\n # GH#17667\n df = DataFrame(\n {\n \"a\": Series([0, 0]),\n \"t\": Series([pd.to_timedelta(0, \"s\"), pd.to_timedelta(1, \"ms\")]),\n }\n )\n\n result = df.any(axis=0)\n expected = Series(data=[False, True], index=[\"a\", \"t\"])\n tm.assert_series_equal(result, expected)\n\n result = df.any(axis=1)\n expected = Series(data=[False, True])\n tm.assert_series_equal(result, expected)\n\n\nclass TestNuisanceColumns:\n @pytest.mark.parametrize(\"method\", [\"any\", \"all\"])\n def test_any_all_categorical_dtype_nuisance_column(self, method):\n # GH#36076 DataFrame should match Series behavior\n ser = Series([0, 1], dtype=\"category\", name=\"A\")\n df = ser.to_frame()\n\n # Double-check the Series behavior is to raise\n with pytest.raises(TypeError, match=\"does not implement reduction\"):\n getattr(ser, method)()\n\n with pytest.raises(TypeError, match=\"does not implement reduction\"):\n getattr(np, method)(ser)\n\n with pytest.raises(TypeError, match=\"does not implement reduction\"):\n getattr(df, method)(bool_only=False)\n\n # With bool_only=None, operating on this column raises and is ignored,\n # so we expect an empty result.\n result = getattr(df, method)(bool_only=None)\n expected = Series([], index=Index([]), dtype=bool)\n tm.assert_series_equal(result, expected)\n\n result = getattr(np, method)(df, axis=0)\n tm.assert_series_equal(result, expected)\n\n def test_median_categorical_dtype_nuisance_column(self):\n # GH#21020 DataFrame.median should match Series.median\n df = DataFrame({\"A\": Categorical([1, 2, 2, 2, 3])})\n 
ser = df[\"A\"]\n\n # Double-check the Series behavior is to raise\n with pytest.raises(TypeError, match=\"does not implement reduction\"):\n ser.median()\n\n with pytest.raises(TypeError, match=\"does not implement reduction\"):\n df.median(numeric_only=False)\n\n result = df.median()\n expected = Series([], index=Index([]), dtype=np.float64)\n tm.assert_series_equal(result, expected)\n\n # same thing, but with an additional non-categorical column\n df[\"B\"] = df[\"A\"].astype(int)\n\n with pytest.raises(TypeError, match=\"does not implement reduction\"):\n df.median(numeric_only=False)\n\n result = df.median()\n expected = Series([2.0], index=[\"B\"])\n tm.assert_series_equal(result, expected)\n\n # TODO: np.median(df, axis=0) gives np.array([2.0, 2.0]) instead\n # of expected.values\n\n @pytest.mark.parametrize(\"method\", [\"min\", \"max\"])\n def test_min_max_categorical_dtype_non_ordered_nuisance_column(self, method):\n # GH#28949 DataFrame.min should behave like Series.min\n cat = Categorical([\"a\", \"b\", \"c\", \"b\"], ordered=False)\n ser = Series(cat)\n df = ser.to_frame(\"A\")\n\n # Double-check the Series behavior\n with pytest.raises(TypeError, match=\"is not ordered for operation\"):\n getattr(ser, method)()\n\n with pytest.raises(TypeError, match=\"is not ordered for operation\"):\n getattr(np, method)(ser)\n\n with pytest.raises(TypeError, match=\"is not ordered for operation\"):\n getattr(df, method)(numeric_only=False)\n\n result = getattr(df, method)()\n expected = Series([], index=Index([]), dtype=np.float64)\n tm.assert_series_equal(result, expected)\n\n result = getattr(np, method)(df)\n tm.assert_series_equal(result, expected)\n\n # same thing, but with an additional non-categorical column\n df[\"B\"] = df[\"A\"].astype(object)\n result = getattr(df, method)()\n if method == \"min\":\n expected = Series([\"a\"], index=[\"B\"])\n else:\n expected = Series([\"c\"], index=[\"B\"])\n tm.assert_series_equal(result, expected)\n\n result = getattr(np, method)(df)\n tm.assert_series_equal(result, expected)\n\n def test_reduction_object_block_splits_nuisance_columns(self):\n # GH#37827\n df = DataFrame({\"A\": [0, 1, 2], \"B\": [\"a\", \"b\", \"c\"]}, dtype=object)\n\n # We should only exclude \"B\", not \"A\"\n result = df.mean()\n expected = Series([1.0], index=[\"A\"])\n tm.assert_series_equal(result, expected)\n\n # Same behavior but heterogeneous dtype\n df[\"C\"] = df[\"A\"].astype(int) + 4\n\n result = df.mean()\n expected = Series([1.0, 5.0], index=[\"A\", \"C\"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_sum_timedelta64_skipna_false():\n # GH#17235\n arr = np.arange(8).astype(np.int64).view(\"m8[s]\").reshape(4, 2)\n arr[-1, -1] = \"Nat\"\n\n df = DataFrame(arr)\n\n result = df.sum(skipna=False)\n expected = Series([pd.Timedelta(seconds=12), pd.NaT])\n tm.assert_series_equal(result, expected)\n\n result = df.sum(axis=0, skipna=False)\n tm.assert_series_equal(result, expected)\n\n result = df.sum(axis=1, skipna=False)\n expected = Series(\n [\n pd.Timedelta(seconds=1),\n pd.Timedelta(seconds=5),\n pd.Timedelta(seconds=9),\n pd.NaT,\n ]\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_mixed_frame_with_integer_sum():\n # https://github.com/pandas-dev/pandas/issues/34520\n df = DataFrame([[\"a\", 1]], columns=list(\"ab\"))\n df = df.astype({\"b\": \"Int64\"})\n result = df.sum()\n expected = Series([\"a\", 1], index=[\"a\", \"b\"])\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"numeric_only\", [True, False, None])\[email 
protected](\"method\", [\"min\", \"max\"])\ndef test_minmax_extensionarray(method, numeric_only):\n # https://github.com/pandas-dev/pandas/issues/32651\n int64_info = np.iinfo(\"int64\")\n ser = Series([int64_info.max, None, int64_info.min], dtype=pd.Int64Dtype())\n df = DataFrame({\"Int64\": ser})\n result = getattr(df, method)(numeric_only=numeric_only)\n expected = Series(\n [getattr(int64_info, method)], index=Index([\"Int64\"], dtype=\"object\")\n )\n tm.assert_series_equal(result, expected)\n" ]
[ [ "pandas._testing.assert_almost_equal", "pandas.to_datetime", "pandas.Series", "pandas.DataFrame", "numpy.random.randn", "numpy.iinfo", "numpy.var", "pandas.isna", "pandas._testing.assert_frame_equal", "pandas._testing._make_skipna_wrapper", "numpy.random.randint", "pandas.notna", "numpy.arange", "pandas.Index", "numpy.std", "pandas._testing.assert_series_equal", "pandas._testing.assert_index_equal", "pandas._testing.assert_produces_warning", "pandas.core.nanops.nansem", "pandas.compat.is_platform_windows", "pandas.MultiIndex", "numpy.isnan", "pandas.Categorical", "numpy.median", "pandas.option_context", "pandas.Timedelta", "pandas.to_timedelta", "pandas.MultiIndex.from_product", "pandas.date_range", "numpy.array", "pandas.timedelta_range", "numpy.random.random", "pandas.period_range", "scipy.skew", "scipy.kurtosis", "pandas.Int64Dtype", "pandas.Timestamp", "pandas.core.nanops.nanvar" ] ]
mathemakitten/tensorflow
[ "e62a6a8be2f9cfb79913bdb64f99efb5e88df0df" ]
[ "tensorflow/python/ops/array_ops.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# Tests for this file live in python/kernel_tests/array_ops_test.py\n\"\"\"Support for manipulating tensors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numbers\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import common_shapes\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\n# 'Constant' gets imported in the module 'array_ops'.\nfrom tensorflow.python.framework.constant_op import constant\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_array_ops import *\nfrom tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse # pylint: disable=unused-import\nfrom tensorflow.python.types import core\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util.tf_export import tf_export\n# pylint: enable=wildcard-import\n\n# Used for slicing to specify a new 1 size dimension\nnewaxis = None\ntf_export(\"newaxis\").export_constant(__name__, \"newaxis\")\n\n# We override the 'slice' for the \"slice\" op, so we keep Python's\n# existing 'slice' for later use in this module.\n_BaseSlice = slice\n\n\n@tf_export(\"reshape\", v1=[\"reshape\", \"manip.reshape\"])\[email protected]_dispatch_support\ndef reshape(tensor, shape, name=None): # pylint: disable=redefined-outer-name\n r\"\"\"Reshapes a tensor.\n\n Given `tensor`, this operation returns a new `tf.Tensor` that has the same\n values as `tensor` in the same order, except with a new shape given by\n `shape`.\n\n >>> t1 = [[1, 2, 3],\n ... [4, 5, 6]]\n >>> print(tf.shape(t1).numpy())\n [2 3]\n >>> t2 = tf.reshape(t1, [6])\n >>> t2\n <tf.Tensor: shape=(6,), dtype=int32,\n numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>\n >>> tf.reshape(t2, [3, 2])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)>\n\n The `tf.reshape` does not change the order of or the total number of elements\n in the tensor, and so it can reuse the underlying data buffer. 
This makes it\n a fast operation independent of how big of a tensor it is operating on.\n\n >>> tf.reshape([1, 2, 3], [2, 2])\n Traceback (most recent call last):\n ...\n InvalidArgumentError: Input to reshape is a tensor with 3 values, but the\n requested shape has 4\n\n To instead reorder the data to rearrange the dimensions of a tensor, see\n `tf.transpose`.\n\n >>> t = [[1, 2, 3],\n ... [4, 5, 6]]\n >>> tf.reshape(t, [3, 2]).numpy()\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)\n >>> tf.transpose(t, perm=[1, 0]).numpy()\n array([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)\n\n If one component of `shape` is the special value -1, the size of that\n dimension is computed so that the total size remains constant. In particular,\n a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can\n be -1.\n\n >>> t = [[1, 2, 3],\n ... [4, 5, 6]]\n >>> tf.reshape(t, [-1])\n <tf.Tensor: shape=(6,), dtype=int32,\n numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>\n >>> tf.reshape(t, [3, -1])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)>\n >>> tf.reshape(t, [-1, 2])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)>\n\n `tf.reshape(t, [])` reshapes a tensor `t` with one element to a scalar.\n\n >>> tf.reshape([7], []).numpy()\n 7\n\n More examples:\n\n >>> t = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> print(tf.shape(t).numpy())\n [9]\n >>> tf.reshape(t, [3, 3])\n <tf.Tensor: shape=(3, 3), dtype=int32, numpy=\n array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]], dtype=int32)>\n\n >>> t = [[[1, 1], [2, 2]],\n ... [[3, 3], [4, 4]]]\n >>> print(tf.shape(t).numpy())\n [2 2 2]\n >>> tf.reshape(t, [2, 4])\n <tf.Tensor: shape=(2, 4), dtype=int32, numpy=\n array([[1, 1, 2, 2],\n [3, 3, 4, 4]], dtype=int32)>\n\n >>> t = [[[1, 1, 1],\n ... [2, 2, 2]],\n ... [[3, 3, 3],\n ... [4, 4, 4]],\n ... [[5, 5, 5],\n ... [6, 6, 6]]]\n >>> print(tf.shape(t).numpy())\n [3 2 3]\n >>> # Pass '[-1]' to flatten 't'.\n >>> tf.reshape(t, [-1])\n <tf.Tensor: shape=(18,), dtype=int32,\n numpy=array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],\n dtype=int32)>\n >>> # -- Using -1 to infer the shape --\n >>> # Here -1 is inferred to be 9:\n >>> tf.reshape(t, [2, -1])\n <tf.Tensor: shape=(2, 9), dtype=int32, numpy=\n array([[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>\n >>> # -1 is inferred to be 2:\n >>> tf.reshape(t, [-1, 9])\n <tf.Tensor: shape=(2, 9), dtype=int32, numpy=\n array([[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>\n >>> # -1 is inferred to be 3:\n >>> tf.reshape(t, [ 2, -1, 3])\n <tf.Tensor: shape=(2, 3, 3), dtype=int32, numpy=\n array([[[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]],\n [[4, 4, 4],\n [5, 5, 5],\n [6, 6, 6]]], dtype=int32)>\n\n Args:\n tensor: A `Tensor`.\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Defines the shape of the output tensor.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor`. 
Has the same type as `tensor`.\n \"\"\"\n result = gen_array_ops.reshape(tensor, shape, name)\n tensor_util.maybe_set_static_shape(result, shape)\n return result\n\n\n@tf_export(\"fill\")\[email protected]_dispatch_support\ndef fill(dims, value, name=None):\n r\"\"\"Creates a tensor filled with a scalar value.\n\n See also `tf.ones`, `tf.zeros`, `tf.one_hot`, `tf.eye`.\n\n This operation creates a tensor of shape `dims` and fills it with `value`.\n\n For example:\n\n >>> tf.fill([2, 3], 9)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[9, 9, 9],\n [9, 9, 9]], dtype=int32)>\n\n `tf.fill` evaluates at graph runtime and supports dynamic shapes based on\n other runtime `tf.Tensors`, unlike `tf.constant(value, shape=dims)`, which\n embeds the value as a `Const` node.\n\n Args:\n dims: A 1-D sequence of non-negative numbers. Represents the shape of the\n output `tf.Tensor`. Entries should be of type: `int32`, `int64`.\n value: A value to fill the returned `tf.Tensor`.\n name: Optional string. The name of the output `tf.Tensor`.\n\n Returns:\n A `tf.Tensor` with shape `dims` and the same dtype as `value`.\n\n Raises:\n InvalidArgumentError: `dims` contains negative entries.\n NotFoundError: `dims` contains non-integer entries.\n\n @compatibility(numpy)\n Similar to `np.full`. In `numpy`, more parameters are supported. Passing a\n number argument as the shape (`np.full(5, value)`) is valid in `numpy` for\n specifying a 1-D shaped result, while TensorFlow does not support this syntax.\n @end_compatibility\n \"\"\"\n result = gen_array_ops.fill(dims, value, name=name)\n tensor_util.maybe_set_static_shape(result, dims)\n return result\n\n\n@tf_export(\"identity\")\[email protected]_unary_elementwise_api\[email protected]_dispatch_support\ndef identity(input, name=None): # pylint: disable=redefined-builtin\n r\"\"\"Return a Tensor with the same shape and contents as input.\n\n The return value is not the same Tensor as the original, but contains the same\n values. This operation is fast when used on the same device.\n\n For example:\n\n >>> a = tf.constant([0.78])\n >>> a_identity = tf.identity(a)\n >>> a.numpy()\n array([0.78], dtype=float32)\n >>> a_identity.numpy()\n array([0.78], dtype=float32)\n\n Calling `tf.identity` on a variable will make a Tensor that represents the\n value of that variable at the time it is called. This is equivalent to calling\n `<variable>.read_value()`.\n\n >>> a = tf.Variable(5)\n >>> a_identity = tf.identity(a)\n >>> a.assign_add(1)\n <tf.Variable ... shape=() dtype=int32, numpy=6>\n >>> a.numpy()\n 6\n >>> a_identity.numpy()\n 5\n\n Args:\n input: A `Tensor`, a `Variable`, a `CompositeTensor` or anything that can be\n converted to a tensor using `tf.convert_to_tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or CompositeTensor. Has the same type and contents as `input`.\n \"\"\"\n if isinstance(input, composite_tensor.CompositeTensor):\n return nest.map_structure(identity, input, expand_composites=True)\n if context.executing_eagerly() and not hasattr(input, \"graph\"):\n # Make sure we get an input with handle data attached from resource\n # variables. 
Variables have correct handle data when graph building.\n input = ops.convert_to_tensor(input)\n ret = gen_array_ops.identity(input, name=name)\n # Propagate handle data for happier shape inference for resource variables.\n if hasattr(input, \"_handle_data\"):\n ret._handle_data = input._handle_data # pylint: disable=protected-access\n return ret\n\n\n# pylint: disable=redefined-builtin,protected-access\n@tf_export(v1=[\"expand_dims\"])\[email protected]_dispatch_support\[email protected]_args(None, \"Use the `axis` argument instead\", \"dim\")\ndef expand_dims(input, axis=None, name=None, dim=None):\n \"\"\"Returns a tensor with a length 1 axis inserted at index `axis`.\n\n Given a tensor `input`, this operation inserts a dimension of length 1 at the\n dimension index `axis` of `input`'s shape. The dimension index follows Python\n indexing rules: It's zero-based, a negative index it is counted backward\n from the end.\n\n This operation is useful to:\n\n * Add an outer \"batch\" dimension to a single element.\n * Align axes for broadcasting.\n * To add an inner vector length axis to a tensor of scalars.\n\n For example:\n\n If you have a single image of shape `[height, width, channels]`:\n\n >>> image = tf.zeros([10,10,3])\n\n You can add an outer `batch` axis by passing `axis=0`:\n\n >>> tf.expand_dims(image, axis=0).shape.as_list()\n [1, 10, 10, 3]\n\n The new axis location matches Python `list.insert(axis, 1)`:\n\n >>> tf.expand_dims(image, axis=1).shape.as_list()\n [10, 1, 10, 3]\n\n Following standard Python indexing rules, a negative `axis` counts from the\n end so `axis=-1` adds an inner most dimension:\n\n >>> tf.expand_dims(image, -1).shape.as_list()\n [10, 10, 3, 1]\n\n This operation requires that `axis` is a valid index for `input.shape`,\n following Python indexing rules:\n\n ```\n -1-tf.rank(input) <= axis <= tf.rank(input)\n ```\n\n This operation is related to:\n\n * `tf.squeeze`, which removes dimensions of size 1.\n * `tf.reshape`, which provides more flexible reshaping capability.\n * `tf.sparse.expand_dims`, which provides this functionality for\n `tf.SparseTensor`\n\n Args:\n input: A `Tensor`.\n axis: 0-D (scalar). Specifies the dimension index at which to expand the\n shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.\n name: The name of the output `Tensor` (optional).\n dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.\n\n Returns:\n A `Tensor` with the same data as `input`, but its shape has an additional\n dimension of size 1 added.\n\n Raises:\n ValueError: if either both or neither of `dim` and `axis` are specified.\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"dim\", dim)\n if axis is None:\n raise ValueError(\"Must specify an axis argument to tf.expand_dims()\")\n return expand_dims_v2(input, axis, name)\n\n\n@tf_export(\"expand_dims\", v1=[])\[email protected]_dispatch_support\ndef expand_dims_v2(input, axis, name=None):\n \"\"\"Returns a tensor with a length 1 axis inserted at index `axis`.\n\n Given a tensor `input`, this operation inserts a dimension of length 1 at the\n dimension index `axis` of `input`'s shape. 
The dimension index follows Python\n indexing rules: It's zero-based, a negative index it is counted backward\n from the end.\n\n This operation is useful to:\n\n * Add an outer \"batch\" dimension to a single element.\n * Align axes for broadcasting.\n * To add an inner vector length axis to a tensor of scalars.\n\n For example:\n\n If you have a single image of shape `[height, width, channels]`:\n\n >>> image = tf.zeros([10,10,3])\n\n You can add an outer `batch` axis by passing `axis=0`:\n\n >>> tf.expand_dims(image, axis=0).shape.as_list()\n [1, 10, 10, 3]\n\n The new axis location matches Python `list.insert(axis, 1)`:\n\n >>> tf.expand_dims(image, axis=1).shape.as_list()\n [10, 1, 10, 3]\n\n Following standard Python indexing rules, a negative `axis` counts from the\n end so `axis=-1` adds an inner most dimension:\n\n >>> tf.expand_dims(image, -1).shape.as_list()\n [10, 10, 3, 1]\n\n This operation requires that `axis` is a valid index for `input.shape`,\n following Python indexing rules:\n\n ```\n -1-tf.rank(input) <= axis <= tf.rank(input)\n ```\n\n This operation is related to:\n\n * `tf.squeeze`, which removes dimensions of size 1.\n * `tf.reshape`, which provides more flexible reshaping capability.\n * `tf.sparse.expand_dims`, which provides this functionality for\n `tf.SparseTensor`\n\n Args:\n input: A `Tensor`.\n axis: Integer specifying the dimension index at which to expand the\n shape of `input`. Given an input of D dimensions, `axis` must be in range\n `[-(D+1), D]` (inclusive).\n name: Optional string. The name of the output `Tensor`.\n\n Returns:\n A tensor with the same data as `input`, with an additional dimension\n inserted at the index specified by `axis`.\n\n Raises:\n TypeError: If `axis` is not specified.\n InvalidArgumentError: If `axis` is out of range `[-(D+1), D]`.\n \"\"\"\n return gen_array_ops.expand_dims(input, axis, name)\n\n\n# pylint: enable=redefined-builtin,protected-access\n\n\n# Aliases for some automatically-generated names.\n# pylint: disable=protected-access\[email protected](\"2016-11-30\",\n \"This op will be removed after the deprecation date. \"\n \"Please switch to tf.setdiff1d().\")\ndef listdiff(x, y, out_idx=None, name=None):\n return gen_array_ops.list_diff(x, y, out_idx, name)\n\n\nlistdiff.__doc__ = gen_array_ops.list_diff.__doc__ + \"\\n\" + listdiff.__doc__\n\n# pylint: enable=protected-access\n\n\n# pylint: disable=undefined-variable\[email protected](\"2018-11-30\",\n \"This op will be removed after the deprecation date. \"\n \"Please switch to tf.sets.difference().\")\n@tf_export(v1=[\"setdiff1d\"])\[email protected]_dispatch_support\ndef setdiff1d(x, y, index_dtype=dtypes.int32, name=None):\n \"\"\"Computes the difference between two lists of numbers or strings.\n\n Given a list x and a list y, this operation returns a list out that\n represents all values that are in x but not in y. The returned list\n out is sorted in the same order that the numbers appear in x\n (duplicates are preserved). This operation also returns a list idx\n that represents the position of each out element in x.\n\n In other words:\n\n ```python\n out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]\n ```\n\n Example usage:\n\n >>> x = [1, 2, 3, 4, 5, 6]\n >>> y = [1, 3, 5]\n >>> setdiff1d(x,y)\n ListDiff(out=<tf.Tensor: id=2, shape=(3,), dtype=int32,\n numpy=array([2, 4, 6], dtype=int32)>, idx=<tf.Tensor: id=3,\n shape=(3,), dtype=int32, numpy=array([1, 3, 5], dtype=int32)>)\n\n Args:\n x: A Tensor. 1-D. Values to keep.\n y: A Tensor. 
Must have the same type as x. 1-D. Values to remove.\n out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to\n tf.int32.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of Tensor objects (out, idx).\n out: A Tensor. Has the same type as x.\n idx: A Tensor of type out_idx.\n \"\"\"\n return gen_array_ops.list_diff(x, y, index_dtype, name)\n\n\nsetdiff1d.__doc__ = gen_array_ops.list_diff.__doc__\n\n\n@tf_export(\"broadcast_dynamic_shape\")\[email protected]_dispatch_support\ndef broadcast_dynamic_shape(shape_x, shape_y):\n \"\"\"Computes the shape of a broadcast given symbolic shapes.\n\n When `shape_x` and `shape_y` are Tensors representing shapes (i.e. the result\n of calling tf.shape on another Tensor) this computes a Tensor which is the\n shape of the result of a broadcasting op applied in tensors of shapes\n `shape_x` and `shape_y`.\n\n This is useful when validating the result of a broadcasting operation when the\n tensors do not have statically known shapes.\n\n Example:\n\n >>> shape_x = (1, 2, 3)\n >>> shape_y = (5, 1, 3)\n >>> tf.broadcast_dynamic_shape(shape_x, shape_y)\n <tf.Tensor: shape=(3,), dtype=int32, numpy=array([5, 2, 3], ...>\n\n Args:\n shape_x: A rank 1 integer `Tensor`, representing the shape of x.\n shape_y: A rank 1 integer `Tensor`, representing the shape of y.\n\n Returns:\n A rank 1 integer `Tensor` representing the broadcasted shape.\n\n Raises:\n InvalidArgumentError: If the two shapes are incompatible for\n broadcasting.\n \"\"\"\n return gen_array_ops.broadcast_args(shape_x, shape_y)\n\n\n@tf_export(\"broadcast_static_shape\")\[email protected]_dispatch_support\ndef broadcast_static_shape(shape_x, shape_y):\n \"\"\"Computes the shape of a broadcast given known shapes.\n\n When `shape_x` and `shape_y` are fully known `TensorShape`s this computes a\n `TensorShape` which is the shape of the result of a broadcasting op applied in\n tensors of shapes `shape_x` and `shape_y`.\n\n For example, if shape_x is `TensorShape([1, 2, 3])` and shape_y is\n `TensorShape([5, 1, 3])`, the result is a TensorShape whose value is\n `TensorShape([5, 2, 3])`.\n\n This is useful when validating the result of a broadcasting operation when the\n tensors have statically known shapes.\n\n Example:\n\n >>> shape_x = tf.TensorShape([1, 2, 3])\n >>> shape_y = tf.TensorShape([5, 1 ,3])\n >>> tf.broadcast_static_shape(shape_x, shape_y)\n TensorShape([5, 2, 3])\n\n Args:\n shape_x: A `TensorShape`\n shape_y: A `TensorShape`\n\n Returns:\n A `TensorShape` representing the broadcasted shape.\n\n Raises:\n ValueError: If the two shapes can not be broadcasted.\n \"\"\"\n return common_shapes.broadcast_shape(shape_x, shape_y)\n\n\n@tf_export(\"shape\", v1=[])\[email protected]_dispatch_support\ndef shape_v2(input, out_type=dtypes.int32, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Returns a tensor containing the shape of the input tensor.\n\n See also `tf.size`, `tf.rank`.\n\n `tf.shape` returns a 1-D integer tensor representing the shape of `input`.\n For a scalar input, the tensor returned has a shape of (0,) and its value is\n the empty vector (i.e. 
[]).\n\n For example:\n\n >>> tf.shape(1.)\n <tf.Tensor: shape=(0,), dtype=int32, numpy=array([], dtype=int32)>\n\n >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n >>> tf.shape(t)\n <tf.Tensor: shape=(3,), dtype=int32, numpy=array([2, 2, 3], dtype=int32)>\n\n Note: When using symbolic tensors, such as when using the Keras API,\n tf.shape() will return the shape of the symbolic tensor.\n\n >>> a = tf.keras.layers.Input((None, 10))\n >>> tf.shape(a)\n <... shape=(3,) dtype=int32...>\n\n In these cases, using `tf.Tensor.shape` will return more informative results.\n\n >>> a.shape\n TensorShape([None, None, 10])\n\n (The first `None` represents the as yet unknown batch size.)\n\n `tf.shape` and `Tensor.shape` should be identical in eager mode. Within\n `tf.function` or within a `compat.v1` context, not all dimensions may be\n known until execution time. Hence when defining custom layers and models\n for graph mode, prefer the dynamic `tf.shape(x)` over the static `x.shape`.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n out_type: (Optional) The specified output type of the operation (`int32` or\n `int64`). Defaults to `tf.int32`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `out_type`.\n \"\"\"\n return shape(input, name, out_type)\n\n\n@tf_export(v1=[\"shape\"])\[email protected]_dispatch_support\ndef shape(input, name=None, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the shape of a tensor.\n\n This operation returns a 1-D integer tensor representing the shape of `input`.\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.shape(t) # [2, 2, 3]\n ```\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified output type of the operation (`int32`\n or `int64`). Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`.\n \"\"\"\n return shape_internal(input, name, optimize=True, out_type=out_type)\n\n\ndef shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the shape of a tensor.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n optimize: if true, encode the shape as a constant when possible.\n out_type: (Optional) The specified output type of the operation (`int32` or\n `int64`). 
Defaults to tf.int32.\n\n Returns:\n A `Tensor` of type `out_type`.\n\n \"\"\"\n with ops.name_scope(name, \"Shape\", [input]) as name:\n if isinstance(\n input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n return gen_math_ops.cast(input.dense_shape, out_type)\n else:\n if not context.executing_eagerly():\n input = ops.convert_to_tensor(input)\n input_shape = input.get_shape()\n if optimize and input_shape.is_fully_defined():\n return constant(input_shape.as_list(), out_type, name=name)\n return gen_array_ops.shape(input, name=name, out_type=out_type)\n\n\n@tf_export(\"shape_n\")\[email protected]_dispatch_support\ndef shape_n(input, out_type=dtypes.int32, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Returns shape of tensors.\n\n Args:\n input: A list of at least 1 `Tensor` object with the same type.\n out_type: The specified output type of the operation (`int32` or `int64`).\n Defaults to `tf.int32`(optional).\n name: A name for the operation (optional).\n\n Returns:\n A list with the same length as `input` of `Tensor` objects with\n type `out_type`.\n \"\"\"\n\n return gen_array_ops.shape_n(input, out_type=out_type, name=name)\n\n\n@tf_export(\"size\", v1=[])\[email protected]_dispatch_support\ndef size_v2(input, out_type=dtypes.int32, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the size of a tensor.\n\n See also `tf.shape`.\n\n Returns a 0-D `Tensor` representing the number of elements in `input`\n of type `out_type`. Defaults to tf.int32.\n\n For example:\n\n >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n >>> tf.size(t)\n <tf.Tensor: shape=(), dtype=int32, numpy=12>\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified non-quantized numeric output type of the\n operation. Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`. Defaults to `tf.int32`.\n\n @compatibility(numpy)\n Equivalent to np.size()\n @end_compatibility\n \"\"\"\n\n return size(input, name, out_type)\n\n\n@tf_export(v1=[\"size\"])\[email protected]_dispatch_support\ndef size(input, name=None, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the size of a tensor.\n\n Returns a 0-D `Tensor` representing the number of elements in `input`\n of type `out_type`. Defaults to tf.int32.\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.size(t) # 12\n ```\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified non-quantized numeric output type of the\n operation. Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`. Defaults to `tf.int32`.\n\n @compatibility(numpy)\n Equivalent to np.size()\n @end_compatibility\n \"\"\"\n return size_internal(input, name, optimize=True, out_type=out_type)\n\n\ndef size_internal(input, name=None, optimize=True, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin,protected-access\n \"\"\"Returns the size of a tensor.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n optimize: if true, encode the size as a constant when possible.\n out_type: (Optional) The specified non-quantized numeric output type of the\n operation. Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`. 
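\n\n    As an added illustrative note (not part of the original docstring): for a\n    `SparseTensor` input, the size is the number of elements implied by the\n    dense shape, as the `SparseTensor` branch below computes. A minimal sketch,\n    with made-up values:\n\n    ```python\n    st = tf.SparseTensor(indices=[[0, 0]], values=[1], dense_shape=[3, 4])\n    tf.size(st)  # 12, the product of the dense shape\n    ```\n\n    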
Defaults to `tf.int32`.\n \"\"\"\n if (context.executing_eagerly() and not hasattr(input, \"graph\") and\n not isinstance(\n input,\n (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))):\n input = ops.convert_to_tensor(input)\n np_out_type = out_type.as_numpy_dtype\n num_elements = np.prod(input._shape_tuple(), dtype=np_out_type) # pylint: disable=protected-access\n return ops.convert_to_tensor(num_elements, dtype=out_type)\n with ops.name_scope(name, \"Size\", [input]) as name:\n if isinstance(\n input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n return gen_math_ops.prod(\n gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)\n else:\n input = ops.convert_to_tensor(input)\n input_shape = input.get_shape()\n if optimize:\n if input_shape.is_fully_defined():\n return constant(input_shape.num_elements(), out_type, name=name)\n if input_shape.dims and any(dim == 0 for dim in input_shape.dims):\n return constant(0, out_type, name=name)\n return gen_array_ops.size(input, name=name, out_type=out_type)\n\n\n@tf_export(\"rank\")\[email protected]_dispatch_support\ndef rank(input, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the rank of a tensor.\n\n See also `tf.shape`.\n\n Returns a 0-D `int32` `Tensor` representing the rank of `input`.\n\n For example:\n\n ```python\n # shape of tensor 't' is [2, 2, 3]\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.rank(t) # 3\n ```\n\n **Note**: The rank of a tensor is not the same as the rank of a matrix. The\n rank of a tensor is the number of indices required to uniquely select each\n element of the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\"\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int32`.\n\n @compatibility(numpy)\n Equivalent to np.ndim\n @end_compatibility\n \"\"\"\n return rank_internal(input, name, optimize=True)\n\n\ndef rank_internal(input, name=None, optimize=True):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the rank of a tensor.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n optimize: if true, encode the rank as a constant when possible.\n\n Returns:\n A `Tensor` of type `int32`.\n \"\"\"\n with ops.name_scope(name, \"Rank\", [input]) as name:\n if isinstance(\n input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n return gen_array_ops.size(input.dense_shape, name=name)\n else:\n input = ops.convert_to_tensor(input)\n input_shape = input.get_shape()\n if optimize and input_shape.ndims is not None:\n return constant(input_shape.ndims, dtypes.int32, name=name)\n return gen_array_ops.rank(input, name=name)\n\n\n_SLICE_TYPE_ERROR = (\n \"Only integers, slices (`:`), ellipsis (`...`), \"\n \"tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid \"\n \"indices\")\n\n_SUPPORTED_SLICE_DTYPES = (dtypes.int32, dtypes.int32_ref, dtypes.int64,\n dtypes.int64_ref)\n\n\ndef _check_index(idx):\n \"\"\"Check if a given value is a valid index into a tensor.\"\"\"\n if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):\n return\n\n # Optimistic check. 
Assumptions:\n  # * any object with a dtype is supported\n  # * any object with a dtype has a sizeable shape attribute.\n  dtype = getattr(idx, \"dtype\", None)\n  if (dtype is None or dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or\n      idx.shape and len(idx.shape) == 1):\n    # TODO(slebedev): IndexError seems more appropriate here, but it\n    # will break `_slice_helper` contract.\n    raise TypeError(_SLICE_TYPE_ERROR + \", got {!r}\".format(idx))\n\n\ndef _is_undefined_dimension(d):\n  return isinstance(d, tensor_shape.Dimension) and d.value is None\n\n\n@tf_export(\"__operators__.getitem\", v1=[])\n@dispatch.add_dispatch_support\ndef _slice_helper(tensor, slice_spec, var=None):\n  \"\"\"Overload for Tensor.__getitem__.\n\n  This operation extracts the specified region from the tensor.\n  The notation is similar to NumPy with the restriction that\n  currently only basic indexing is supported. That means that\n  using a non-scalar tensor as input is not currently allowed.\n\n  Some useful examples:\n\n  ```python\n  # Strip leading and trailing 2 elements\n  foo = tf.constant([1,2,3,4,5,6])\n  print(foo[2:-2].eval())  # => [3,4]\n\n  # Skip every other row and reverse the order of the columns\n  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n  print(foo[::2,::-1].eval())  # => [[3,2,1], [9,8,7]]\n\n  # Use scalar tensors as indices on both dimensions\n  print(foo[tf.constant(0), tf.constant(2)].eval())  # => 3\n\n  # Insert another dimension\n  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n  print(foo[tf.newaxis, :, :].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]\n  print(foo[:, tf.newaxis, :].eval())  # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]\n  print(foo[:, :, tf.newaxis].eval())  # => [[[1],[2],[3]], [[4],[5],[6]],\n  [[7],[8],[9]]]\n\n  # Ellipses (3 equivalent operations)\n  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n  print(foo[tf.newaxis, :, :].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]\n  print(foo[tf.newaxis, ...].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]\n  print(foo[tf.newaxis].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]\n\n  # Masks\n  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n  print(foo[foo > 2].eval())  # => [3, 4, 5, 6, 7, 8, 9]\n  ```\n\n  Notes:\n    - `tf.newaxis` is `None` as in NumPy.\n    - An implicit ellipsis is placed at the end of the `slice_spec`.\n    - NumPy advanced indexing is currently not supported.\n\n  Purpose in the API:\n\n    This method is exposed in TensorFlow's API so that library developers\n    can register dispatching for `Tensor.__getitem__` to allow it to handle\n    custom composite tensors & other custom objects.\n\n    The API symbol is not intended to be called by users directly and does\n    appear in TensorFlow's generated documentation.\n\n  Args:\n    tensor: An ops.Tensor object.\n    slice_spec: The arguments to Tensor.__getitem__.\n    var: In the case of variable slice assignment, the Variable object to slice\n      (i.e. 
tensor is the read-only view of this variable).\n\n Returns:\n The appropriate slice of \"tensor\", based on \"slice_spec\".\n\n Raises:\n ValueError: If a slice range is negative size.\n TypeError: If the slice indices aren't int, slice, ellipsis,\n tf.newaxis or scalar int32/int64 tensors.\n \"\"\"\n tensor = ops.convert_to_tensor(tensor)\n # TODO(wangpeng): Consider supporting var\n if var is None and ops._numpy_style_slicing: # pylint: disable=protected-access\n return tensor._numpy_style_getitem(slice_spec) # pylint: disable=protected-access\n\n if isinstance(slice_spec, bool) or \\\n (isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \\\n (isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):\n return boolean_mask(tensor=tensor, mask=slice_spec)\n\n if not isinstance(slice_spec, (list, tuple)):\n slice_spec = [slice_spec]\n\n begin, end, strides = [], [], []\n index = 0\n\n new_axis_mask, shrink_axis_mask = 0, 0\n begin_mask, end_mask = 0, 0\n ellipsis_mask = 0\n for s in slice_spec:\n if isinstance(s, _BaseSlice):\n if s.start is not None and not _is_undefined_dimension(s.start):\n _check_index(s.start)\n begin.append(s.start)\n else:\n begin.append(0)\n begin_mask |= (1 << index)\n if s.stop is not None and not _is_undefined_dimension(s.stop):\n _check_index(s.stop)\n end.append(s.stop)\n else:\n end.append(0)\n end_mask |= (1 << index)\n if s.step is not None and not _is_undefined_dimension(s.step):\n _check_index(s.step)\n strides.append(s.step)\n else:\n strides.append(1)\n elif s is Ellipsis:\n begin.append(0)\n end.append(0)\n strides.append(1)\n ellipsis_mask |= (1 << index)\n elif s is newaxis:\n begin.append(0)\n end.append(0)\n strides.append(1)\n new_axis_mask |= (1 << index)\n else:\n _check_index(s)\n begin.append(s)\n end.append(s + 1)\n strides.append(1)\n shrink_axis_mask |= (1 << index)\n index += 1\n\n # stack possibly involves no tensors, so we must use op_scope correct graph.\n with ops.name_scope(\n None,\n \"strided_slice\", [tensor] + begin + end + strides,\n skip_on_eager=False) as name:\n if begin:\n packed_begin, packed_end, packed_strides = (stack(begin), stack(end),\n stack(strides))\n if (packed_begin.dtype == dtypes.int64 or\n packed_end.dtype == dtypes.int64 or\n packed_strides.dtype == dtypes.int64):\n if packed_begin.dtype != dtypes.int64:\n packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)\n if packed_end.dtype != dtypes.int64:\n packed_end = gen_math_ops.cast(packed_end, dtypes.int64)\n if packed_strides.dtype != dtypes.int64:\n packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)\n else:\n var_empty = constant([], dtype=dtypes.int32)\n packed_begin = packed_end = packed_strides = var_empty\n return strided_slice(\n tensor,\n packed_begin,\n packed_end,\n packed_strides,\n begin_mask=begin_mask,\n end_mask=end_mask,\n shrink_axis_mask=shrink_axis_mask,\n new_axis_mask=new_axis_mask,\n ellipsis_mask=ellipsis_mask,\n var=var,\n name=name)\n\n\n# pylint: disable=undefined-variable,protected-access,redefined-outer-name\n@tf_export(\"slice\")\[email protected]_dispatch_support\ndef slice(input_, begin, size, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Extracts a slice from a tensor.\n\n See also `tf.strided_slice`.\n\n This operation extracts a slice of size `size` from a tensor `input_` starting\n at the location specified by `begin`. 
The slice `size` is represented as a\n tensor shape, where `size[i]` is the number of elements of the 'i'th dimension\n of `input_` that you want to slice. The starting location (`begin`) for the\n slice is represented as an offset in each dimension of `input_`. In other\n words, `begin[i]` is the offset into the i'th dimension of `input_` that you\n want to slice from.\n\n Note that `tf.Tensor.__getitem__` is typically a more pythonic way to\n perform slices, as it allows you to write `foo[3:7, :-2]` instead of\n `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.\n\n `begin` is zero-based; `size` is one-based. If `size[i]` is -1,\n all remaining elements in dimension i are included in the\n slice. In other words, this is equivalent to setting:\n\n `size[i] = input_.dim_size(i) - begin[i]`\n\n This operation requires that:\n\n `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]],\n [[3, 3, 3], [4, 4, 4]],\n [[5, 5, 5], [6, 6, 6]]])\n tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]\n tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],\n # [4, 4, 4]]]\n tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],\n # [[5, 5, 5]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n size: An `int32` or `int64` `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input_`.\n \"\"\"\n return gen_array_ops._slice(input_, begin, size, name=name)\n\n\n# pylint: disable=invalid-name\n@tf_export(\"strided_slice\")\[email protected]_dispatch_support\ndef strided_slice(input_,\n begin,\n end,\n strides=None,\n begin_mask=0,\n end_mask=0,\n ellipsis_mask=0,\n new_axis_mask=0,\n shrink_axis_mask=0,\n var=None,\n name=None):\n \"\"\"Extracts a strided slice of a tensor (generalized Python array indexing).\n\n See also `tf.slice`.\n\n **Instead of calling this op directly most users will want to use the\n NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which\n is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**\n The interface of this op is a low-level encoding of the slicing syntax.\n\n Roughly speaking, this op extracts a slice of size `(end-begin)/stride`\n from the given `input_` tensor. Starting at the location specified by `begin`\n the slice continues by adding `stride` to the index until all dimensions are\n not less than `end`.\n Note that a stride can be negative, which causes a reverse slice.\n\n Given a Python slice `input[spec0, spec1, ..., specn]`,\n this function will be called as follows.\n\n `begin`, `end`, and `strides` will be vectors of length n.\n n in general is not equal to the rank of the `input_` tensor.\n\n In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,\n `new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to\n the ith spec.\n\n If the ith bit of `begin_mask` is set, `begin[i]` is ignored and\n the fullest possible range in that dimension is used instead.\n `end_mask` works analogously, except with the end range.\n\n `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.\n `foo[::-1]` reverses a tensor with shape 8.\n\n If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions\n as needed will be inserted between other dimensions. 
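\n\n  As an added illustrative sketch (not part of the original examples), the mask\n  encoding described here can be read off from an equivalent Python slice; the\n  tensor and the slice below are arbitrary:\n\n  ```python\n  foo = tf.reshape(tf.range(24), [2, 3, 4])\n  a = foo[1:, ..., tf.newaxis]\n  b = tf.strided_slice(\n      foo, begin=[1, 0, 0], end=[0, 0, 0], strides=[1, 1, 1],\n      end_mask=0b001,       # spec 0 is `1:`, so its end is open\n      ellipsis_mask=0b010,  # spec 1 is `...`\n      new_axis_mask=0b100)  # spec 2 is tf.newaxis\n  a.shape                # (1, 3, 4, 1)\n  tf.reduce_all(a == b)  # True\n  ```\n\n  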
Only one\n non-zero bit is allowed in `ellipsis_mask`.\n\n For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is\n equivalent to `foo[3:5,:,:,4:5]` and\n `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.\n\n If the ith bit of `new_axis_mask` is set, then `begin`,\n `end`, and `stride` are ignored and a new length 1 dimension is\n added at this point in the output tensor.\n\n For example,\n `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.\n\n If the ith bit of `shrink_axis_mask` is set, it implies that the ith\n specification shrinks the dimensionality by 1, taking on the value at index\n `begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in\n Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`\n equal to 2.\n\n\n NOTE: `begin` and `end` are zero-indexed.\n `strides` entries must be non-zero.\n\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]],\n [[3, 3, 3], [4, 4, 4]],\n [[5, 5, 5], [6, 6, 6]]])\n tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]\n tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],\n # [4, 4, 4]]]\n tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],\n # [3, 3, 3]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n end: An `int32` or `int64` `Tensor`.\n strides: An `int32` or `int64` `Tensor`.\n begin_mask: An `int32` mask.\n end_mask: An `int32` mask.\n ellipsis_mask: An `int32` mask.\n new_axis_mask: An `int32` mask.\n shrink_axis_mask: An `int32` mask.\n var: The variable corresponding to `input_` or None\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input`.\n \"\"\"\n\n if strides is None:\n strides = ones_like(begin)\n\n op = gen_array_ops.strided_slice(\n input=input_,\n begin=begin,\n end=end,\n strides=strides,\n name=name,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipsis_mask=ellipsis_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n parent_name = name\n\n if var is not None:\n def assign(val, name=None):\n \"\"\"Closure that holds all the arguments to create an assignment.\"\"\"\n\n if name is None:\n name = parent_name + \"_assign\"\n\n return var._strided_slice_assign(\n begin=begin,\n end=end,\n strides=strides,\n value=val,\n name=name,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipsis_mask=ellipsis_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n op.assign = assign\n\n return op\n\n\ndef _SliceHelperVar(var, slice_spec):\n \"\"\"Creates a slice helper object given a variable.\n\n This allows creating a sub-tensor from part of the current contents\n of a variable. See `tf.Tensor.__getitem__` for detailed examples\n of slicing.\n\n This function in addition also allows assignment to a sliced range.\n This is similar to `__setitem__` functionality in Python. However,\n the syntax is different so that the user can capture the assignment\n operation for grouping or passing to `sess.run()`.\n For example,\n\n ```python\n import tensorflow as tf\n A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)\n with tf.compat.v1.Session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]\n\n op = A[:2,:2].assign(22. 
* tf.ones((2, 2)))\n    print(sess.run(op))  # => [[22, 22, 3], [22, 22, 6], [7,8,9]]\n  ```\n\n  Note that assignments currently do not support NumPy broadcasting\n  semantics.\n\n  Args:\n    var: An `ops.Variable` object.\n    slice_spec: The arguments to `Tensor.__getitem__`.\n\n  Returns:\n    The appropriate slice of \"tensor\", based on \"slice_spec\", as an\n    operator. The operator also has an `assign()` method\n    that can be used to generate an assignment operator.\n\n  Raises:\n    ValueError: If a slice range is negative size.\n    TypeError: If the slice indices aren't int, slice,\n      ellipsis, tf.newaxis or int32/int64 tensors.\n\n  \"\"\"\n\n  return _slice_helper(var.value(), slice_spec, var)\n\n\nops.Tensor._override_operator(\"__getitem__\", _slice_helper)\n\n\n@tf_export(\"parallel_stack\")\n@dispatch.add_dispatch_support\ndef parallel_stack(values, name=\"parallel_stack\"):\n  \"\"\"Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.\n\n  Requires that the shape of inputs be known at graph construction time.\n\n  Packs the list of tensors in `values` into a tensor with rank one higher than\n  each tensor in `values`, by packing them along the first dimension.\n  Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`\n  tensor will have the shape `(N, A, B, C)`.\n\n  For example:\n\n  ```python\n  x = tf.constant([1, 4])\n  y = tf.constant([2, 5])\n  z = tf.constant([3, 6])\n  tf.parallel_stack([x, y, z])  # [[1, 4], [2, 5], [3, 6]]\n  ```\n\n  The difference between `stack` and `parallel_stack` is that `stack` requires\n  all the inputs be computed before the operation will begin but doesn't require\n  that the input shapes be known during graph construction.\n\n  `parallel_stack` will copy pieces of the input into the output as they become\n  available; in some situations this can provide a performance benefit.\n\n  Unlike `stack`, `parallel_stack` does NOT support backpropagation.\n\n  This is the opposite of unstack. 
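\n\n  As an added illustrative sketch (not part of the original docstring): because\n  `parallel_stack` cannot run eagerly, one way to exercise it in TF2 is to call\n  it inside a `tf.function`; the values below are arbitrary:\n\n  ```python\n  @tf.function\n  def packed():\n    # Shapes are fully known at graph construction time, as required.\n    x = tf.constant([1, 4])\n    y = tf.constant([2, 5])\n    z = tf.constant([3, 6])\n    return tf.parallel_stack([x, y, z])\n\n  packed()  # [[1, 4], [2, 5], [3, 6]]\n  ```\n\n  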
The numpy equivalent is\n\n tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])\n\n @compatibility(eager)\n parallel_stack is not compatible with eager execution.\n @end_compatibility\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n name: A name for this operation (optional).\n\n Returns:\n output: A stacked `Tensor` with the same type as `values`.\n\n Raises:\n RuntimeError: if executed in eager mode.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.parallel_stack() is not compatible with \"\n \"eager execution.\")\n with ops.name_scope(name):\n value_t = ops.convert_to_tensor(values[0])\n value_shape = ops.convert_to_tensor(value_t).get_shape()\n\n output_shape = tensor_shape.TensorShape([len(values)])\n output_shape = output_shape.concatenate(value_shape)\n # expand_dims converts concat to stack.\n return gen_array_ops.parallel_concat(\n [expand_dims(value, 0) for value in values], shape=output_shape)\n\n\n@tf_export(\"stack\")\[email protected]_dispatch_support\ndef stack(values, axis=0, name=\"stack\"):\n \"\"\"Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.\n\n See also `tf.concat`, `tf.tile`, `tf.repeat`.\n\n Packs the list of tensors in `values` into a tensor with rank one higher than\n each tensor in `values`, by packing them along the `axis` dimension.\n Given a list of length `N` of tensors of shape `(A, B, C)`;\n\n if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.\n if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.\n Etc.\n\n For example:\n\n >>> x = tf.constant([1, 4])\n >>> y = tf.constant([2, 5])\n >>> z = tf.constant([3, 6])\n >>> tf.stack([x, y, z])\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)>\n >>> tf.stack([x, y, z], axis=1)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[1, 2, 3],\n [4, 5, 6]], dtype=int32)>\n\n This is the opposite of unstack. The numpy equivalent is `np.stack`\n\n >>> np.array_equal(np.stack([x, y, z]), tf.stack([x, y, z]))\n True\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n axis: An `int`. The axis to stack along. 
Defaults to the first dimension.\n Negative values wrap around, so the valid range is `[-(R+1), R+1)`.\n name: A name for this operation (optional).\n\n Returns:\n output: A stacked `Tensor` with the same type as `values`.\n\n Raises:\n ValueError: If `axis` is out of the range [-(R+1), R+1).\n \"\"\"\n if axis == 0:\n try:\n # If the input is a constant list, it can be converted to a constant op\n return ops.convert_to_tensor(values, name=name)\n except (TypeError, ValueError):\n pass # Input list contains non-constant tensors\n\n value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple() # pylint: disable=protected-access\n if value_shape is not None:\n expanded_num_dims = len(value_shape) + 1\n if axis < -expanded_num_dims or axis >= expanded_num_dims:\n raise ValueError(\"axis = %d not in [%d, %d)\" %\n (axis, -expanded_num_dims, expanded_num_dims))\n\n return gen_array_ops.pack(values, axis=axis, name=name)\n\n\n# pylint: disable=invalid-name\ndef _autopacking_helper(list_or_tuple, dtype, name):\n \"\"\"Converts the given list or tuple to a tensor by packing.\n\n Args:\n list_or_tuple: A (possibly nested) list or tuple containing a tensor.\n dtype: The element type of the returned tensor.\n name: A name for the returned tensor.\n\n Returns:\n A `tf.Tensor` with value equivalent to `list_or_tuple`.\n \"\"\"\n if context.executing_eagerly():\n # NOTE: Fast path when all the items are tensors, this doesn't do any type\n # checking.\n if all(isinstance(elem, core.Tensor) for elem in list_or_tuple):\n return gen_array_ops.pack(list_or_tuple, name=name)\n must_pack = False\n converted_elems = []\n with ops.name_scope(name) as scope:\n for i, elem in enumerate(list_or_tuple):\n if isinstance(elem, core.Tensor):\n if dtype is not None and elem.dtype.base_dtype != dtype:\n raise TypeError(\"Cannot convert a list containing a tensor of dtype \"\n \"%s to %s (Tensor is: %r)\" %\n (elem.dtype, dtype, elem))\n converted_elems.append(elem)\n must_pack = True\n elif isinstance(elem, (list, tuple)):\n converted_elem = _autopacking_helper(elem, dtype, str(i))\n if isinstance(converted_elem, core.Tensor):\n must_pack = True\n converted_elems.append(converted_elem)\n else:\n converted_elems.append(elem)\n if must_pack:\n elems_as_tensors = []\n for i, elem in enumerate(converted_elems):\n if isinstance(elem, core.Tensor):\n elems_as_tensors.append(elem)\n else:\n # NOTE(mrry): This is inefficient, but it enables us to\n # handle the case where the list arguments are other\n # convertible-to-tensor types, such as numpy arrays.\n elems_as_tensors.append(\n constant_op.constant(elem, dtype=dtype, name=str(i)))\n return gen_array_ops.pack(elems_as_tensors, name=scope)\n else:\n return converted_elems\n\n\ndef _get_dtype_from_nested_lists(list_or_tuple):\n \"\"\"Returns the dtype of any tensor-like object in `list_or_tuple`, if found.\n\n Args:\n list_or_tuple: A list or tuple representing an object that can be converted\n to a `tf.Tensor`.\n\n Returns:\n The dtype of any tensor-like object in `list_or_tuple`, or `None` if no\n such object exists.\n \"\"\"\n for elem in list_or_tuple:\n if isinstance(elem, core.Tensor):\n return elem.dtype.base_dtype\n elif isinstance(elem, (list, tuple)):\n maybe_dtype = _get_dtype_from_nested_lists(elem)\n if maybe_dtype is not None:\n return maybe_dtype\n return None\n\n\ndef _cast_nested_seqs_to_dtype(dtype):\n\n def _maybe_cast(elem):\n if isinstance(elem, core.Tensor):\n if dtype != elem.dtype.base_dtype:\n elem = gen_math_ops.cast(elem, dtype)\n return 
elem\n\n return _maybe_cast\n\n\n_NON_AUTOPACKABLE_TYPES = set(np.core.numerictypes.ScalarType)\n_NON_AUTOPACKABLE_TYPES.add(np.ndarray)\n\n\ndef _should_not_autopack(v):\n # The condition we really want is\n # any(isinstance(elem, core.Tensor))\n # but it is >5x slower due to abc.ABCMeta.__instancecheck__.\n # pylint: disable=unidiomatic-typecheck\n # TODO(slebedev): add nest.all?\n return all(type(elem) in _NON_AUTOPACKABLE_TYPES for elem in nest.flatten(v))\n # pylint: enable=unidiomatic-typecheck\n\n\ndef _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):\n \"\"\"Tensor conversion function that automatically packs arguments.\"\"\"\n if as_ref or _should_not_autopack(v):\n return NotImplemented\n inferred_dtype = _get_dtype_from_nested_lists(v)\n if inferred_dtype is None:\n # We did not find any tensor-like objects in the nested lists, so defer to\n # other conversion functions.\n return NotImplemented\n if dtype is None:\n dtype = inferred_dtype\n elif dtype != inferred_dtype:\n v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)\n return _autopacking_helper(v, dtype, name or \"packed\")\n\n\n# pylint: enable=invalid-name\n\n# NOTE: Register this conversion function to run *before* one that\n# assumes every element is a value.\nops.register_tensor_conversion_function((list, tuple),\n _autopacking_conversion_function, 99)\n\n\n@tf_export(\"unstack\")\[email protected]_dispatch_support\ndef unstack(value, num=None, axis=0, name=\"unstack\"):\n \"\"\"Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.\n\n Unpacks tensors from `value` by chipping it along the `axis` dimension.\n\n >>> x = tf.reshape(tf.range(12), (3,4))\n >>>\n >>> p, q, r = tf.unstack(x)\n >>> p.shape.as_list()\n [4]\n\n >>> i, j, k, l = tf.unstack(x, axis=1)\n >>> i.shape.as_list()\n [3]\n\n This is the opposite of stack.\n\n >>> x = tf.stack([i, j, k, l], axis=1)\n\n More generally if you have a tensor of shape `(A, B, C, D)`:\n\n >>> A, B, C, D = [2, 3, 4, 5]\n >>> t = tf.random.normal(shape=[A, B, C, D])\n\n The number of tensor returned is equal to the length of the target `axis`:\n\n >>> axis = 2\n >>> items = tf.unstack(t, axis=axis)\n >>> len(items) == t.shape[axis]\n True\n\n The shape of each result tensor is equal to the shape of the input tensor,\n with the target `axis` removed.\n\n >>> items[0].shape.as_list() # [A, B, D]\n [2, 3, 5]\n\n The value of each tensor `items[i]` is equal to the slice of `input` across\n `axis` at index `i`:\n\n >>> for i in range(len(items)):\n ... slice = t[:,:,i,:]\n ... assert tf.reduce_all(slice == items[i])\n\n #### Python iterable unpacking\n\n With eager execution you _can_ unstack the 0th axis of a tensor using python's\n iterable unpacking:\n\n >>> t = tf.constant([1,2,3])\n >>> a,b,c = t\n\n `unstack` is still necessary because Iterable unpacking doesn't work in\n a `@tf.function`: Symbolic tensors are not iterable.\n\n You need to use `tf.unstack` here:\n\n >>> @tf.function\n ... def bad(t):\n ... a,b,c = t\n ... return a\n >>>\n >>> bad(t)\n Traceback (most recent call last):\n ...\n OperatorNotAllowedInGraphError: ...\n\n >>> @tf.function\n ... def good(t):\n ... a,b,c = tf.unstack(t)\n ... 
return a\n >>>\n >>> good(t).numpy()\n 1\n\n #### Unknown shapes\n\n Eager tensors have concrete values, so their shape is always known.\n Inside a `tf.function` the symbolic tensors may have unknown shapes.\n If the length of `axis` is unknown `tf.unstack` will fail because it cannot\n handle an unknown number of tensors:\n\n >>> @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])\n ... def bad(t):\n ... tensors = tf.unstack(t)\n ... return tensors[0]\n >>>\n >>> bad(tf.constant([1,2,3]))\n Traceback (most recent call last):\n ...\n ValueError: Cannot infer num from shape (None,)\n\n If you know the `axis` length you can pass it as the `num` argument. But this\n must be a constant value.\n\n If you actually need a variable number of tensors in a single `tf.function`\n trace, you will need to use exlicit loops and a `tf.TensorArray` instead.\n\n Args:\n value: A rank `R > 0` `Tensor` to be unstacked.\n num: An `int`. The length of the dimension `axis`. Automatically inferred if\n `None` (the default).\n axis: An `int`. The axis to unstack along. Defaults to the first dimension.\n Negative values wrap around, so the valid range is `[-R, R)`.\n name: A name for the operation (optional).\n\n Returns:\n The list of `Tensor` objects unstacked from `value`.\n\n Raises:\n ValueError: If `axis` is out of the range `[-R, R)`.\n ValueError: If `num` is unspecified and cannot be inferred.\n InvalidArgumentError: If `num` does not match the shape of `value`.\n \"\"\"\n if num is None:\n value = ops.convert_to_tensor(value)\n value_shape = value.get_shape()\n if value_shape.ndims is not None:\n if axis < -value_shape.ndims or axis >= value_shape.ndims:\n raise ValueError(\"axis = %d not in [%d, %d)\" %\n (axis, -value_shape.ndims, value_shape.ndims))\n num = value_shape.dims[axis].value\n if num is None:\n raise ValueError(\"Cannot infer num from shape %s\" % value_shape)\n return gen_array_ops.unpack(value, num=num, axis=axis, name=name)\n\n\n@tf_export(\"concat\")\[email protected]_dispatch_support\ndef concat(values, axis, name=\"concat\"):\n \"\"\"Concatenates tensors along one dimension.\n\n See also `tf.tile`, `tf.stack`, `tf.repeat`.\n\n Concatenates the list of tensors `values` along dimension `axis`. If\n `values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated\n result has shape\n\n [D0, D1, ... Raxis, ...Dn]\n\n where\n\n Raxis = sum(Daxis(i))\n\n That is, the data from the input tensors is joined along the `axis`\n dimension.\n\n The number of dimensions of the input tensors must match, and all dimensions\n except `axis` must be equal.\n\n For example:\n\n >>> t1 = [[1, 2, 3], [4, 5, 6]]\n >>> t2 = [[7, 8, 9], [10, 11, 12]]\n >>> tf.concat([t1, t2], 0)\n <tf.Tensor: shape=(4, 3), dtype=int32, numpy=\n array([[ 1, 2, 3],\n [ 4, 5, 6],\n [ 7, 8, 9],\n [10, 11, 12]], dtype=int32)>\n\n >>> tf.concat([t1, t2], 1)\n <tf.Tensor: shape=(2, 6), dtype=int32, numpy=\n array([[ 1, 2, 3, 7, 8, 9],\n [ 4, 5, 6, 10, 11, 12]], dtype=int32)>\n\n As in Python, the `axis` could also be negative numbers. 
Negative `axis` values\n  are interpreted as counting from the end of the rank, i.e.,\n  `axis + rank(values)`-th dimension.\n\n  For example:\n\n  >>> t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]\n  >>> t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]\n  >>> tf.concat([t1, t2], -1)\n  <tf.Tensor: shape=(2, 2, 4), dtype=int32, numpy=\n    array([[[ 1, 2, 7, 4],\n            [ 2, 3, 8, 4]],\n           [[ 4, 4, 2, 10],\n            [ 5, 3, 15, 11]]], dtype=int32)>\n\n  Note: If you are concatenating along a new axis consider using stack.\n  E.g.\n\n  ```python\n  tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)\n  ```\n\n  can be rewritten as\n\n  ```python\n  tf.stack(tensors, axis=axis)\n  ```\n\n  Args:\n    values: A list of `Tensor` objects or a single `Tensor`.\n    axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be\n      in the range `[-rank(values), rank(values))`. As in Python, indexing for\n      axis is 0-based. A positive axis in the range of `[0, rank(values))`\n      refers to the `axis`-th dimension, and a negative axis refers to the\n      `axis + rank(values)`-th dimension.\n    name: A name for the operation (optional).\n\n  Returns:\n    A `Tensor` resulting from concatenation of the input tensors.\n  \"\"\"\n  if not isinstance(values, (list, tuple)):\n    values = [values]\n  # TODO(mrry): Change to return values?\n  if len(values) == 1:  # Degenerate case of one tensor.\n    # Make a throwaway call to convert_to_tensor to make sure\n    # that axis is of the correct type, and make sure that\n    # the returned tensor is a scalar.\n    # TODO(keveman): Implement a standalone type and shape checker.\n    with ops.name_scope(name) as scope:\n      ops.convert_to_tensor(\n          axis, name=\"concat_dim\",\n          dtype=dtypes.int32).get_shape().assert_has_rank(0)\n      return identity(values[0], name=name)\n  return gen_array_ops.concat_v2(values=values, axis=axis, name=name)\n\n\n@tf_export(v1=[\"boolean_mask\"])\n@dispatch.add_dispatch_support\ndef boolean_mask(tensor, mask, name=\"boolean_mask\", axis=None):\n  \"\"\"Apply boolean mask to tensor.\n\n  Numpy equivalent is `tensor[mask]`.\n\n  In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match\n  the first K dimensions of `tensor`'s shape. We then have:\n    `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`\n  where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).\n  The `axis` could be used with `mask` to indicate the axis to mask from.\n  In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match\n  the first `axis + dim(mask)` dimensions of `tensor`'s shape.\n\n  See also: `tf.ragged.boolean_mask`, which can be applied to both dense and\n  ragged tensors, and can be used if you need to preserve the masked dimensions\n  of `tensor` (rather than flattening them, as `tf.boolean_mask` does).\n\n  Examples:\n\n  ```python\n  # 1-D example\n  tensor = [0, 1, 2, 3]\n  mask = np.array([True, False, True, False])\n  tf.boolean_mask(tensor, mask)  # [0, 2]\n\n  # 2-D example\n  tensor = [[1, 2], [3, 4], [5, 6]]\n  mask = np.array([True, False, True])\n  tf.boolean_mask(tensor, mask)  # [[1, 2], [5, 6]]\n  ```\n\n  Args:\n    tensor: N-D Tensor.\n    mask: K-D boolean Tensor, K <= N and K must be known statically.\n    name: A name for this operation (optional).\n    axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By\n      default, axis is 0 which will mask from the first dimension. 
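\n\n      As an added illustrative sketch (not part of the original examples),\n      masking along `axis=1` keeps the selected columns of a 2-D tensor; the\n      values below are arbitrary:\n\n      ```python\n      tensor = [[1, 2, 3], [4, 5, 6]]\n      mask = np.array([True, False, True])\n      tf.boolean_mask(tensor, mask, axis=1)  # [[1, 3], [4, 6]]\n      ```\n\n      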
Otherwise K +\n axis <= N.\n\n Returns:\n (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding\n to `True` values in `mask`.\n\n Raises:\n ValueError: If shapes do not conform.\n \"\"\"\n\n def _apply_mask_1d(reshaped_tensor, mask, axis=None):\n \"\"\"Mask tensor along dimension 0 with a 1-D mask.\"\"\"\n indices = squeeze(where_v2(mask), axis=[1])\n return gather(reshaped_tensor, indices, axis=axis)\n\n with ops.name_scope(name, values=[tensor, mask]):\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n mask = ops.convert_to_tensor(mask, name=\"mask\")\n\n shape_mask = mask.get_shape()\n ndims_mask = shape_mask.ndims\n shape_tensor = tensor.get_shape()\n if ndims_mask == 0:\n raise ValueError(\"mask cannot be scalar.\")\n if ndims_mask is None:\n raise ValueError(\n \"Number of mask dimensions must be specified, even if some dimensions\"\n \" are None. E.g. shape=[None] is ok, but shape=None is not.\")\n axis = 0 if axis is None else axis\n axis_value = tensor_util.constant_value(axis)\n if axis_value is not None:\n axis = axis_value\n shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)\n\n leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])\n tensor = reshape(\n tensor,\n concat([\n shape(tensor)[:axis], [leading_size],\n shape(tensor)[axis + ndims_mask:]\n ], 0))\n # TODO(yongtang): tf.reshape in C++ kernel might have set the shape\n # correctly, so the following may not be needed? It still might be possible\n # that there are some edge case where tensor_util.constant_value resolves\n # more cases than ShapeInference of tf.reshape in C++ kernel.\n if axis_value is not None:\n first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()\n tensor.set_shape(\n tensor_shape.as_shape(shape_tensor[:axis]).concatenate(\n [first_dim]).concatenate(shape_tensor[axis + ndims_mask:]))\n\n mask = reshape(mask, [-1])\n return _apply_mask_1d(tensor, mask, axis)\n\n\n@tf_export(\"boolean_mask\", v1=[])\[email protected]_dispatch_support\ndef boolean_mask_v2(tensor, mask, axis=None, name=\"boolean_mask\"):\n \"\"\"Apply boolean mask to tensor.\n\n Numpy equivalent is `tensor[mask]`.\n\n In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match\n the first K dimensions of `tensor`'s shape. We then have:\n `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`\n where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).\n The `axis` could be used with `mask` to indicate the axis to mask from.\n In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match\n the first `axis + dim(mask)` dimensions of `tensor`'s shape.\n\n See also: `tf.ragged.boolean_mask`, which can be applied to both dense and\n ragged tensors, and can be used if you need to preserve the masked dimensions\n of `tensor` (rather than flattening them, as `tf.boolean_mask` does).\n\n Examples:\n\n >>> tensor = [0, 1, 2, 3] # 1-D example\n >>> mask = np.array([True, False, True, False])\n >>> tf.boolean_mask(tensor, mask)\n <tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 2], dtype=int32)>\n\n >>> tensor = [[1, 2], [3, 4], [5, 6]] # 2-D example\n >>> mask = np.array([True, False, True])\n >>> tf.boolean_mask(tensor, mask)\n <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n array([[1, 2],\n [5, 6]], dtype=int32)>\n\n Args:\n tensor: N-D Tensor.\n mask: K-D boolean Tensor, K <= N and K must be known statically.\n axis: A 0-D int Tensor representing the axis in `tensor` to mask from. 
By\n default, axis is 0 which will mask from the first dimension. Otherwise K +\n axis <= N.\n name: A name for this operation (optional).\n\n Returns:\n (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding\n to `True` values in `mask`.\n\n Raises:\n ValueError: If shapes do not conform.\n\n Examples:\n\n ```python\n # 2-D example\n tensor = [[1, 2], [3, 4], [5, 6]]\n mask = np.array([True, False, True])\n boolean_mask(tensor, mask) # [[1, 2], [5, 6]]\n ```\n \"\"\"\n return boolean_mask(tensor, mask, name, axis)\n\n\n@tf_export(\"sparse.mask\", v1=[\"sparse.mask\", \"sparse_mask\"])\[email protected]_endpoints(\"sparse_mask\")\ndef sparse_mask(a, mask_indices, name=None):\n \"\"\"Masks elements of `IndexedSlices`.\n\n Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that\n contains a subset of the slices of `a`. Only the slices at indices not\n specified in `mask_indices` are returned.\n\n This is useful when you need to extract a subset of slices in an\n `IndexedSlices` object.\n\n For example:\n\n ```python\n # `a` contains slices at indices [12, 26, 37, 45] from a large tensor\n # with shape [1000, 10]\n a.indices # [12, 26, 37, 45]\n tf.shape(a.values) # [4, 10]\n\n # `b` will be the subset of `a` slices at its second and third indices, so\n # we want to mask its first and last indices (which are at absolute\n # indices 12, 45)\n b = tf.sparse.mask(a, [12, 45])\n\n b.indices # [26, 37]\n tf.shape(b.values) # [2, 10]\n ```\n\n Args:\n a: An `IndexedSlices` instance.\n mask_indices: Indices of elements to mask.\n name: A name for the operation (optional).\n\n Returns:\n The masked `IndexedSlices` instance.\n \"\"\"\n with ops.name_scope(name, \"sparse_mask\", [a, mask_indices]) as name:\n indices = a.indices\n out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)\n out_values = gather(a.values, to_gather, name=name)\n return ops.IndexedSlices(out_values, out_indices, a.dense_shape)\n\n\n@tf_export(\"unique\")\[email protected]_dispatch_support\ndef unique(x, out_idx=dtypes.int32, name=None):\n \"\"\"Finds unique elements in a 1-D tensor.\n\n See also `tf.unique_with_counts`.\n\n This operation returns a tensor `y` containing all of the unique elements\n of `x` sorted in the same order that they occur in `x`. This operation\n also returns a tensor `idx` the same size as `x` that contains the index\n of each value of `x` in the unique output `y`. In other words:\n\n\n y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]\n\n Example usage:\n\n >>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])\n >>> y, idx = unique(x)\n >>> y\n <tf.Tensor: id=5, shape=(5,), dtype=int32,\n numpy=array([1, 2, 4, 7, 8], dtype=int32)>\n >>> idx\n <tf.Tensor: id=6, shape=(9,), dtype=int32,\n numpy=array([0, 0, 1, 2, 2, 2, 3, 4, 4], dtype=int32)>\n\n Args:\n x: A Tensor. 1-D.\n out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to\n tf.int32.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of Tensor objects (y, idx).\n y: A Tensor. 
Has the same type as x.\n idx: A Tensor of type out_idx.\n\n \"\"\"\n # TODO(yongtang): switch to v2 once API deprecation\n # period (3 weeks) pass.\n # TODO(yongtang): The documentation should also\n # be updated when switch to v2.\n return gen_array_ops.unique(x, out_idx, name)\n\n\nunique.__doc__ = gen_array_ops.unique.__doc__\n\n\n@tf_export(\"unique_with_counts\")\[email protected]_dispatch_support\ndef unique_with_counts(x, out_idx=dtypes.int32, name=None):\n \"\"\"Finds unique elements in a 1-D tensor.\n\n See also `tf.unique`.\n\n This operation returns a tensor `y` containing all of the unique elements\n of `x` sorted in the same order that they occur in `x`. This operation\n also returns a tensor `idx` the same size as `x` that contains the index\n of each value of `x` in the unique output `y`. Finally, it returns a\n third tensor `count` that contains the count of each element of `y`\n in `x`. In other words:\n\n y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]\n\n Example usage:\n\n >>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])\n >>> y, idx, count = unique_with_counts(x)\n >>> y\n <tf.Tensor: id=8, shape=(5,), dtype=int32,\n numpy=array([1, 2, 4, 7, 8], dtype=int32)>\n >>> idx\n <tf.Tensor: id=9, shape=(9,), dtype=int32,\n numpy=array([0, 0, 1, 2, 2, 2, 3, 4, 4], dtype=int32)>\n >>> count\n <tf.Tensor: id=10, shape=(5,), dtype=int32,\n numpy=array([2, 1, 3, 1, 2], dtype=int32)>\n\n Args:\n x: A Tensor. 1-D.\n out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to\n tf.int32.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of Tensor objects (y, idx, count).\n y: A Tensor. Has the same type as x.\n idx: A Tensor of type out_idx.\n count: A Tensor of type out_idx.\n\n \"\"\"\n # TODO(yongtang): switch to v2 once API deprecation\n # period (3 weeks) pass.\n # TODO(yongtang): The documentation should also\n # be updated when switch to v2.\n return gen_array_ops.unique_with_counts(x, out_idx, name)\n\n\nunique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__\n\n\n@tf_export(\"split\")\[email protected]_dispatch_support\ndef split(value, num_or_size_splits, axis=0, num=None, name=\"split\"):\n \"\"\"Splits a tensor `value` into a list of sub tensors.\n\n See also `tf.unstack`.\n\n If `num_or_size_splits` is an integer, then `value` is split along the\n dimension `axis` into `num_or_size_splits` smaller tensors. This requires that\n `value.shape[axis]` is divisible by `num_or_size_splits`.\n\n If `num_or_size_splits` is a 1-D Tensor (or list), then `value` is split into\n `len(num_or_size_splits)` elements. The shape of the `i`-th\n element has the same size as the `value` except along dimension `axis` where\n the size is `num_or_size_splits[i]`.\n\n For example:\n\n >>> x = tf.Variable(tf.random.uniform([5, 30], -1, 1))\n >>>\n >>> # Split `x` into 3 tensors along dimension 1\n >>> s0, s1, s2 = tf.split(x, num_or_size_splits=3, axis=1)\n >>> tf.shape(s0).numpy()\n array([ 5, 10], dtype=int32)\n >>>\n >>> # Split `x` into 3 tensors with sizes [4, 15, 11] along dimension 1\n >>> split0, split1, split2 = tf.split(x, [4, 15, 11], 1)\n >>> tf.shape(split0).numpy()\n array([5, 4], dtype=int32)\n >>> tf.shape(split1).numpy()\n array([ 5, 15], dtype=int32)\n >>> tf.shape(split2).numpy()\n array([ 5, 11], dtype=int32)\n\n Args:\n value: The `Tensor` to split.\n num_or_size_splits: Either an integer indicating the number of splits along\n `axis` or a 1-D integer `Tensor` or Python list containing the sizes of\n each output tensor along `axis`. 
If a scalar, then it must evenly divide\n `value.shape[axis]`; otherwise the sum of sizes along the split axis\n must match that of the `value`.\n axis: An integer or scalar `int32` `Tensor`. The dimension along which to\n split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0.\n num: Optional, used to specify the number of outputs when it cannot be\n inferred from the shape of `size_splits`.\n name: A name for the operation (optional).\n\n Returns:\n if `num_or_size_splits` is a scalar returns a list of `num_or_size_splits`\n `Tensor` objects; if `num_or_size_splits` is a 1-D Tensor returns\n `num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting\n `value`.\n\n Raises:\n ValueError: If `num` is unspecified and cannot be inferred.\n \"\"\"\n if isinstance(num_or_size_splits,\n (numbers.Integral, tensor_shape.Dimension)):\n return gen_array_ops.split(\n axis=axis, num_split=num_or_size_splits, value=value, name=name)\n\n size_splits = ops.convert_to_tensor(num_or_size_splits)\n\n if size_splits._rank() == 0:\n raise ValueError(\n \"Rank-0 tensors are not supported as the num_or_size_splits argument \"\n \"to split. Argument provided: %s\" % (num_or_size_splits,))\n\n if num is None:\n size_splits_shape = size_splits._shape_tuple()\n if size_splits_shape:\n num = size_splits_shape[0]\n if num is None:\n raise ValueError(\"Cannot infer num from shape %s\" % num_or_size_splits)\n\n return gen_array_ops.split_v(\n value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)\n\n\n@tf_export(\"transpose\", v1=[])\[email protected]_dispatch_support\ndef transpose_v2(a, perm=None, conjugate=False, name=\"transpose\"):\n \"\"\"Transposes `a`, where `a` is a Tensor.\n\n Permutes the dimensions according to the value of `perm`.\n\n The returned tensor's dimension `i` will correspond to the input dimension\n `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the rank\n of the input tensor. Hence by default, this operation performs a regular\n matrix transpose on 2-D input Tensors.\n\n If conjugate is `True` and `a.dtype` is either `complex64` or `complex128`\n then the values of `a` are conjugated and transposed.\n\n @compatibility(numpy)\n In `numpy` transposes are memory-efficient constant time operations as they\n simply return a new view of the same data with adjusted `strides`.\n\n TensorFlow does not support strides, so `transpose` returns a new tensor with\n the items permuted.\n @end_compatibility\n\n For example:\n\n >>> x = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.transpose(x)\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)>\n\n Equivalently, you could call `tf.transpose(x, perm=[1, 0])`.\n\n If `x` is complex, setting conjugate=True gives the conjugate transpose:\n\n >>> x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n ... [4 + 4j, 5 + 5j, 6 + 6j]])\n >>> tf.transpose(x, conjugate=True)\n <tf.Tensor: shape=(3, 2), dtype=complex128, numpy=\n array([[1.-1.j, 4.-4.j],\n [2.-2.j, 5.-5.j],\n [3.-3.j, 6.-6.j]])>\n\n 'perm' is more useful for n-dimensional tensors where n > 2:\n\n >>> x = tf.constant([[[ 1, 2, 3],\n ... [ 4, 5, 6]],\n ... [[ 7, 8, 9],\n ... 
[10, 11, 12]]])\n\n As above, simply calling `tf.transpose` will default to `perm=[2,1,0]`.\n\n To take the transpose of the matrices in dimension-0 (such as when you are\n transposing matrices where 0 is the batch dimension), you would set\n `perm=[0,2,1]`.\n\n >>> tf.transpose(x, perm=[0, 2, 1])\n <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=\n array([[[ 1, 4],\n [ 2, 5],\n [ 3, 6]],\n [[ 7, 10],\n [ 8, 11],\n [ 9, 12]]], dtype=int32)>\n\n Note: This has a shorthand `linalg.matrix_transpose`):\n\n Args:\n a: A `Tensor`.\n perm: A permutation of the dimensions of `a`. This should be a vector.\n conjugate: Optional bool. Setting it to `True` is mathematically equivalent\n to tf.math.conj(tf.transpose(input)).\n name: A name for the operation (optional).\n\n Returns:\n A transposed `Tensor`.\n \"\"\"\n return transpose(a=a, perm=perm, name=name, conjugate=conjugate)\n\n\n@tf_export(v1=[\"transpose\"])\[email protected]_dispatch_support\ndef transpose(a, perm=None, name=\"transpose\", conjugate=False):\n \"\"\"Transposes `a`.\n\n Permutes the dimensions according to `perm`.\n\n The returned tensor's dimension i will correspond to the input dimension\n `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is\n the rank of the input tensor. Hence by default, this operation performs a\n regular matrix transpose on 2-D input Tensors. If conjugate is True and\n `a.dtype` is either `complex64` or `complex128` then the values of `a`\n are conjugated and transposed.\n\n @compatibility(numpy)\n In `numpy` transposes are memory-efficient constant time operations as they\n simply return a new view of the same data with adjusted `strides`.\n\n TensorFlow does not support strides, so `transpose` returns a new tensor with\n the items permuted.\n @end_compatibility\n\n For example:\n\n ```python\n x = tf.constant([[1, 2, 3], [4, 5, 6]])\n tf.transpose(x) # [[1, 4]\n # [2, 5]\n # [3, 6]]\n\n # Equivalently\n tf.transpose(x, perm=[1, 0]) # [[1, 4]\n # [2, 5]\n # [3, 6]]\n\n # If x is complex, setting conjugate=True gives the conjugate transpose\n x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n [4 + 4j, 5 + 5j, 6 + 6j]])\n tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],\n # [2 - 2j, 5 - 5j],\n # [3 - 3j, 6 - 6j]]\n\n # 'perm' is more useful for n-dimensional tensors, for n > 2\n x = tf.constant([[[ 1, 2, 3],\n [ 4, 5, 6]],\n [[ 7, 8, 9],\n [10, 11, 12]]])\n\n # Take the transpose of the matrices in dimension-0\n # (this common operation has a shorthand `linalg.matrix_transpose`)\n tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],\n # [2, 5],\n # [3, 6]],\n # [[7, 10],\n # [8, 11],\n # [9, 12]]]\n ```\n\n Args:\n a: A `Tensor`.\n perm: A permutation of the dimensions of `a`.\n name: A name for the operation (optional).\n conjugate: Optional bool. 
Setting it to `True` is mathematically equivalent\n to tf.math.conj(tf.transpose(input)).\n\n Returns:\n A transposed `Tensor`.\n \"\"\"\n with ops.name_scope(name, \"transpose\", [a]) as name:\n if not tensor_util.is_tf_type(a):\n a = ops.convert_to_tensor(a, name=\"a\")\n\n if conjugate and a.dtype.is_complex:\n transpose_fn = gen_array_ops.conjugate_transpose\n else:\n transpose_fn = gen_array_ops.transpose\n\n if perm is not None:\n return transpose_fn(a, perm, name=name)\n\n rank = a.shape.rank\n if rank is None:\n perm = gen_math_ops._range(gen_array_ops.rank(a) - 1, -1, -1)\n else:\n perm = np.arange(rank - 1, -1, -1, dtype=np.int32)\n return transpose_fn(a, perm, name=name)\n\n\n# pylint: disable=invalid-name\n@tf_export(\n \"linalg.matrix_transpose\",\n v1=[\"linalg.transpose\", \"linalg.matrix_transpose\", \"matrix_transpose\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"matrix_transpose\", \"linalg.transpose\")\ndef matrix_transpose(a, name=\"matrix_transpose\", conjugate=False):\n \"\"\"Transposes last two dimensions of tensor `a`.\n\n For example:\n\n ```python\n x = tf.constant([[1, 2, 3], [4, 5, 6]])\n tf.linalg.matrix_transpose(x) # [[1, 4],\n # [2, 5],\n # [3, 6]]\n\n x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n [4 + 4j, 5 + 5j, 6 + 6j]])\n tf.linalg.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],\n # [2 - 2j, 5 - 5j],\n # [3 - 3j, 6 - 6j]]\n\n # Matrix with two batch dimensions.\n # x.shape is [1, 2, 3, 4]\n # tf.linalg.matrix_transpose(x) is shape [1, 2, 4, 3]\n ```\n\n Note that `tf.matmul` provides kwargs allowing for transpose of arguments.\n This is done with minimal cost, and is preferable to using this function. E.g.\n\n ```python\n # Good! Transpose is taken at minimal additional cost.\n tf.matmul(matrix, b, transpose_b=True)\n\n # Inefficient!\n tf.matmul(matrix, tf.linalg.matrix_transpose(b))\n ```\n\n @compatibility(numpy)\n In `numpy` transposes are memory-efficient constant time operations as they\n simply return a new view of the same data with adjusted `strides`.\n\n TensorFlow does not support strides, `linalg.matrix_transpose` returns a new\n tensor with the items permuted.\n @end_compatibility\n\n Args:\n a: A `Tensor` with `rank >= 2`.\n name: A name for the operation (optional).\n conjugate: Optional bool. Setting it to `True` is mathematically equivalent\n to tf.math.conj(tf.linalg.matrix_transpose(input)).\n\n Returns:\n A transposed batch matrix `Tensor`.\n\n Raises:\n ValueError: If `a` is determined statically to have `rank < 2`.\n \"\"\"\n with ops.name_scope(name, values=[a]):\n a = ops.convert_to_tensor(a, name=\"a\")\n\n # If we know the number of dimensions (statically), we can do two things:\n # 1. Check that `a` is a (batch) matrix.\n # 2. Use a Python list for perm. This preserves static shape information\n # and avoids extra computations.\n a_shape = a.get_shape()\n ndims = a_shape.ndims\n if ndims is not None:\n if ndims < 2:\n raise ValueError(\n \"Argument 'a' should be a (batch) matrix, with rank >= 2. 
Found: \"\n \"%s\" % a_shape)\n perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]\n else:\n a_rank = rank(a)\n perm = concat(\n (gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)\n\n return transpose(a, perm=perm, conjugate=conjugate)\n\n\n@tf_export(\"linalg.diag\", v1=[\"linalg.diag\", \"matrix_diag\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"matrix_diag\")\ndef matrix_diag(diagonal,\n name=\"diag\",\n k=0,\n num_rows=-1,\n num_cols=-1,\n padding_value=0,\n align=\"RIGHT_LEFT\"):\n \"\"\"Returns a batched diagonal tensor with given batched diagonal values.\n\n Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th\n diagonals of a matrix, with everything else padded with `padding`. `num_rows`\n and `num_cols` specify the dimension of the innermost matrix of the output. If\n both are not specified, the op assumes the innermost matrix is square and\n infers its size from `k` and the innermost dimension of `diagonal`. If only\n one of them is specified, the op assumes the unspecified value is the smallest\n possible based on other criteria.\n\n Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor\n has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only\n one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has\n rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`.\n\n The second innermost dimension of `diagonal` has double meaning. When `k` is\n scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and\n the output tensor is:\n\n ```\n output[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper\n padding_value ; otherwise\n ```\n\n Otherwise, `M` is treated as the number of diagonals for the matrix in the\n same batch (`M = k[1]-k[0]+1`), and the output tensor is:\n\n ```\n output[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n padding_value ; otherwise\n ```\n where `d = n - m`, `diag_index = k[1] - d`, and\n `index_in_diag = n - max(d, 0) + offset`.\n\n `offset` is zero except when the alignment of the diagonal is to the right.\n ```\n offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n ```\n where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\n For example:\n\n ```\n # The main diagonal.\n diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)\n [5, 6, 7, 8]])\n tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)\n [0, 2, 0, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 4]],\n [[5, 0, 0, 0],\n [0, 6, 0, 0],\n [0, 0, 7, 0],\n [0, 0, 0, 8]]]\n\n # A superdiagonal (per batch).\n diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)\n [4, 5, 6]])\n tf.matrix_diag(diagonal, k = 1)\n ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)\n [0, 0, 2, 0],\n [0, 0, 0, 3],\n [0, 0, 0, 0]],\n [[0, 4, 0, 0],\n [0, 0, 5, 0],\n [0, 0, 0, 6],\n [0, 0, 0, 0]]]\n\n # A tridiagonal band (per batch).\n diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3)\n [1, 2, 3],\n [0, 4, 5]],\n [[2, 3, 0],\n [6, 7, 9],\n [0, 9, 1]]])\n tf.matrix_diag(diagonals, k = (-1, 1))\n ==> [[[1, 8, 0], # Output shape: (2, 3, 3)\n [4, 2, 9],\n [0, 5, 3]],\n [[6, 2, 0],\n [9, 7, 3],\n [0, 1, 9]]]\n\n # RIGHT_LEFT alignment.\n diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3)\n [1, 2, 3],\n [4, 5, 0]],\n [[0, 2, 3],\n [6, 7, 9],\n [9, 1, 
0]]])\n tf.matrix_diag(diagonals, k = (-1, 1), align=\"RIGHT_LEFT\")\n ==> [[[1, 8, 0], # Output shape: (2, 3, 3)\n [4, 2, 9],\n [0, 5, 3]],\n [[6, 2, 0],\n [9, 7, 3],\n [0, 1, 9]]]\n\n # Rectangular matrix.\n diagonal = np.array([1, 2]) # Input shape: (2)\n tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)\n ==> [[0, 0, 0, 0], # Output shape: (3, 4)\n [1, 0, 0, 0],\n [0, 2, 0, 0]]\n\n # Rectangular matrix with inferred num_cols and padding_value = 9.\n tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)\n ==> [[9, 9], # Output shape: (3, 2)\n [1, 9],\n [9, 2]]\n ```\n\n Args:\n diagonal: A `Tensor` with `rank k >= 1`.\n name: A name for the operation (optional).\n k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the\n main diagonal, and negative value means subdiagonals. `k` can be a single\n integer (for a single diagonal) or a pair of integers specifying the low\n and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.\n num_rows: The number of rows of the output matrix. If it is not provided,\n the op assumes the output matrix is a square matrix and infers the matrix\n size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.\n num_cols: The number of columns of the output matrix. If it is not provided,\n the op assumes the output matrix is a square matrix and infers the matrix\n size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.\n padding_value: The value to fill the area outside the specified diagonal\n band with. Default is 0.\n align: Some diagonals are shorter than `max_diag_len` and need to be padded.\n `align` is a string specifying how superdiagonals and subdiagonals should\n be aligned, respectively. There are four possible alignments: \"RIGHT_LEFT\"\n (default), \"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\"\n aligns superdiagonals to the right (left-pads the row) and subdiagonals to\n the left (right-pads the row). It is the packing format LAPACK uses.\n cuSPARSE uses \"LEFT_RIGHT\", which is the opposite alignment.\n\n Returns:\n A Tensor. 
Has the same type as `diagonal`.\n \"\"\"\n # Special case to sidestep the tf.constant conversion error:\n # TypeError: Expected bool, got 0 of type 'int' instead.\n if hasattr(diagonal, \"dtype\") and diagonal.dtype == \"bool\":\n padding_value = bool(padding_value)\n\n return gen_array_ops.matrix_diag_v3(\n diagonal=diagonal,\n k=k,\n num_rows=num_rows,\n num_cols=num_cols,\n padding_value=padding_value,\n align=align,\n name=name)\n\n\n@tf_export(\"linalg.diag_part\", v1=[\"linalg.diag_part\", \"matrix_diag_part\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"matrix_diag_part\")\ndef matrix_diag_part(\n input, # pylint:disable=redefined-builtin\n name=\"diag_part\",\n k=0,\n padding_value=0,\n align=\"RIGHT_LEFT\"):\n \"\"\"Returns the batched diagonal part of a batched tensor.\n\n Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched\n `input`.\n\n Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.\n Let `max_diag_len` be the maximum length among all diagonals to be extracted,\n `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\n Let `num_diags` be the number of diagonals to extract,\n `num_diags = k[1] - k[0] + 1`.\n\n If `num_diags == 1`, the output tensor is of rank `r - 1` with shape\n `[I, J, ..., L, max_diag_len]` and values:\n\n ```\n diagonal[i, j, ..., l, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n ```\n where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.\n\n Otherwise, the output tensor has rank `r` with dimensions\n `[I, J, ..., L, num_diags, max_diag_len]` with values:\n\n ```\n diagonal[i, j, ..., l, m, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n ```\n where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.\n\n `offset` is zero except when the alignment of the diagonal is to the right.\n ```\n offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n ```\n where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\n The input must be at least a matrix.\n\n For example:\n\n ```\n input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)\n [5, 6, 7, 8],\n [9, 8, 7, 6]],\n [[5, 4, 3, 2],\n [1, 2, 3, 4],\n [5, 6, 7, 8]]])\n\n # A main diagonal from each batch.\n tf.linalg.diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)\n [5, 2, 7]]\n\n # A superdiagonal from each batch.\n tf.linalg.diag_part(input, k = 1)\n ==> [[2, 7, 6], # Output shape: (2, 3)\n [4, 3, 8]]\n\n # A band from each batch.\n tf.linalg.diag_part(input, k = (-1, 2))\n ==> [[[3, 8, 0], # Output shape: (2, 4, 3)\n [2, 7, 6],\n [1, 6, 7],\n [0, 5, 8]],\n [[3, 4, 0],\n [4, 3, 8],\n [5, 2, 7],\n [0, 1, 6]]]\n\n # RIGHT_LEFT alignment.\n tf.linalg.diag_part(input, k = (-1, 2), align=\"RIGHT_LEFT\")\n ==> [[[0, 3, 8], # Output shape: (2, 4, 3)\n [2, 7, 6],\n [1, 6, 7],\n [5, 8, 0]],\n [[0, 3, 4],\n [4, 3, 8],\n [5, 2, 7],\n [1, 6, 0]]]\n\n # max_diag_len can be shorter than the main diagonal.\n tf.linalg.diag_part(input, k = (-2, -1))\n ==> [[[5, 8],\n [0, 9]],\n [[1, 6],\n [0, 5]]]\n\n # padding_value = 9\n tf.linalg.diag_part(input, k = (1, 3), padding_value = 9)\n ==> [[[4, 9, 9], # Output shape: (2, 3, 3)\n [3, 8, 9],\n [2, 7, 6]],\n [[2, 9, 9],\n [3, 4, 9],\n [4, 3, 8]]]\n\n ```\n\n Args:\n input: A `Tensor` with `rank k >= 2`.\n name: A name for the operation (optional).\n k: Diagonal offset(s). 
Positive value means superdiagonal, 0 refers to the\n main diagonal, and negative value means subdiagonals. `k` can be a single\n integer (for a single diagonal) or a pair of integers specifying the low\n and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.\n padding_value: The value to fill the area outside the specified diagonal\n band with. Default is 0.\n align: Some diagonals are shorter than `max_diag_len` and need to be padded.\n `align` is a string specifying how superdiagonals and subdiagonals should\n be aligned, respectively. There are four possible alignments: \"RIGHT_LEFT\"\n (default), \"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\"\n aligns superdiagonals to the right (left-pads the row) and subdiagonals to\n the left (right-pads the row). It is the packing format LAPACK uses.\n cuSPARSE uses \"LEFT_RIGHT\", which is the opposite alignment.\n\n Returns:\n A Tensor containing diagonals of `input`. Has the same type as `input`.\n\n Raises:\n InvalidArgumentError: When `k` is out of bound or when `k[0]>k[1:]`.\n \"\"\"\n # Special case to sidestep the tf.constant conversion error:\n # TypeError: Expected bool, got 0 of type 'int' instead.\n if hasattr(input, \"dtype\") and input.dtype == \"bool\":\n padding_value = bool(padding_value)\n\n return gen_array_ops.matrix_diag_part_v3(\n input=input, k=k, padding_value=padding_value, align=align, name=name)\n\n\n@tf_export(\n \"linalg.tensor_diag_part\", v1=[\"linalg.tensor_diag_part\", \"diag_part\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"diag_part\")\ndef tensor_diag_part(\n input, # pylint:disable=redefined-builtin\n name=None):\n \"\"\"Returns the diagonal part of the tensor.\n\n This operation returns a tensor with the `diagonal` part\n of the `input`. The `diagonal` part is computed as follows:\n\n Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a\n tensor of rank `k` with dimensions `[D1,..., Dk]` where:\n\n `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.\n\n For a rank 2 tensor, `linalg.diag_part` and `linalg.tensor_diag_part`\n produce the same result. For rank 3 and higher, linalg.diag_part extracts\n the diagonal of each inner-most matrix in the tensor. An example where\n they differ is given below.\n\n >>> x = [[[[1111,1112],[1121,1122]],\n ... [[1211,1212],[1221,1222]]],\n ... [[[2111, 2112], [2121, 2122]],\n ... [[2211, 2212], [2221, 2222]]]\n ... ]\n >>> tf.linalg.tensor_diag_part(x)\n <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n array([[1111, 1212],\n [2121, 2222]], dtype=int32)>\n >>> tf.linalg.diag_part(x).shape\n TensorShape([2, 2, 2])\n\n Args:\n input: A `Tensor` with rank `2k`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor containing diagonals of `input`. Has the same type as `input`, and\n rank `k`.\n \"\"\"\n return gen_array_ops.diag_part(input=input, name=name)\n\n\n@tf_export(\"linalg.set_diag\", v1=[\"linalg.set_diag\", \"matrix_set_diag\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"matrix_set_diag\")\ndef matrix_set_diag(\n input, # pylint:disable=redefined-builtin\n diagonal,\n name=\"set_diag\",\n k=0,\n align=\"RIGHT_LEFT\"):\n \"\"\"Returns a batched matrix tensor with new batched diagonal values.\n\n Given `input` and `diagonal`, this operation returns a tensor with the\n same shape and values as `input`, except for the specified diagonals of the\n innermost matrices. 
These will be overwritten by the values in `diagonal`.\n\n `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or\n `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.\n Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.\n `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.\n `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,\n `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\n\n The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`.\n If `k` is scalar or `k[0] == k[1]`:\n\n ```\n output[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]\n input[i, j, ..., l, m, n] ; otherwise\n ```\n\n Otherwise,\n\n ```\n output[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n input[i, j, ..., l, m, n] ; otherwise\n ```\n where `d = n - m`, `diag_index = k[1] - d`, and\n `index_in_diag = n - max(d, 0) + offset`.\n\n `offset` is zero except when the alignment of the diagonal is to the right.\n ```\n offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n ```\n where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\n For example:\n\n ```\n # The main diagonal.\n input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)\n [7, 7, 7, 7],\n [7, 7, 7, 7]],\n [[7, 7, 7, 7],\n [7, 7, 7, 7],\n [7, 7, 7, 7]]])\n diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)\n [4, 5, 6]])\n tf.matrix_set_diag(input, diagonal)\n ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)\n [7, 2, 7, 7],\n [7, 7, 3, 7]],\n [[4, 7, 7, 7],\n [7, 5, 7, 7],\n [7, 7, 6, 7]]]\n\n # A superdiagonal (per batch).\n tf.matrix_set_diag(input, diagonal, k = 1)\n ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)\n [7, 7, 2, 7],\n [7, 7, 7, 3]],\n [[7, 4, 7, 7],\n [7, 7, 5, 7],\n [7, 7, 7, 6]]]\n\n # A band of diagonals.\n diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3)\n [6, 5, 8],\n [1, 2, 3],\n [0, 4, 5]],\n [[1, 2, 0],\n [5, 6, 4],\n [6, 1, 2],\n [0, 3, 4]]])\n tf.matrix_set_diag(input, diagonals, k = (-1, 2))\n ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)\n [4, 2, 5, 1],\n [7, 5, 3, 8]],\n [[6, 5, 1, 7],\n [3, 1, 6, 2],\n [7, 4, 2, 4]]]\n\n # RIGHT_LEFT alignment.\n diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3)\n [6, 5, 8],\n [1, 2, 3],\n [4, 5, 0]],\n [[0, 1, 2],\n [5, 6, 4],\n [6, 1, 2],\n [3, 4, 0]]])\n tf.matrix_set_diag(input, diagonals, k = (-1, 2), align=\"RIGHT_LEFT\")\n ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)\n [4, 2, 5, 1],\n [7, 5, 3, 8]],\n [[6, 5, 1, 7],\n [3, 1, 6, 2],\n [7, 4, 2, 4]]]\n\n ```\n\n Args:\n input: A `Tensor` with rank `k + 1`, where `k >= 1`.\n diagonal: A `Tensor` with rank `k`, when `d_lower == d_upper`, or `k + 1`,\n otherwise. `k >= 1`.\n name: A name for the operation (optional).\n k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the\n main diagonal, and negative value means subdiagonals. `k` can be a single\n integer (for a single diagonal) or a pair of integers specifying the low\n and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.\n align: Some diagonals are shorter than `max_diag_len` and need to be padded.\n `align` is a string specifying how superdiagonals and subdiagonals should\n be aligned, respectively. 
There are four possible alignments: \"RIGHT_LEFT\"\n (default), \"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\"\n aligns superdiagonals to the right (left-pads the row) and subdiagonals to\n the left (right-pads the row). It is the packing format LAPACK uses.\n cuSPARSE uses \"LEFT_RIGHT\", which is the opposite alignment.\n \"\"\"\n return gen_array_ops.matrix_set_diag_v3(\n input=input, diagonal=diagonal, k=k, align=align, name=name)\n\n\n# pylint: enable=invalid-name\n\n\ndef _constant_if_small(value, shape, dtype, name):\n try:\n if np.prod(shape) < 1000:\n return constant(value, shape=shape, dtype=dtype, name=name)\n except (NotImplementedError, TypeError):\n # Happens when shape is a Tensor, list with Tensor elements, etc.\n pass\n return None\n\n\ndef _tag_zeros_tensor(fun):\n \"\"\" Tags the result of function by setting _is_zeros_tensor attribute.\n\n This is useful to compute Hessians of fused ops such as cross_entropy.\n \"\"\"\n\n def wrapped(*args, **kwargs):\n tensor = fun(*args, **kwargs)\n tensor._is_zeros_tensor = True\n return tensor\n\n return tf_decorator.make_decorator(fun, wrapped)\n\n\n@tf_export(\"zeros\")\[email protected]_dispatch_support\n@_tag_zeros_tensor\ndef zeros(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n See also `tf.zeros_like`, `tf.ones`, `tf.fill`, `tf.eye`.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to zero.\n\n >>> tf.zeros([3, 4], tf.int32)\n <tf.Tensor: shape=(3, 4), dtype=int32, numpy=\n array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=int32)>\n\n Args:\n shape: A `list` of integers, a `tuple` of integers, or\n a 1-D `Tensor` of type `int32`.\n dtype: The DType of an element in the resulting `Tensor`.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n dtype = dtypes.as_dtype(dtype).base_dtype\n with ops.name_scope(name, \"zeros\", [shape]) as name:\n if dtype == dtypes.bool:\n zero = False\n elif dtype == dtypes.string:\n zero = \"\"\n elif dtype.is_quantized:\n zero = np.zeros([]).astype(dtype.as_numpy_dtype)\n else:\n zero = 0\n\n if not isinstance(shape, ops.Tensor):\n try:\n if not context.executing_eagerly():\n # Create a constant if it won't be very big. Otherwise create a fill\n # op to prevent serialized GraphDefs from becoming too large.\n output = _constant_if_small(zero, shape, dtype, name)\n if output is not None:\n return output\n\n # Go through tensor shapes to get int64-if-needed semantics\n shape = constant_op._tensor_shape_tensor_conversion_function(\n tensor_shape.TensorShape(shape))\n except (TypeError, ValueError, errors.UnimplementedError):\n # Happens when shape is a list with tensor elements\n shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)\n if not shape._shape_tuple():\n shape = reshape(shape, [-1]) # Ensure it's a vector\n output = fill(shape, constant(zero, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtype\n return output\n\n\n@tf_export(v1=[\"zeros_like\"])\[email protected]_unary_elementwise_api\[email protected]_dispatch_support\ndef zeros_like(tensor, dtype=None, name=None, optimize=True):\n \"\"\"Creates a tensor with all elements set to zero.\n\n See also `tf.zeros`.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the\n same type and shape as `tensor` with all elements set to zero. 
Optionally,\n you can use `dtype` to specify a new type for the returned tensor.\n\n Examples:\n\n >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.zeros_like(tensor)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n\n >>> tf.zeros_like(tensor, dtype=tf.float32)\n <tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n array([[0., 0., 0.],\n [0., 0., 0.]], dtype=float32)>\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string`. (optional)\n name: A name for the operation (optional).\n optimize: if `True`, attempt to statically determine the shape of `tensor`\n and encode it as a constant. (optional, defaults to `True`)\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n return zeros_like_impl(tensor, dtype, name, optimize)\n\n\n@tf_export(\"zeros_like\", v1=[])\[email protected]_unary_elementwise_api\[email protected]_dispatch_support\ndef zeros_like_v2(\n input, # pylint: disable=redefined-builtin\n dtype=None,\n name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n See also `tf.zeros`.\n\n Given a single tensor or array-like object (`input`), this operation returns\n a tensor of the same type and shape as `input` with all elements set to zero.\n Optionally, you can use `dtype` to specify a new type for the returned tensor.\n\n Examples:\n\n >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.zeros_like(tensor)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n\n >>> tf.zeros_like(tensor, dtype=tf.float32)\n <tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n array([[0., 0., 0.],\n [0., 0., 0.]], dtype=float32)>\n\n >>> tf.zeros_like([[1, 2, 3], [4, 5, 6]])\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)>\n\n Args:\n input: A `Tensor` or array-like object.\n dtype: A type for the returned `Tensor`. 
Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string` (optional).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n return zeros_like_impl(input, dtype, name, optimize=True)\n\n\n@_tag_zeros_tensor\ndef zeros_like_impl(tensor, dtype, name, optimize=True):\n \"\"\"Internal implementation for the v1/v2 zeros_like API calls.\"\"\"\n with ops.name_scope(name, \"zeros_like\", [tensor]) as name:\n if not tensor_util.is_tf_type(tensor):\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n tensor_shape = tensor.shape\n tensor_dtype = tensor.dtype\n\n if context.executing_eagerly():\n if dtype is not None and dtype != tensor_dtype:\n return zeros(\n shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)\n return gen_array_ops.zeros_like(tensor, name=name)\n\n # For now, variant types must be created via zeros_like; as we need to\n # pass the input variant object to the proper zeros callback.\n\n if (optimize and tensor_shape.is_fully_defined() and\n tensor_dtype != dtypes.variant):\n # We can produce a zeros tensor independent of the value of 'tensor',\n # since the shape is known statically.\n return zeros(tensor_shape, dtype=dtype or tensor_dtype, name=name)\n\n if dtype is not None and dtype != tensor_dtype and dtype != dtypes.variant:\n return zeros(\n shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)\n else:\n return gen_array_ops.zeros_like(tensor, name=name)\n\n\n@tf_export(v1=[\"ones_like\"])\[email protected]_unary_elementwise_api\[email protected]_dispatch_support\ndef ones_like(tensor, dtype=None, name=None, optimize=True):\n \"\"\"Creates a tensor with all elements set to 1.\n\n See also `tf.ones`.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the same\n type and shape as `tensor` with all elements set to 1. Optionally, you can\n specify a new type (`dtype`) for the returned tensor.\n\n For example:\n\n ```python\n tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]\n ```\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,\n `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`,\n `complex128` or `bool`.\n name: A name for the operation (optional).\n optimize: if true, attempt to statically determine the shape of 'tensor' and\n encode it as a constant.\n\n Returns:\n A `Tensor` with all elements set to 1.\n \"\"\"\n return ones_like_impl(tensor, dtype, name, optimize)\n\n\n@tf_export(\"ones_like\", v1=[])\[email protected]_unary_elementwise_api\[email protected]_dispatch_support\ndef ones_like_v2(\n input, # pylint: disable=redefined-builtin\n dtype=None,\n name=None):\n \"\"\"Creates a tensor of all ones that has the same shape as the input.\n\n See also `tf.ones`.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the\n same type and shape as `tensor` with all elements set to 1. Optionally,\n you can use `dtype` to specify a new type for the returned tensor.\n\n For example:\n\n >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n >>> tf.ones_like(tensor)\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[1, 1, 1],\n [1, 1, 1]], dtype=int32)>\n\n Args:\n input: A `Tensor`.\n dtype: A type for the returned `Tensor`. 
Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to one.\n \"\"\"\n return ones_like_impl(input, dtype, name, optimize=True)\n\n\ndef ones_like_impl(tensor, dtype, name, optimize=True):\n \"\"\"Internal implementation for the v1/v2 ones_like API calls.\"\"\"\n with ops.name_scope(name, \"ones_like\", [tensor]) as name:\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n ones_shape = shape_internal(tensor, optimize=optimize)\n if dtype is None:\n dtype = tensor.dtype\n ret = ones(ones_shape, dtype=dtype, name=name)\n if not context.executing_eagerly():\n ret.set_shape(tensor.get_shape())\n return ret\n\n\n@tf_export(\"ones\")\[email protected]_dispatch_support\ndef ones(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to one (1).\n\n See also `tf.ones_like`, `tf.zeros`, `tf.fill`, `tf.eye`.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to one.\n\n >>> tf.ones([3, 4], tf.int32)\n <tf.Tensor: shape=(3, 4), dtype=int32, numpy=\n array([[1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 1]], dtype=int32)>\n\n Args:\n shape: A `list` of integers, a `tuple` of integers, or\n a 1-D `Tensor` of type `int32`.\n dtype: Optional DType of an element in the resulting `Tensor`. Default is\n `tf.float32`.\n name: Optional string. A name for the operation.\n\n Returns:\n A `Tensor` with all elements set to one (1).\n \"\"\"\n dtype = dtypes.as_dtype(dtype).base_dtype\n with ops.name_scope(name, \"ones\", [shape]) as name:\n if dtype == dtypes.bool:\n one = True\n elif dtype.is_quantized:\n one = np.ones([]).astype(dtype.as_numpy_dtype)\n else:\n one = 1\n if not isinstance(shape, ops.Tensor):\n try:\n if not context.executing_eagerly():\n # Create a constant if it won't be very big. Otherwise create a fill\n # op to prevent serialized GraphDefs from becoming too large.\n output = _constant_if_small(one, shape, dtype, name)\n if output is not None:\n return output\n\n # Go through tensor shapes to get int64-if-needed semantics\n shape = constant_op._tensor_shape_tensor_conversion_function(\n tensor_shape.TensorShape(shape))\n except (TypeError, ValueError):\n # Happens when shape is a list with tensor elements\n shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)\n if not shape._shape_tuple():\n shape = reshape(shape, [-1]) # Ensure it's a vector\n output = fill(shape, constant(one, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtype\n return output\n\n\n@tf_export(v1=[\"placeholder\"])\ndef placeholder(dtype, shape=None, name=None):\n \"\"\"Inserts a placeholder for a tensor that will be always fed.\n\n **Important**: This tensor will produce an error if evaluated. Its value must\n be fed using the `feed_dict` optional argument to `Session.run()`,\n `Tensor.eval()`, or `Operation.run()`.\n\n For example:\n\n ```python\n x = tf.compat.v1.placeholder(tf.float32, shape=(1024, 1024))\n y = tf.matmul(x, x)\n\n with tf.compat.v1.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n rand_array = np.random.rand(1024, 1024)\n print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.\n ```\n\n Args:\n dtype: The type of elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). 
If the shape is not\n specified, you can feed a tensor of any shape.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n\n Raises:\n RuntimeError: if eager execution is enabled\n\n @compatibility(TF2)\n This API is not compatible with eager execution and `tf.function`. To migrate\n to TF2, rewrite the code to be compatible with eager execution. Check the\n [migration\n guide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls)\n on replacing `Session.run` calls. In TF2, you can just pass tensors directly\n into ops and layers. If you want to explicitly set up your inputs, also see\n [Keras functional API](https://www.tensorflow.org/guide/keras/functional) on\n how to use `tf.keras.Input` to replace `tf.compat.v1.placeholder`.\n `tf.function` arguments also do the job of `tf.compat.v1.placeholder`.\n For more details please read [Better\n performance with tf.function](https://www.tensorflow.org/guide/function).\n @end_compatibility\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.placeholder() is not compatible with \"\n \"eager execution.\")\n\n return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)\n\n\n@tf_export(v1=[\"placeholder_with_default\"])\ndef placeholder_with_default(input, shape, name=None): # pylint: disable=redefined-builtin\n \"\"\"A placeholder op that passes through `input` when its output is not fed.\n\n @compatibility(TF2)\n This API is strongly discouraged for use with eager execution and\n `tf.function`. The primary use of this API is for testing computation wrapped\n within a `tf.function` where the input tensors might not have statically known\n fully-defined shapes. The same can be achieved by creating a\n [concrete function](\n https://www.tensorflow.org/guide/function#obtaining_concrete_functions)\n from the `tf.function` with a `tf.TensorSpec` input which has partially\n defined shapes. For example, the code\n\n >>> @tf.function\n ... def f():\n ... x = tf.compat.v1.placeholder_with_default(\n ... tf.constant([[1., 2., 3.], [4., 5., 6.]]), [None, 3])\n ... y = tf.constant([[1.],[2.], [3.]])\n ... z = tf.matmul(x, y)\n ... assert z.shape[0] == None\n ... assert z.shape[1] == 1\n\n >>> f()\n\n can easily be replaced by\n\n >>> @tf.function\n ... def f(x):\n ... y = tf.constant([[1.],[2.], [3.]])\n ... z = tf.matmul(x, y)\n ... assert z.shape[0] == None\n ... assert z.shape[1] == 1\n\n >>> g = f.get_concrete_function(tf.TensorSpec([None, 3]))\n\n You can learn more about `tf.function` at [Better\n performance with tf.function](https://www.tensorflow.org/guide/function).\n @end_compatibility\n\n Args:\n input: A `Tensor`. The default value to produce when output is not fed.\n shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape of\n the tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `input`.\n \"\"\"\n return gen_array_ops.placeholder_with_default(input, shape, name)\n\n\n@tf_export(v1=[\"sparse.placeholder\", \"sparse_placeholder\"])\[email protected]_endpoints(\"sparse_placeholder\")\ndef sparse_placeholder(dtype, shape=None, name=None):\n \"\"\"Inserts a placeholder for a sparse tensor that will be always fed.\n\n **Important**: This sparse tensor will produce an error if evaluated.\n Its value must be fed using the `feed_dict` optional argument to\n `Session.run()`, `Tensor.eval()`, or `Operation.run()`.\n\n For example:\n\n ```python\n x = tf.compat.v1.sparse.placeholder(tf.float32)\n y = tf.sparse.reduce_sum(x)\n\n with tf.compat.v1.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)\n values = np.array([1.0, 2.0], dtype=np.float32)\n shape = np.array([7, 9, 2], dtype=np.int64)\n print(sess.run(y, feed_dict={\n x: tf.compat.v1.SparseTensorValue(indices, values, shape)})) # Will\n succeed.\n print(sess.run(y, feed_dict={\n x: (indices, values, shape)})) # Will succeed.\n\n sp = tf.sparse.SparseTensor(indices=indices, values=values,\n dense_shape=shape)\n sp_value = sp.eval(session=sess)\n print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.\n ```\n\n @compatibility{eager} Placeholders are not compatible with eager execution.\n\n Args:\n dtype: The type of `values` elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). If the shape is not\n specified, you can feed a sparse tensor of any shape.\n name: A name for prefixing the operations (optional).\n\n Returns:\n A `SparseTensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n\n Raises:\n RuntimeError: if eager execution is enabled\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"`sparse_placeholder` is not compatible with \"\n \"eager execution.\")\n\n shape_name = (name + \"/shape\") if name is not None else None\n default_shape_name = (name + \"/shape_default\") if name is not None else None\n if shape is None:\n rank = None\n dense_shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)\n dense_shape_default = tensor_util.constant_value_as_shape(dense_shape)\n else:\n if isinstance(shape, ops.Tensor):\n rank = shape.get_shape()[0]\n dense_shape_default = tensor_util.constant_value_as_shape(shape)\n else:\n rank = len(shape)\n # determine the shape, to override the `.shape` property of the\n # `SparseTensor`\n dense_shape_default = tensor_shape.TensorShape(\n tuple(None if dim == -1 else dim for dim in shape))\n shape = tuple(tensor_shape.dimension_value(dim) for dim in shape)\n shape = tuple(-1 if dim is None else dim for dim in shape)\n shape = ops.convert_to_tensor(\n shape, dtype=dtypes.int64, name=default_shape_name)\n\n # `dense_shape` needs to be feedable (for users that treat this as an\n # actual placeholder). `constant_value_as_shape` sets constants to\n # not-feedable. 
placeholder_with_default works, but blocks `SparseTensor`\n # from reading the default value back out.\n dense_shape = placeholder_with_default(\n shape, shape=shape.shape, name=shape_name)\n\n result = sparse_tensor.SparseTensor(\n values=placeholder(\n dtype,\n shape=[None],\n name=(name + \"/values\") if name is not None else None),\n indices=placeholder(\n dtypes.int64,\n shape=[None, rank],\n name=(name + \"/indices\") if name is not None else None),\n dense_shape=dense_shape)\n\n # Now the SparseTensor.shape is a list of `None`s, since it couldn't read the\n # default shape out of the placeholder. Override that\n # shape to be the value determined here, so partial shapes can be\n # propagated.\n result._dense_shape_default = dense_shape_default\n return result\n\n# pylint: enable=redefined-outer-name\n\n\n@tf_export(\"pad\", v1=[])\[email protected]_dispatch_support\ndef pad_v2(tensor, paddings, mode=\"CONSTANT\", constant_values=0, name=None):\n \"\"\"Pads a tensor.\n\n This operation pads a `tensor` according to the `paddings` you specify.\n `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how\n many values to add before the contents of `tensor` in that dimension, and\n `paddings[D, 1]` indicates how many values to add after the contents of\n `tensor` in that dimension. If `mode` is \"REFLECT\" then both `paddings[D, 0]`\n and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If\n `mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\n no greater than `tensor.dim_size(D)`.\n\n The padded size of each dimension D of the output is:\n\n `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\n For example:\n\n ```python\n t = tf.constant([[1, 2, 3], [4, 5, 6]])\n paddings = tf.constant([[1, 1,], [2, 2]])\n # 'constant_values' is 0.\n # rank of 't' is 2.\n tf.pad(t, paddings, \"CONSTANT\") # [[0, 0, 0, 0, 0, 0, 0],\n # [0, 0, 1, 2, 3, 0, 0],\n # [0, 0, 4, 5, 6, 0, 0],\n # [0, 0, 0, 0, 0, 0, 0]]\n\n tf.pad(t, paddings, \"REFLECT\") # [[6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1],\n # [6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1]]\n\n tf.pad(t, paddings, \"SYMMETRIC\") # [[2, 1, 1, 2, 3, 3, 2],\n # [2, 1, 1, 2, 3, 3, 2],\n # [5, 4, 4, 5, 6, 6, 5],\n # [5, 4, 4, 5, 6, 6, 5]]\n ```\n\n Args:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\" (case-insensitive)\n constant_values: In \"CONSTANT\" mode, the scalar pad value to use. Must be\n same type as `tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n\n Raises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n \"\"\"\n return pad(tensor, paddings, mode, name, constant_values)\n\n\n@tf_export(v1=[\"pad\"])\[email protected]_dispatch_support\ndef pad(tensor, paddings, mode=\"CONSTANT\", name=None, constant_values=0): # pylint: disable=invalid-name\n \"\"\"Pads a tensor.\n\n This operation pads a `tensor` according to the `paddings` you specify.\n `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how\n many values to add before the contents of `tensor` in that dimension, and\n `paddings[D, 1]` indicates how many values to add after the contents of\n `tensor` in that dimension. 
If `mode` is \"REFLECT\" then both `paddings[D, 0]`\n and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If\n `mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\n no greater than `tensor.dim_size(D)`.\n\n The padded size of each dimension D of the output is:\n\n `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\n For example:\n\n ```python\n t = tf.constant([[1, 2, 3], [4, 5, 6]])\n paddings = tf.constant([[1, 1,], [2, 2]])\n # 'constant_values' is 0.\n # rank of 't' is 2.\n tf.pad(t, paddings, \"CONSTANT\") # [[0, 0, 0, 0, 0, 0, 0],\n # [0, 0, 1, 2, 3, 0, 0],\n # [0, 0, 4, 5, 6, 0, 0],\n # [0, 0, 0, 0, 0, 0, 0]]\n\n tf.pad(t, paddings, \"REFLECT\") # [[6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1],\n # [6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1]]\n\n tf.pad(t, paddings, \"SYMMETRIC\") # [[2, 1, 1, 2, 3, 3, 2],\n # [2, 1, 1, 2, 3, 3, 2],\n # [5, 4, 4, 5, 6, 6, 5],\n # [5, 4, 4, 5, 6, 6, 5]]\n ```\n\n Args:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\" (case-insensitive)\n name: A name for the operation (optional).\n constant_values: In \"CONSTANT\" mode, the scalar pad value to use. Must be\n same type as `tensor`.\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n\n Raises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n \"\"\"\n\n # Convert lower/mixed case to upper for NumPy compatibility\n # NumPy uses all lower-case modes.\n mode = mode.upper()\n if mode == \"CONSTANT\":\n # TODO(rjryan): Once the forward compatibility period (3 weeks) have passed\n # remove the \"Pad\" fallback here.\n if not tensor_util.is_tf_type(constant_values) and constant_values == 0:\n result = gen_array_ops.pad(tensor, paddings, name=name)\n else:\n result = gen_array_ops.pad_v2(\n tensor, paddings, constant_values, name=name)\n elif mode == \"REFLECT\":\n result = gen_array_ops.mirror_pad(\n tensor, paddings, mode=\"REFLECT\", name=name)\n elif mode == \"SYMMETRIC\":\n result = gen_array_ops.mirror_pad(\n tensor, paddings, mode=\"SYMMETRIC\", name=name)\n else:\n raise ValueError(\"Unknown padding mode: %s\" % mode)\n\n # Restore shape information where possible.\n if not context.executing_eagerly():\n paddings_constant = _get_paddings_constant(paddings)\n input_shape = (\n tensor_shape.TensorShape(tensor.shape)\n if isinstance(tensor, ops.Tensor) else result.op.inputs[0].shape)\n if (input_shape.ndims is not None and\n not result.shape.is_fully_defined() and paddings_constant is not None):\n new_shape = []\n for padding, dim in zip(paddings_constant, input_shape.as_list()):\n if padding is None or dim is None or any((x is None for x in padding)):\n new_shape.append(None)\n else:\n new_shape.append(sum(padding) + dim)\n result.set_shape(new_shape)\n\n return result\n\n\ndef _get_paddings_constant(paddings):\n \"\"\"Helper to get the constant values of the paddings arg to pad().\n\n Used under V1 graph mode to facilitate computation of the shape of the output\n tensor of `pad()`.\n\n Args:\n paddings: The same paddings arg as passed to pad(). 
Can be a Tensor, or\n a nested list or tuple of Tensor and/or numbers.\n\n Returns:\n A nested list or numbers or `None`, in which `None` indicates unknown\n padding size.\n \"\"\"\n if isinstance(paddings, ops.Tensor):\n return tensor_util.constant_value(paddings, partial=True)\n elif isinstance(paddings, (list, tuple)):\n return [_get_paddings_constant(x) for x in paddings]\n else:\n return paddings\n\n\n@tf_export(\"meshgrid\")\[email protected]_dispatch_support\ndef meshgrid(*args, **kwargs):\n \"\"\"Broadcasts parameters for evaluation on an N-D grid.\n\n Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`\n of N-D coordinate arrays for evaluating expressions on an N-D grid.\n\n Notes:\n\n `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.\n When the `indexing` argument is set to 'xy' (the default), the broadcasting\n instructions for the first two dimensions are swapped.\n\n Examples:\n\n Calling `X, Y = meshgrid(x, y)` with the tensors\n\n ```python\n x = [1, 2, 3]\n y = [4, 5, 6]\n X, Y = tf.meshgrid(x, y)\n # X = [[1, 2, 3],\n # [1, 2, 3],\n # [1, 2, 3]]\n # Y = [[4, 4, 4],\n # [5, 5, 5],\n # [6, 6, 6]]\n ```\n\n Args:\n *args: `Tensor`s with rank 1.\n **kwargs:\n - indexing: Either 'xy' or 'ij' (optional, default: 'xy').\n - name: A name for the operation (optional).\n\n Returns:\n outputs: A list of N `Tensor`s with rank N.\n\n Raises:\n TypeError: When no keyword arguments (kwargs) are passed.\n ValueError: When indexing keyword argument is not one of `xy` or `ij`.\n \"\"\"\n\n indexing = kwargs.pop(\"indexing\", \"xy\")\n name = kwargs.pop(\"name\", \"meshgrid\")\n if kwargs:\n key = list(kwargs.keys())[0]\n raise TypeError(\"'{}' is an invalid keyword argument \"\n \"for this function\".format(key))\n\n if indexing not in (\"xy\", \"ij\"):\n raise ValueError(\"indexing parameter must be either 'xy' or 'ij'\")\n\n with ops.name_scope(name, \"meshgrid\", args) as name:\n ndim = len(args)\n s0 = (1,) * ndim\n\n if not ndim:\n return []\n\n # Prepare reshape by inserting dimensions with size 1 where needed\n output = []\n for i, x in enumerate(args):\n output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))\n # Create parameters for broadcasting each tensor to the full size\n shapes = [size(x) for x in args]\n\n output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype\n\n if indexing == \"xy\" and ndim > 1:\n output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))\n output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))\n shapes[0], shapes[1] = shapes[1], shapes[0]\n\n # TODO(nolivia): improve performance with a broadcast\n mult_fact = ones(shapes, output_dtype)\n return [x * mult_fact for x in output]\n\n\nNEW_AXIS = -1\nSHRINK_AXIS = -2\n\n\n# PEP-8 naming\n# pylint: disable=invalid-name,redefined-outer-name\ndef _compute_size_of_strided_dim(shrink, spec, size):\n \"\"\"Computes the size of a single strided slice dimension.\"\"\"\n\n unknown = None # Document what None means here.\n use_full_range = None # Document other use of None.\n # if this is a shrink axis (i.e. 
a non-range index)\n # it either will produce an error or return 1\n if shrink:\n return 1\n if size is unknown or size.value is unknown:\n return unknown\n size = size.value\n stride = spec.step\n if stride is not unknown:\n if stride == 0:\n return unknown\n stride = spec.step\n valid_range = [0, size] if stride > 0 else [-1, size - 1]\n\n # PEP-8 naming\n # pylint: disable=invalid-name\n def canonical(x, c):\n if x is use_full_range:\n return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]\n else:\n x_fwd = size + x if x < 0 else x # make negative indices positive\n return max(valid_range[0], min(valid_range[1], x_fwd))\n\n begin = canonical(spec.start, 0)\n end = canonical(spec.stop, 1)\n interval_length = end - begin\n if interval_length == 0 or ((interval_length < 0) != (stride < 0)):\n return 0\n else:\n remainder = 1 if interval_length % stride != 0 else 0\n return interval_length // stride + remainder\n else:\n return unknown # unknown because stride is unknown\n\n\ndef _TileGradShape(op):\n \"\"\"Shape function for the TileGrad op.\"\"\"\n multiples_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])\n # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)\n # it is a vector of non-negative integers, and (ii) doing so allows\n # us to handle partially-known multiples.\n multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(\n input_shape.ndims)\n if multiples.ndims is None:\n return [tensor_shape.unknown_shape()]\n else:\n output_dims = []\n for dim, multiple in zip(input_shape.dims, multiples.dims):\n output_dims.append(dim // multiple)\n return [tensor_shape.TensorShape(output_dims)]\n\n\n@tf_export(\"edit_distance\")\[email protected]_dispatch_support\ndef edit_distance(hypothesis, truth, normalize=True, name=\"edit_distance\"):\n \"\"\"Computes the Levenshtein distance between sequences.\n\n This operation takes variable-length sequences (`hypothesis` and `truth`),\n each provided as a `SparseTensor`, and computes the Levenshtein distance.\n You can normalize the edit distance by length of `truth` by setting\n `normalize` to true.\n\n For example:\n\n Given the following input,\n * `hypothesis` is a `tf.SparseTensor` of shape `[2, 1, 1]`\n * `truth` is a `tf.SparseTensor` of shape `[2, 2, 2]`\n\n >>> hypothesis = tf.SparseTensor(\n ... [[0, 0, 0],\n ... [1, 0, 0]],\n ... [\"a\", \"b\"],\n ... (2, 1, 1))\n >>> truth = tf.SparseTensor(\n ... [[0, 1, 0],\n ... [1, 0, 0],\n ... [1, 0, 1],\n ... [1, 1, 0]],\n ... [\"a\", \"b\", \"c\", \"a\"],\n ... (2, 2, 2))\n >>> tf.edit_distance(hypothesis, truth, normalize=True)\n <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n array([[inf, 1. ],\n [0.5, 1. ]], dtype=float32)>\n\n The operation returns a dense Tensor of shape `[2, 2]` with\n edit distances normalized by `truth` lengths.\n\n **Note**: It is possible to calculate edit distance between two\n sparse tensors with variable-length values. 
However, attempting to create\n them while eager execution is enabled will result in a `ValueError`.\n\n For the following inputs,\n\n ```python\n # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:\n # (0,0) = [\"a\"]\n # (1,0) = [\"b\"]\n hypothesis = tf.sparse.SparseTensor(\n [[0, 0, 0],\n [1, 0, 0]],\n [\"a\", \"b\"],\n (2, 1, 1))\n\n # 'truth' is a tensor of shape `[2, 2]` with variable-length values:\n # (0,0) = []\n # (0,1) = [\"a\"]\n # (1,0) = [\"b\", \"c\"]\n # (1,1) = [\"a\"]\n truth = tf.sparse.SparseTensor(\n [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]],\n [\"a\", \"b\", \"c\", \"a\"],\n (2, 2, 2))\n\n normalize = True\n\n # The output would be a dense Tensor of shape `(2,)`, with edit distances\n normalized by 'truth' lengths.\n # output => array([0., 0.5], dtype=float32)\n ```\n\n Args:\n hypothesis: A `SparseTensor` containing hypothesis sequences.\n truth: A `SparseTensor` containing truth sequences.\n normalize: A `bool`. If `True`, normalizes the Levenshtein distance by\n length of `truth.`\n name: A name for the operation (optional).\n\n Returns:\n A dense `Tensor` with rank `R - 1`, where R is the rank of the\n `SparseTensor` inputs `hypothesis` and `truth`.\n\n Raises:\n TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.\n \"\"\"\n if not isinstance(\n hypothesis,\n (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n raise TypeError(\"Hypothesis must be a SparseTensor.\")\n if not isinstance(\n truth, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n raise TypeError(\"Truth must be a SparseTensor.\")\n\n return gen_array_ops.edit_distance(\n hypothesis.indices,\n hypothesis.values,\n hypothesis.dense_shape,\n truth.indices,\n truth.values,\n truth.dense_shape,\n normalize=normalize,\n name=name)\n\n\[email protected](\"FakeQuantWithMinMaxArgs\")\ndef _FakeQuantWithMinMaxArgsGradient(op, grad):\n \"\"\"Gradient for FakeQuantWithMinMaxArgs op.\"\"\"\n return fake_quant_with_min_max_args_gradient(\n grad,\n op.inputs[0],\n min=op.get_attr(\"min\"),\n max=op.get_attr(\"max\"),\n num_bits=op.get_attr(\"num_bits\"),\n narrow_range=op.get_attr(\"narrow_range\"))\n\n\[email protected](\"FakeQuantWithMinMaxVars\")\ndef _FakeQuantWithMinMaxVarsGradient(op, grad):\n \"\"\"Gradient for FakeQuantWithMinMaxVars op.\"\"\"\n return fake_quant_with_min_max_vars_gradient(\n grad,\n op.inputs[0],\n op.inputs[1],\n op.inputs[2],\n num_bits=op.get_attr(\"num_bits\"),\n narrow_range=op.get_attr(\"narrow_range\"))\n\n\[email protected](\"FakeQuantWithMinMaxVarsPerChannel\")\ndef _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):\n \"\"\"Gradient for FakeQuantWithMinMaxVarsPerChannel op.\"\"\"\n return fake_quant_with_min_max_vars_per_channel_gradient(\n grad,\n op.inputs[0],\n op.inputs[1],\n op.inputs[2],\n num_bits=op.get_attr(\"num_bits\"),\n narrow_range=op.get_attr(\"narrow_range\"))\n\n\[email protected](\"QuantizeAndDequantizeV4\")\ndef _QuantizeAndDequantizeV4Grad(op, grad):\n \"\"\"Gradient for QuantizeAndDequantizeV4 op.\"\"\"\n return quantize_and_dequantize_v4_grad(\n grad,\n op.inputs[0],\n op.inputs[1],\n op.inputs[2],\n axis=op.get_attr(\"axis\"))\n\n\[email protected](\"QuantizeAndDequantizeV4Grad\")\ndef _QuantizeAndDequantizeV4GradGrad(op, grad):\n \"\"\"Gradient for QuantizeAndDequantizeV4Grad op.\"\"\"\n return _QuantizeAndDequantizeV4Grad(op, grad)\n\n\n@tf_export(\"required_space_to_batch_paddings\")\ndef required_space_to_batch_paddings(input_shape,\n block_shape,\n base_paddings=None,\n 
name=None):\n \"\"\"Calculate padding required to make block_shape divide input_shape.\n\n This function can be used to calculate a suitable paddings argument for use\n with space_to_batch_nd and batch_to_space_nd.\n\n Args:\n input_shape: int32 Tensor of shape [N].\n block_shape: int32 Tensor of shape [N].\n base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum\n amount of padding to use. All elements must be >= 0. If not specified,\n defaults to 0.\n name: string. Optional name prefix.\n\n Returns:\n (paddings, crops), where:\n\n `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]\n satisfying:\n\n paddings[i, 0] = base_paddings[i, 0].\n 0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]\n (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0\n\n crops[i, 0] = 0\n crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]\n\n Raises: ValueError if called with incompatible shapes.\n \"\"\"\n with ops.name_scope(name, \"required_space_to_batch_paddings\",\n [input_shape, block_shape]):\n input_shape = ops.convert_to_tensor(\n input_shape, dtype=dtypes.int32, name=\"input_shape\")\n block_shape = ops.convert_to_tensor(\n block_shape, dtype=dtypes.int32, name=\"block_shape\")\n\n block_shape.get_shape().assert_is_fully_defined()\n block_shape.get_shape().assert_has_rank(1)\n num_block_dims = block_shape.get_shape().dims[0].value\n if num_block_dims == 0:\n return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)\n\n input_shape.get_shape().assert_is_compatible_with([num_block_dims])\n\n if base_paddings is not None:\n base_paddings = ops.convert_to_tensor(\n base_paddings, dtype=dtypes.int32, name=\"base_paddings\")\n base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])\n else:\n base_paddings = zeros([num_block_dims, 2], dtypes.int32)\n\n const_block_shape = tensor_util.constant_value(block_shape)\n const_input_shape = tensor_util.constant_value(input_shape)\n const_base_paddings = tensor_util.constant_value(base_paddings)\n if (const_block_shape is not None and const_input_shape is not None and\n const_base_paddings is not None):\n block_shape = const_block_shape\n input_shape = const_input_shape\n base_paddings = const_base_paddings\n\n # Use same expression for both constant and non-constant case.\n pad_start = base_paddings[:, 0]\n orig_pad_end = base_paddings[:, 1]\n full_input_shape = input_shape + pad_start + orig_pad_end\n pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape\n pad_end = orig_pad_end + pad_end_extra\n\n result_paddings = stack(\n [[pad_start[i], pad_end[i]] for i in range(num_block_dims)],\n name=\"paddings\")\n result_crops = stack([[0, pad_end_extra[i]] for i in range(num_block_dims)],\n name=\"crops\")\n return result_paddings, result_crops\n\n\n@tf_export(v1=[\"nn.space_to_batch\", \"space_to_batch\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"space_to_batch\")\ndef space_to_batch( # pylint: disable=missing-docstring\n input, # pylint: disable=redefined-builtin\n paddings,\n block_size=None,\n name=None,\n block_shape=None): # pylint: disable=redefined-builtin\n block_size = deprecation.deprecated_argument_lookup(\"block_shape\",\n block_shape, \"block_size\",\n block_size)\n result = space_to_batch_nd(\n input,\n paddings=paddings,\n block_shape=np.array([block_size, block_size], dtype=np.int64),\n name=name)\n result.set_shape(result.get_shape().with_rank(4))\n return result\n\n\nspace_to_batch.__doc__ = 
gen_array_ops.space_to_batch.__doc__\n\n\n@tf_export(\"space_to_batch\", \"nn.space_to_batch\", v1=[])\[email protected]_dispatch_support\ndef space_to_batch_v2(input, block_shape, paddings, name=None): # pylint: disable=redefined-builtin\n return space_to_batch_nd(input, block_shape, paddings, name)\n\n\nspace_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__\n\n\n@tf_export(v1=[\"nn.space_to_depth\", \"space_to_depth\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"space_to_depth\")\ndef space_to_depth(input, block_size, name=None, data_format=\"NHWC\"): # pylint: disable=redefined-builtin\n return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)\n\n\nspace_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__\n\n\n@tf_export(\"nn.space_to_depth\", v1=[])\[email protected]_dispatch_support\ndef space_to_depth_v2(input, block_size, data_format=\"NHWC\", name=None): # pylint: disable=redefined-builtin\n return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)\n\n\nspace_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__\n\n\n@tf_export(v1=[\"nn.depth_to_space\", \"depth_to_space\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"depth_to_space\")\ndef depth_to_space(input, block_size, name=None, data_format=\"NHWC\"): # pylint: disable=redefined-builtin\n return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)\n\n\ndepth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__\n\n\n@tf_export(\"nn.depth_to_space\", v1=[])\[email protected]_dispatch_support\ndef depth_to_space_v2(input, block_size, data_format=\"NHWC\", name=None): # pylint: disable=redefined-builtin\n return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)\n\n\ndepth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__\n\n\n@tf_export(v1=[\"batch_to_space\"])\[email protected]_dispatch_support\ndef batch_to_space(input, crops, block_size, name=None, block_shape=None): # pylint: disable=redefined-builtin,missing-docstring\n block_size = deprecation.deprecated_argument_lookup(\"block_shape\",\n block_shape, \"block_size\",\n block_size)\n result = batch_to_space_nd(\n input,\n crops=crops,\n block_shape=np.array([block_size, block_size], dtype=np.int64),\n name=name)\n result.set_shape(result.get_shape().with_rank(4))\n return result\n\n\nbatch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__\n\n\n@tf_export(\"batch_to_space\", v1=[])\[email protected]_dispatch_support\ndef batch_to_space_v2(input, block_shape, crops, name=None): # pylint: disable=redefined-builtin\n \"\"\"BatchToSpace for N-D tensors of type T.\n\n This operation reshapes the \"batch\" dimension 0 into `M + 1` dimensions of\n shape `block_shape + [batch]`, interleaves these blocks back into the grid\n defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the\n same rank as the input. The spatial dimensions of this intermediate result\n are then optionally cropped according to `crops` to produce the output. This\n is the reverse of SpaceToBatch (see `tf.space_to_batch`).\n\n Args:\n input: A N-D `Tensor` with shape `input_shape = [batch] + spatial_shape +\n remaining_shape`, where `spatial_shape` has M dimensions.\n block_shape: A 1-D `Tensor` with shape [M]. Must be one of the following\n types: `int32`, `int64`. All values must be >= 1. 
For backwards\n compatibility with TF 1.0, this parameter may be an int, in which case it\n is converted to\n `numpy.array([block_shape, block_shape],\n dtype=numpy.int64)`.\n crops: A 2-D `Tensor` with shape `[M, 2]`. Must be one of the\n following types: `int32`, `int64`. All values must be >= 0.\n `crops[i] = [crop_start, crop_end]` specifies the amount to crop from\n input dimension `i + 1`, which corresponds to spatial dimension `i`.\n It is required that\n `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n This operation is equivalent to the following steps:\n 1. Reshape `input` to `reshaped` of shape: [block_shape[0], ...,\n block_shape[M-1], batch / prod(block_shape), input_shape[1], ...,\n input_shape[N-1]]\n 2. Permute dimensions of `reshaped` to produce `permuted` of shape\n [batch / prod(block_shape), input_shape[1], block_shape[0], ...,\n input_shape[M], block_shape[M-1], input_shape[M+1],\n ..., input_shape[N-1]]\n 3. Reshape `permuted` to produce `reshaped_permuted` of shape\n [batch / prod(block_shape), input_shape[1] * block_shape[0], ...,\n input_shape[M] * block_shape[M-1], input_shape[M+1], ...,\n input_shape[N-1]]\n 4. Crop the start and end of dimensions `[1, ..., M]` of\n `reshaped_permuted` according to `crops` to produce the output\n of shape:\n [batch / prod(block_shape), input_shape[1] *\n block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] *\n block_shape[M-1] - crops[M-1,0] - crops[M-1,1], input_shape[M+1],\n ..., input_shape[N-1]]\n name: A name for the operation (optional).\n\n Examples:\n\n 1. For the following input of shape `[4, 1, 1, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n [[[[1]]],\n [[[2]]],\n [[[3]]],\n [[[4]]]]\n ```\n\n The output tensor has shape `[1, 2, 2, 1]` and value:\n\n ```\n x = [[[[1], [2]],\n [[3], [4]]]]\n ```\n\n 2. For the following input of shape `[4, 1, 1, 3]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n [[[1, 2, 3]],\n [[4, 5, 6]],\n [[7, 8, 9]],\n [[10, 11, 12]]]\n ```\n\n The output tensor has shape `[1, 2, 2, 3]` and value:\n\n ```python\n x = [[[[1, 2, 3], [4, 5, 6 ]],\n [[7, 8, 9], [10, 11, 12]]]]\n ```\n\n 3. For the following\n input of shape `[4, 2, 2, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:\n\n ```python\n x = [[[[1], [3]], [[ 9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n ```\n\n The output tensor has shape `[1, 4, 4, 1]` and value:\n\n ```python\n x = [[[1], [2], [ 3], [ 4]],\n [[5], [6], [ 7], [ 8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]\n ```\n\n 4. For the following input of shape\n `[8, 1, 3, 1]`,\n `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:\n\n ```python\n x = [[[[0], [ 1], [ 3]]],\n [[[0], [ 9], [11]]],\n [[[0], [ 2], [ 4]]],\n [[[0], [10], [12]]],\n [[[0], [ 5], [ 7]]],\n [[[0], [13], [15]]],\n [[[0], [ 6], [ 8]]],\n [[[0], [14], [16]]]]\n ```\n\n The output tensor has shape `[2, 2, 4, 1]` and value:\n\n ```python\n x = [[[[ 1], [ 2], [ 3], [ 4]],\n [[ 5], [ 6], [ 7], [ 8]]],\n [[[ 9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n ```\n\n Returns:\n A `Tensor`. 
Has the same type as `input`.\n \"\"\"\n if isinstance(block_shape, int):\n block_shape = np.array([block_shape, block_shape], dtype=np.int64)\n\n return batch_to_space_nd(\n input=input, block_shape=block_shape, crops=crops, name=name)\n\n\n@tf_export(\"one_hot\")\[email protected]_dispatch_support\ndef one_hot(indices,\n depth,\n on_value=None,\n off_value=None,\n axis=None,\n dtype=None,\n name=None):\n \"\"\"Returns a one-hot tensor.\n\n See also `tf.fill`, `tf.eye`.\n\n The locations represented by indices in `indices` take value `on_value`,\n while all other locations take value `off_value`.\n\n `on_value` and `off_value` must have matching data types. If `dtype` is also\n provided, they must be the same data type as specified by `dtype`.\n\n If `on_value` is not provided, it will default to the value `1` with type\n `dtype`\n\n If `off_value` is not provided, it will default to the value `0` with type\n `dtype`\n\n If the input `indices` is rank `N`, the output will have rank `N+1`. The\n new axis is created at dimension `axis` (default: the new axis is appended\n at the end).\n\n If `indices` is a scalar the output shape will be a vector of length `depth`\n\n If `indices` is a vector of length `features`, the output shape will be:\n\n ```\n features x depth if axis == -1\n depth x features if axis == 0\n ```\n\n If `indices` is a matrix (batch) with shape `[batch, features]`, the output\n shape will be:\n\n ```\n batch x features x depth if axis == -1\n batch x depth x features if axis == 1\n depth x batch x features if axis == 0\n ```\n\n If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer\n to a non-ragged axis. The output will be equivalent to applying 'one_hot' on\n the values of the RaggedTensor, and creating a new RaggedTensor from the\n result.\n\n If `dtype` is not provided, it will attempt to assume the data type of\n `on_value` or `off_value`, if one or both are passed in. If none of\n `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the\n value `tf.float32`.\n\n Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,\n etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.\n\n For example:\n\n ```python\n indices = [0, 1, 2]\n depth = 3\n tf.one_hot(indices, depth) # output: [3 x 3]\n # [[1., 0., 0.],\n # [0., 1., 0.],\n # [0., 0., 1.]]\n\n indices = [0, 2, -1, 1]\n depth = 3\n tf.one_hot(indices, depth,\n on_value=5.0, off_value=0.0,\n axis=-1) # output: [4 x 3]\n # [[5.0, 0.0, 0.0], # one_hot(0)\n # [0.0, 0.0, 5.0], # one_hot(2)\n # [0.0, 0.0, 0.0], # one_hot(-1)\n # [0.0, 5.0, 0.0]] # one_hot(1)\n\n indices = [[0, 2], [1, -1]]\n depth = 3\n tf.one_hot(indices, depth,\n on_value=1.0, off_value=0.0,\n axis=-1) # output: [2 x 2 x 3]\n # [[[1.0, 0.0, 0.0], # one_hot(0)\n # [0.0, 0.0, 1.0]], # one_hot(2)\n # [[0.0, 1.0, 0.0], # one_hot(1)\n # [0.0, 0.0, 0.0]]] # one_hot(-1)\n\n indices = tf.ragged.constant([[0, 1], [2]])\n depth = 3\n tf.one_hot(indices, depth) # output: [2 x None x 3]\n # [[[1., 0., 0.],\n # [0., 1., 0.]],\n # [[0., 0., 1.]]]\n ```\n\n Args:\n indices: A `Tensor` of indices.\n depth: A scalar defining the depth of the one hot dimension.\n on_value: A scalar defining the value to fill in output when `indices[j]\n = i`. (default: 1)\n off_value: A scalar defining the value to fill in output when `indices[j]\n != i`. 
(default: 0)\n axis: The axis to fill (default: -1, a new inner-most axis).\n dtype: The data type of the output tensor.\n name: A name for the operation (optional).\n\n Returns:\n output: The one-hot tensor.\n\n Raises:\n TypeError: If dtype of either `on_value` or `off_value` don't match `dtype`\n TypeError: If dtype of `on_value` and `off_value` don't match one another\n \"\"\"\n with ops.name_scope(\n name, \"one_hot\",\n [indices, depth, on_value, off_value, axis, dtype]) as name:\n on_exists = on_value is not None\n off_exists = off_value is not None\n\n if on_exists:\n on_value = ops.convert_to_tensor(on_value, dtype_hint=dtype)\n if off_exists:\n off_value = ops.convert_to_tensor(off_value, dtype_hint=dtype)\n\n on_dtype = on_value.dtype.base_dtype if on_exists else None\n off_dtype = off_value.dtype.base_dtype if off_exists else None\n\n if on_exists or off_exists:\n if dtype is not None:\n # Ensure provided on_value and/or off_value match dtype\n if on_exists and on_dtype != dtype:\n raise TypeError(\"dtype {0} of on_value does not match \"\n \"dtype parameter {1}\".format(on_dtype, dtype))\n if off_exists and off_dtype != dtype:\n raise TypeError(\"dtype {0} of off_value does not match \"\n \"dtype parameter {1}\".format(off_dtype, dtype))\n else:\n # dtype not provided: automatically assign it\n dtype = on_dtype if on_exists else off_dtype\n elif dtype is None:\n # None of on_value, off_value, or dtype provided. Default dtype to float32\n dtype = dtypes.float32\n\n if not on_exists:\n # on_value not provided: assign to value 1 of type dtype\n on_value = ops.convert_to_tensor(1, dtype, name=\"on_value\")\n on_dtype = dtype\n if not off_exists:\n # off_value not provided: assign to value 0 of type dtype\n off_value = ops.convert_to_tensor(0, dtype, name=\"off_value\")\n off_dtype = dtype\n\n if on_dtype != off_dtype:\n raise TypeError(\"dtype {0} of on_value does not match \"\n \"dtype {1} of off_value\".format(on_dtype, off_dtype))\n\n return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,\n name)\n\n\ndef _all_dimensions(x):\n \"\"\"Returns a 1D-tensor listing all dimensions in x.\"\"\"\n # Fast path: avoid creating Rank and Range ops if ndims is known.\n if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:\n return constant_op.constant(\n np.arange(x.get_shape().ndims), dtype=dtypes.int32)\n if (isinstance(x, sparse_tensor.SparseTensor) and\n x.dense_shape.get_shape().is_fully_defined()):\n r = x.dense_shape.get_shape().dims[0].value # sparse.dense_shape is 1-D.\n return constant_op.constant(np.arange(r), dtype=dtypes.int32)\n\n # Otherwise, we rely on `range` and `rank` to do the right thing at runtime.\n return gen_math_ops._range(0, rank(x), 1)\n\n\n@tf_export(\"sequence_mask\")\[email protected]_dispatch_support\ndef sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):\n \"\"\"Returns a mask tensor representing the first N positions of each cell.\n\n If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has\n dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with\n\n ```\n mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])\n ```\n\n Examples:\n\n ```python\n tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],\n # [True, True, True, False, False],\n # [True, True, False, False, False]]\n\n tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],\n # [True, True, True]],\n # [[True, True, False],\n # [False, False, False]]]\n ```\n\n Args:\n lengths: integer tensor, all its 
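# Illustrative sketch, not part of this module's public API: it demonstrates
# the dtype-inference rules described in the `one_hot` docstring above. The
# indices and fill values are arbitrary assumptions chosen for the example.
def _example_one_hot_dtype_inference():
  """How `one_hot` picks its output dtype from `on_value`/`off_value`."""
  a = one_hot([0, 1], depth=2)                               # no hints -> float32
  b = one_hot([0, 1], depth=2, on_value=1, off_value=0)      # inferred -> int32
  c = one_hot([0, 1], depth=2, on_value="y", off_value="n")  # both given -> string
  return a.dtype, b.dtype, c.dtype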
values <= maxlen.\n maxlen: scalar integer tensor, size of last dimension of returned tensor.\n Default is the maximum value in `lengths`.\n dtype: output type of the resulting tensor.\n name: name of the op.\n\n Returns:\n A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.\n Raises:\n ValueError: if `maxlen` is not a scalar.\n \"\"\"\n with ops.name_scope(name, \"SequenceMask\", [lengths, maxlen]):\n lengths = ops.convert_to_tensor(lengths)\n\n if maxlen is None:\n maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))\n maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)\n else:\n maxlen = ops.convert_to_tensor(maxlen)\n if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:\n raise ValueError(\"maxlen must be scalar for sequence_mask\")\n\n # The basic idea is to compare a range row vector of size maxlen:\n # [0, 1, 2, 3, 4]\n # to length as a matrix with 1 column: [[1], [3], [2]].\n # Because of broadcasting on both arguments this comparison results\n # in a matrix of size (len(lengths), maxlen)\n row_vector = gen_math_ops._range(\n constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))\n # Since maxlen >= max(lengths), it is safe to use maxlen as a cast\n # authoritative type. Whenever maxlen fits into tf.int32, so do the lengths.\n matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)\n result = row_vector < matrix\n if dtype is None or result.dtype.is_compatible_with(dtype):\n return result\n else:\n return gen_math_ops.cast(result, dtype)\n\n\n@tf_export(v1=[\"squeeze\"])\[email protected]_dispatch_support\[email protected]_args(None, \"Use the `axis` argument instead\",\n \"squeeze_dims\")\ndef squeeze(input, axis=None, name=None, squeeze_dims=None):\n # pylint: disable=redefined-builtin\n \"\"\"Removes dimensions of size 1 from the shape of a tensor.\n\n Given a tensor `input`, this operation returns a tensor of the same type with\n all dimensions of size 1 removed. If you don't want to remove all size 1\n dimensions, you can remove specific size 1 dimensions by specifying\n `axis`.\n\n For example:\n\n >>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n >>> t = tf.ones([1, 2, 1, 3, 1, 1])\n >>> print(tf.shape(tf.squeeze(t)).numpy())\n [2 3]\n\n Or, to remove specific size 1 dimensions:\n\n >>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n >>> t = tf.ones([1, 2, 1, 3, 1, 1])\n >>> print(tf.shape(tf.squeeze(t, [2, 4])).numpy())\n [1 2 3 1]\n\n Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`\n time, where `N` is the number of elements in the squeezed dimensions.\n\n Args:\n input: A `Tensor`. The `input` to squeeze.\n axis: An optional list of `ints`. Defaults to `[]`. If specified, only\n squeezes the dimensions listed. The dimension index starts at 0. It is an\n error to squeeze a dimension that is not 1. Must be in the range\n `[-rank(input), rank(input))`. Must be specified if `input` is a\n `RaggedTensor`.\n name: A name for the operation (optional).\n squeeze_dims: Deprecated keyword argument that is now axis.\n\n Returns:\n A `Tensor`. 
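# Illustrative sketch, not part of this module's public API: the
# `sequence_mask` examples above produce booleans; passing `dtype` casts the
# mask, which is handy when it is used directly as a weight. The lengths are
# arbitrary assumptions chosen for the example.
def _example_sequence_mask_as_weights():
  """A float mask from `sequence_mask`, e.g. for weighting a masked loss."""
  # [[1., 0., 0., 0., 0.],
  #  [1., 1., 1., 0., 0.],
  #  [1., 1., 0., 0., 0.]]
  return sequence_mask([1, 3, 2], maxlen=5, dtype=dtypes.float32)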
Has the same type as `input`.\n Contains the same data as `input`, but has one or more dimensions of\n size 1 removed.\n\n Raises:\n ValueError: When both `squeeze_dims` and `axis` are specified.\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"squeeze_dims\",\n squeeze_dims)\n if np.isscalar(axis):\n axis = [axis]\n return gen_array_ops.squeeze(input, axis, name)\n\n\n@tf_export(\"squeeze\", v1=[])\[email protected]_dispatch_support\ndef squeeze_v2(input, axis=None, name=None):\n \"\"\"Removes dimensions of size 1 from the shape of a tensor.\n\n Given a tensor `input`, this operation returns a tensor of the same type with\n all dimensions of size 1 removed. If you don't want to remove all size 1\n dimensions, you can remove specific size 1 dimensions by specifying\n `axis`.\n\n For example:\n\n ```python\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n tf.shape(tf.squeeze(t)) # [2, 3]\n ```\n\n Or, to remove specific size 1 dimensions:\n\n ```python\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]\n ```\n\n Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a\n deprecated `squeeze_dims` argument.\n\n Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`\n time, where `N` is the number of elements in the squeezed dimensions.\n\n Args:\n input: A `Tensor`. The `input` to squeeze.\n axis: An optional list of `ints`. Defaults to `[]`. If specified, only\n squeezes the dimensions listed. The dimension index starts at 0. It is an\n error to squeeze a dimension that is not 1. Must be in the range\n `[-rank(input), rank(input))`. Must be specified if `input` is a\n `RaggedTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Contains the same data as `input`, but has one or more dimensions of\n size 1 removed.\n\n Raises:\n ValueError: The input cannot be converted to a tensor, or the specified\n axis cannot be squeezed.\n \"\"\"\n # pylint: disable=redefined-builtin\n return squeeze(input, axis, name)\n\n\n@tf_export(v1=[\"where\"])\[email protected]_dispatch_support\ndef where(condition, x=None, y=None, name=None):\n \"\"\"Return the elements, either from `x` or `y`, depending on the `condition`.\n\n If both `x` and `y` are None, then this operation returns the coordinates of\n true elements of `condition`. The coordinates are returned in a 2-D tensor\n where the first dimension (rows) represents the number of true elements, and\n the second dimension (columns) represents the coordinates of the true\n elements. Keep in mind, the shape of the output tensor can vary depending on\n how many true values there are in input. Indices are output in row-major\n order.\n\n If both non-None, `x` and `y` must have the same shape.\n The `condition` tensor must be a scalar if `x` and `y` are scalar.\n If `x` and `y` are tensors of higher rank, then `condition` must be either a\n vector with size matching the first dimension of `x`, or must have the same\n shape as `x`.\n\n The `condition` tensor acts as a mask that chooses, based on the value at each\n element, whether the corresponding element / row in the output should be taken\n from `x` (if true) or `y` (if false).\n\n If `condition` is a vector and `x` and `y` are higher rank matrices, then it\n chooses which row (outer dimension) to copy from `x` and `y`. 
If `condition`\n has the same shape as `x` and `y`, then it chooses which element to copy from\n `x` and `y`.\n\n Args:\n condition: A `Tensor` of type `bool`\n x: A Tensor which may have the same shape as `condition`. If `condition` is\n rank 1, `x` may have higher rank, but its first dimension must match the\n size of `condition`.\n y: A `tensor` with the same shape and type as `x`.\n name: A name of the operation (optional)\n\n Returns:\n A `Tensor` with the same type and shape as `x`, `y` if they are non-None.\n Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-None.\n\n @compatibility(TF2)\n\n This API is compatible with eager execution and `tf.function`. However, this\n is still a legacy API endpoint originally designed for TF1. To migrate to\n fully-native TF2, please replace its usage with `tf.where` instead, which is\n directly backwards compatible with `tf.compat.v1.where`.\n\n However,`tf.compat.v1.where` is more restrictive than `tf.where`, requiring\n `x` and `y` to have the same shape, and returning a `Tensor` with the same\n type and shape as `x`, `y` (if they are both non-None).\n\n `tf.where` will accept `x`, `y` that are not the same shape as long as they\n are broadcastable with one another and with `condition`, and will return a\n `Tensor` with shape broadcast from `condition`, `x`, and `y`.\n\n For example, the following works with `tf.where` but not `tf.compat.v1.where`:\n\n >>> tf.where([True, False, False, True], [1,2,3,4], [100])\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 100, 100, 4],\n dtype=int32)>\n\n >>> tf.where(True, [1,2,3,4], 100)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([1, 2, 3, 4],\n dtype=int32)>\n\n @end_compatibility\n \"\"\"\n if x is None and y is None:\n with ops.name_scope(name, \"Where\", [condition]) as name:\n condition = ops.convert_to_tensor(\n condition, preferred_dtype=dtypes.bool, name=\"condition\")\n return gen_array_ops.where(condition=condition, name=name)\n elif x is not None and y is not None:\n return gen_math_ops.select(condition=condition, x=x, y=y, name=name)\n else:\n raise ValueError(\"x and y must both be non-None or both be None.\")\n\n\n@tf_export(\"where\", v1=[\"where_v2\"])\[email protected]_dispatch_support\ndef where_v2(condition, x=None, y=None, name=None):\n \"\"\"Return the elements where `condition` is `True` (multiplexing `x` and `y`).\n\n This operator has two modes: in one mode both `x` and `y` are provided, in\n another mode neither are provided. 
`condition` is always expected to be a\n `tf.Tensor` of type `bool`.\n\n #### Retrieving indices of `True` elements\n\n If `x` and `y` are not provided (both are None):\n\n `tf.where` will return the indices of `condition` that are `True`, in\n the form of a 2-D tensor with shape (n, d).\n (Where n is the number of matching indices in `condition`,\n and d is the number of dimensions in `condition`).\n\n Indices are output in row-major order.\n\n >>> tf.where([True, False, False, True])\n <tf.Tensor: shape=(2, 1), dtype=int64, numpy=\n array([[0],\n [3]])>\n\n >>> tf.where([[True, False], [False, True]])\n <tf.Tensor: shape=(2, 2), dtype=int64, numpy=\n array([[0, 0],\n [1, 1]])>\n\n >>> tf.where([[[True, False], [False, True], [True, True]]])\n <tf.Tensor: shape=(4, 3), dtype=int64, numpy=\n array([[0, 0, 0],\n [0, 1, 1],\n [0, 2, 0],\n [0, 2, 1]])>\n\n #### Multiplexing between `x` and `y`\n\n If `x` and `y` are provided (both have non-None values):\n\n `tf.where` will choose an output shape from the shapes of `condition`, `x`,\n and `y` that all three shapes are\n [broadcastable](https://docs.scipy.org/doc/numpy/reference/ufuncs.html) to.\n\n The `condition` tensor acts as a mask that chooses whether the corresponding\n element / row in the output should be taken from `x`\n (if the element in `condition` is True) or `y` (if it is false).\n\n >>> tf.where([True, False, False, True], [1,2,3,4], [100,200,300,400])\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 200, 300, 4],\n dtype=int32)>\n >>> tf.where([True, False, False, True], [1,2,3,4], [100])\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 100, 100, 4],\n dtype=int32)>\n >>> tf.where([True, False, False, True], [1,2,3,4], 100)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 100, 100, 4],\n dtype=int32)>\n >>> tf.where([True, False, False, True], 1, 100)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 100, 100, 1],\n dtype=int32)>\n\n >>> tf.where(True, [1,2,3,4], 100)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([1, 2, 3, 4],\n dtype=int32)>\n >>> tf.where(False, [1,2,3,4], 100)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([100, 100, 100, 100],\n dtype=int32)>\n\n Note that if the gradient of either branch of the tf.where generates\n a NaN, then the gradient of the entire tf.where will be NaN. This is because\n the gradient calculation for tf.where combines the two branches, for\n performance reasons.\n\n A workaround is to use an inner tf.where to ensure the function has\n no asymptote, and to avoid computing a value whose gradient is NaN by\n replacing dangerous inputs with safe inputs.\n\n Instead of this,\n\n >>> x = tf.constant(0., dtype=tf.float32)\n >>> with tf.GradientTape() as tape:\n ... tape.watch(x)\n ... y = tf.where(x < 1., 0., 1. / x)\n >>> print(tape.gradient(y, x))\n tf.Tensor(nan, shape=(), dtype=float32)\n\n Although, the `1. / x` values are never used, its gradient is a NaN when x =\n 0. Instead, we should guard that with another `tf.where`\n\n >>> x = tf.constant(0., dtype=tf.float32)\n >>> with tf.GradientTape() as tape:\n ... tape.watch(x)\n ... safe_x = tf.where(tf.equal(x, 0.), 1., x)\n ... y = tf.where(x < 1., 0., 1. 
/ safe_x)\n >>> print(tape.gradient(y, x))\n tf.Tensor(0.0, shape=(), dtype=float32)\n\n Args:\n condition: A `tf.Tensor` of type `bool`\n x: If provided, a Tensor which is of the same type as `y`, and has a shape\n broadcastable with `condition` and `y`.\n y: If provided, a Tensor which is of the same type as `x`, and has a shape\n broadcastable with `condition` and `x`.\n name: A name of the operation (optional).\n\n Returns:\n If `x` and `y` are provided:\n A `Tensor` with the same type as `x` and `y`, and shape that\n is broadcast from `condition`, `x`, and `y`.\n Otherwise, a `Tensor` with shape `(num_true, dim_size(condition))`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-None, or the shapes\n are not all broadcastable.\n \"\"\"\n if x is None and y is None:\n with ops.name_scope(name, \"Where\", [condition]) as name:\n condition = ops.convert_to_tensor(\n condition, preferred_dtype=dtypes.bool, name=\"condition\")\n return gen_array_ops.where(condition=condition, name=name)\n elif x is not None and y is not None:\n return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)\n else:\n raise ValueError(\"x and y must both be non-None or both be None.\")\n\n\n# pylint: disable=redefined-builtin\n@tf_export(v1=[\"reverse_sequence\"])\[email protected]_args(None,\n \"seq_dim is deprecated, use seq_axis instead\",\n \"seq_dim\")\[email protected]_args(None,\n \"batch_dim is deprecated, use batch_axis instead\",\n \"batch_dim\")\ndef reverse_sequence(input,\n seq_lengths,\n seq_axis=None,\n batch_axis=None,\n name=None,\n seq_dim=None,\n batch_dim=None):\n \"\"\"Reverses variable length slices.\n\n This op first slices `input` along the dimension `batch_axis`, and for\n each slice `i`, reverses the first `seq_lengths[i]` elements along the\n dimension `seq_axis`.\n\n The elements of `seq_lengths` must obey `seq_lengths[i] <=\n input.dims[seq_axis]`, and `seq_lengths` must be a vector of length\n `input.dims[batch_axis]`.\n\n The output slice `i` along dimension `batch_axis` is then given by\n input slice `i`, with the first `seq_lengths[i]` slices along\n dimension `seq_axis` reversed.\n\n Example usage:\n\n >>> seq_lengths = [7, 2, 3, 5]\n >>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0],\n ... [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]]\n >>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0)\n >>> output\n <tf.Tensor: shape=(4, 8), dtype=int32, numpy=\n array([[0, 0, 5, 4, 3, 2, 1, 0],\n [2, 1, 0, 0, 0, 0, 0, 0],\n [3, 2, 1, 4, 0, 0, 0, 0],\n [5, 4, 3, 2, 1, 6, 7, 8]], dtype=int32)>\n\n Args:\n input: A `Tensor`. The input to reverse.\n seq_lengths: A `Tensor`. Must be one of the following types: `int32`,\n `int64`. 1-D with length `input.dims(batch_axis)` and `max(seq_lengths) <=\n input.dims(seq_axis)`\n seq_axis: An `int`. The dimension which is partially reversed.\n batch_axis: An optional `int`. Defaults to `0`. The dimension along which\n reversal is performed.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor. 
Has the same type as input.\n \"\"\"\n seq_axis = deprecation.deprecated_argument_lookup(\"seq_axis\", seq_axis,\n \"seq_dim\", seq_dim)\n batch_axis = deprecation.deprecated_argument_lookup(\"batch_axis\", batch_axis,\n \"batch_dim\", batch_dim)\n return gen_array_ops.reverse_sequence(\n input=input,\n seq_lengths=seq_lengths,\n seq_dim=seq_axis,\n batch_dim=batch_axis,\n name=name)\n\n\n@tf_export(\"reverse_sequence\", v1=[])\[email protected]_dispatch_support\ndef reverse_sequence_v2(input,\n seq_lengths,\n seq_axis=None,\n batch_axis=None,\n name=None):\n \"\"\"Reverses variable length slices.\n\n This op first slices `input` along the dimension `batch_axis`, and for\n each slice `i`, reverses the first `seq_lengths[i]` elements along the\n dimension `seq_axis`.\n\n The elements of `seq_lengths` must obey `seq_lengths[i] <=\n input.dims[seq_axis]`, and `seq_lengths` must be a vector of length\n `input.dims[batch_axis]`.\n\n The output slice `i` along dimension `batch_axis` is then given by\n input slice `i`, with the first `seq_lengths[i]` slices along\n dimension `seq_axis` reversed.\n\n Example usage:\n\n >>> seq_lengths = [7, 2, 3, 5]\n >>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0],\n ... [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]]\n >>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0)\n >>> output\n <tf.Tensor: shape=(4, 8), dtype=int32, numpy=\n array([[0, 0, 5, 4, 3, 2, 1, 0],\n [2, 1, 0, 0, 0, 0, 0, 0],\n [3, 2, 1, 4, 0, 0, 0, 0],\n [5, 4, 3, 2, 1, 6, 7, 8]], dtype=int32)>\n\n Args:\n input: A `Tensor`. The input to reverse.\n seq_lengths: A `Tensor`. Must be one of the following types: `int32`,\n `int64`. 1-D with length `input.dims(batch_axis)` and `max(seq_lengths) <=\n input.dims(seq_axis)`\n seq_axis: An `int`. The dimension which is partially reversed.\n batch_axis: An optional `int`. Defaults to `0`. The dimension along which\n reversal is performed.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor. Has the same type as input.\n \"\"\"\n return gen_array_ops.reverse_sequence(\n input=input,\n seq_lengths=seq_lengths,\n seq_dim=seq_axis,\n batch_dim=batch_axis,\n name=name)\n\n# pylint: enable=redefined-builtin\n\n\n@tf_export(v1=[\"gather\"])\[email protected]_dispatch_support\[email protected]_args(None,\n (\"The `validate_indices` argument has no effect. \"\n \"Indices are always validated on CPU and never \"\n \"validated on GPU.\"),\n (\"validate_indices\", None))\ndef gather(params,\n indices,\n validate_indices=None,\n name=None,\n axis=None,\n batch_dims=0): # pylint: disable=g-doc-args\n r\"\"\"Gather slices from params axis `axis` according to indices.\n\n Gather slices from `params` axis `axis` according to `indices`. 
`indices`\n must be an integer tensor of any dimension (often 1-D).\n\n `Tensor.__getitem__` works for scalars, `tf.newaxis`, and\n [python slices](https://numpy.org/doc/stable/reference/arrays.indexing.html#basic-slicing-and-indexing)\n\n `tf.gather` extends indexing to handle tensors of indices.\n\n In the simplest case it's identical to scalar indexing:\n\n >>> params = tf.constant(['p0', 'p1', 'p2', 'p3', 'p4', 'p5'])\n >>> params[3].numpy()\n b'p3'\n >>> tf.gather(params, 3).numpy()\n b'p3'\n\n The most common case is to pass a single axis tensor of indices (this\n can't be expressed as a python slice because the indices are not sequential):\n\n >>> indices = [2, 0, 2, 5]\n >>> tf.gather(params, indices).numpy()\n array([b'p2', b'p0', b'p2', b'p5'], dtype=object)\n\n <div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\n <img style=\"width:100%\" src=\"https://www.tensorflow.org/images/Gather.png\"\n alt>\n </div>\n\n The indices can have any shape. When the `params` has 1 axis, the\n output shape is equal to the input shape:\n\n >>> tf.gather(params, [[2, 0], [2, 5]]).numpy()\n array([[b'p2', b'p0'],\n [b'p2', b'p5']], dtype=object)\n\n The `params` may also have any shape. `gather` can select slices\n across any axis depending on the `axis` argument (which defaults to 0).\n Below it is used to gather first rows, then columns from a matrix:\n\n >>> params = tf.constant([[0, 1.0, 2.0],\n ... [10.0, 11.0, 12.0],\n ... [20.0, 21.0, 22.0],\n ... [30.0, 31.0, 32.0]])\n >>> tf.gather(params, indices=[3,1]).numpy()\n array([[30., 31., 32.],\n [10., 11., 12.]], dtype=float32)\n >>> tf.gather(params, indices=[2,1], axis=1).numpy()\n array([[ 2., 1.],\n [12., 11.],\n [22., 21.],\n [32., 31.]], dtype=float32)\n\n More generally: The output shape has the same shape as the input, with the\n indexed-axis replaced by the shape of the indices.\n\n >>> def result_shape(p_shape, i_shape, axis=0):\n ... return p_shape[:axis] + i_shape + p_shape[axis+1:]\n >>>\n >>> result_shape([1, 2, 3], [], axis=1)\n [1, 3]\n >>> result_shape([1, 2, 3], [7], axis=1)\n [1, 7, 3]\n >>> result_shape([1, 2, 3], [7, 5], axis=1)\n [1, 7, 5, 3]\n\n Here are some examples:\n\n >>> params.shape.as_list()\n [4, 3]\n >>> indices = tf.constant([[0, 2]])\n >>> tf.gather(params, indices=indices, axis=0).shape.as_list()\n [1, 2, 3]\n >>> tf.gather(params, indices=indices, axis=1).shape.as_list()\n [4, 1, 2]\n\n >>> params = tf.random.normal(shape=(5, 6, 7, 8))\n >>> indices = tf.random.uniform(shape=(10, 11), maxval=7, dtype=tf.int32)\n >>> result = tf.gather(params, indices, axis=2)\n >>> result.shape.as_list()\n [5, 6, 10, 11, 8]\n\n This is because each index takes a slice from `params`, and\n places it at the corresponding location in the output. For the above example\n\n >>> # For any location in indices\n >>> a, b = 0, 1\n >>> tf.reduce_all(\n ... # the corresponding slice of the result\n ... result[:, :, a, b, :] ==\n ... # is equal to the slice of `params` along `axis` at the index.\n ... params[:, :, indices[a, b], :]\n ... ).numpy()\n True\n\n ### Batching:\n\n The `batch_dims` argument lets you gather different items from each element\n of a batch.\n\n Using `batch_dims=1` is equivalent to having an outer loop over the first\n axis of `params` and `indices`:\n\n >>> params = tf.constant([\n ... [0, 0, 1, 0, 2],\n ... [3, 0, 0, 0, 4],\n ... [0, 5, 0, 6, 0]])\n >>> indices = tf.constant([\n ... [2, 4],\n ... [0, 4],\n ... 
[1, 3]])\n\n >>> tf.gather(params, indices, axis=1, batch_dims=1).numpy()\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)\n\n This is is equivalent to:\n\n >>> def manually_batched_gather(params, indices, axis):\n ... batch_dims=1\n ... result = []\n ... for p,i in zip(params, indices):\n ... r = tf.gather(p, i, axis=axis-batch_dims)\n ... result.append(r)\n ... return tf.stack(result)\n >>> manually_batched_gather(params, indices, axis=1).numpy()\n array([[1, 2],\n [3, 4],\n [5, 6]], dtype=int32)\n\n Higher values of `batch_dims` are equivalent to multiple nested loops over\n the outer axes of `params` and `indices`. So the overall shape function is\n\n >>> def batched_result_shape(p_shape, i_shape, axis=0, batch_dims=0):\n ... return p_shape[:axis] + i_shape[batch_dims:] + p_shape[axis+1:]\n >>>\n >>> batched_result_shape(\n ... p_shape=params.shape.as_list(),\n ... i_shape=indices.shape.as_list(),\n ... axis=1,\n ... batch_dims=1)\n [3, 2]\n\n >>> tf.gather(params, indices, axis=1, batch_dims=1).shape.as_list()\n [3, 2]\n\n This comes up naturally if you need to use the indices of an operation like\n `tf.argsort`, or `tf.math.top_k` where the last dimension of the indices\n indexes into the last dimension of input, at the corresponding location.\n In this case you can use `tf.gather(values, indices, batch_dims=-1)`.\n\n See also:\n\n * `tf.Tensor.__getitem__`: The direct tensor index operation (`t[]`), handles\n scalars and python-slices `tensor[..., 7, 1:-1]`\n * `tf.scatter`: A collection of operations similar to `__setitem__`\n (`t[i] = x`)\n * `tf.gather_nd`: An operation similar to `tf.gather` but gathers across\n multiple axis at once (it can gather elements of a matrix instead of rows\n or columns)\n * `tf.boolean_mask`, `tf.where`: Binary indexing.\n * `tf.slice` and `tf.strided_slice`: For lower level access to the\n implementation of `__getitem__`'s python-slice handling (`t[1:-1:2]`)\n\n Args:\n params: The `Tensor` from which to gather values. Must be at least rank\n `axis + 1`.\n indices: The index `Tensor`. Must be one of the following types: `int32`,\n `int64`. The values must be in range `[0, params.shape[axis])`.\n validate_indices: Deprecated, does nothing. Indices are always validated on\n CPU, never validated on GPU.\n\n Caution: On CPU, if an out of bound index is found, an error is raised.\n On GPU, if an out of bound index is found, a 0 is stored in the\n corresponding output value.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The\n `axis` in `params` to gather `indices` from. Must be greater than or equal\n to `batch_dims`. Defaults to the first non-batch dimension. Supports\n negative indexes.\n batch_dims: An `integer`. The number of batch dimensions. Must be less\n than or equal to `rank(indices)`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `params`.\n \"\"\"\n del validate_indices\n\n if axis is None:\n axis = batch_dims\n if tensor_util.constant_value(axis) != 0:\n return gen_array_ops.gather_v2(\n params, indices, axis, batch_dims=batch_dims, name=name)\n try:\n # TODO(apassos) find a less bad way of detecting resource variables\n # without introducing a circular dependency.\n return params.sparse_read(indices, name=name)\n except AttributeError:\n return gen_array_ops.gather_v2(params, indices, axis, name=name)\n\n\n@tf_export(\"gather\", v1=[])\[email protected]_dispatch_support\ndef gather_v2(params,\n indices,\n validate_indices=None,\n axis=None,\n batch_dims=0,\n name=None):\n return gather(\n params,\n indices,\n validate_indices=validate_indices,\n name=name,\n axis=axis,\n batch_dims=batch_dims)\n\n\ngather_v2.__doc__ = gather.__doc__\n\n\n@tf_export(v1=[\"batch_gather\"])\[email protected]_dispatch_support\[email protected](\n \"2017-10-25\", \"`tf.batch_gather` is deprecated, please use `tf.gather` \"\n \"with `batch_dims=-1` instead.\") # pylint: disable=missing-docstring\ndef batch_gather(params, indices, name=None):\n \"\"\"Gather slices from params according to indices with leading batch dims.\"\"\"\n with ops.name_scope(name, \"BatchGather\", [params, indices]):\n indices = ops.convert_to_tensor(indices, name=\"indices\")\n params = ops.convert_to_tensor(params, name=\"params\")\n if indices.shape.ndims is None:\n raise ValueError(\n \"batch_gather does not allow indices with unknown shape.\")\n return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)\n\n\ndef _batch_gather(params, indices, batch_dims, axis=None):\n r\"\"\"Gather slices from params according to indices with leading batch dims.\n\n This operation assumes that the leading `batch_dims` dimensions of `indices`\n and `params` are batch dimensions; and performs a `tf.gather` operation within\n each batch. (If `batch_dims` is not specified, then it defaults to\n `rank(indices)-1`.) In the case in which `batch_dims==0`, this operation\n is equivalent to `tf.gather`.\n\n Args:\n params: A Tensor. The tensor from which to gather values.\n indices: A Tensor. Must be one of the following types: int32, int64. Index\n tensor. Must be in range `[0, params.shape[batch_dims]]`.\n batch_dims: An integer or none. The number of batch dimensions. Must be\n less than `rank(indices)`. Defaults to `rank(indices) - 1` if None.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The\n `axis` in `params` to gather `indices` from. Must be greater than or equal\n to `batch_dims`. Defaults to the first non-batch dimension. Supports\n negative indexes.\n\n Returns:\n A Tensor. 
Has the same type as `params`.\n\n Raises:\n ValueError: if `indices` has an unknown shape.\n \"\"\"\n if batch_dims is not None and not isinstance(batch_dims, int):\n raise TypeError(\"batch_dims must be an int; got %r\" % (batch_dims,))\n indices = ops.convert_to_tensor(indices, name=\"indices\")\n params = ops.convert_to_tensor(params, name=\"params\")\n\n indices_ndims = indices.shape.ndims\n if indices_ndims is None:\n raise ValueError(\"tf.gather does not allow indices with unknown \"\n \"rank when batch_dims is specified.\")\n if batch_dims is None:\n batch_dims = indices_ndims - 1\n if batch_dims < 0:\n batch_dims += indices_ndims\n if batch_dims < 0 or batch_dims >= indices_ndims:\n raise ValueError(\"batch_dims = %d must be less than rank(indices) = %d\" %\n (batch_dims, indices_ndims))\n if params.shape.ndims is not None and batch_dims >= params.shape.ndims:\n raise ValueError(\"batch_dims = %d must be less than rank(params) = %d\" %\n (batch_dims, params.shape.ndims))\n\n # Handle axis by transposing the axis dimension to be the first non-batch\n # dimension, recursively calling batch_gather with axis=0, and then\n # transposing the result to put the pre-axis dimensions before the indices\n # dimensions.\n if axis is not None and axis != batch_dims:\n # Adjust axis to be positive.\n if not isinstance(axis, int):\n axis = tf.where(axis < 0, axis + array_ops.rank(params), axis)\n elif axis < 0 and params.shape.ndims is None:\n axis = axis + array_ops.rank(params)\n else:\n if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):\n raise ValueError(\"axis (%d) out of range [%d, %d)\" %\n (axis, -params.shape.ndims, params.shape.ndims))\n if axis < 0:\n axis += params.shape.ndims\n if axis < batch_dims:\n raise ValueError(\"batch_dims = %d must be less than or equal to \"\n \"axis = %d\" % (batch_dims, axis))\n\n # Move params[axis] up to params[batch_dims].\n perm = [\n list(range(batch_dims)), [axis],\n gen_math_ops._range(batch_dims, axis, 1),\n gen_math_ops._range(axis + 1, rank(params), 1)\n ]\n params = transpose(params, concat(perm, axis=0))\n\n result = _batch_gather(params, indices, batch_dims=batch_dims)\n\n # Move the result dimensions corresponding to params[batch_dims:axis]\n # to just before the dimensions corresponding to indices[batch_dims:].\n params_start = indices_ndims + axis - batch_dims\n perm = [\n list(range(batch_dims)),\n gen_math_ops._range(indices_ndims, params_start, 1),\n list(range(batch_dims, indices_ndims)),\n gen_math_ops._range(params_start, rank(result), 1)\n ]\n return transpose(result, perm=concat(perm, axis=0))\n\n indices_shape = shape(indices)\n params_shape = shape(params)\n batch_indices = indices\n indices_dtype = indices.dtype.base_dtype\n accum_dim_value = ones((), dtype=indices_dtype)\n # Use correct type for offset index computation\n casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)\n for dim in range(batch_dims, 0, -1):\n dim_value = casted_params_shape[dim - 1]\n accum_dim_value *= casted_params_shape[dim]\n start = zeros((), dtype=indices_dtype)\n step = ones((), dtype=indices_dtype)\n dim_indices = gen_math_ops._range(start, dim_value, step)\n dim_indices *= accum_dim_value\n dim_shape = stack(\n [1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)\n batch_indices += reshape(dim_indices, dim_shape)\n\n flat_indices = reshape(batch_indices, [-1])\n outer_shape = params_shape[batch_dims + 1:]\n flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],\n False)\n\n 
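  # At this point `batch_indices` equals `indices` plus a per-batch offset, so
  # each entry indexes the first `batch_dims + 1` axes of `params` flattened
  # into a single axis of `flat_inner_shape` elements. The code below flattens
  # `params` that way, gathers with the offset indices, and reshapes the
  # result back to indices_shape + outer_shape.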
flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],\n axis=0))\n flat_result = gather(flat_params, flat_indices)\n result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))\n final_shape = indices.get_shape()[:batch_dims].merge_with(\n params.get_shape()[:batch_dims])\n final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])\n final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])\n result.set_shape(final_shape)\n return result\n\n\n@tf_export(v1=[\"gather_nd\", \"manip.gather_nd\"])\[email protected]_dispatch_support\n@deprecated_endpoints(\"manip.gather_nd\")\ndef gather_nd(params, indices, name=None, batch_dims=0):\n r\"\"\"Gather slices from `params` into a Tensor with shape specified by `indices`.\n\n `indices` is an K-dimensional integer tensor, best thought of as a\n (K-1)-dimensional tensor of indices into `params`, where each element defines\n a slice of `params`:\n\n output[\\\\(i_0, ..., i_{K-2}\\\\)] = params[indices[\\\\(i_0, ..., i_{K-2}\\\\)]]\n\n Whereas in `tf.gather` `indices` defines slices into the first\n dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the\n first `N` dimensions of `params`, where `N = indices.shape[-1]`.\n\n The last dimension of `indices` can be at most the rank of\n `params`:\n\n indices.shape[-1] <= params.rank\n\n The last dimension of `indices` corresponds to elements\n (if `indices.shape[-1] == params.rank`) or slices\n (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`\n of `params`. The output tensor has shape\n\n indices.shape[:-1] + params.shape[indices.shape[-1]:]\n\n Additionally both 'params' and 'indices' can have M leading batch\n dimensions that exactly match. In this case 'batch_dims' must be M.\n\n Note that on CPU, if an out of bound index is found, an error is returned.\n On GPU, if an out of bound index is found, a 0 is stored in the\n corresponding output value.\n\n Some examples below.\n\n Simple indexing into a matrix:\n\n ```python\n indices = [[0, 0], [1, 1]]\n params = [['a', 'b'], ['c', 'd']]\n output = ['a', 'd']\n ```\n\n Slice indexing into a matrix:\n\n ```python\n indices = [[1], [0]]\n params = [['a', 'b'], ['c', 'd']]\n output = [['c', 'd'], ['a', 'b']]\n ```\n\n Indexing into a 3-tensor:\n\n ```python\n indices = [[1]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[['a1', 'b1'], ['c1', 'd1']]]\n\n\n indices = [[0, 1], [1, 0]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['c0', 'd0'], ['a1', 'b1']]\n\n\n indices = [[0, 0, 1], [1, 0, 1]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = ['b0', 'b1']\n ```\n\n The examples below are for the case when only indices have leading extra\n dimensions. 
If both 'params' and 'indices' have leading batch dimensions, use\n the 'batch_dims' parameter to run gather_nd in batch mode.\n\n Batched indexing into a matrix:\n\n ```python\n indices = [[[0, 0]], [[0, 1]]]\n params = [['a', 'b'], ['c', 'd']]\n output = [['a'], ['b']]\n ```\n\n Batched slice indexing into a matrix:\n\n ```python\n indices = [[[1]], [[0]]]\n params = [['a', 'b'], ['c', 'd']]\n output = [[['c', 'd']], [['a', 'b']]]\n ```\n\n Batched indexing into a 3-tensor:\n\n ```python\n indices = [[[1]], [[0]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[[['a1', 'b1'], ['c1', 'd1']]],\n [[['a0', 'b0'], ['c0', 'd0']]]]\n\n indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[['c0', 'd0'], ['a1', 'b1']],\n [['a0', 'b0'], ['c1', 'd1']]]\n\n\n indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['b0', 'b1'], ['d0', 'c1']]\n ```\n\n Examples with batched 'params' and 'indices':\n\n ```python\n batch_dims = 1\n indices = [[1], [0]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['c0', 'd0'], ['a1', 'b1']]\n\n batch_dims = 1\n indices = [[[1]], [[0]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[['c0', 'd0']], [['a1', 'b1']]]\n\n batch_dims = 1\n indices = [[[1, 0]], [[0, 1]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['c0'], ['b1']]\n ```\n\n See also `tf.gather`.\n\n Args:\n params: A `Tensor`. The tensor from which to gather values.\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n name: A name for the operation (optional).\n batch_dims: An integer or a scalar 'Tensor'. The number of batch dimensions.\n\n Returns:\n A `Tensor`. 
Has the same type as `params`.\n \"\"\"\n batch_dims_ = tensor_util.constant_value(batch_dims)\n if batch_dims_ is not None:\n batch_dims = int(batch_dims_)\n if batch_dims == 0:\n try:\n # TODO(apassos) find a less bad way of detecting resource variables\n # without introducing a circular dependency.\n return params.gather_nd(indices, name=name)\n except AttributeError:\n return gen_array_ops.gather_nd(params, indices, name=name)\n else:\n return batch_gather_nd(params, indices, batch_dims=batch_dims, name=name)\n\n\n@tf_export(\"gather_nd\", v1=[])\[email protected]_dispatch_support\ndef gather_nd_v2(params, indices, batch_dims=0, name=None):\n return gather_nd(params, indices, name=name, batch_dims=batch_dims)\n\n\ngather_nd_v2.__doc__ = gather_nd.__doc__\n\n\ndef batch_gather_nd(params, indices, batch_dims, name=None):\n \"\"\"gather_nd implementation with batch support.\"\"\"\n with ops.name_scope(name, \"BatchGatherND\", [params, indices]):\n indices = ops.convert_to_tensor(indices, name=\"indices\")\n params = ops.convert_to_tensor(params, name=\"params\")\n\n if not isinstance(batch_dims, int):\n raise TypeError(\"batch_dims must be an int; got %r\" % (batch_dims,))\n if batch_dims < 0:\n raise ValueError(\"tf.gather_nd does not allow negative batch_dims.\")\n params_ndims = params.shape.ndims\n indices_ndims = indices.shape.ndims\n if indices_ndims is not None and batch_dims >= indices_ndims:\n raise ValueError(\"batch_dims = %d must be less than rank(indices) = %d\" %\n (batch_dims, indices_ndims))\n if params_ndims is not None and batch_dims >= params_ndims:\n raise ValueError(\"batch_dims = %d must be less than rank(params) = %d\" %\n (batch_dims, params_ndims))\n\n expand = batch_dims == 0\n if expand:\n # Normally gather_nd will be called when batch_dims == 0.\n # But if this function is called with batch_dims = 0, e.g. for testing\n # purposes, this adds a dummy batch dimension to make batch_dims = 1.\n params = expand_dims(params, axis=0)\n indices = expand_dims(indices, axis=0)\n batch_dims = 1\n\n params_shape = shape(params)\n indices_shape = shape(indices)\n batch_shape = params_shape[:batch_dims]\n batch_size = gen_math_ops.prod(batch_shape, [0])\n index_internal_ndims = rank(indices) - batch_dims - 1\n indices_internal_shape = indices_shape[batch_dims:-1]\n\n # Assuming a 'params' with shape [b1, ..., bM, g1, ..., gN] and an 'indices'\n # with shape [b1, ..., bM, i1, ..., iK, C], where C <= N, we need to modify\n # 'indices' s.t. 
it has shape [i1, ..., iK, D], where D <= M + N and slices\n # to the entire 'params' tensor.\n # Assuming we have a batch of shape [B1, B2], we use meshgrid to create a\n # grid of size B1 x B2.\n batch_dim_list = unstack(batch_shape, axis=0)\n dim_ranges = [\n gen_math_ops.cast(gen_math_ops._range(0, x, 1), indices.dtype)\n for x in batch_dim_list\n ]\n mesh_list = meshgrid(*dim_ranges, indexing=\"ij\") if dim_ranges else []\n # Then we flatten and stack the tensors to form a (B1.B2) by 2 matrix.\n flat_list = [reshape(x, shape=(-1,)) for x in mesh_list]\n index_grid = transpose(stack(flat_list, axis=0))\n # We need to concatenate these batch coordinates with the internal indices.\n # concat -> index_grid [B1.B2, 2] with indices [i1, ..., iK, C]\n # So we reshape them both to [(B1.B2), i1, ..., iK, *]\n index_grid_shape = shape(index_grid)\n index_grid = reshape(\n index_grid,\n concat([\n index_grid_shape[:1],\n ones(index_internal_ndims, dtype=dtypes.int32), index_grid_shape[1:]\n ],\n axis=0))\n tile_shape = concat(((1,), indices_internal_shape, (1,)), axis=0)\n index_grid = tile(index_grid, multiples=tile_shape)\n # index_grid now has shape [(B1.B2), i1, ..., iK, 2]\n flat_shape = concat(([batch_size], indices_shape[batch_dims:]), axis=0)\n flat_indices = reshape(indices, shape=flat_shape)\n # flat_indices now has shape [(B1.B2), i1, ..., iK, C]\n indices = concat((index_grid, flat_indices), axis=-1)\n # indices has shape [(B1.B2), i1, ..., iK, 2+C]\n out = gen_array_ops.gather_nd(params, indices)\n # out has shape [(B1.B2), i1, ..., iK, N-C]. Now we reshape batch to\n # its original form.\n out_shape = shape(out)\n out = reshape(out, shape=concat((batch_shape, out_shape[1:]), axis=0))\n if expand:\n out = squeeze(out, axis=0)\n return out\n\n\[email protected]_endpoints(\"tensor_scatter_update\")\n@tf_export(\n \"tensor_scatter_nd_update\",\n v1=[\"tensor_scatter_nd_update\", \"tensor_scatter_update\"])\[email protected]_dispatch_support\ndef tensor_scatter_nd_update(tensor, indices, updates, name=None):\n \"\"\"\"Scatter `updates` into an existing tensor according to `indices`.\n\n This operation creates a new tensor by applying sparse `updates` to the\n input `tensor`. This is similar to an index assignment.\n\n ```\n # Not implemented: tensors cannot be updated inplace.\n tensor[indices] = updates\n ```\n\n If an out of bound index is found on CPU, an error is returned.\n\n > **WARNING**: There are some GPU specific semantics for this operation.\n >\n > - If an out of bound index is found, the index is ignored.\n > - The order in which updates are applied is nondeterministic, so the output\n > will be nondeterministic if `indices` contains duplicates.\n\n This operation is very similar to `tf.scatter_nd`, except that the updates are\n scattered onto an existing tensor (as opposed to a zero-tensor). 
If the memory\n for the existing tensor cannot be re-used, a copy is made and updated.\n\n In general:\n\n * `indices` is an integer tensor - the indices to update in `tensor`.\n * `indices` has **at least two** axes, the last axis is the depth of the\n index vectors.\n * For each index vector in `indices` there is a corresponding entry in\n `updates`.\n * If the length of the index vectors matches the rank of the `tensor`, then\n the index vectors each point to scalars in `tensor` and each update is a\n scalar.\n * If the length of the index vectors is less than the rank of `tensor`, then\n the index vectors each point to slices of `tensor` and shape of the updates\n must match that slice.\n\n Overall this leads to the following shape constraints:\n\n ```\n assert tf.rank(indices) >= 2\n index_depth = indices.shape[-1]\n batch_shape = indices.shape[:-1]\n assert index_depth <= tf.rank(tensor)\n outer_shape = tensor.shape[:index_depth]\n inner_shape = tensor.shape[index_depth:]\n assert updates.shape == batch_shape + inner_shape\n ```\n\n Typical usage is often much simpler than this general form, and it\n can be better understood starting with simple examples:\n\n ### Scalar updates\n\n The simplest usage inserts scalar elements into a tensor by index.\n In this case, the `index_depth` must equal the rank of the\n input `tensor`, slice each column of `indices` is an index into an axis of the\n input `tensor`.\n\n In this simplest case the shape constraints are:\n\n ```\n num_updates, index_depth = indices.shape.as_list()\n assert updates.shape == [num_updates]\n assert index_depth == tf.rank(tensor)`\n ```\n\n For example, to insert 4 scattered elements in a rank-1 tensor with\n 8 elements.\n\n <div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\n <img style=\"width:100%\"\n src=\"https://www.tensorflow.org/images/ScatterNd1.png\">\n </div>\n\n This scatter operation would look like this:\n\n >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0] # tf.rank(tensor) == 1\n >>> indices = [[1], [3], [4], [7]] # num_updates == 4, index_depth == 1\n >>> updates = [9, 10, 11, 12] # num_updates == 4\n >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates))\n tf.Tensor([ 0 9 0 10 11 0 0 12], shape=(8,), dtype=int32)\n\n The length (first axis) of `updates` must equal the length of the `indices`:\n `num_updates`. This is the number of updates being inserted. Each scalar\n update is inserted into `tensor` at the indexed location.\n\n For a higher rank input `tensor` scalar updates can be inserted by using an\n `index_depth` that matches `tf.rank(tensor)`:\n\n >>> tensor = [[1, 1], [1, 1], [1, 1]] # tf.rank(tensor) == 2\n >>> indices = [[0, 1], [2, 0]] # num_updates == 2, index_depth == 2\n >>> updates = [5, 10] # num_updates == 2\n >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates))\n tf.Tensor(\n [[ 1 5]\n [ 1 1]\n [10 1]], shape=(3, 2), dtype=int32)\n\n ### Slice updates\n\n When the input `tensor` has more than one axis scatter can be used to update\n entire slices.\n\n In this case it's helpful to think of the input `tensor` as being a two level\n array-of-arrays. The shape of this two level array is split into the\n `outer_shape` and the `inner_shape`.\n\n `indices` indexes into the outer level of the input tensor (`outer_shape`).\n and replaces the sub-array at that location with the corresponding item from\n the `updates` list. 
The shape of each update is `inner_shape`.\n\n When updating a list of slices the shape constraints are:\n\n ```\n num_updates, index_depth = indices.shape.as_list()\n inner_shape = tensor.shape[:index_depth]\n outer_shape = tensor.shape[index_depth:]\n assert updates.shape == [num_updates, inner_shape]\n ```\n\n For example, to update rows of a `(6, 3)` `tensor`:\n\n >>> tensor = tf.zeros([6, 3], dtype=tf.int32)\n\n Use an index depth of one.\n\n >>> indices = tf.constant([[2], [4]]) # num_updates == 2, index_depth == 1\n >>> num_updates, index_depth = indices.shape.as_list()\n\n The `outer_shape` is `6`, the inner shape is `3`:\n\n >>> outer_shape = tensor.shape[:index_depth]\n >>> inner_shape = tensor.shape[index_depth:]\n\n 2 rows are being indexed so 2 `updates` must be supplied.\n Each update must be shaped to match the `inner_shape`.\n\n >>> # num_updates == 2, inner_shape==3\n >>> updates = tf.constant([[1, 2, 3],\n ... [4, 5, 6]])\n\n Altogether this gives:\n\n >>> tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()\n array([[0, 0, 0],\n [0, 0, 0],\n [1, 2, 3],\n [0, 0, 0],\n [4, 5, 6],\n [0, 0, 0]], dtype=int32)\n\n #### More slice update examples\n\n A tensor representing a batch of uniformly sized video clips naturally has 5\n axes: `[batch_size, time, width, height, channels]`.\n\n For example:\n\n >>> batch_size, time, width, height, channels = 13,11,7,5,3\n >>> video_batch = tf.zeros([batch_size, time, width, height, channels])\n\n To replace a selection of video clips:\n * Use an `index_depth` of 1 (indexing the `outer_shape`: `[batch_size]`)\n * Provide updates each with a shape matching the `inner_shape`:\n `[time, width, height, channels]`.\n\n To replace the first two clips with ones:\n\n >>> indices = [[0],[1]]\n >>> new_clips = tf.ones([2, time, width, height, channels])\n >>> tf.tensor_scatter_nd_update(video_batch, indices, new_clips)\n\n To replace a selection of frames in the videos:\n\n * `indices` must have an `index_depth` of 2 for the `outer_shape`:\n `[batch_size, time]`.\n * `updates` must be shaped like a list of images. Each update must have a\n shape, matching the `inner_shape`: `[width, height, channels]`.\n\n To replace the first frame of the first three video clips:\n\n >>> indices = [[0, 0], [1, 0], [2, 0]] # num_updates=3, index_depth=2\n >>> new_images = tf.ones([\n ... # num_updates=3, inner_shape=(width, height, channels)\n ... 3, width, height, channels])\n >>> tf.tensor_scatter_nd_update(video_batch, indices, new_images)\n\n ### Folded indices\n\n In simple cases it's convenient to think of `indices` and `updates` as\n lists, but this is not a strict requirement. Instead of a flat `num_updates`,\n the `indices` and `updates` can be folded into a `batch_shape`. This\n `batch_shape` is all axes of the `indices`, except for the innermost\n `index_depth` axis.\n\n ```\n index_depth = indices.shape[-1]\n batch_shape = indices.shape[:-1]\n ```\n\n Note: The one exception is that the `batch_shape` cannot be `[]`. You can't\n update a single index by passing indices with shape `[index_depth]`.\n\n `updates` must have a matching `batch_shape` (the axes before `inner_shape`).\n\n ```\n assert updates.shape == batch_shape + inner_shape\n ```\n\n Note: The result is equivalent to flattening the `batch_shape` axes of\n `indices` and `updates`. 
This generalization just avoids the need\n for reshapes when it is more natural to construct \"folded\" indices and\n updates.\n\n With this generalization the full shape constraints are:\n\n ```\n assert tf.rank(indices) >= 2\n index_depth = indices.shape[-1]\n batch_shape = indices.shape[:-1]\n assert index_depth <= tf.rank(tensor)\n outer_shape = tensor.shape[:index_depth]\n inner_shape = tensor.shape[index_depth:]\n assert updates.shape == batch_shape + inner_shape\n ```\n\n For example, to draw an `X` on a `(5,5)` matrix start with these indices:\n\n >>> tensor = tf.zeros([5,5])\n >>> indices = tf.constant([\n ... [[0,0],\n ... [1,1],\n ... [2,2],\n ... [3,3],\n ... [4,4]],\n ... [[0,4],\n ... [1,3],\n ... [2,2],\n ... [3,1],\n ... [4,0]],\n ... ])\n >>> indices.shape.as_list() # batch_shape == [2, 5], index_depth == 2\n [2, 5, 2]\n\n Here the `indices` do not have a shape of `[num_updates, index_depth]`, but a\n shape of `batch_shape+[index_depth]`.\n\n Since the `index_depth` is equal to the rank of `tensor`:\n\n * `outer_shape` is `(5,5)`\n * `inner_shape` is `()` - each update is scalar\n * `updates.shape` is `batch_shape + inner_shape == (5,2) + ()`\n\n >>> updates = [\n ... [1,1,1,1,1],\n ... [1,1,1,1,1],\n ... ]\n\n Putting this together gives:\n\n >>> tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()\n array([[1., 0., 0., 0., 1.],\n [0., 1., 0., 1., 0.],\n [0., 0., 1., 0., 0.],\n [0., 1., 0., 1., 0.],\n [1., 0., 0., 0., 1.]], dtype=float32)\n\n Args:\n tensor: Tensor to copy/update.\n indices: Indices to update.\n updates: Updates to apply at the indices.\n name: Optional name for the operation.\n\n Returns:\n A new tensor with the given shape and updates applied according to the\n indices.\n \"\"\"\n return gen_array_ops.tensor_scatter_update(\n tensor=tensor, indices=indices, updates=updates, name=name)\n\n\n# Define quantize_v2 here in order to make name the second-to-last attribute,\n# because round_mode was added later.\n# (And also now because of 'axis' processing).\n@tf_export(v1=[\"quantize_v2\"])\[email protected]_dispatch_support\[email protected](\n \"2017-10-25\",\n \"`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` \"\n \"instead.\") # pylint: disable=missing-docstring\ndef quantize_v2(\n input, # pylint: disable=redefined-builtin\n min_range,\n max_range,\n T,\n mode=\"MIN_COMBINED\",\n name=None,\n round_mode=\"HALF_AWAY_FROM_ZERO\",\n narrow_range=False,\n axis=None,\n ensure_minimum_range=0.01):\n if axis is None:\n axis = -1\n elif axis < 0:\n if input.shape.ndims is None:\n raise ValueError(\"input should have known rank to use negative axis.\")\n axis %= input.shape.ndims\n\n if ensure_minimum_range != 0.01:\n return gen_array_ops.quantize_v2(\n input,\n min_range,\n max_range,\n T=T,\n mode=mode,\n name=name,\n round_mode=round_mode,\n narrow_range=narrow_range,\n axis=axis,\n ensure_minimum_range=ensure_minimum_range)\n return gen_array_ops.quantize_v2(\n input,\n min_range,\n max_range,\n T=T,\n mode=mode,\n name=name,\n round_mode=round_mode,\n narrow_range=narrow_range,\n axis=axis)\n\n\nquantize_v2.__doc__ = \"\"\"Please use `tf.quantization.quantize` instead.\"\"\"\n\n\n# We want to expose tf.quantization.quantize instead of\n# tf.quantization.quantize; we can deprecate tf.quantization.quantize in next\n# version of TensorFlow.\n@tf_export(\"quantization.quantize\", v1=[\"quantization.quantize\", \"quantize\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"quantize\")\ndef quantize(\n input, # 
pylint: disable=redefined-builtin\n min_range,\n max_range,\n T,\n mode=\"MIN_COMBINED\",\n round_mode=\"HALF_AWAY_FROM_ZERO\",\n name=None,\n narrow_range=False,\n axis=None,\n ensure_minimum_range=0.01):\n \"\"\"Quantize the input tensor.\"\"\"\n if ensure_minimum_range != 0.01:\n return quantize_v2(\n input,\n min_range,\n max_range,\n T,\n mode=mode,\n round_mode=round_mode,\n name=name,\n narrow_range=narrow_range,\n axis=axis,\n ensure_minimum_range=ensure_minimum_range)\n return quantize_v2(\n input,\n min_range,\n max_range,\n T,\n mode=mode,\n round_mode=round_mode,\n name=name,\n narrow_range=narrow_range,\n axis=axis)\n\n\n@tf_export(\"quantization.dequantize\", v1=[\"quantization.dequantize\",\n \"dequantize\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"dequantize\")\ndef dequantize( # pylint: disable=missing-docstring\n input, # pylint: disable=redefined-builtin\n min_range,\n max_range,\n mode=\"MIN_COMBINED\",\n name=None,\n axis=None,\n narrow_range=False,\n dtype=dtypes.float32):\n if axis is None:\n axis = -1\n elif axis < 0:\n if input.shape.ndims is None:\n raise ValueError(\"input should have known rank to use negative axis.\")\n axis %= input.shape.ndims\n\n if axis >= 0 or narrow_range:\n return gen_array_ops.dequantize(\n input,\n min_range,\n max_range,\n mode=mode,\n name=name,\n narrow_range=narrow_range,\n axis=axis,\n dtype=dtype)\n return gen_array_ops.dequantize(\n input, min_range, max_range, mode=mode, name=name, dtype=dtype)\n\n\ndequantize.__doc__ = gen_array_ops.dequantize.__doc__\n\n\n@tf_export(\"quantization.quantize_and_dequantize\")\[email protected]_dispatch_support\[email protected](None,\n \"This Op has been deprecated, use\" +\n \"`quantize_and_dequantize_v2` instead. To \" +\n \"To simulate the V1 the behavior of \" +\n \"tf.quantization.quantize_and_dequantize(...) use \" +\n \"tf.grad_pass_through(\" +\n \"tf.quantization.quantize_and_dequantize_v2)(...).\")\ndef quantize_and_dequantize(\n input, # pylint: disable=redefined-builtin\n input_min,\n input_max,\n signed_input=True,\n num_bits=8,\n range_given=False,\n round_mode=\"HALF_TO_EVEN\",\n name=None,\n narrow_range=False,\n axis=None):\n \"\"\"Quantizes then dequantizes a tensor.\n\n Args:\n input: A `Tensor` to quantize and dequantize.\n input_min: If range_given=True, the minimum input value, that needs to be\n represented in the quantized representation. If axis is specified, this\n should be a vector of minimum values for each slice along axis.\n input_max: If range_given=True, the maximum input value that needs to be\n represented in the quantized representation. If axis is specified, this\n should be a vector of maximum values for each slice along axis.\n signed_input: True if the quantization is signed or unsigned.\n num_bits: The bitwidth of the quantization.\n range_given: If true use `input_min` and `input_max` for the range of the\n input, otherwise determine min and max from the input `Tensor`.\n round_mode: Rounding mode when rounding from float values to quantized ones.\n one of ['HALF_TO_EVEN', 'HALF_UP']\n name: Optional name for the operation.\n narrow_range: If true, then the absolute value of the quantized minimum\n value is the same as the quantized maximum value, instead of 1 greater.\n i.e. for 8 bit quantization, the minimum value is -127 instead of -128.\n axis: Integer. If specified, refers to a dimension of the input tensor, such\n that quantization will be per slice along that dimension.\n\n Returns:\n A `Tensor`. 
Each element is the result of quantizing and dequantizing the\n corresponding element of `input`.\n \"\"\"\n if axis is None:\n axis = -1\n elif axis < 0:\n if input.shape.ndims is None:\n raise ValueError(\"input should have known rank to use negative axis.\")\n axis %= input.shape.ndims\n\n return gen_array_ops.quantize_and_dequantize_v2(\n input,\n input_min=input_min,\n input_max=input_max,\n signed_input=signed_input,\n num_bits=num_bits,\n range_given=range_given,\n round_mode=round_mode,\n narrow_range=narrow_range,\n axis=axis,\n name=name)\n\n\n@tf_export(\"quantization.quantize_and_dequantize_v2\")\[email protected]_dispatch_support\ndef quantize_and_dequantize_v2(\n input, # pylint: disable=redefined-builtin\n input_min,\n input_max,\n signed_input=True,\n num_bits=8,\n range_given=False,\n round_mode=\"HALF_TO_EVEN\",\n name=None,\n narrow_range=False,\n axis=None):\n \"\"\"Quantizes then dequantizes a tensor.\n\n Updates the gradient definition for quantization that is outside the range to\n be 0.To simulate the V1 the behavior of\n tf.quantization.quantize_and_dequantize(...) use\n tf.grad_pass_through(tf.quantization.quantize_and_dequantize_v2)(...).\n\n Example usage:\n\n ```python\n def getQuantizeOp(input):\n input_tensor = tf.placeholder(tf.float32, shape=[4, 4])\n net = tf.quantization.quantize_and_dequantize(input,\n input_min=min_threshold,\n input_max=max_threshold,\n range_given=True)\n\n To simulate v1 behavior:\n\n def testDecomposeQuantizeDequantize(self):\n def f(input_tensor):\n return tf.quantization.quantize_and_dequantize_v2(input_tensor,\n input_min = 5.0,\n input_max= -10.0,\n range_given=True)\n input_tensor = tf.placeholder(tf.float32, shape=[4, 4])\n net = tf.grad_pass_through(f)(input_tensor)\n ```\n\n Args:\n input: A `Tensor` to quantize and dequantize.\n input_min: If range_given=True, the minimum input value, that needs to be\n represented in the quantized representation. If axis is specified, this\n should be a vector of minimum values for each slice along axis.\n input_max: If range_given=True, the maximum input value that needs to be\n represented in the quantized representation. If axis is specified, this\n should be a vector of maximum values for each slice along axis.\n signed_input: True if the quantization is signed or unsigned.\n num_bits: The bitwidth of the quantization.\n range_given: If true use `input_min` and `input_max` for the range of the\n input, otherwise determine min and max from the input `Tensor`.\n round_mode: Rounding mode when rounding from float values to quantized ones.\n one of ['HALF_TO_EVEN', 'HALF_UP']\n name: Optional name for the operation.\n narrow_range: If true, then the absolute value of the quantized minimum\n value is the same as the quantized maximum value, instead of 1 greater.\n i.e. for 8 bit quantization, the minimum value is -127 instead of -128.\n axis: Integer. If specified, refers to a dimension of the input tensor, such\n that quantization will be per slice along that dimension.\n\n Returns:\n A `Tensor`. 
Each element is the result of quantizing and dequantizing the\n corresponding element of `input`.\n \"\"\"\n if axis is None:\n axis = -1\n elif axis < 0:\n if input.shape.ndims is None:\n raise ValueError(\"input should have known rank to use negative axis.\")\n axis %= input.shape.ndims\n\n return gen_array_ops.quantize_and_dequantize_v4(\n input,\n input_min=input_min,\n input_max=input_max,\n signed_input=signed_input,\n num_bits=num_bits,\n range_given=range_given,\n round_mode=round_mode,\n narrow_range=narrow_range,\n axis=axis,\n name=name)\n\n\n@tf_export(\"searchsorted\")\[email protected]_dispatch_support\ndef searchsorted(sorted_sequence,\n values,\n side=\"left\",\n out_type=dtypes.int32,\n name=None):\n \"\"\"Searches for where a value would go in a sorted sequence.\n\n This is not a method for checking containment (like python `in`).\n\n The typical use case for this operation is \"binning\", \"bucketing\", or\n \"discretizing\". The `values` are assigned to bucket-indices based on the\n **edges** listed in `sorted_sequence`. This operation\n returns the bucket-index for each value.\n\n >>> edges = [-1, 3.3, 9.1, 10.0]\n >>> values = [0.0, 4.1, 12.0]\n >>> tf.searchsorted(edges, values).numpy()\n array([1, 2, 4], dtype=int32)\n\n The `side` argument controls which index is returned if a value lands exactly\n on an edge:\n\n >>> seq = [0, 3, 9, 10, 10]\n >>> values = [0, 4, 10]\n >>> tf.searchsorted(seq, values).numpy()\n array([0, 2, 3], dtype=int32)\n >>> tf.searchsorted(seq, values, side=\"right\").numpy()\n array([1, 2, 5], dtype=int32)\n\n The `axis` is not settable for this operation. It always operates on the\n innermost dimension (`axis=-1`). The operation will accept any number of\n outer dimensions. Here it is applied to the rows of a matrix:\n\n >>> sorted_sequence = [[0., 3., 8., 9., 10.],\n ... [1., 2., 3., 4., 5.]]\n >>> values = [[9.8, 2.1, 4.3],\n ... [0.1, 6.6, 4.5, ]]\n >>> tf.searchsorted(sorted_sequence, values).numpy()\n array([[4, 1, 2],\n [0, 5, 4]], dtype=int32)\n\n Note: This operation assumes that `sorted_sequence` **is sorted** along the\n innermost axis, maybe using `tf.sort(..., axis=-1)`. **If the sequence is not\n sorted no error is raised** and the content of the returned tensor is not well\n defined.\n\n Args:\n sorted_sequence: N-D `Tensor` containing a sorted sequence.\n values: N-D `Tensor` containing the search values.\n side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to\n upper_bound.\n out_type: The output type (`int32` or `int64`). Default is `tf.int32`.\n name: Optional name for the operation.\n\n Returns:\n An N-D `Tensor` the size of `values` containing the result of applying\n either lower_bound or upper_bound (depending on side) to each value. 
The\n result is not a global index to the entire `Tensor`, but the index in the\n last dimension.\n\n Raises:\n ValueError: If the last dimension of `sorted_sequence >= 2^31-1` elements.\n If the total size of `values` exceeds `2^31 - 1` elements.\n If the first `N-1` dimensions of the two tensors don't match.\n \"\"\"\n sequence_size = shape_internal(sorted_sequence)[-1]\n values_size = shape_internal(values)[-1]\n sorted_sequence_2d = reshape(sorted_sequence, [-1, sequence_size])\n values_2d = reshape(values, [-1, values_size])\n if side == \"right\":\n output = gen_array_ops.upper_bound(sorted_sequence_2d, values_2d, out_type,\n name)\n elif side == \"left\":\n output = gen_array_ops.lower_bound(sorted_sequence_2d, values_2d, out_type,\n name)\n else:\n raise ValueError(\"side must be either 'right' or 'left'. Saw: %s.\" % side)\n return reshape(output, shape_internal(values))\n\n\nquantize.__doc__ = gen_array_ops.quantize_v2.__doc__\n\n\n@tf_export(\"image.extract_patches\")\[email protected]_dispatch_support\ndef extract_image_patches_v2(images, sizes, strides, rates, padding, name=None):\n r\"\"\"Extract `patches` from `images`.\n\n This op collects patches from the input image, as if applying a\n convolution. All extracted patches are stacked in the depth (last) dimension\n of the output.\n\n Specifically, the op extracts patches of shape `sizes` which are `strides`\n apart in the input image. The output is subsampled using the `rates` argument,\n in the same manner as \"atrous\" or \"dilated\" convolutions.\n\n The result is a 4D tensor which is indexed by batch, row, and column.\n `output[i, x, y]` contains a flattened patch of size `sizes[1], sizes[2]`\n which is taken from the input starting at\n `images[i, x*strides[1], y*strides[2]]`.\n\n Each output patch can be reshaped to `sizes[1], sizes[2], depth`, where\n `depth` is `images.shape[3]`.\n\n The output elements are taken from the input at intervals given by the `rate`\n argument, as in dilated convolutions.\n\n The `padding` argument has no effect on the size of each patch, it determines\n how many patches are extracted. If `VALID`, only patches which are fully\n contained in the input image are included. If `SAME`, all patches whose\n starting point is inside the input are included, and areas outside the input\n default to zero.\n\n Example:\n\n ```\n n = 10\n # images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100\n images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]]\n\n # We generate two outputs as follows:\n # 1. 3x3 patches with stride length 5\n # 2. 
Same as above, but the rate is increased to 2\n tf.image.extract_patches(images=images,\n sizes=[1, 3, 3, 1],\n strides=[1, 5, 5, 1],\n rates=[1, 1, 1, 1],\n padding='VALID')\n\n # Yields:\n [[[[ 1 2 3 11 12 13 21 22 23]\n [ 6 7 8 16 17 18 26 27 28]]\n [[51 52 53 61 62 63 71 72 73]\n [56 57 58 66 67 68 76 77 78]]]]\n ```\n\n If we mark the pixels in the input image which are taken for the output with\n `*`, we see the pattern:\n\n ```\n * * * 4 5 * * * 9 10\n * * * 14 15 * * * 19 20\n * * * 24 25 * * * 29 30\n 31 32 33 34 35 36 37 38 39 40\n 41 42 43 44 45 46 47 48 49 50\n * * * 54 55 * * * 59 60\n * * * 64 65 * * * 69 70\n * * * 74 75 * * * 79 80\n 81 82 83 84 85 86 87 88 89 90\n 91 92 93 94 95 96 97 98 99 100\n ```\n\n ```\n tf.image.extract_patches(images=images,\n sizes=[1, 3, 3, 1],\n strides=[1, 5, 5, 1],\n rates=[1, 2, 2, 1],\n padding='VALID')\n\n # Yields:\n [[[[ 1 3 5 21 23 25 41 43 45]\n [ 6 8 10 26 28 30 46 48 50]]\n\n [[ 51 53 55 71 73 75 91 93 95]\n [ 56 58 60 76 78 80 96 98 100]]]]\n ```\n\n We can again draw the effect, this time using the symbols `*`, `x`, `+` and\n `o` to distinguish the patches:\n\n ```\n * 2 * 4 * x 7 x 9 x\n 11 12 13 14 15 16 17 18 19 20\n * 22 * 24 * x 27 x 29 x\n 31 32 33 34 35 36 37 38 39 40\n * 42 * 44 * x 47 x 49 x\n + 52 + 54 + o 57 o 59 o\n 61 62 63 64 65 66 67 68 69 70\n + 72 + 74 + o 77 o 79 o\n 81 82 83 84 85 86 87 88 89 90\n + 92 + 94 + o 97 o 99 o\n ```\n\n Args:\n images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.\n sizes: The size of the extracted patches. Must be\n `[1, size_rows, size_cols, 1]`.\n strides: A 1-D Tensor of length 4. How far the centers of two consecutive\n patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.\n rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.\n This is the input stride, specifying how far two consecutive patch samples\n are in the input. Equivalent to extracting patches with `patch_sizes_eff =\n patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling\n them spatially by a factor of `rates`. This is equivalent to `rate` in\n dilated (a.k.a. Atrous) convolutions.\n padding: The type of padding algorithm to use.\n name: A name for the operation (optional).\n\n Returns:\n A 4-D Tensor of the same type as the input.\n \"\"\"\n return gen_array_ops.extract_image_patches(images, sizes, strides, rates,\n padding, name)\n\n\n@tf_export(v1=[\"image.extract_image_patches\", \"extract_image_patches\"])\[email protected]_dispatch_support\[email protected]_args(None, \"ksizes is deprecated, use sizes instead\",\n \"ksizes\")\ndef extract_image_patches( # pylint: disable=missing-docstring\n images,\n ksizes=None,\n strides=None,\n rates=None,\n padding=None,\n name=None,\n sizes=None):\n \"\"\"Extract patches from images and put them in the \"depth\" output dimension.\n\n Args:\n `images`: A `Tensor`. Must be one of the following types: `float32`,\n `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`,\n `uint16`, `half`, `uint32`, `uint64`. 4-D Tensor with shape\n `[batch, in_rows, in_cols, depth]`. `ksizes`: A list of `ints` that has\n length `>= 4`. The size of the sliding window for each\n dimension of `images`. `strides`: A list of `ints` that has length `>= 4`.\n 1-D of length 4. How far the centers of two consecutive\n patches are in the images. Must be:\n `[1, stride_rows, stride_cols, 1]`. `rates`: A list of `ints`\n that has length `>= 4`. 1-D of length 4. Must be: `[1, rate_rows, rate_cols,\n 1]`. 
This is the input stride, specifying how far two consecutive patch\n samples are in the input. Equivalent to extracting patches with\n `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`,\n followed by subsampling them spatially by a factor of `rates`. This is\n equivalent to `rate` in dilated (a.k.a. Atrous) convolutions.\n `padding`: A `string` from: \"SAME\", \"VALID\". The type of padding algorithm\n to use.\n We specify the size-related attributes as: ``` ksizes = [1, ksize_rows,\n ksize_cols, 1] strides = [1, strides_rows, strides_cols, 1] rates = [1,\n rates_rows, rates_cols, 1]\n name: A name for the operation (optional). ```\n\n Returns:\n A Tensor. Has the same type as images.\n \"\"\"\n ksizes = deprecation.deprecated_argument_lookup(\"sizes\", sizes, \"ksizes\",\n ksizes)\n return gen_array_ops.extract_image_patches(images, ksizes, strides, rates,\n padding, name)\n\n\nextract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__\n\n\n@tf_export(\"fingerprint\")\[email protected]_dispatch_support\ndef fingerprint(data, method=\"farmhash64\", name=None):\n r\"\"\"Generates fingerprint values.\n\n Generates fingerprint values of `data`.\n\n Fingerprint op considers the first dimension of `data` as the batch dimension,\n and `output[i]` contains the fingerprint value generated from contents in\n `data[i, ...]` for all `i`.\n\n Fingerprint op writes fingerprint values as byte arrays. For example, the\n default method `farmhash64` generates a 64-bit fingerprint value at a time.\n This 8-byte value is written out as an `tf.uint8` array of size 8, in\n little-endian order.\n\n For example, suppose that `data` has data type `tf.int32` and shape (2, 3, 4),\n and that the fingerprint method is `farmhash64`. In this case, the output\n shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the\n size of each fingerprint value in bytes. `output[0, :]` is generated from\n 12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from\n other 12 integers in `data[1, :, :]`.\n\n Note that this op fingerprints the raw underlying buffer, and it does not\n fingerprint Tensor's metadata such as data type and/or shape. For example, the\n fingerprint values are invariant under reshapes and bitcasts as long as the\n batch dimension remain the same:\n\n ```python\n tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...))\n tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...))\n ```\n\n For string data, one should expect `tf.fingerprint(data) !=\n tf.fingerprint(tf.string.reduce_join(data))` in general.\n\n Args:\n data: A `Tensor`. Must have rank 1 or higher.\n method: A `Tensor` of type `tf.string`. Fingerprint method used by this op.\n Currently available method is `farmhash64`.\n name: A name for the operation (optional).\n\n Returns:\n A two-dimensional `Tensor` of type `tf.uint8`. 
The first dimension equals to\n `data`'s first dimension, and the second dimension size depends on the\n fingerprint algorithm.\n \"\"\"\n return gen_array_ops.fingerprint(data, method, name)\n\n\ndef convert_to_int_tensor(tensor, name, dtype=dtypes.int32):\n \"\"\"Converts the given value to an integer Tensor.\"\"\"\n tensor = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype)\n if tensor.dtype.is_integer:\n tensor = gen_math_ops.cast(tensor, dtype)\n else:\n raise TypeError(\"%s must be an integer tensor; dtype=%s\" %\n (name, tensor.dtype))\n return tensor\n\n\ndef get_positive_axis(axis, ndims, axis_name=\"axis\", ndims_name=\"ndims\"):\n \"\"\"Validate an `axis` parameter, and normalize it to be positive.\n\n If `ndims` is known (i.e., not `None`), then check that `axis` is in the\n range `-ndims <= axis < ndims`, and return `axis` (if `axis >= 0`) or\n `axis + ndims` (otherwise).\n If `ndims` is not known, and `axis` is positive, then return it as-is.\n If `ndims` is not known, and `axis` is negative, then report an error.\n\n Args:\n axis: An integer constant\n ndims: An integer constant, or `None`\n axis_name: The name of `axis` (for error messages).\n ndims_name: The name of `ndims` (for error messages).\n\n Returns:\n The normalized `axis` value.\n\n Raises:\n ValueError: If `axis` is out-of-bounds, or if `axis` is negative and\n `ndims is None`.\n \"\"\"\n if not isinstance(axis, int):\n raise TypeError(\"%s must be an int; got %s\" %\n (axis_name, type(axis).__name__))\n if ndims is not None:\n if 0 <= axis < ndims:\n return axis\n elif -ndims <= axis < 0:\n return axis + ndims\n else:\n raise ValueError(\"%s=%s out of bounds: expected %s<=%s<%s\" %\n (axis_name, axis, -ndims, axis_name, ndims))\n elif axis < 0:\n raise ValueError(\"%s may only be negative if %s is statically known.\" %\n (axis_name, ndims_name))\n return axis\n\n\n# This op is intended to exactly match the semantics of numpy.repeat, with\n# one exception: numpy.repeat has special (and somewhat non-intuitive) behavior\n# when axis is not specified. Rather than implement that special behavior, we\n# simply make `axis` be a required argument.\n#\n# External (OSS) `tf.repeat` feature request:\n# https://github.com/tensorflow/tensorflow/issues/8246\ndef repeat_with_axis(data, repeats, axis, name=None):\n \"\"\"Repeats elements of `data`.\n\n Args:\n data: An `N`-dimensional tensor.\n repeats: A 1-D integer tensor specifying how many times each element in\n `axis` should be repeated. `len(repeats)` must equal `data.shape[axis]`.\n Supports broadcasting from a scalar value.\n axis: `int`. The axis along which to repeat values. Must be less than\n `max(N, 1)`.\n name: A name for the operation.\n\n Returns:\n A tensor with `max(N, 1)` dimensions. 
Has the same shape as `data`,\n except that dimension `axis` has size `sum(repeats)`.\n\n Example usage:\n\n >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)\n <tf.Tensor: shape=(5,), dtype=string,\n numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)\n <tf.Tensor: shape=(5, 2), dtype=int32, numpy=\n array([[1, 2],\n [1, 2],\n [3, 4],\n [3, 4],\n [3, 4]], dtype=int32)>\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)\n <tf.Tensor: shape=(2, 5), dtype=int32, numpy=\n array([[1, 1, 2, 2, 2],\n [3, 3, 4, 4, 4]], dtype=int32)>\n\n \"\"\"\n if not isinstance(axis, int):\n raise TypeError(\"axis must be an int; got %s\" % type(axis).__name__)\n\n with ops.name_scope(name, \"Repeat\", [data, repeats]):\n data = ops.convert_to_tensor(data, name=\"data\")\n repeats = convert_to_int_tensor(repeats, name=\"repeats\")\n repeats.shape.with_rank_at_most(1)\n\n # If `data` is a scalar, then upgrade it to a vector.\n data = _with_nonzero_rank(data)\n data_shape = shape(data)\n\n # If `axis` is negative, then convert it to a positive value.\n axis = get_positive_axis(axis, data.shape.rank, ndims_name=\"rank(data)\")\n\n # If we know that `repeats` is a scalar, then we can just tile & reshape.\n if repeats.shape.num_elements() == 1:\n repeats = reshape(repeats, [])\n expanded = expand_dims(data, axis + 1)\n tiled = tile_one_dimension(expanded, axis + 1, repeats)\n result_shape = concat([\n data_shape[:axis], [repeats * data_shape[axis]], data_shape[axis + 1:]\n ],\n axis=0)\n return reshape(tiled, result_shape)\n\n\n # Check data Tensor shapes.\n if repeats.shape.ndims == 1:\n data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])\n\n repeats = broadcast_to(repeats, [data_shape[axis]])\n repeats_original = repeats\n\n # Broadcast the `repeats` tensor so rank(repeats) == axis + 1.\n if repeats.shape.ndims != axis + 1:\n repeats_shape = shape(repeats)\n repeats_ndims = rank(repeats)\n broadcast_shape = concat(\n [data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)\n repeats = broadcast_to(repeats, broadcast_shape)\n repeats.set_shape([None] * (axis + 1))\n\n # Create a \"sequence mask\" based on `repeats`, where slices across `axis`\n # contain one `True` value for each repetition. E.g., if\n # `repeats = [3, 1, 2]`, then `mask = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]`.\n max_repeat = gen_math_ops.maximum(\n 0, gen_math_ops._max(repeats, _all_dimensions(repeats)))\n mask = sequence_mask(repeats, max_repeat)\n\n # Add a new dimension around each value that needs to be repeated, and\n # then tile that new dimension to match the maximum number of repetitions.\n expanded = expand_dims(data, axis + 1)\n tiled = tile_one_dimension(expanded, axis + 1, max_repeat)\n\n # Use `boolean_mask` to discard the extra repeated values. 
This also\n # flattens all dimensions up through `axis`.\n masked = boolean_mask(tiled, mask)\n\n # Reshape the output tensor to add the outer dimensions back.\n if axis == 0:\n result = masked\n else:\n repeated_dim_size = gen_math_ops._sum(\n repeats_original,\n axis=gen_math_ops._range(0, rank(repeats_original), 1))\n result_shape = concat(\n [data_shape[:axis], [repeated_dim_size], data_shape[axis + 1:]],\n axis=0)\n result = reshape(masked, result_shape)\n\n # Preserve shape information.\n if data.shape.ndims is not None:\n new_axis_size = 0 if repeats.shape[0] == 0 else None\n result.set_shape(data.shape[:axis].concatenate(\n [new_axis_size]).concatenate(data.shape[axis + 1:]))\n\n return result\n\n\ndef tile_one_dimension(data, axis, multiple):\n \"\"\"Tiles a single dimension of a tensor.\"\"\"\n # Assumes axis is a nonnegative int.\n if data.shape.ndims is not None:\n multiples = [1] * data.shape.ndims\n multiples[axis] = multiple\n else:\n ones_value = ones(rank(data), dtypes.int32)\n multiples = concat([ones_value[:axis], [multiple], ones_value[axis + 1:]],\n axis=0)\n return tile(data, multiples)\n\n\ndef _with_nonzero_rank(data):\n \"\"\"If `data` is scalar, then add a dimension; otherwise return as-is.\"\"\"\n if data.shape.ndims is not None:\n if data.shape.ndims == 0:\n return stack([data])\n else:\n return data\n else:\n data_shape = shape(data)\n data_ndims = rank(data)\n return reshape(data, concat([[1], data_shape], axis=0)[-data_ndims:])\n\n\n@tf_export(\"repeat\")\[email protected]_dispatch_support\ndef repeat(input, repeats, axis=None, name=None): # pylint: disable=redefined-builtin\n \"\"\"Repeat elements of `input`.\n\n See also `tf.concat`, `tf.stack`, `tf.tile`.\n\n Args:\n input: An `N`-dimensional Tensor.\n repeats: An 1-D `int` Tensor. The number of repetitions for each element.\n repeats is broadcasted to fit the shape of the given axis. `len(repeats)`\n must equal `input.shape[axis]` if axis is not None.\n axis: An int. The axis along which to repeat values. 
By default (axis=None),\n use the flattened input array, and return a flat output array.\n name: A name for the operation.\n\n Returns:\n A Tensor which has the same shape as `input`, except along the given axis.\n If axis is None then the output array is flattened to match the flattened\n input array.\n\n Example usage:\n\n >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)\n <tf.Tensor: shape=(5,), dtype=string,\n numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>\n\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)\n <tf.Tensor: shape=(5, 2), dtype=int32, numpy=\n array([[1, 2],\n [1, 2],\n [3, 4],\n [3, 4],\n [3, 4]], dtype=int32)>\n\n >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)\n <tf.Tensor: shape=(2, 5), dtype=int32, numpy=\n array([[1, 1, 2, 2, 2],\n [3, 3, 4, 4, 4]], dtype=int32)>\n\n >>> repeat(3, repeats=4)\n <tf.Tensor: shape=(4,), dtype=int32, numpy=array([3, 3, 3, 3], dtype=int32)>\n\n >>> repeat([[1,2], [3,4]], repeats=2)\n <tf.Tensor: shape=(8,), dtype=int32,\n numpy=array([1, 1, 2, 2, 3, 3, 4, 4], dtype=int32)>\n\n \"\"\"\n if axis is None:\n input = reshape(input, [-1])\n axis = 0\n return repeat_with_axis(input, repeats, axis, name)\n\n\n@tf_export(\"guarantee_const\")\[email protected](None, \"Not for public use.\")\ndef guarantee_const(input, name=None): # pylint: disable=redefined-builtin\n \"\"\"Promise to the TF runtime that the input tensor is a constant.\n\n The runtime is then free to make optimizations based on this.\n\n Returns the input tensor without modification.\n\n Args:\n input: A `Tensor`.\n name: A name for this operation.\n\n Returns:\n A `Tensor`. Has the same dtype as `input`.\n \"\"\"\n return gen_array_ops.guarantee_const(input=input, name=name)\n\n\n# Register elementwise ops that don't have Python wrappers.\ndispatch.register_unary_elementwise_api(gen_array_ops.check_numerics)\n" ]
[ [ "tensorflow.python.ops.gen_math_ops.select_v2", "tensorflow.python.util.tf_decorator.make_decorator", "tensorflow.python.ops.gen_array_ops.list_diff", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.ops.gen_array_ops.strided_slice", "tensorflow.python.ops.gen_array_ops.dequantize", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.gen_array_ops.rank", "tensorflow.python.ops.gen_array_ops.identity", "tensorflow.python.ops.gen_array_ops.matrix_diag_v3", "tensorflow.python.ops.gen_array_ops.quantize_and_dequantize_v2", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.python.ops.gen_array_ops.reshape", "numpy.zeros", "tensorflow.python.ops.gen_array_ops.split", "tensorflow.python.ops.gen_array_ops.tensor_scatter_update", "tensorflow.python.ops.gen_array_ops.unique_with_counts", "tensorflow.python.ops.gen_array_ops.placeholder", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.python.ops.gen_array_ops.extract_image_patches", "numpy.array", "tensorflow.python.ops.gen_array_ops.pack", "tensorflow.python.ops.gen_array_ops.edit_distance", "tensorflow.python.framework.tensor_util.is_tf_type", "tensorflow.python.ops.gen_array_ops.size", "tensorflow.python.util.nest.flatten", "tensorflow.python.ops.gen_math_ops._range", "tensorflow.python.ops.gen_array_ops.unique", "tensorflow.python.ops.gen_array_ops.squeeze", "tensorflow.python.ops.gen_array_ops.quantize_v2", "tensorflow.python.ops.gen_array_ops.space_to_depth", "tensorflow.python.ops.gen_array_ops.where", "tensorflow.python.ops.gen_array_ops.concat_v2", "tensorflow.python.ops.gen_array_ops.one_hot", "tensorflow.python.framework.common_shapes.broadcast_shape", "tensorflow.python.ops.gen_array_ops.pad", "tensorflow.python.framework.tensor_util.maybe_set_static_shape", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.gen_array_ops.matrix_set_diag_v3", "tensorflow.python.framework.ops.Tensor._override_operator", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.tensor_shape.unknown_shape", "numpy.ones", "numpy.isscalar", "tensorflow.python.ops.gen_array_ops.guarantee_const", "tensorflow.python.ops.gen_array_ops.fill", "tensorflow.python.ops.gen_array_ops.matrix_diag_part_v3", "tensorflow.python.ops.gen_array_ops.diag_part", "tensorflow.python.ops.gen_array_ops.reverse_sequence", "tensorflow.python.framework.tensor_util.constant_value_as_shape", "tensorflow.python.util.deprecation.deprecated_endpoints", "tensorflow.python.ops.gen_array_ops.split_v", "tensorflow.python.util.dispatch.register_unary_elementwise_api", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.gen_array_ops.depth_to_space", "tensorflow.python.ops.gen_array_ops.lower_bound", "tensorflow.python.framework.ops.register_tensor_conversion_function", "tensorflow.python.ops.gen_array_ops.unpack", "tensorflow.python.ops.gen_array_ops.gather_nd", "tensorflow.python.framework.tensor_shape.is_fully_defined", "tensorflow.python.ops.gen_math_ops.cast", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.gen_array_ops.shape", "tensorflow.python.util.deprecation.deprecated_argument_lookup", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.gen_math_ops.select", "tensorflow.python.ops.gen_array_ops.gather_v2", "tensorflow.python.ops.gen_math_ops.prod", 
"tensorflow.python.ops.gen_array_ops.fingerprint", "tensorflow.python.ops.gen_array_ops.placeholder_with_default", "tensorflow.python.ops.gen_array_ops.upper_bound", "numpy.arange", "tensorflow.python.ops.gen_array_ops.quantize_and_dequantize_v4", "tensorflow.python.util.nest.map_structure", "tensorflow.python.ops.gen_array_ops.pad_v2", "tensorflow.python.ops.gen_array_ops.broadcast_args", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.ops.gen_array_ops.zeros_like", "tensorflow.python.ops.gen_array_ops.shape_n", "tensorflow.python.ops.gen_array_ops._slice", "tensorflow.python.ops.gen_array_ops.mirror_pad", "tensorflow.python.ops.gen_array_ops.expand_dims", "numpy.prod", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.framework.constant_op.constant" ] ]
frantisekvasa/epimix_rapid_processing
[ "b4bc3cc8b2f473fd8f9538376b4ffbd6a5e9374f" ]
[ "EPImix_analysis/epimix_functions.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 13 10:38:35 2020\n\n@author: Frantisek Vasa ([email protected])\n\nAdditional functions for manuscript \"Rapid processing and quantitative evaluation of multicontrast EPImix scans for adaptive multimodal imaging\"\n\n\"\"\"\n\n# Additional functions to run EPImix comparison script\n\n# for wbplot\nfrom wbplot import pscalar\nimport numpy as np \nfrom matplotlib import cm, lines\n#from matplotlib import colors \n#from matplotlib.colors import ListedColormap\n\n# for nilearn masked plot\nimport nibabel as nb\nimport nilearn as nl\n\n# colorbar\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n# for spin p-value\nimport scipy as sp\n\n# formatting of p-values as powers of 10, modified from:\n# https://stackoverflow.com/questions/25750170/show-decimal-places-and-scientific-notation-on-the-axis-of-a-matplotlib-plot/49330649#49330649\ndef pow_10_fmt(p):\n if p < 1e-10:\n return 'P < $10^{-10}$'\n elif p > 0.001:\n return 'P = '+str(round(p,3))#'%s' % float('%.3f' % p)\n else:\n s = \"%1.2e\" % p\n decimal_point = '.'\n positive_sign = '+'\n tup = s.split('e')\n significand = tup[0].rstrip(decimal_point)\n sign = tup[1][0].replace(positive_sign, '')\n exponent = tup[1][1:].lstrip('0')\n if exponent:\n exponent = '10^{%s%s}' % (sign, exponent)\n if significand and exponent:\n s = r'%s{\\times}%s' % (significand, exponent)\n else:\n s = r'%s%s' % (significand, exponent)\n return \"P = ${}$\".format(s)\n\n# plot of high-res (360-ROI) parcellation, excluding \"dropped\" regions\ndef pscalar_mmp_hk(file_out, pscalars_hk, mmp_hk, orientation='landscape',\n hemi=None, vrange=None, cmap='magma', transp=False):\n \n # set vrange if it wasn't set before\n if vrange is None:\n vrange = (min(pscalars_hk),max(pscalars_hk))\n \n # replace \"dropped\" regions values by value smaller than range (for mmp_h, there are 360 ROIs -> hardcoded)\n pscalars = np.ones(360)*(vrange[0]-1); pscalars[mmp_hk] = pscalars_hk \n \n # # edit colorbar to add grey as minimum value\n # cmap_nan = cm.get_cmap(cmap, 256).colors\n # cmap_nan[0,0:3] = colors.to_rgb('grey')\n # cmap_nan_mpl=ListedColormap(cmap_nan)\n \n # edit colorbar to add grey as minimum value\n cmap_under = cm.get_cmap(cmap, 256)\n cmap_under.set_under('grey')\n \n # call pscalar function with new values\n pscalar(file_out, pscalars, orientation='landscape',\n hemisphere=hemi, vrange=vrange, cmap=cmap_under, transparent=transp) # cmap_nan_mpl\n \n# plot of low-res (44-ROI) parcellation, excluding \"dropped\" regions\ndef pscalar_mmp_lk(file_out, pscalars_lk, mmp_lk, mmp_ds_ids, orientation='landscape',\n hemi=None, vrange=None, cmap='magma', transp=False):\n \n # set vrange if it wasn't set before\n if vrange is None:\n vrange = (min(pscalars_lk),max(pscalars_lk))\n \n # replace \"dropped\" regions values by value smaller than range (for mmp_h, there are 44 ROIs -> hardcoded)\n pscalars = np.ones(44)*(vrange[0]-1); pscalars[mmp_lk] = pscalars_lk \n \n # # edit colorbar to add grey as minimum value\n # cmap_nan = cm.get_cmap(cmap, 256).colors\n # cmap_nan[0,0:3] = colors.to_rgb('grey')\n # cmap_nan_mpl=ListedColormap(cmap_nan)\n \n # edit colorbar to add grey as minimum value\n cmap_under = cm.get_cmap(cmap, 256)\n cmap_under.set_under('grey')\n \n # set vrange if it wasn't set before\n if vrange is None:\n vrange = (min(pscalars_lk),max(pscalars_lk))\n \n # call pscalar function with new values\n pscalar(file_out, pscalars[mmp_ds_ids], orientation='landscape',\n 
hemisphere=hemi, vrange=vrange, cmap=cmap_under, transparent=transp) # cmap_nan_mpl\n\n# plot colorbar\ndef plot_cbar(c_lim, cmap_nm, c_label, lbs, save_path):\n \n f, ax = plt.subplots(figsize=(6, 0.75)); f.subplots_adjust(bottom=0.65)\n cmap = cm.get_cmap(cmap_nm, 256) #mpl.cm.plasma_r\n norm = mpl.colors.Normalize(vmin=c_lim[0], vmax=c_lim[1])\n cb1 = mpl.colorbar.ColorbarBase(ax, cmap=cmap, norm=norm, orientation='horizontal')\n cb1.set_label(c_label, size=lbs)\n if save_path[-3:]=='png':\n plt.savefig(save_path, dpi=500)\n elif save_path[-3:]=='svg':\n plt.savefig(save_path)\n \n# Median Absolute Deviation\ndef mad(a, axis=None):\n \"\"\"\n Compute *Median Absolute Deviation* of an array along given axis.\n \"\"\"\n # Median along given axis, but *keeping* the reduced axis so that result can still broadcast against a.\n med = np.nanmedian(a, axis=axis, keepdims=True)\n mad = np.nanmedian(np.absolute(a - med), axis=axis) # MAD along given axis\n return mad\n\ndef kth_diag_indices(a, k):\n rows, cols = np.diag_indices_from(a)\n if k < 0:\n return rows[-k:], cols[:k]\n elif k > 0:\n return rows[:-k], cols[k:]\n else:\n return rows, cols\n\ndef plot_nl_image_masked(img_vec,mask_vec,img_shape,img_affine,cmap,clim=None,*line_args,**line_kwargs):\n if clim is None:\n #clim = (min(img_vec[mask_vec==1]),max(img_vec[mask_vec==1]))\n clim = (min(img_vec[mask_vec==1]),np.percentile(img_vec[mask_vec==1],95))\n # i) edit image and colorbar to map background to black\n img_masked = np.ones(img_vec.size)*(clim[0]-1); img_masked[mask_vec==1] = img_vec[mask_vec==1]\n cmap_under = cm.get_cmap(cmap, 256); cmap_under.set_under('white')\n # ii) convert image to nii and plot\n img_masked_nii = nb.Nifti1Image(np.reshape(img_masked,img_shape),affine=img_affine)\n nl.plotting.plot_img(img_masked_nii,colorbar=True,cmap=cmap_under, vmin=clim[0], vmax=clim[1],*line_args,**line_kwargs)\n\ndef add_subnetwork_lines(hm,roi_nums,*line_args,**line_kwargs):\n hm.hlines([0]+[i-0.25 for i in np.cumsum(roi_nums)], *hm.get_xlim(),*line_args,**line_kwargs); hm.vlines([0]+[i-0.25 for i in np.cumsum(roi_nums)], *hm.get_ylim(),*line_args,**line_kwargs)\n\ndef add_subnetwork_colours(hm,bb,roi_nums,roi_cols,*line_args,**line_kwargs):\n # add network colour lines\n ax2 = plt.axes([0,0,1,1], facecolor=(1,1,1,0)); ax2.axis(\"off\"); #ax2.get_xaxis().set_visible(False), ax2.get_yaxis().set_visible(False)\n temp_nroi_cum = [0]+[i-0.25 for i in np.cumsum(roi_nums)]\n for i in range(len(roi_nums)):\n ax2.add_line(lines.Line2D([bb[0,0]-0.02*(bb[1,0]-bb[0,0]) ,bb[0,0]-0.02*(bb[1,0]-bb[0,0])], [bb[1,1]-(bb[1,1]-bb[0,1])*(temp_nroi_cum[i]/sum(roi_nums)) ,bb[1,1]-(bb[1,1]-bb[0,1])*(temp_nroi_cum[i+1]/sum(roi_nums))], color=roi_cols[i], *line_args,**line_kwargs))\n ax2.add_line(lines.Line2D([bb[0,0]+(bb[1,0]-bb[0,0])*(temp_nroi_cum[i]/sum(roi_nums)) ,bb[0,0]+(bb[1,0]-bb[0,0])*(temp_nroi_cum[i+1]/sum(roi_nums))], [bb[1,1]+0.02*(bb[1,1]-bb[0,1]) ,bb[1,1]+0.02*(bb[1,1]-bb[0,1])], color=roi_cols[i], *line_args,**line_kwargs))\n\ndef adjust_lightness(color, amount=0.5):\n # from: https://stackoverflow.com/questions/37765197/darken-or-lighten-a-color-in-matplotlib\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])\n\ndef perm_sphere_p(x,y,perm_id,corr_type='spearman'):\n \n # Function to generate a p-value for the spatial correlation between two parcellated cortical surface maps, \n # 
using a set of spherical permutations of regions of interest.\n # The function performs the permutation in both directions; i.e.: by permute both measures, \n # before correlating each permuted measure to the unpermuted version of the other measure\n #\n # Inputs:\n # x one of two maps to be correlated vector\n # y second of two maps to be correlated vector\n # perm_id array of permutations, from set of regions to itself (as generated by \"rotate_parcellation\") array of size [n(total regions) x nrot]\n # corr_type type of correlation \"spearman\" (default) or \"pearson\"\n #\n # Output:\n # p_perm permutation p-value\n \n nroi = perm_id.shape[0] # number of regions\n nperm = perm_id.shape[1] # number of permutations\n \n if corr_type=='spearman':\n rho_emp = sp.stats.spearmanr(x,y)[0]\n elif corr_type=='pearson':\n rho_emp = sp.stats.pearsonr(x,y)[0]\n \n # permutation of measures\n x_perm = y_perm = np.zeros((nroi,nperm))\n for r in range(nperm):\n for i in range(nroi):\n x_perm[i,r] = x[perm_id[i,r]]\n y_perm[i,r] = y[perm_id[i,r]]\n \n # correlation to unpermuted measures\n rho_null_xy = np.zeros(nperm)\n rho_null_yx = np.zeros(nperm)\n if corr_type=='spearman':\n for r in range(nperm):\n rho_null_xy[r] = sp.stats.spearmanr(x_perm[:,r],y)[0]\n rho_null_yx[r] = sp.stats.spearmanr(y_perm[:,r],x)[0]\n elif corr_type=='pearson':\n for r in range(nperm):\n rho_null_xy[r] = sp.stats.pearsonr(x_perm[:,r],y)[0]\n rho_null_yx[r] = sp.stats.pearsonr(y_perm[:,r],x)[0]\n \n # p-value definition depends on the sign of the empirical correlation\n if (rho_emp>0):\n p_perm_xy = sum(rho_null_xy>rho_emp)/nperm\n p_perm_yx = sum(rho_null_yx>rho_emp)/nperm\n else:\n p_perm_xy = sum(rho_null_xy<rho_emp)/nperm\n p_perm_yx = sum(rho_null_yx<rho_emp)/nperm\n \n # return average p-value\n return((p_perm_xy+p_perm_yx)/2)" ]
[ [ "numpy.nanmedian", "numpy.absolute", "matplotlib.colors.to_rgb", "numpy.reshape", "scipy.stats.pearsonr", "matplotlib.pyplot.subplots", "matplotlib.colors.Normalize", "numpy.ones", "matplotlib.pyplot.axes", "matplotlib.colorbar.ColorbarBase", "matplotlib.pyplot.savefig", "numpy.percentile", "numpy.diag_indices_from", "numpy.cumsum", "matplotlib.cm.get_cmap", "scipy.stats.spearmanr", "numpy.zeros" ] ]
jessicapetrochuk/Detectron_2_Image_Segmentation
[ "67ab6fb03b90a298367c86eab0d89a2d8438169a" ]
[ "model.py" ]
[ "import torch\nimport natsort\nimport numpy as np\nimport pycocotools\nfrom PIL import Image\nimport os, cv2, random\nimport torchvision.ops as ops\nfrom detectron2 import model_zoo\nfrom detectron2.config import get_cfg\nfrom detectron2.structures import BoxMode\nfrom detectron2.engine import DefaultTrainer\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.utils.visualizer import ColorMode\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\n\ndef get_masks(num_imgs):\n \"\"\"\n Loops through all masks in mask directory and returns the masks as an array and the bounding boxes\n Arguments:\n num_images (int): number of images that are being used to train the model\n Returns:\n bboxes (array of size [N, 4]): bboxes where each bbox is (x1, y1, x2, y2)\n masks (array of size [N, H, W]): masks from directory as binary array\n \"\"\"\n\n bboxes = []\n masks = []\n \n for i in range(num_imgs):\n masks_subdir = '/Users/jessicapetrochuk/Documents/School/UBC/2021-2022/Directed Studies/Code/Detectron_2/myDATASET/masks_with_rotations/section_masks_{}'.format(i) #Change to the directory masks are in \n \n # Looping through all images in the images directory\n for mask in sorted(os.listdir(masks_subdir)):\n if not mask.startswith('.'):\n full_path = os.path.join(masks_subdir, mask)\n mask_img = Image.open(full_path).convert(\"1\")\n mask_array = np.asarray(mask_img)\n mask_array_bin = np.where(mask_array > 0.5, 1, 0).astype(np.uint8)\n mask_tensor = torch.tensor(mask_array_bin).unsqueeze(0)\n bbox = ops.masks_to_boxes(mask_tensor)\n bbox_list = bbox.tolist()\n mask_array = pycocotools.mask.encode(np.asarray(mask_array, order=\"F\"))\n masks.append(mask_array)\n bboxes.append(bbox_list[0])\n\n print(i, ':', masks_subdir)\n\n print('Done getting masks and bounding boxes')\n return bboxes, masks\n\ndef get_masks_dict(bboxes, masks):\n print('starting getting dataset dictionary')\n\n dataset_dicts = []\n images_path = \"/Users/jessicapetrochuk/Documents/School/UBC/2021-2022/Directed Studies/Code/Detectron_2/myDATASET/images_with_rotations\"\n image_files = os.listdir(images_path)\n image_files_sorted = natsort.natsorted(image_files,reverse=False)\n img_id = 0\n if img_id < 227:\n for image in image_files_sorted:\n record = {}\n if not image.startswith('.'):\n filename = os.path.join(images_path, image)\n \n height, width = cv2.imread(filename).shape[:2]\n record['file_name'] = filename\n record['image_id'] = img_id\n record['height'] = height\n record['width'] = width\n annotations = []\n # fix when there are multiple regions\n annotation_hippocampus = {}\n annotation_hippocampus['bbox'] = bboxes[img_id]\n annotation_hippocampus['bbox_mode'] = BoxMode.XYXY_ABS\n annotation_hippocampus['category_id'] = 0\n annotation_hippocampus['segmentation'] = masks[img_id]\n\n annotations.append(annotation_hippocampus)\n\n record['annotations'] = annotations\n\n dataset_dicts.append(record)\n img_id += 1\n\n return dataset_dicts\n\ndef visualize(dataset_dicts):\n for d in random.sample(dataset_dicts, 3):\n img = cv2.imread(d[\"file_name\"])\n visualizer = Visualizer(img[:, :, ::-1], metadata=None, scale=0.5)\n out = visualizer.draw_dataset_dict(d)\n cv2.imshow('', out.get_image()[:, :, ::-1])\n\ndef train():\n TORCH_VERSION = \".\".join(torch.__version__.split(\".\")[:2])\n CUDA_VERSION = torch.__version__.split(\"+\")[-1]\n print(\"torch: \", TORCH_VERSION, \"; cuda: \", CUDA_VERSION)\n num_imgs = 227\n bboxes, masks = get_masks(num_imgs)\n 
dataset_dicts = get_masks_dict(bboxes, masks)\n\n # for d in [\"train\", \"val\"]:\n for d in [\"train\"]:\n DatasetCatalog.register(\"brain_\" + d, lambda d=d: get_masks_dict(bboxes, masks))\n MetadataCatalog.get(\"brain_\" + d).set(thing_classes=[\"hippocampus\"])\n brain_metadata = MetadataCatalog.get(\"brain_train\")\n\n # DatasetCatalog.register(\"my_dataset\", my_dataset_function)\n\n cfg = get_cfg()\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\n cfg.INPUT.MASK_FORMAT='bitmask'\n cfg.MODEL.DEVICE = 'cpu'\n cfg.DATASETS.TRAIN = (\"brain_train\",)\n cfg.DATASETS.TEST = ()\n cfg.DATALOADER.NUM_WORKERS = 2\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\") # Let training initialize from model zoo\n cfg.SOLVER.IMS_PER_BATCH = 2\n cfg.SOLVER.BASE_LR = 0.00025 # pick a good LR\n cfg.SOLVER.MAX_ITER = 300 # 300 iterations seems good enough for this toy dataset; you will need to train longer for a practical dataset\n cfg.SOLVER.STEPS = [] # do not decay learning rate\n cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # faster, and good enough for this toy dataset (default: 512)\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 # only has one class (ballon). (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)\n # NOTE: this config means the number of classes, but a few popular unofficial tutorials incorrect uses num_classes+1 here.\n\n # os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n # trainer = DefaultTrainer(cfg) \n # trainer.resume_or_load(resume=False)\n # trainer.train()\n\n\n #Inference and evaluation\n # Inference should use the config with parameters that are used in training\n # cfg now already contains everything we've set previously. We changed it a little bit for inference:\n cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\") # path to the model we just trained\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold\n predictor = DefaultPredictor(cfg)\n\n for d in random.sample(dataset_dicts, 3): \n im = cv2.imread(d[\"file_name\"])\n outputs = predictor(im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format\n v = Visualizer(im[:, :, ::-1],\n metadata=brain_metadata, \n scale=0.5, \n instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models\n )\n out = v.draw_instance_predictions(outputs[\"instances\"].to(\"cpu\"))\n print(out.get_image())\n cv2.imshow('', out.get_image()[:, :, ::-1])\n cv2.imwrite('hello.png', out.get_image()[:, :, ::-1])\n #setup_logger()\n\n\nif __name__ == '__main__':\n train()" ]
[ [ "numpy.asarray", "torch.__version__.split", "numpy.where", "torch.tensor" ] ]
eldrin/aarms
[ "bdd5455ac8dcfc1fe91a12fdd132b74e6c37609d" ]
[ "tests/test_rsvd.py" ]
[ "import unittest\n\nimport os\nos.environ['NUMBA_NUM_THREADS'] = '1'\n\nimport numpy as np\nfrom scipy import sparse as sp\n\nfrom aarms.models.rsvd import RSVD, RSVDSPPMI\nfrom aarms.models.transform import sppmi\n\nfrom base_test import TestAARMS\n\n\nclass TestRSVD(TestAARMS):\n \"\"\"\n \"\"\"\n def test_rsvd_factorize(self):\n \"\"\"\n This test function refers a lot from::\n https://github.com/benfred/implicit/blob/master/tests/als_test.py\n \"\"\"\n X = sp.csr_matrix([[1, 1, 0, 1, 0, 0],\n [0, 1, 1, 1, 0, 0],\n [1, 0, 1, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 0, 1],\n [0, 0, 0, 0, 1, 1]])\n\n cases = [dtype for dtype in (np.float32, np.float64)]\n\n for dtype in cases:\n try:\n # Truncated SVD does not accept the full rank k (should be smaller)\n svd = RSVD(k = 6, dtype = dtype)\n svd.fit(X)\n\n except Exception as e:\n self.fail(msg = \"failed for basic user-item factorization: \"\n f\"{e}, dtype={dtype}, \")\n\n Xhat = svd.embeddings_['user'] @ svd.embeddings_['item'].T\n self._compare_recon(X, Xhat, thresh=3e-1, **{'dtype': dtype})\n\n def test_rsvdsppmi_factorize(self):\n \"\"\"\n This test function refers a lot from::\n https://github.com/benfred/implicit/blob/master/tests/als_test.py\n \"\"\"\n X = sp.csr_matrix([[1, 1, 0, 1, 0, 0],\n [0, 1, 1, 1, 0, 0],\n [1, 0, 1, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 0, 1],\n [0, 0, 0, 0, 1, 1]])\n\n cases = [dtype for dtype in (np.float32, np.float64)]\n\n for dtype in cases:\n try:\n svd = RSVDSPPMI(k = 6, dtype = dtype)\n svd.fit(X)\n\n except Exception as e:\n self.fail(msg = \"failed for basic user-item factorization: \"\n f\"{e}, dtype={dtype}, \")\n\n Xhat = svd.embeddings_['user'] @ svd.embeddings_['item'].T\n user_item_sppmi = sppmi(X, svd.kappa)\n self._compare_recon(user_item_sppmi, Xhat,\n thresh=1e-3, **{'dtype': dtype})\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "scipy.sparse.csr_matrix" ] ]
luispedro/SemiBin
[ "7a5c9c68bb29ec27b64d7b34ed88a2eab921314b" ]
[ "integration-tests/generate_data_coassembly_command.py" ]
[ "import os\nimport pandas as pd\nimport subprocess\n\n\n### Input fa\nsubprocess.check_call('SemiBin generate_data_single -i test/coassembly_sample_data/input.fasta -o output_coassembly_fa -m 2500 --ratio 0.05 --ml-threshold 4000 -p 1 -b test/coassembly_sample_data/input.sorted*.bam', shell=True)\n\ndata = pd.read_csv('output_coassembly_fa/data.csv', index_col=0)\ndata_split = pd.read_csv('output_coassembly_fa/data_split.csv', index_col=0)\n\nassert data.shape == (40, 141)\nassert data_split.shape == (80, 141)\n\n\n### Input .gz\nsubprocess.check_call('SemiBin generate_data_single -i test/coassembly_sample_data/input.fasta.gz -o output_coassembly_gz -m 2500 --ratio 0.05 --ml-threshold 4000 -p 1 -b test/coassembly_sample_data/input.sorted*.bam', shell=True)\n\ndata = pd.read_csv('output_coassembly_gz/data.csv', index_col=0)\ndata_split = pd.read_csv('output_coassembly_gz/data_split.csv', index_col=0)\n\nassert data.shape == (40, 141)\nassert data_split.shape == (80, 141)\n\n### Input .bz2\nsubprocess.check_call('SemiBin generate_data_single -i test/coassembly_sample_data/input.fasta.bz2 -o output_coassembly_bz2 -m 2500 --ratio 0.05 --ml-threshold 4000 -p 1 -b test/coassembly_sample_data/input.sorted*.bam', shell=True)\n\ndata = pd.read_csv('output_coassembly_bz2/data.csv', index_col=0)\ndata_split = pd.read_csv('output_coassembly_bz2/data_split.csv', index_col=0)\n\nassert data.shape == (40, 141)\nassert data_split.shape == (80, 141)\n\n### Input .xz\nsubprocess.check_call('SemiBin generate_data_single -i test/coassembly_sample_data/input.fasta.xz -o output_coassembly_xz -m 2500 --ratio 0.05 --ml-threshold 4000 -p 1 -b test/coassembly_sample_data/input.sorted*.bam', shell=True)\n\ndata = pd.read_csv('output_coassembly_xz/data.csv', index_col=0)\ndata_split = pd.read_csv('output_coassembly_xz/data_split.csv', index_col=0)\n\nassert data.shape == (40, 141)\nassert data_split.shape == (80, 141)\n" ]
[ [ "pandas.read_csv" ] ]
rajivmanivannan/facenet
[ "4a896201dba3f8caf64ba4d5004d60eaf9aefd78" ]
[ "src/generative/modify_attribute.py" ]
[ "# MIT License\n# \n# Copyright (c) 2017 David Sandberg\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"Modify attributes of images using attribute vectors calculated using\n'calculate_attribute_vectors.py'. Images are generated from latent variables of\nthe CelebA datasets.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport sys\nimport argparse\nimport importlib\nimport facenet\nimport os\nimport numpy as np\nimport h5py\nimport math\nfrom scipy import misc\n\ndef main(args):\n \n img_mean = np.array([134.10714722, 102.52040863, 87.15436554])\n img_stddev = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016]))\n \n vae_def = importlib.import_module(args.vae_def)\n vae = vae_def.Vae(args.latent_var_size)\n gen_image_size = vae.get_image_size()\n\n with tf.Graph().as_default():\n tf.set_random_seed(args.seed)\n \n images = tf.placeholder(tf.float32, shape=(None,gen_image_size,gen_image_size,3), name='input')\n \n # Normalize\n images_norm = (images-img_mean) / img_stddev\n\n # Resize to appropriate size for the encoder \n images_norm_resize = tf.image.resize_images(images_norm, (gen_image_size,gen_image_size))\n \n # Create encoder network\n mean, log_variance = vae.encoder(images_norm_resize, True)\n \n epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))\n std = tf.exp(log_variance/2)\n latent_var = mean + epsilon * std\n \n # Create decoder\n reconstructed_norm = vae.decoder(latent_var, False)\n \n # Un-normalize\n reconstructed = (reconstructed_norm*img_stddev) + img_mean\n\n # Create a saver\n saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)\n \n # Start running operations on the Graph\n gpu_memory_fraction = 1.0\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n coord = tf.train.Coordinator()\n tf.train.start_queue_runners(coord=coord, sess=sess)\n \n\n with sess.as_default():\n \n vae_checkpoint = os.path.expanduser(args.vae_checkpoint)\n print('Restoring VAE checkpoint: %s' % vae_checkpoint)\n saver.restore(sess, vae_checkpoint)\n \n filename = os.path.expanduser(args.attributes_filename)\n with h5py.File(filename,'r') as f:\n latent_vars = np.array(f.get('latent_vars'))\n attributes = 
np.array(f.get('attributes'))\n #fields = np.array(f.get('fields'))\n attribute_vectors = np.array(f.get('attribute_vectors'))\n\n # Reconstruct faces while adding varying amount of the selected attribute vector\n attribute_index = 31 # 31: 'Smiling'\n image_indices = [8,11,13,18,19,26,31,39,47,54,56,57,58,59,60,73]\n nrof_images = len(image_indices)\n nrof_interp_steps = 10\n sweep_latent_var = np.zeros((nrof_interp_steps*nrof_images, args.latent_var_size), np.float32)\n for j in range(nrof_images):\n image_index = image_indices[j]\n idx = np.argwhere(attributes[:,attribute_index]==-1)[image_index,0]\n for i in range(nrof_interp_steps):\n sweep_latent_var[i+nrof_interp_steps*j,:] = latent_vars[idx,:] + 5.0*i/nrof_interp_steps*attribute_vectors[attribute_index,:]\n \n recon = sess.run(reconstructed, feed_dict={latent_var:sweep_latent_var})\n \n img = facenet.put_images_on_grid(recon, shape=(nrof_interp_steps*2,int(math.ceil(nrof_images/2))))\n \n image_filename = os.path.expanduser(args.output_image_filename)\n print('Writing generated image to %s' % image_filename)\n misc.imsave(image_filename, img)\n\n \ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n \n parser.add_argument('vae_def', type=str,\n help='Model definition for the variational autoencoder. Points to a module containing the definition.')\n parser.add_argument('vae_checkpoint', type=str,\n help='Checkpoint file of a pre-trained variational autoencoder.')\n parser.add_argument('attributes_filename', type=str,\n help='The file containing the attribute vectors, as generated by calculate_attribute_vectors.py.')\n parser.add_argument('output_image_filename', type=str,\n help='File to write the generated image to.')\n parser.add_argument('--latent_var_size', type=int,\n help='Dimensionality of the latent variable.', default=100)\n parser.add_argument('--seed', type=int,\n help='Random seed.', default=666)\n\n return parser.parse_args(argv)\n \n \nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n" ]
[ [ "tensorflow.Graph", "tensorflow.local_variables_initializer", "tensorflow.shape", "scipy.misc.imsave", "tensorflow.image.resize_images", "tensorflow.train.start_queue_runners", "tensorflow.train.Coordinator", "tensorflow.exp", "tensorflow.placeholder", "tensorflow.trainable_variables", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "numpy.argwhere", "tensorflow.GPUOptions", "tensorflow.set_random_seed", "numpy.array", "numpy.zeros" ] ]
dswigh/summit
[ "a1cecdd41df8119005173b46ac45fb22472628d6" ]
[ "summit/strategies/entmoot.py" ]
[ "from summit.strategies.base import Strategy\nfrom summit.domain import *\nfrom summit.utils.dataset import DataSet\n\nimport string\nimport numpy as np\nimport pandas as pd\n\n\nclass ENTMOOT(Strategy):\n \"\"\"\n Single-objective Bayesian optimization, using gradient-boosted trees\n instead of Gaussian processes, via ENTMOOT (ENsemble Tree MOdel Optimization Tool)\n\n This is currently an experimental feature and requires Gurobipy to be installed.\n\n Parameters\n ----------\n domain: :class:`~summit.domain.Domain`\n The Summit domain describing the optimization problem.\n transform : :class:`~summit.strategies.base.Transform`, optional\n A transform object. By default no transformation will be done\n on the input variables or objectives.\n estimator_type: string, optional\n The ENTMOOT base_estimator type.\n By default, Gradient-Boosted Regression\n std_estimator_type: string, optional\n The ENTMOOT std_estimator\n By default, bounded data distance\n acquisition_type: string, optional\n The acquisition function type from ENTMOOT. See notes for options.\n By default, Lower Confidence Bound.\n optimizer_type: string, optional\n The optimizer used in ENTMOOT for maximization of the acquisition function.\n By default, sampling will be used.\n generator_type: string, optional\n The method for generating initial points before a model can be trained.\n By default, uniform random points will be used.\n initial_points: int, optional\n How many points to require before training models\n min_child_samples: int, optional\n Minimum size of a leaf in tree models\n\n Examples\n --------\n >>> from summit.domain import *\n >>> from summit.strategies.entmoot import ENTMOOT\n >>> import numpy as np\n >>> domain = Domain()\n >>> domain += ContinuousVariable(name='temperature', description='reaction temperature in celsius', bounds=[50, 100])\n >>> domain += CategoricalVariable(name='flowrate_a', description='flow of reactant a in mL/min', levels=[1,2,3,4,5])\n >>> domain += ContinuousVariable(name='flowrate_b', description='flow of reactant b in mL/min', bounds=[0.1, 0.5])\n >>> domain += ContinuousVariable(name=\"yld\", description='yield of reaction', bounds=[0,100], is_objective=True)\n >>> # strategy = ENTMOOT(domain)\n >>> # next_experiments = strategy.suggest_experiments(5)\n\n Notes\n ----------\n\n Estimator type can either by GBRT (Gradient-boosted regression trees) or RF (random forest from scikit-learn).\n\n Acquisition function type can only be LCB (lower confidence bound).\n\n Based on the paper from [Thebelt]_ et al.\n\n .. [Thebelt] A. 
Thebelt et al.\n \"ENTMOOT: A Framework for Optimization over Ensemble Tree Models\", `ArXiv <https://arxiv.org/abs/2003.04774>`_\n\n \"\"\"\n\n def __init__(\n self,\n domain,\n transform=None,\n estimator_type=None,\n std_estimator_type=None,\n acquisition_type=None,\n optimizer_type=None,\n generator_type=None,\n initial_points=50,\n min_child_samples=5,\n **kwargs\n ):\n Strategy.__init__(self, domain, transform=transform, **kwargs)\n\n self.use_descriptors = kwargs.get(\"use_descriptors\", False)\n # TODO: notation - discrete in our model (e.g., catalyst type) = categorical?\n self.input_domain = []\n for v in self.domain.variables:\n if not v.is_objective:\n if isinstance(v, ContinuousVariable):\n self.input_domain.append(\n {\n \"name\": v.name,\n \"type\": v.variable_type,\n \"domain\": (v.bounds[0], v.bounds[1]),\n }\n )\n elif isinstance(v, CategoricalVariable):\n raise ValueError(\n \"Categorical Variables are not yet implemented \"\n \"for ENTMOOT strategy.\"\n )\n if not self.use_descriptors:\n self.input_domain.append(\n {\n \"name\": v.name,\n \"type\": \"categorical\",\n \"domain\": tuple(self.categorical_wrapper(v.levels)),\n }\n )\n elif v.ds is not None and self.use_descriptors:\n if v.ds is None:\n raise ValueError(\n \"No descriptors provided for variable: {}\".format(\n v.name\n )\n )\n descriptor_names = v.ds.data_columns\n descriptors = np.asarray(\n [\n v.ds.loc[:, [l]].values.tolist()\n for l in v.ds.data_columns\n ]\n )\n for j, d in enumerate(descriptors):\n self.input_domain.append(\n {\n \"name\": descriptor_names[j],\n \"type\": \"continuous\",\n \"domain\": (\n np.min(np.asarray(d)),\n np.max(np.asarray(d)),\n ),\n }\n )\n elif v.ds is None and self.use_descriptors:\n raise ValueError(\n \"Cannot use descriptors because none are provided.\"\n )\n # TODO: GPyOpt currently does not support mixed-domains w/ bandit inputs, there is a PR for this though\n else:\n raise TypeError(\"Unknown variable type.\")\n\n # TODO: how to handle equality constraints? Could we remove '==' from constraint types as each equality\n # constraint reduces the degrees of freedom?\n if len(self.domain.constraints) != 0:\n self.constraints = self.constr_wrapper(self.domain)\n else:\n self.constraints = None\n\n self.input_dim = len(self.domain.input_variables)\n\n if estimator_type in [\n \"GBRT\",\n \"RF\",\n ]:\n self.estimator_type = estimator_type\n else:\n self.estimator_type = \"GBRT\" # default model type is GB trees\n\n if std_estimator_type in [\n \"BDD\",\n \"L1BDD\",\n \"DDP\",\n \"L1DDP\",\n ]:\n self.std_estimator_type = std_estimator_type\n else:\n self.std_estimator_type = (\n \"BDD\" # default model type is bounded data distance\n )\n\n if acquisition_type in [\n \"LCB\",\n ]:\n self.acquisition_type = acquisition_type\n else:\n self.acquisition_type = (\n \"LCB\" # default acquisition function is lower confidence bound\n )\n\n \"\"\" \n Method for optimization of acquisition function\n sampling: optimized by computing `acquisition_type` at `n_points` \n randomly sampled points\n global: optimized by using global solver to find minimum of \n `acquisition_type`. Requires gurobipy\n \"\"\"\n if optimizer_type in [\"sampling\", \"global\"]:\n self.optimizer_type = optimizer_type\n else:\n self.optimizer_type = \"sampling\" # default optimizer: sampling\n\n if (self.optimizer_type == \"sampling\") & (self.constraints is not None):\n raise ValueError(\n \"Constraints can only be applied when ENTMOOT is using\"\n \"global solver. 
Set optimizer_type = global or remove\"\n \"constraints.\"\n )\n\n import pkg_resources\n\n required = {\"gurobipy\"}\n installed = {pkg.key for pkg in pkg_resources.working_set}\n self.gurobi_missing = required - installed\n\n \"\"\"\n Sets an initial points generator. Can be either\n - \"random\" for uniform random numbers,\n - \"sobol\" for a Sobol sequence,\n - \"halton\" for a Halton sequence,\n - \"hammersly\" for a Hammersly sequence,\n - \"lhs\" for a latin hypercube sequence,\n - \"grid\" for a uniform grid sequence\n \"\"\"\n if generator_type in [\n \"random\",\n \"sobol\",\n \"halton\",\n \"hammersly\",\n \"lhs\",\n \"grid\",\n ]:\n self.generator_type = generator_type\n else:\n self.generator_type = \"random\"\n\n self.initial_points = initial_points\n self.min_child_samples = min_child_samples\n self.prev_param = None\n\n def suggest_experiments(\n self, num_experiments=1, prev_res: DataSet = None, **kwargs\n ):\n \"\"\"Suggest experiments using ENTMOOT tree-based Bayesian Optimization\n\n Parameters\n ----------\n num_experiments: int, optional\n The number of experiments (i.e., samples) to generate. Default is 1.\n prev_res: :class:`~summit.utils.data.DataSet`, optional\n Dataset with data from previous experiments of previous iteration.\n If no data is passed, then random sampling will\n be used to suggest an initial design.\n\n Returns\n -------\n next_experiments : :class:`~summit.utils.data.DataSet`\n A Dataset object with the suggested experiments\n\n \"\"\"\n from entmoot.optimizer.optimizer import Optimizer\n from entmoot.space.space import Space\n\n param = None\n xbest = np.zeros(self.domain.num_continuous_dimensions())\n obj = self.domain.output_variables[0]\n objective_dir = -1.0 if obj.maximize else 1.0\n fbest = float(\"inf\")\n\n bounds = [k[\"domain\"] for k in self.input_domain]\n space = Space(bounds)\n\n if not self.gurobi_missing:\n from gurobipy import LinExpr\n from entmoot.optimizer.gurobi_utils import get_core_gurobi_model\n\n core_model = get_core_gurobi_model(space)\n gvars = core_model.getVars()\n\n for c in self.constraints:\n left = LinExpr()\n left.addTerms(c[0], gvars)\n left.addConstant(c[1])\n core_model.addLConstr(left, c[2], 0)\n\n core_model.update()\n acq_optimizer_kwargs = {\"add_model_core\": core_model}\n else:\n acq_optimizer_kwargs = None\n\n entmoot_model = Optimizer(\n dimensions=bounds,\n base_estimator=self.estimator_type,\n std_estimator=self.std_estimator_type,\n n_initial_points=self.initial_points,\n initial_point_generator=self.generator_type,\n acq_func=self.acquisition_type,\n acq_optimizer=self.optimizer_type,\n random_state=None,\n acq_func_kwargs=None,\n acq_optimizer_kwargs=acq_optimizer_kwargs,\n base_estimator_kwargs={\"min_child_samples\": self.min_child_samples},\n std_estimator_kwargs=None,\n model_queue_size=None,\n verbose=False,\n )\n\n # If we have previous results:\n if prev_res is not None:\n # Get inputs and outputs\n inputs, outputs = self.transform.transform_inputs_outputs(\n prev_res, transform_descriptors=self.use_descriptors\n )\n\n # Set up maximization and minimization by converting maximization to minimization problem\n for v in self.domain.variables:\n if v.is_objective and v.maximize:\n outputs[v.name] = -1 * outputs[v.name]\n if isinstance(v, CategoricalVariable):\n if not self.use_descriptors:\n inputs[v.name] = self.categorical_wrapper(\n inputs[v.name], v.levels\n )\n\n inputs = inputs.to_numpy()\n outputs = outputs.to_numpy()\n\n if self.prev_param is not None:\n X_step = self.prev_param[0]\n 
Y_step = self.prev_param[1]\n\n X_step = np.vstack((X_step, inputs))\n Y_step = np.vstack((Y_step, outputs))\n\n else:\n X_step = inputs\n Y_step = outputs\n # Convert to list form to give to optimizer\n prev_X = [list(x) for x in X_step]\n prev_y = [y for x in Y_step for y in x]\n\n # Train entmoot model\n entmoot_model.tell(prev_X, prev_y, fit=True)\n\n # Store parameters (history of suggested points and function evaluations)\n param = [X_step, Y_step]\n fbest = np.min(Y_step)\n xbest = X_step[np.argmin(Y_step)]\n\n request = np.array(\n entmoot_model.ask(n_points=num_experiments, strategy=\"cl_mean\")\n )\n # Generate DataSet object with variable values of next\n next_experiments = None\n transform_descriptors = False\n if request is not None and len(request) != 0:\n next_experiments = {}\n i_inp = 0\n for v in self.domain.variables:\n if not v.is_objective:\n if isinstance(v, CategoricalVariable):\n if v.ds is None or not self.use_descriptors:\n cat_list = []\n for j, entry in enumerate(request[:, i_inp]):\n cat_list.append(\n self.categorical_unwrap(entry, v.levels)\n )\n next_experiments[v.name] = np.asarray(cat_list)\n i_inp += 1\n else:\n descriptor_names = v.ds.data_columns\n for d in descriptor_names:\n next_experiments[d] = request[:, i_inp]\n i_inp += 1\n transform_descriptors = True\n else:\n next_experiments[v.name] = request[:, i_inp]\n i_inp += 1\n next_experiments = DataSet.from_df(pd.DataFrame(data=next_experiments))\n next_experiments[(\"strategy\", \"METADATA\")] = \"ENTMOOT\"\n\n self.fbest = objective_dir * fbest\n self.xbest = xbest\n self.prev_param = param\n\n # Do any necessary transformation back\n next_experiments = self.transform.un_transform(\n next_experiments, transform_descriptors=self.use_descriptors\n )\n\n return next_experiments\n\n def reset(self):\n \"\"\"Reset the internal parameters\"\"\"\n self.prev_param = None\n\n def constr_wrapper(self, summit_domain):\n v_input_names = [v.name for v in summit_domain.variables if not v.is_objective]\n constraints = []\n for c in summit_domain.constraints:\n tmp_c = c.lhs\n # Split LHS on + signs into fragments\n tmp_p = str.split(tmp_c, \"+\")\n\n tmp_a = []\n for t in tmp_p:\n # For each of the fragments, split on -\n terms = str.split(t, \"-\")\n for i in range(len(terms)):\n if i == 0:\n # If the first part in the fragment is not empty, that\n # means the first term was positive.\n if terms[0] != \"\":\n tmp_a.append(terms[0])\n # All of the terms in the split will have\n # negative coefficients.\n else:\n tmp_a.append(\"-\" + terms[i])\n # Split the terms into coefficients and variables:\n constraint_dict = dict()\n for term in tmp_a:\n for i, char in enumerate(term):\n if char in string.ascii_letters:\n index = i\n c_variable = term[index:]\n if term[:index] == \"\":\n c_coeff = 1.0\n elif term[:index] == \"-\":\n c_coeff = -1.0\n else:\n c_coeff = float(term[:index])\n break\n else:\n c_variable = \"constant\"\n c_coeff = term\n constraint_dict[c_variable] = c_coeff\n # Place coefficients in the variable order the model expects.\n constraints_ordered = []\n for v_input_index, v_input_name in enumerate(v_input_names):\n constraints_ordered.append(constraint_dict.get(v_input_name, 0))\n constraints.append(\n [constraints_ordered, constraint_dict[\"constant\"], c.constraint_type]\n )\n return constraints\n\n def to_dict(self):\n if self.prev_param is not None:\n param = [self.prev_param[0].tolist(), self.prev_param[1].tolist()]\n else:\n param = None\n\n strategy_params = dict(\n prev_param=param,\n 
use_descriptors=self.use_descriptors,\n estimator_type=self.estimator_type,\n std_estimator_type=self.std_estimator_type,\n acquisition_type=self.acquisition_type,\n optimizer_type=self.optimizer_type,\n generator_type=self.generator_type,\n initial_points=self.initial_points,\n min_child_samples=self.min_child_samples,\n )\n\n return super().to_dict(**strategy_params)\n\n @classmethod\n def from_dict(cls, d):\n # Setup ENTMOOT\n entmoot = super().from_dict(d)\n param = d[\"strategy_params\"][\"prev_param\"]\n if param is not None:\n param = [np.array(param[0]), np.array(param[1])]\n entmoot.prev_param = param\n return entmoot\n\n\n\"\"\"\n def categorical_wrapper(self, categories, reference_categories=None):\n if not reference_categories:\n return [i for i, _ in enumerate(categories)]\n else:\n return [reference_categories.index(c) for c in categories]\n\n def categorical_unwrap(self, gpyopt_level, categories):\n return categories[int(gpyopt_level)]\n\"\"\"\n" ]
[ [ "numpy.min", "numpy.asarray", "pandas.DataFrame", "numpy.argmin", "numpy.array", "numpy.vstack" ] ]
AlejandroCN7/sinergym
[ "4e89e478b5c939323e7ddf6a6ecf25a9a13251c6" ]
[ "sinergym/utils/callbacks.py" ]
[ "\"\"\"Custom Callbacks for stable baselines 3 algorithms.\"\"\"\n\nimport os\nfrom typing import Optional, Union\n\nimport gym\nimport numpy as np\nfrom stable_baselines3.common.callbacks import BaseCallback, EvalCallback\nfrom stable_baselines3.common.env_util import is_wrapped\nfrom stable_baselines3.common.vec_env import VecEnv, sync_envs_normalization\n\nfrom sinergym.utils.evaluation import evaluate_policy\nfrom sinergym.utils.wrappers import LoggerWrapper, NormalizeObservation\n\n\nclass LoggerCallback(BaseCallback):\n \"\"\"Custom callback for plotting additional values in tensorboard.\n :param ep_rewards: Here will be stored all rewards during episode.\n :param ep_powers: Here will be stored all consumption data during episode.\n :param ep_term_comfort: Here will be stored all comfort terms (reward component) during episode.\n :param ep_term_energy: Here will be stored all energy terms (reward component) during episode.\n :param num_comfort_violation: Number of timesteps in which comfort has been violated.\n :param ep_timesteps: Each timestep during an episode, this value increment 1.\n \"\"\"\n\n def __init__(self, sinergym_logger=False, verbose=0):\n \"\"\"Custom callback for plotting additional values in tensorboard.\n Args:\n sinergym_logger (boolean): Indicate if CSVLogger inner Sinergym will be activated or not.\n \"\"\"\n super(LoggerCallback, self).__init__(verbose)\n\n self.sinergym_logger = sinergym_logger\n\n self.ep_rewards = []\n self.ep_powers = []\n self.ep_term_comfort = []\n self.ep_term_energy = []\n self.num_comfort_violation = 0\n self.ep_timesteps = 0\n\n def _on_training_start(self):\n # sinergym logger\n if is_wrapped(self.training_env, LoggerWrapper):\n if self.sinergym_logger:\n self.training_env.env_method('activate_logger')\n else:\n self.training_env.env_method('deactivate_logger')\n\n # record method depending on the type of algorithm\n\n if 'OnPolicyAlgorithm' in self.globals.keys():\n self.record = self.logger.record\n elif 'OffPolicyAlgorithm' in self.globals.keys():\n self.record = self.logger.record_mean\n else:\n raise KeyError\n\n def _on_step(self) -> bool:\n info = self.locals['infos'][-1]\n\n # OBSERVATION\n variables = self.training_env.get_attr('variables')[0]['observation']\n # log normalized and original values\n if self.training_env.env_is_wrapped(\n wrapper_class=NormalizeObservation)[0]:\n obs_normalized = self.locals['new_obs'][-1]\n obs = self.training_env.env_method('get_unwrapped_obs')[-1]\n for i, variable in enumerate(variables):\n self.record(\n 'normalized_observation/' + variable, obs_normalized[i])\n self.record(\n 'observation/' + variable, obs[i])\n # Only original values\n else:\n obs = self.locals['new_obs'][-1]\n for i, variable in enumerate(variables):\n self.record(\n 'observation/' + variable, obs[i])\n\n # ACTION\n variables = self.training_env.get_attr('variables')[0]['action']\n action = None\n # sinergym action received inner its own setpoints range\n action_ = info['action_']\n try:\n # network output clipped with gym action space\n action = self.locals['clipped_actions'][-1]\n except KeyError:\n try:\n action = self.locals['action'][-1]\n except KeyError:\n try:\n action = self.locals['actions'][-1]\n except KeyError:\n raise KeyError(\n 'Algorithm action key in locals dict unknown.')\n\n if self.training_env.get_attr('flag_discrete')[0]:\n action = self.training_env.get_attr('action_mapping')[0][action]\n for i, variable in enumerate(variables):\n if action is not None:\n self.record(\n 'action/' + variable, 
action[i])\n\n self.record(\n 'action_simulation/' + variable, action_[i])\n\n # Store episode data\n try:\n self.ep_rewards.append(self.locals['rewards'][-1])\n except KeyError:\n try:\n self.ep_rewards.append(self.locals['reward'][-1])\n except KeyError:\n print('Algorithm reward key in locals dict unknown')\n\n self.ep_powers.append(info['total_power'])\n self.ep_term_comfort.append(info['comfort_penalty'])\n self.ep_term_energy.append(info['total_power_no_units'])\n if(info['comfort_penalty'] != 0):\n self.num_comfort_violation += 1\n self.ep_timesteps += 1\n\n # If episode ends, store summary of episode and reset\n try:\n done = self.locals['dones'][-1]\n except KeyError:\n try:\n done = self.locals['done'][-1]\n except KeyError:\n print('Algorithm done key in locals dict unknown')\n if done:\n # store last episode metrics\n self.episode_metrics = {}\n self.episode_metrics['ep_length'] = self.ep_timesteps\n self.episode_metrics['cumulative_reward'] = np.sum(\n self.ep_rewards)\n self.episode_metrics['mean_reward'] = np.mean(self.ep_rewards)\n self.episode_metrics['mean_power'] = np.mean(self.ep_powers)\n self.episode_metrics['cumulative_power'] = np.sum(self.ep_powers)\n self.episode_metrics['mean_comfort_penalty'] = np.mean(\n self.ep_term_comfort)\n self.episode_metrics['cumulative_comfort_penalty'] = np.sum(\n self.ep_term_comfort)\n self.episode_metrics['mean_power_penalty'] = np.mean(\n self.ep_term_energy)\n self.episode_metrics['cumulative_power_penalty'] = np.sum(\n self.ep_term_energy)\n try:\n self.episode_metrics['comfort_violation_time(%)'] = self.num_comfort_violation / \\\n self.ep_timesteps * 100\n except ZeroDivisionError:\n self.episode_metrics['comfort_violation_time(%)'] = np.nan\n\n # reset episode info\n self.ep_rewards = []\n self.ep_powers = []\n self.ep_term_comfort = []\n self.ep_term_energy = []\n self.ep_timesteps = 0\n self.num_comfort_violation = 0\n\n # During first episode, as it not finished, it shouldn't be recording\n if hasattr(self, 'episode_metrics'):\n for key, metric in self.episode_metrics.items():\n self.logger.record(\n 'episode/' + key, metric)\n\n return True\n\n def on_training_end(self):\n if is_wrapped(self.training_env, LoggerWrapper):\n self.training_env.env_method('activate_logger')\n\n\nclass LoggerEvalCallback(EvalCallback):\n \"\"\"Callback for evaluating an agent.\n :param eval_env: The environment used for initialization\n :param callback_on_new_best: Callback to trigger when there is a new best model according to the ``mean_reward``\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every eval_freq call of the callback.\n :param log_path: Path to a folder where the evaluations (``evaluations.npz``) will be saved. 
It will be updated at each evaluation.\n :param best_model_save_path: Path to a folder where the best model according to performance on the eval env will be saved.\n :param deterministic: Whether the evaluation should use a stochastic or deterministic actions.\n :param render: Whether to render or not the environment during evaluation\n :param verbose:\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n eval_env: Union[gym.Env, VecEnv],\n callback_on_new_best: Optional[BaseCallback] = None,\n n_eval_episodes: int = 5,\n eval_freq: int = 10000,\n log_path: Optional[str] = None,\n best_model_save_path: Optional[str] = None,\n deterministic: bool = True,\n render: bool = False,\n verbose: int = 1,\n warn: bool = True,\n ):\n super(\n LoggerEvalCallback,\n self).__init__(\n eval_env=eval_env,\n callback_on_new_best=callback_on_new_best,\n n_eval_episodes=n_eval_episodes,\n eval_freq=eval_freq,\n log_path=log_path,\n best_model_save_path=best_model_save_path,\n deterministic=deterministic,\n render=render,\n verbose=verbose,\n warn=warn)\n self.evaluations_power_consumption = []\n self.evaluations_comfort_violation = []\n self.evaluations_comfort_penalty = []\n self.evaluations_power_penalty = []\n self.evaluation_metrics = {}\n\n def _on_step(self) -> bool:\n\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n # Sync training and eval env if there is VecNormalize\n sync_envs_normalization(self.training_env, self.eval_env)\n\n # Reset success rate buffer\n self._is_success_buffer = []\n #episodes_rewards, episodes_lengths, episodes_powers, episodes_comfort_violations, episodes_comfort_penalties, episodes_power_penalties\n episodes_data = evaluate_policy(\n self.model,\n self.eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n deterministic=self.deterministic,\n callback=None,\n )\n\n if self.log_path is not None:\n self.evaluations_timesteps.append(self.num_timesteps)\n self.evaluations_results.append(\n episodes_data['episodes_rewards'])\n self.evaluations_length.append(\n episodes_data['episodes_lengths'])\n self.evaluations_power_consumption.append(\n episodes_data['episodes_powers'])\n self.evaluations_comfort_violation.append(\n episodes_data['episodes_comfort_violations'])\n self.evaluations_comfort_penalty.append(\n episodes_data['episodes_comfort_penalties'])\n self.evaluations_power_penalty.append(\n episodes_data['episodes_power_penalties'])\n\n kwargs = {}\n # Save success log if present\n if len(self._is_success_buffer) > 0:\n self.evaluations_successes.append(self._is_success_buffer)\n kwargs = dict(successes=self.evaluations_successes)\n\n np.savez(\n self.log_path,\n timesteps=self.evaluations_timesteps,\n results=self.evaluations_results,\n ep_lengths=self.evaluations_length,\n ep_powers=self.evaluations_power_consumption,\n ep_comfort_violations=self.evaluations_comfort_violation,\n episodes_comfort_penalties=self.evaluations_comfort_penalty,\n episodes_power_penalties=self.evaluations_power_penalty,\n **kwargs,\n )\n\n mean_reward, std_reward = np.mean(\n episodes_data['episodes_rewards']), np.std(\n episodes_data['episodes_rewards'])\n mean_ep_length, std_ep_length = np.mean(\n episodes_data['episodes_lengths']), np.std(\n episodes_data['episodes_lengths'])\n\n self.evaluation_metrics['mean_rewards'] = mean_reward\n self.evaluation_metrics['std_rewards'] = std_reward\n self.evaluation_metrics['mean_ep_length'] = mean_ep_length\n 
self.evaluation_metrics['mean_power_consumption'] = np.mean(\n episodes_data['episodes_powers'])\n self.evaluation_metrics['comfort_violation(%)'] = np.mean(\n episodes_data['episodes_comfort_violations'])\n self.evaluation_metrics['comfort_penalty'] = np.mean(\n episodes_data['episodes_comfort_penalties'])\n self.evaluation_metrics['power_penalty'] = np.mean(\n episodes_data['episodes_power_penalties'])\n\n if self.verbose > 0:\n print(\n f\"Eval num_timesteps={self.num_timesteps}, \"\n f\"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}\")\n print(\n f\"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}\")\n # Add to current Logger\n for key, metric in self.evaluation_metrics.items():\n self.logger.record('eval/' + key, metric)\n\n if len(self._is_success_buffer) > 0:\n success_rate = np.mean(self._is_success_buffer)\n if self.verbose > 0:\n print(f\"Success rate: {100 * success_rate:.2f}%\")\n self.logger.record(\"eval/success_rate\", success_rate)\n\n if mean_reward > self.best_mean_reward:\n if self.verbose > 0:\n print(\"New best mean reward!\")\n if self.best_model_save_path is not None:\n self.model.save(os.path.join(\n self.best_model_save_path, \"best_model\"))\n self.best_mean_reward = mean_reward\n # Trigger callback if needed\n if self.callback is not None:\n return self._on_event()\n\n return True\n" ]
[ [ "numpy.std", "numpy.savez", "numpy.mean", "numpy.sum" ] ]
ka10ryu1/keytouch_cam
[ "042b0caacb5af31cfa9c71ae012d58e798777c8d" ]
[ "Tools/concat.py" ]
[ "#!/usr/bin/env python3\n# -*-coding: utf-8 -*-\n#\nhelp = '複数の画像を任意の行列で結合する'\n#\n\nimport os\nimport sys\nimport cv2\nimport argparse\nimport numpy as np\n\n[sys.path.append(d) for d in ['./Tools/', '../Tools/'] if os.path.isdir(d)]\nimport func as F\nimport imgfunc as IMG\n\n\ndef command():\n parser = argparse.ArgumentParser(description=help)\n parser.add_argument('jpeg', nargs='+',\n help='使用する画像のパス')\n parser.add_argument('--out_path', '-o', default='./result/',\n help='生成物の保存先 [default: ./result/]')\n parser.add_argument('--row', '-r', type=int, default=-1,\n help='画像を連結する行(負数で自動計算) [default: -1]')\n parser.add_argument('--line_width', '-lw', type=int, default=2,\n help='画像を連結する行 [default: 2]')\n parser.add_argument('--resize', '-rs', type=float, default=0.5,\n help='画像の縮尺 [default: 0.5]')\n return parser.parse_args()\n\n\ndef makeDivisorList(num):\n \"\"\"\n 入力された数の約数に1を加えたリストを返す\n [in] num: 約数を計算したい数\n [out] divisor_list: numの約数に1を加えたリスト\n \"\"\"\n\n if num < 1:\n return [0]\n elif num == 1:\n return [1]\n else:\n divisor_list = [i for i in range(2, num // 2 + 1) if num % i == 0]\n divisor_list.append(1)\n\n return divisor_list\n\n\ndef stackImgAndShape(imgs, row):\n \"\"\"\n 画像を縦横に連結するための画像リストと縦横画像数情報を取得する\n [in] imgs: 連結したい入力画像リスト\n [in] row:\n [out] 画像リスト\n [out] 縦横画像数情報\n \"\"\"\n\n # row=0は強制的に1にする\n if row == 0:\n row = 1\n\n # 入力画像リストがrowで割り切れない時用に\n # 黒塗画像を用意するが、3枚の根拠はない\n if row > 3 or 0 > row:\n bk = np.zeros(imgs[0].shape, dtype=np.uint8)\n imgs.append(bk)\n imgs.append(bk)\n imgs.append(bk)\n\n # rowが負数の場合はrowを自動計算する\n if 0 > row:\n # 黒塗画像を0枚含んだ状態でdiv_listが3以上になればdivとimgsを決定\n # div_listが十分でなかった場合、\n # 黒塗画像を1枚含んだ状態でdiv_listが3以上になればdivとimgsを決定\n # これを黒塗画像3枚まで続ける\n for i in range(3, 0, -1):\n # 画像の数で約数を探す\n div_list = makeDivisorList(len(imgs[:-i]))\n if(len(div_list) > 2):\n # rowは約数のリストの中心の値を取得する\n # これにより正方形に近い連結画像が生成できる\n row = div_list[len(div_list) // 2]\n imgs = imgs[:-i]\n break\n\n else:\n img_len = len(imgs) // row * row\n imgs = imgs[:img_len]\n\n return np.array(imgs), np.arange(len(imgs)).reshape(-1, row)\n\n\ndef makeBorder(img, top, bottom, left, right, flg, value=None):\n \"\"\"\n cv2.copyMakeBorder()のラッパー関数なので詳細は省く\n \"\"\"\n\n if flg == cv2.BORDER_CONSTANT:\n return cv2.copyMakeBorder(img, top, bottom, left, right, flg, value=value)\n else:\n return cv2.copyMakeBorder(img, top, bottom, left, right, flg)\n\n\ndef main(args):\n # 画像を読み込む\n imgs = [cv2.imread(name) for name in args.jpeg if IMG.isImgPath(name)]\n # concatするためにすべての画像の高さを統一する\n h = np.max([img.shape[0] for img in imgs])\n imgs = [IMG.resize(img, h / img.shape[0]) for img in imgs]\n # concatするためにすべての画像の幅を統一する\n flg = cv2.BORDER_REFLECT_101\n w = np.max([img.shape[1] for img in imgs])\n imgs = [makeBorder(img, 0, 0, 0, w - img.shape[1], flg) for img in imgs]\n # 画像に黒縁を追加する\n flg = cv2.BORDER_CONSTANT\n lw = args.line_width\n imgs = [makeBorder(img, 0, lw, 0, lw, flg, (0, 0, 0)) for img in imgs]\n # 縦横に連結するための画像リストと縦横情報を取得する\n imgs, size = stackImgAndShape(imgs, args.row)\n # 画像を連結してリサイズする\n buf = [np.vstack(imgs[s]) for s in size]\n img = IMG.resize(np.hstack(buf), args.resize)\n # 連結された画像を保存する\n name = F.getFilePath(args.out_path, 'concat', '.jpg')\n print('save:', name)\n cv2.imwrite(name, img)\n\n\nif __name__ == '__main__':\n args = command()\n F.argsPrint(args)\n main(args)\n" ]
[ [ "numpy.hstack", "numpy.max", "numpy.array", "numpy.zeros", "numpy.vstack" ] ]
cclai999/pyxl-stock
[ "3c0bb2f3e17f88770d16e9cb7171d56757a451b4" ]
[ "p1-stock-code/stock-code.py" ]
[ "import pandas as pd\nfrom openpyxl import load_workbook\n\nfrom tools import get_html_to_file\n\nurl1 = \"https://isin.twse.com.tw/isin/class_main.jsp?owncode=&stockname=&isincode=&market=1&issuetype=1&industry_code=&Page=1&chklike=Y\"\nurl2 = \"https://isin.twse.com.tw/isin/class_main.jsp?owncode=&stockname=&isincode=&market=2&issuetype=4&industry_code=&Page=1&chklike=Y\"\n\n\ndef get_stock_code(html_fname):\n f = open(html_fname, \"r\")\n stk_code_html = f.read()\n f.close()\n dfs = pd.read_html(stk_code_html)\n stk = dfs[0].loc[1:, :]\n compact_stk = stk[[2, 3, 7, 4, 6]]\n return compact_stk\n\n\ndef insert_stock_code_to_excel(stk_df, stk_code_sheet):\n for index, row in stk_df.iterrows():\n r = row.tolist()\n # print(r)\n stk_code_sheet.append(r)\n\n\ndef recod_html():\n get_html_to_file(url1, \"stk_code1_html.txt\")\n get_html_to_file(url2, \"stk_code2_html.txt\")\n\n\nif __name__ == '__main__':\n if 1 == 2:\n recod_html()\n workbook = load_workbook(filename=\"stock_code_blank.xlsx\")\n stk_code_sheet = workbook[\"stk_code\"]\n\n stk_df = get_stock_code(\"stk_code1_html.txt\")\n insert_stock_code_to_excel(stk_df, stk_code_sheet)\n\n stk_df = get_stock_code(\"stk_code2_html.txt\")\n insert_stock_code_to_excel(stk_df, stk_code_sheet)\n\n workbook.save(\"stock_code.xlsx\")\n" ]
[ [ "pandas.read_html" ] ]
amazing89/mathtoolbox
[ "8904bb06ced2ac501594f9574ef1ba3454b8e38e" ]
[ "python-examples/classical-mds-image.py" ]
[ "import pymathtoolbox\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport seaborn as sns\nfrom PIL import Image\nfrom scipy.spatial.distance import pdist, squareform\n\n# Load an image\nasset_dir_path = os.path.dirname(os.path.abspath(__file__)) + \"/assets\"\nimage = Image.open(asset_dir_path + \"/autumn-leaves.jpg\")\nresized_image = image.resize((30, 20), Image.BILINEAR)\n\n# Generate a color array\ncolors = np.asarray(resized_image)\ncolors = colors.reshape(colors.shape[0] * colors.shape[1], 3) / 255.0\n\n# Generate a distance matrix\nD = squareform(pdist(colors))\n\n# Compute metric MDS (embedding into a 2-dimensional space)\nX = pymathtoolbox.compute_classical_mds(D=D, dim=2)\n\n# Define constants for plot\nFIG_SIZE = (8, 3)\nIMAGE_FORMAT = \"png\"\nDPI = 200\n\n# Set style\nsns.set()\nsns.set_context()\nplt.rcParams['font.sans-serif'] = [\"Linux Biolinum O\", \"Linux Biolinum\"]\n\n# Draw plot\nfig = plt.figure(figsize=FIG_SIZE, dpi=DPI)\n\nax = fig.add_subplot(1, 2, 1)\nax.set_title(\"Target Image\")\nax.set_xticklabels([])\nax.set_yticklabels([])\nax.set_xticks([])\nax.set_yticks([])\nax.imshow(image)\n\nax = fig.add_subplot(1, 2, 2)\nax.set_title(\"Pixel Colors Embedded into a 2D Space\")\nax.set_xticklabels([])\nax.set_yticklabels([])\nax.set_aspect(\"equal\", adjustable=\"datalim\")\nnum_pixels = colors.shape[0]\nfor i in range(num_pixels):\n ax.plot(X[0][i], X[1][i], color=colors[i], marker=\".\")\n\n# Export plot\nfig.tight_layout()\nfig.savefig(\"./classical-mds-image-out.\" + IMAGE_FORMAT)\n" ]
[ [ "numpy.asarray", "scipy.spatial.distance.pdist", "matplotlib.pyplot.figure" ] ]
claytonkanderson/SimplyRL
[ "3e808f519f174d081c80c04a8adba88cf93c9c9d" ]
[ "Source/GridWorldDrivingDQNMain.py" ]
[ "import numpy as np\nimport gym\nfrom time import time\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import TensorBoard\n\nfrom rl.agents.dqn import DQNAgent\nfrom rl.policy import BoltzmannQPolicy\nfrom rl.memory import SequentialMemory\n\n\nENV_NAME = 'GridWorldDrivingDiscrete-v0'\n\n# Get the environment and extract the number of actions.\nenv = gym.make(ENV_NAME)\nnp.random.seed(123)\nenv.seed(123)\nnb_actions = env.action_space.n\n\n# Next, we build a very simple model.\nmodel = Sequential()\nmodel.add(Flatten(input_shape=(1,) + env.observation_space.shape))\nmodel.add(Dense(16))\nmodel.add(Activation('relu'))\nmodel.add(Dense(16))\nmodel.add(Activation('relu'))\nmodel.add(Dense(16))\nmodel.add(Activation('relu'))\nmodel.add(Dense(nb_actions))\nmodel.add(Activation('linear'))\nprint(model.summary())\n\n# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and\n# even the metrics!\nmemory = SequentialMemory(limit=100000, window_length=1)\npolicy = BoltzmannQPolicy()\ndqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=32,\n target_model_update=1e-2, policy=policy)\ndqn.compile(Adam(lr=1e-3), metrics=['mae'])\n\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\n\n# Okay, now it's time to learn something! We visualize the training here for show, but this\n# slows down training quite a lot. You can always safely abort the training prematurely using\n# Ctrl + C.\n#dqn.fit(env, nb_steps=300000, visualize=False, verbose=2, nb_max_episode_steps=100, callbacks=[tensorboard])\n\ndqn.load_weights('dqn_GridWorldDrivingDiscrete-v0_weights.h5f')\n\n# After training is done, we save the final weights.\n#dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)\n\n# Finally, evaluate our algorithm for 5 episodes.\ndqn.test(env, nb_episodes=25, visualize=True)\n" ]
[ [ "numpy.random.seed" ] ]
website-fingerprinting/minipatch
[ "682d86c0eca5331a8c001e83003cf79b5d4b1a78" ]
[ "dual_annealing.py" ]
[ "# Dual Annealing implementation.\n# Copyright (c) 2018 Sylvain Gubian <[email protected]>,\n# Yang Xiang <[email protected]>\n# Author: Sylvain Gubian, Yang Xiang, PMP S.A.\n\n\"\"\"\nA slight modification to Scipy's implementation of simulated annealing.\nFix bug in implementation of formula in p. 398 of reference [2] (line 273).\nAdd function to display energy at each iteration by parameter 'disp' (line 679).\n\nTaken from scipy==1.4.1\n----------\n\nA Dual Annealing global optimization algorithm\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom scipy.optimize import OptimizeResult\nfrom scipy.optimize import minimize\nfrom scipy.special import gammaln\nfrom scipy._lib._util import check_random_state\n\n\n__all__ = ['dual_annealing']\n\n\nclass VisitingDistribution(object):\n \"\"\"\n Class used to generate new coordinates based on the distorted\n Cauchy-Lorentz distribution. Depending on the steps within the strategy\n chain, the class implements the strategy for generating new location\n changes.\n\n Parameters\n ----------\n lb : array_like\n A 1-D numpy ndarray containing lower bounds of the generated\n components. Neither NaN or inf are allowed.\n ub : array_like\n A 1-D numpy ndarray containing upper bounds for the generated\n components. Neither NaN or inf are allowed.\n visiting_param : float\n Parameter for visiting distribution. Default value is 2.62.\n Higher values give the visiting distribution a heavier tail, this\n makes the algorithm jump to a more distant region.\n The value range is (0, 3]. It's value is fixed for the life of the\n object.\n rand_state : `~numpy.random.mtrand.RandomState` object\n A `~numpy.random.mtrand.RandomState` object for using the current state\n of the created random generator container.\n \"\"\"\n TAIL_LIMIT = 1.e8\n MIN_VISIT_BOUND = 1.e-10\n\n def __init__(self, lb, ub, visiting_param, rand_state):\n # if you wish to make _visiting_param adjustable during the life of\n # the object then _factor2, _factor3, _factor5, _d1, _factor6 will\n # have to be dynamically calculated in `visit_fn`. 
They're factored\n # out here so they don't need to be recalculated all the time.\n self._visiting_param = visiting_param\n self.rand_state = rand_state\n self.lower = lb\n self.upper = ub\n self.bound_range = ub - lb\n\n # these are invariant numbers unless visiting_param changes\n self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(\n self._visiting_param - 1.0))\n self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)\n / (self._visiting_param - 1.0))\n self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (\n 3.0 - self._visiting_param))\n\n self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5\n self._d1 = 2.0 - self._factor5\n self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(\n np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))\n\n def visiting(self, x, step, temperature):\n \"\"\" Based on the step in the strategy chain, new coordinated are\n generated by changing all components is the same time or only\n one of them, the new values are computed with visit_fn method\n \"\"\"\n dim = x.size\n if step < dim:\n # Changing all coordinates with a new visiting value\n visits = self.visit_fn(temperature, dim)\n upper_sample = self.rand_state.random_sample()\n lower_sample = self.rand_state.random_sample()\n visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample\n visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample\n x_visit = visits + x\n a = x_visit - self.lower\n b = np.fmod(a, self.bound_range) + self.bound_range\n x_visit = np.fmod(b, self.bound_range) + self.lower\n x_visit[np.fabs(\n x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10\n else:\n # Changing only one coordinate at a time based on strategy\n # chain step\n x_visit = np.copy(x)\n visit = self.visit_fn(temperature, 1)\n if visit > self.TAIL_LIMIT:\n visit = self.TAIL_LIMIT * self.rand_state.random_sample()\n elif visit < -self.TAIL_LIMIT:\n visit = -self.TAIL_LIMIT * self.rand_state.random_sample()\n index = step - dim\n x_visit[index] = visit + x[index]\n a = x_visit[index] - self.lower[index]\n b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]\n x_visit[index] = np.fmod(b, self.bound_range[\n index]) + self.lower[index]\n if np.fabs(x_visit[index] - self.lower[\n index]) < self.MIN_VISIT_BOUND:\n x_visit[index] += self.MIN_VISIT_BOUND\n return x_visit\n\n def visit_fn(self, temperature, dim):\n \"\"\" Formula Visita from p. 405 of reference [2] \"\"\"\n x, y = self.rand_state.normal(size=(dim, 2)).T\n\n factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))\n factor4 = self._factor4_p * factor1\n\n # sigmax\n x *= np.exp(-(self._visiting_param - 1.0) * np.log(\n self._factor6 / factor4) / (3.0 - self._visiting_param))\n\n den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /\n (3.0 - self._visiting_param))\n\n return x / den\n\n\nclass EnergyState(object):\n \"\"\"\n Class used to record the energy state. At any time, it knows what is the\n currently used coordinates and the most recent best location.\n\n Parameters\n ----------\n lower : array_like\n A 1-D numpy ndarray containing lower bounds for generating an initial\n random components in the `reset` method.\n upper : array_like\n A 1-D numpy ndarray containing upper bounds for generating an initial\n random components in the `reset` method\n components. 
Neither NaN or inf are allowed.\n callback : callable, ``callback(x, f, context)``, optional\n A callback function which will be called for all minima found.\n ``x`` and ``f`` are the coordinates and function value of the\n latest minimum found, and `context` has value in [0, 1, 2]\n \"\"\"\n # Maximimum number of trials for generating a valid starting point\n MAX_REINIT_COUNT = 1000\n\n def __init__(self, lower, upper, callback=None):\n self.ebest = None\n self.current_energy = None\n self.current_location = None\n self.xbest = None\n self.lower = lower\n self.upper = upper\n self.callback = callback\n\n def reset(self, func_wrapper, rand_state, x0=None):\n \"\"\"\n Initialize current location is the search domain. If `x0` is not\n provided, a random location within the bounds is generated.\n \"\"\"\n if x0 is None:\n self.current_location = self.lower + rand_state.random_sample(\n len(self.lower)) * (self.upper - self.lower)\n else:\n self.current_location = np.copy(x0)\n init_error = True\n reinit_counter = 0\n while init_error:\n self.current_energy = func_wrapper.fun(self.current_location)\n if self.current_energy is None:\n raise ValueError('Objective function is returning None')\n if (not np.isfinite(self.current_energy) or np.isnan(\n self.current_energy)):\n if reinit_counter >= EnergyState.MAX_REINIT_COUNT:\n init_error = False\n message = (\n 'Stopping algorithm because function '\n 'create NaN or (+/-) infinity values even with '\n 'trying new random parameters'\n )\n raise ValueError(message)\n self.current_location = self.lower + rand_state.random_sample(\n self.lower.size) * (self.upper - self.lower)\n reinit_counter += 1\n else:\n init_error = False\n # If first time reset, initialize ebest and xbest\n if self.ebest is None and self.xbest is None:\n self.ebest = self.current_energy\n self.xbest = np.copy(self.current_location)\n # Otherwise, we keep them in case of reannealing reset\n\n def update_best(self, e, x, context):\n self.ebest = e\n self.xbest = np.copy(x)\n if self.callback is not None:\n val = self.callback(x, e, context)\n if val is not None:\n if val:\n return('Callback function requested to stop early by '\n 'returning True')\n\n def update_current(self, e, x):\n self.current_energy = e\n self.current_location = np.copy(x)\n\n\nclass StrategyChain(object):\n \"\"\"\n Class that implements within a Markov chain the strategy for location\n acceptance and local search decision making.\n\n Parameters\n ----------\n acceptance_param : float\n Parameter for acceptance distribution. It is used to control the\n probability of acceptance. The lower the acceptance parameter, the\n smaller the probability of acceptance. 
Default value is -5.0 with\n a range (-1e4, -5].\n visit_dist : VisitingDistribution\n Instance of `VisitingDistribution` class.\n func_wrapper : ObjectiveFunWrapper\n Instance of `ObjectiveFunWrapper` class.\n minimizer_wrapper: LocalSearchWrapper\n Instance of `LocalSearchWrapper` class.\n rand_state : `~numpy.random.mtrand.RandomState` object\n A `~numpy.random.mtrand.RandomState` object for using the current state\n of the created random generator container.\n energy_state: EnergyState\n Instance of `EnergyState` class.\n \"\"\"\n def __init__(self, acceptance_param, visit_dist, func_wrapper,\n minimizer_wrapper, rand_state, energy_state):\n # Local strategy chain minimum energy and location\n self.emin = energy_state.current_energy\n self.xmin = np.array(energy_state.current_location)\n # Global optimizer state\n self.energy_state = energy_state\n # Acceptance parameter\n self.acceptance_param = acceptance_param\n # Visiting distribution instance\n self.visit_dist = visit_dist\n # Wrapper to objective function\n self.func_wrapper = func_wrapper\n # Wrapper to the local minimizer\n self.minimizer_wrapper = minimizer_wrapper\n self.not_improved_idx = 0\n self.not_improved_max_idx = 1000\n self._rand_state = rand_state\n self.temperature_step = 0\n self.K = 100 * len(energy_state.current_location)\n\n def accept_reject(self, j, e, x_visit):\n r = self._rand_state.random_sample()\n # pqv_temp = (self.acceptance_param - 1.0) * (\n # e - self.energy_state.current_energy) / (\n # self.temperature_step + 1.)\n ##############\n ## CHANGES: fix implementation bug from p. 398 of reference [2]\n ##############\n pqv_temp = 1.0 - ((1.0 - self.acceptance_param) *\n (e - self.energy_state.current_energy) / self.temperature_step)\n if pqv_temp <= 0.:\n pqv = 0.\n else:\n pqv = np.exp(np.log(pqv_temp) / (\n 1. 
- self.acceptance_param))\n if r <= pqv:\n # We accept the new location and update state\n self.energy_state.update_current(e, x_visit)\n self.xmin = np.copy(self.energy_state.current_location)\n\n # No improvement for a long time\n if self.not_improved_idx >= self.not_improved_max_idx:\n if j == 0 or self.energy_state.current_energy < self.emin:\n self.emin = self.energy_state.current_energy\n self.xmin = np.copy(self.energy_state.current_location)\n\n def run(self, step, temperature):\n self.temperature_step = temperature / float(step + 1)\n self.not_improved_idx += 1\n for j in range(self.energy_state.current_location.size * 2):\n if j == 0:\n if step == 0:\n self.energy_state_improved = True\n else:\n self.energy_state_improved = False\n x_visit = self.visit_dist.visiting(\n self.energy_state.current_location, j, temperature)\n # Calling the objective function\n e = self.func_wrapper.fun(x_visit)\n if e < self.energy_state.current_energy:\n # We have got a better energy value\n self.energy_state.update_current(e, x_visit)\n if e < self.energy_state.ebest:\n val = self.energy_state.update_best(e, x_visit, 0)\n if val is not None:\n if val:\n return val\n self.energy_state_improved = True\n self.not_improved_idx = 0\n else:\n # We have not improved but do we accept the new location?\n self.accept_reject(j, e, x_visit)\n if self.func_wrapper.nfev >= self.func_wrapper.maxfun:\n return ('Maximum number of function call reached '\n 'during annealing')\n # End of StrategyChain loop\n\n def local_search(self):\n # Decision making for performing a local search\n # based on strategy chain results\n # If energy has been improved or no improvement since too long,\n # performing a local search with the best strategy chain location\n if self.energy_state_improved:\n # Global energy has improved, let's see if LS improves further\n e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,\n self.energy_state.ebest)\n if e < self.energy_state.ebest:\n self.not_improved_idx = 0\n val = self.energy_state.update_best(e, x, 1)\n if val is not None:\n if val:\n return val\n self.energy_state.update_current(e, x)\n if self.func_wrapper.nfev >= self.func_wrapper.maxfun:\n return ('Maximum number of function call reached '\n 'during local search')\n # Check probability of a need to perform a LS even if no improvement\n do_ls = False\n if self.K < 90 * len(self.energy_state.current_location):\n pls = np.exp(self.K * (\n self.energy_state.ebest - self.energy_state.current_energy) /\n self.temperature_step)\n if pls >= self._rand_state.random_sample():\n do_ls = True\n # Global energy not improved, let's see what LS gives\n # on the best strategy chain location\n if self.not_improved_idx >= self.not_improved_max_idx:\n do_ls = True\n if do_ls:\n e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)\n self.xmin = np.copy(x)\n self.emin = e\n self.not_improved_idx = 0\n self.not_improved_max_idx = self.energy_state.current_location.size\n if e < self.energy_state.ebest:\n val = self.energy_state.update_best(\n self.emin, self.xmin, 2)\n if val is not None:\n if val:\n return val\n self.energy_state.update_current(e, x)\n if self.func_wrapper.nfev >= self.func_wrapper.maxfun:\n return ('Maximum number of function call reached '\n 'during dual annealing')\n\n\nclass ObjectiveFunWrapper(object):\n\n def __init__(self, func, maxfun=1e7, *args):\n self.func = func\n self.args = args\n # Number of objective function evaluations\n self.nfev = 0\n # Number of gradient function evaluation if used\n 
self.ngev = 0\n # Number of hessian of the objective function if used\n self.nhev = 0\n self.maxfun = maxfun\n\n def fun(self, x):\n self.nfev += 1\n return self.func(x, *self.args)\n\n\nclass LocalSearchWrapper(object):\n \"\"\"\n Class used to wrap around the minimizer used for local search\n Default local minimizer is SciPy minimizer L-BFGS-B\n \"\"\"\n\n LS_MAXITER_RATIO = 6\n LS_MAXITER_MIN = 100\n LS_MAXITER_MAX = 1000\n\n def __init__(self, bounds, func_wrapper, **kwargs):\n self.func_wrapper = func_wrapper\n self.kwargs = kwargs\n self.minimizer = minimize\n bounds_list = list(zip(*bounds))\n self.lower = np.array(bounds_list[0])\n self.upper = np.array(bounds_list[1])\n\n # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method\n if not self.kwargs:\n n = len(self.lower)\n ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,\n self.LS_MAXITER_MIN),\n self.LS_MAXITER_MAX)\n self.kwargs['method'] = 'L-BFGS-B'\n self.kwargs['options'] = {\n 'maxiter': ls_max_iter,\n }\n self.kwargs['bounds'] = list(zip(self.lower, self.upper))\n\n def local_search(self, x, e):\n # Run local search from the given x location where energy value is e\n x_tmp = np.copy(x)\n mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)\n if 'njev' in mres.keys():\n self.func_wrapper.ngev += mres.njev\n if 'nhev' in mres.keys():\n self.func_wrapper.nhev += mres.nhev\n # Check if is valid value\n is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)\n in_bounds = np.all(mres.x >= self.lower) and np.all(\n mres.x <= self.upper)\n is_valid = is_finite and in_bounds\n\n # Use the new point only if it is valid and return a better results\n if is_valid and mres.fun < e:\n return mres.fun, mres.x\n else:\n return e, x_tmp\n\n\ndef dual_annealing(func, bounds, args=(), maxiter=1000,\n local_search_options={}, initial_temp=5230.,\n restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,\n maxfun=1e7, seed=None, no_local_search=False,\n callback=None, x0=None, disp=False):\n \"\"\"\n Find the global minimum of a function using Dual Annealing.\n\n Parameters\n ----------\n func : callable\n The objective function to be minimized. Must be in the form\n ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array\n and ``args`` is a tuple of any additional fixed parameters needed to\n completely specify the function.\n bounds : sequence, shape (n, 2)\n Bounds for variables. ``(min, max)`` pairs for each element in ``x``,\n defining bounds for the objective function parameter.\n args : tuple, optional\n Any additional fixed parameters needed to completely specify the\n objective function.\n maxiter : int, optional\n The maximum number of global search iterations. Default value is 1000.\n local_search_options : dict, optional\n Extra keyword arguments to be passed to the local minimizer\n (`minimize`). Some important options could be:\n ``method`` for the minimizer method to use and ``args`` for\n objective function additional arguments.\n initial_temp : float, optional\n The initial temperature, use higher values to facilitates a wider\n search of the energy landscape, allowing dual_annealing to escape\n local minima that it is trapped in. Default value is 5230. Range is\n (0.01, 5.e4].\n restart_temp_ratio : float, optional\n During the annealing process, temperature is decreasing, when it\n reaches ``initial_temp * restart_temp_ratio``, the reannealing process\n is triggered. Default value of the ratio is 2e-5. 
Range is (0, 1).\n visit : float, optional\n Parameter for visiting distribution. Default value is 2.62. Higher\n values give the visiting distribution a heavier tail, this makes\n the algorithm jump to a more distant region. The value range is (0, 3].\n accept : float, optional\n Parameter for acceptance distribution. It is used to control the\n probability of acceptance. The lower the acceptance parameter, the\n smaller the probability of acceptance. Default value is -5.0 with\n a range (-1e4, -5].\n maxfun : int, optional\n Soft limit for the number of objective function calls. If the\n algorithm is in the middle of a local search, this number will be\n exceeded, the algorithm will stop just after the local search is\n done. Default value is 1e7.\n seed : {int or `~numpy.random.mtrand.RandomState` instance}, optional\n If `seed` is not specified the `~numpy.random.mtrand.RandomState`\n singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used,\n seeded with `seed`.\n If `seed` is already a ``RandomState`` instance, then that\n instance is used.\n Specify `seed` for repeatable minimizations. The random numbers\n generated with this seed only affect the visiting distribution\n function and new coordinates generation.\n no_local_search : bool, optional\n If `no_local_search` is set to True, a traditional Generalized\n Simulated Annealing will be performed with no local search\n strategy applied.\n callback : callable, optional\n A callback function with signature ``callback(x, f, context)``,\n which will be called for all minima found.\n ``x`` and ``f`` are the coordinates and function value of the\n latest minimum found, and ``context`` has value in [0, 1, 2], with the\n following meaning:\n\n - 0: minimum detected in the annealing process.\n - 1: detection occurred in the local search process.\n - 2: detection done in the dual annealing process.\n\n If the callback implementation returns True, the algorithm will stop.\n x0 : ndarray, shape(n,), optional\n Coordinates of a single n-dimensional starting point.\n\n Returns\n -------\n res : OptimizeResult\n The optimization result represented as a `OptimizeResult` object.\n Important attributes are: ``x`` the solution array, ``fun`` the value\n of the function at the solution, and ``message`` which describes the\n cause of the termination.\n See `OptimizeResult` for a description of other attributes.\n\n Notes\n -----\n This function implements the Dual Annealing optimization. This stochastic\n approach derived from [3]_ combines the generalization of CSA (Classical\n Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled\n to a strategy for applying a local search on accepted locations [4]_.\n An alternative implementation of this same algorithm is described in [5]_\n and benchmarks are presented in [6]_. This approach introduces an advanced\n method to refine the solution found by the generalized annealing\n process. This algorithm uses a distorted Cauchy-Lorentz visiting\n distribution, with its shape controlled by the parameter :math:`q_{v}`\n\n .. math::\n\n g_{q_{v}}(\\\\Delta x(t)) \\\\propto \\\\frac{ \\\\\n \\\\left[T_{q_{v}}(t) \\\\right]^{-\\\\frac{D}{3-q_{v}}}}{ \\\\\n \\\\left[{1+(q_{v}-1)\\\\frac{(\\\\Delta x(t))^{2}} { \\\\\n \\\\left[T_{q_{v}}(t)\\\\right]^{\\\\frac{2}{3-q_{v}}}}}\\\\right]^{ \\\\\n \\\\frac{1}{q_{v}-1}+\\\\frac{D-1}{2}}}\n\n Where :math:`t` is the artificial time. 
This visiting distribution is used\n to generate a trial jump distance :math:`\\\\Delta x(t)` of variable\n :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.\n\n From the starting point, after calling the visiting distribution\n function, the acceptance probability is computed as follows:\n\n .. math::\n\n p_{q_{a}} = \\\\min{\\\\{1,\\\\left[1-(1-q_{a}) \\\\beta \\\\Delta E \\\\right]^{ \\\\\n \\\\frac{1}{1-q_{a}}}\\\\}}\n\n Where :math:`q_{a}` is a acceptance parameter. For :math:`q_{a}<1`, zero\n acceptance probability is assigned to the cases where\n\n .. math::\n\n [1-(1-q_{a}) \\\\beta \\\\Delta E] < 0\n\n The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to\n\n .. math::\n\n T_{q_{v}}(t) = T_{q_{v}}(1) \\\\frac{2^{q_{v}-1}-1}{\\\\left( \\\\\n 1 + t\\\\right)^{q_{v}-1}-1}\n\n Where :math:`q_{v}` is the visiting parameter.\n\n .. versionadded:: 1.2.0\n\n References\n ----------\n .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs\n statistics. Journal of Statistical Physics, 52, 479-487 (1998).\n .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.\n Physica A, 233, 395-406 (1996).\n .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated\n Annealing Algorithm and Its Application to the Thomson Model.\n Physics Letters A, 233, 216-220 (1997).\n .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated\n Annealing. Physical Review E, 62, 4473 (2000).\n .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized\n Simulated Annealing for Efficient Global Optimization: the GenSA\n Package for R. The R Journal, Volume 5/1 (2013).\n .. [6] Mullen, K. Continuous Global Optimization in R. Journal of\n Statistical Software, 60(6), 1 - 45, (2014). DOI:10.18637/jss.v060.i06\n\n Examples\n --------\n The following example is a 10-dimensional problem, with many local minima.\n The function involved is called Rastrigin\n (https://en.wikipedia.org/wiki/Rastrigin_function)\n\n >>> from scipy.optimize import dual_annealing\n >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)\n >>> lw = [-5.12] * 10\n >>> up = [5.12] * 10\n >>> ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)\n >>> print(\"global minimum: xmin = {0}, f(xmin) = {1:.6f}\".format(\n ... ret.x, ret.fun))\n global minimum: xmin = [-4.26437714e-09 -3.91699361e-09 -1.86149218e-09 -3.97165720e-09\n -6.29151648e-09 -6.53145322e-09 -3.93616815e-09 -6.55623025e-09\n -6.05775280e-09 -5.00668935e-09], f(xmin) = 0.000000\n\n \"\"\" # noqa: E501\n if x0 is not None and not len(x0) == len(bounds):\n raise ValueError('Bounds size does not match x0')\n\n lu = list(zip(*bounds))\n lower = np.array(lu[0])\n upper = np.array(lu[1])\n # Check that restart temperature ratio is correct\n if restart_temp_ratio <= 0. 
or restart_temp_ratio >= 1.:\n raise ValueError('Restart temperature ratio has to be in range (0, 1)')\n # Checking bounds are valid\n if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(\n np.isnan(lower)) or np.any(np.isnan(upper))):\n raise ValueError('Some bounds values are inf values or nan values')\n # Checking that bounds are consistent\n if not np.all(lower < upper):\n raise ValueError('Bounds are not consistent min < max')\n # Checking that bounds are the same length\n if not len(lower) == len(upper):\n raise ValueError('Bounds do not have the same dimensions')\n\n # Wrapper for the objective function\n func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)\n # Wrapper fot the minimizer\n minimizer_wrapper = LocalSearchWrapper(\n bounds, func_wrapper, **local_search_options)\n # Initialization of RandomState for reproducible runs if seed provided\n rand_state = check_random_state(seed)\n # Initialization of the energy state\n energy_state = EnergyState(lower, upper, callback)\n energy_state.reset(func_wrapper, rand_state, x0)\n # Minimum value of annealing temperature reached to perform\n # re-annealing\n temperature_restart = initial_temp * restart_temp_ratio\n # VisitingDistribution instance\n visit_dist = VisitingDistribution(lower, upper, visit, rand_state)\n # Strategy chain instance\n strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,\n minimizer_wrapper, rand_state, energy_state)\n need_to_stop = False\n iteration = 0\n message = []\n # OptimizeResult object to be returned\n optimize_res = OptimizeResult()\n optimize_res.success = True\n optimize_res.status = 0\n\n t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0\n # Run the search loop\n while(not need_to_stop):\n for i in range(maxiter):\n # Compute temperature for this step\n s = float(i) + 2.0\n t2 = np.exp((visit - 1) * np.log(s)) - 1.0\n temperature = initial_temp * t1 / t2\n if iteration >= maxiter:\n message.append(\"Maximum number of iteration reached\")\n need_to_stop = True\n break\n # Need a re-annealing process?\n if temperature < temperature_restart:\n energy_state.reset(func_wrapper, rand_state)\n break\n ##############\n ## CHANGES: display energy per iteration\n ##############\n if disp:\n print(\"dual_annealing step %d: f(x)= %g\"\n % (i, energy_state.ebest))\n # starting strategy chain\n val = strategy_chain.run(i, temperature)\n if val is not None:\n message.append(val)\n need_to_stop = True\n optimize_res.success = False\n break\n # Possible local search at the end of the strategy chain\n if not no_local_search:\n val = strategy_chain.local_search()\n if val is not None:\n message.append(val)\n need_to_stop = True\n optimize_res.success = False\n break\n iteration += 1\n\n # Setting the OptimizeResult values\n optimize_res.x = energy_state.xbest\n optimize_res.fun = energy_state.ebest\n optimize_res.nit = iteration\n optimize_res.nfev = func_wrapper.nfev\n optimize_res.njev = func_wrapper.ngev\n optimize_res.nhev = func_wrapper.nhev\n optimize_res.message = message\n return optimize_res\n" ]
[ [ "numpy.fmod", "scipy.optimize.OptimizeResult", "numpy.log", "numpy.sqrt", "numpy.isfinite", "numpy.isnan", "numpy.sin", "numpy.all", "numpy.copy", "scipy._lib._util.check_random_state", "scipy.special.gammaln", "numpy.exp", "numpy.array", "numpy.isinf", "numpy.fabs" ] ]
atemysemicolon/scikit-image
[ "a48cf5822f9539c6602b9327c18253aed14fa692" ]
[ "skimage/feature/tests/test_corner.py" ]
[ "import numpy as np\nfrom numpy.testing import (assert_array_equal, assert_raises,\n assert_almost_equal)\n\nfrom skimage import data\nfrom skimage import img_as_float\nfrom skimage.color import rgb2gray\nfrom skimage.morphology import octagon\n\nfrom skimage.feature import (corner_moravec, corner_harris, corner_shi_tomasi,\n corner_subpix, peak_local_max, corner_peaks,\n corner_kitchen_rosenfeld, corner_foerstner,\n corner_fast, corner_orientations,\n structure_tensor, structure_tensor_eigvals,\n hessian_matrix, hessian_matrix_eigvals,\n hessian_matrix_det)\n\n\ndef test_structure_tensor():\n square = np.zeros((5, 5))\n square[2, 2] = 1\n Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)\n assert_array_equal(Axx, np.array([[ 0, 0, 0, 0, 0],\n [ 0, 1, 0, 1, 0],\n [ 0, 4, 0, 4, 0],\n [ 0, 1, 0, 1, 0],\n [ 0, 0, 0, 0, 0]]))\n assert_array_equal(Axy, np.array([[ 0, 0, 0, 0, 0],\n [ 0, 1, 0, -1, 0],\n [ 0, 0, 0, -0, 0],\n [ 0, -1, -0, 1, 0],\n [ 0, 0, 0, 0, 0]]))\n assert_array_equal(Ayy, np.array([[ 0, 0, 0, 0, 0],\n [ 0, 1, 4, 1, 0],\n [ 0, 0, 0, 0, 0],\n [ 0, 1, 4, 1, 0],\n [ 0, 0, 0, 0, 0]]))\n\n\ndef test_hessian_matrix():\n square = np.zeros((5, 5))\n square[2, 2] = 1\n Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)\n assert_array_equal(Hxx, np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]))\n assert_array_equal(Hxy, np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]))\n assert_array_equal(Hyy, np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]))\n\n\ndef test_structure_tensor_eigvals():\n square = np.zeros((5, 5))\n square[2, 2] = 1\n Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)\n l1, l2 = structure_tensor_eigvals(Axx, Axy, Ayy)\n assert_array_equal(l1, np.array([[0, 0, 0, 0, 0],\n [0, 2, 4, 2, 0],\n [0, 4, 0, 4, 0],\n [0, 2, 4, 2, 0],\n [0, 0, 0, 0, 0]]))\n assert_array_equal(l2, np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]))\n\n\ndef test_hessian_matrix_eigvals():\n square = np.zeros((5, 5))\n square[2, 2] = 1\n Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)\n l1, l2 = hessian_matrix_eigvals(Hxx, Hxy, Hyy)\n assert_array_equal(l1, np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]))\n assert_array_equal(l2, np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]))\n\n\ndef test_hessian_matrix_det():\n image = np.zeros((5, 5))\n image[2, 2] = 1\n det = hessian_matrix_det(image, 5)\n assert_almost_equal(det, 0, decimal = 3)\n\n\ndef test_square_image():\n im = np.zeros((50, 50)).astype(float)\n im[:25, :25] = 1.\n\n # Moravec\n results = peak_local_max(corner_moravec(im))\n # interest points along edge\n assert len(results) == 57\n\n # Harris\n results = peak_local_max(corner_harris(im, method='k'))\n # interest at corner\n assert len(results) == 1\n\n results = peak_local_max(corner_harris(im, method='eps'))\n # interest at corner\n assert len(results) == 1\n\n # Shi-Tomasi\n results = peak_local_max(corner_shi_tomasi(im))\n # interest at corner\n assert len(results) == 1\n\n\ndef test_noisy_square_image():\n im = np.zeros((50, 50)).astype(float)\n im[:25, :25] = 1.\n np.random.seed(seed=1234)\n im = im + np.random.uniform(size=im.shape) * .2\n\n # Moravec\n results = peak_local_max(corner_moravec(im))\n # undefined number of interest points\n assert 
results.any()\n\n # Harris\n results = peak_local_max(corner_harris(im, sigma=1.5, method='k'))\n assert len(results) == 1\n results = peak_local_max(corner_harris(im, sigma=1.5, method='eps'))\n assert len(results) == 1\n\n # Shi-Tomasi\n results = peak_local_max(corner_shi_tomasi(im, sigma=1.5))\n assert len(results) == 1\n\n\ndef test_squared_dot():\n im = np.zeros((50, 50))\n im[4:8, 4:8] = 1\n im = img_as_float(im)\n\n # Moravec fails\n\n # Harris\n results = peak_local_max(corner_harris(im))\n assert (results == np.array([[6, 6]])).all()\n\n # Shi-Tomasi\n results = peak_local_max(corner_shi_tomasi(im))\n assert (results == np.array([[6, 6]])).all()\n\n\ndef test_rotated_lena():\n \"\"\"\n The harris filter should yield the same results with an image and it's\n rotation.\n \"\"\"\n im = img_as_float(data.lena().mean(axis=2))\n im_rotated = im.T\n\n # Moravec\n results = peak_local_max(corner_moravec(im))\n results_rotated = peak_local_max(corner_moravec(im_rotated))\n assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()\n assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()\n\n # Harris\n results = peak_local_max(corner_harris(im))\n results_rotated = peak_local_max(corner_harris(im_rotated))\n assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()\n assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()\n\n # Shi-Tomasi\n results = peak_local_max(corner_shi_tomasi(im))\n results_rotated = peak_local_max(corner_shi_tomasi(im_rotated))\n assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()\n assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()\n\n\ndef test_subpix_edge():\n img = np.zeros((50, 50))\n img[:25, :25] = 255\n img[25:, 25:] = 255\n corner = peak_local_max(corner_harris(img), num_peaks=1)\n subpix = corner_subpix(img, corner)\n assert_array_equal(subpix[0], (24.5, 24.5))\n\n\ndef test_subpix_dot():\n img = np.zeros((50, 50))\n img[25, 25] = 255\n corner = peak_local_max(corner_harris(img), num_peaks=1)\n subpix = corner_subpix(img, corner)\n assert_array_equal(subpix[0], (25, 25))\n\n\ndef test_subpix_no_class():\n img = np.zeros((50, 50))\n subpix = corner_subpix(img, np.array([[25, 25]]))\n assert_array_equal(subpix[0], (np.nan, np.nan))\n\n img[25, 25] = 1e-10\n corner = peak_local_max(corner_harris(img), num_peaks=1)\n subpix = corner_subpix(img, np.array([[25, 25]]))\n assert_array_equal(subpix[0], (np.nan, np.nan))\n\n\ndef test_subpix_border():\n img = np.zeros((50, 50))\n img[1:25,1:25] = 255\n img[25:-1,25:-1] = 255\n corner = corner_peaks(corner_harris(img), min_distance=1)\n subpix = corner_subpix(img, corner, window_size=11)\n ref = np.array([[ 0.52040816, 0.52040816],\n [ 0.52040816, 24.47959184],\n [24.47959184, 0.52040816],\n [24.5 , 24.5 ],\n [24.52040816, 48.47959184],\n [48.47959184, 24.52040816],\n [48.47959184, 48.47959184]])\n assert_almost_equal(subpix, ref)\n\n\ndef test_num_peaks():\n \"\"\"For a bunch of different values of num_peaks, check that\n peak_local_max returns exactly the right amount of peaks. 
Test\n is run on Lena in order to produce a sufficient number of corners\"\"\"\n\n lena_corners = corner_harris(rgb2gray(data.lena()))\n\n for i in range(20):\n n = np.random.random_integers(20)\n results = peak_local_max(lena_corners, num_peaks=n)\n assert (results.shape[0] == n)\n\n\ndef test_corner_peaks():\n response = np.zeros((5, 5))\n response[2:4, 2:4] = 1\n\n corners = corner_peaks(response, exclude_border=False)\n assert len(corners) == 1\n\n corners = corner_peaks(response, exclude_border=False, min_distance=0)\n assert len(corners) == 4\n\n corners = corner_peaks(response, exclude_border=False, min_distance=0,\n indices=False)\n assert np.sum(corners) == 4\n\n\ndef test_blank_image_nans():\n \"\"\"Some of the corner detectors had a weakness in terms of returning\n NaN when presented with regions of constant intensity. This should\n be fixed by now. We test whether each detector returns something\n finite in the case of constant input\"\"\"\n\n detectors = [corner_moravec, corner_harris, corner_shi_tomasi,\n corner_kitchen_rosenfeld, corner_foerstner]\n constant_image = np.zeros((20, 20))\n\n for det in detectors:\n response = det(constant_image)\n assert np.all(np.isfinite(response))\n\n\ndef test_corner_fast_image_unsupported_error():\n img = np.zeros((20, 20, 3))\n assert_raises(ValueError, corner_fast, img)\n\n\ndef test_corner_fast_lena():\n img = rgb2gray(data.lena())\n expected = np.array([[ 67, 157],\n [204, 261],\n [247, 146],\n [269, 111],\n [318, 158],\n [386, 73],\n [413, 70],\n [435, 180],\n [455, 177],\n [461, 160]])\n actual = corner_peaks(corner_fast(img, 12, 0.3))\n assert_array_equal(actual, expected)\n\n\ndef test_corner_orientations_image_unsupported_error():\n img = np.zeros((20, 20, 3))\n assert_raises(ValueError, corner_orientations, img,\n np.asarray([[7, 7]]), np.ones((3, 3)))\n\n\ndef test_corner_orientations_even_shape_error():\n img = np.zeros((20, 20))\n assert_raises(ValueError, corner_orientations, img,\n np.asarray([[7, 7]]), np.ones((4, 4)))\n\n\ndef test_corner_orientations_lena():\n img = rgb2gray(data.lena())\n corners = corner_peaks(corner_fast(img, 11, 0.35))\n expected = np.array([-1.9195897 , -3.03159624, -1.05991162, -2.89573739,\n -2.61607644, 2.98660159])\n actual = corner_orientations(img, corners, octagon(3, 2))\n assert_almost_equal(actual, expected)\n\n\ndef test_corner_orientations_square():\n square = np.zeros((12, 12))\n square[3:9, 3:9] = 1\n corners = corner_peaks(corner_fast(square, 9), min_distance=1)\n actual_orientations = corner_orientations(square, corners, octagon(3, 2))\n actual_orientations_degrees = np.rad2deg(actual_orientations)\n expected_orientations_degree = np.array([ 45., 135., -45., -135.])\n assert_array_equal(actual_orientations_degrees,\n expected_orientations_degree)\n\n\nif __name__ == '__main__':\n from numpy import testing\n testing.run_module_suite()\n" ]
[ [ "numpy.testing.run_module_suite", "numpy.random.seed", "numpy.isfinite", "numpy.asarray", "numpy.rad2deg", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.testing.assert_almost_equal", "numpy.sort", "numpy.testing.assert_raises", "numpy.random.random_integers", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
dawn-ico/grid-experiments
[ "882d73d2dc2f3dadeeb71dde6731c21397f37092" ]
[ "reordering.py" ]
[ "from grid_types import Grid, DEVICE_MISSING_VALUE, GridSet\nfrom location_type import LocationType\nfrom schemas import *\n\nimport numpy as np\nimport netCDF4\nfrom functools import cmp_to_key\n\nNaN = float(\"nan\")\n\n\ndef apply_permutation(\n ncf, perm: np.ndarray, schema: GridScheme, location_type: LocationType\n) -> None:\n rev_perm = revert_permutation(perm)\n\n for field_name, descr in schema.items():\n\n field = ncf.variables[field_name]\n array = np.copy(field[:])\n\n if (\n descr.location_type is location_type\n and not descr.do_not_reorder_primary_loc\n ):\n if 1 < len(array.shape):\n assert descr.primary_axis is not None\n array = np.take(array, perm, axis=descr.primary_axis)\n else:\n array = array[perm]\n\n if descr.indexes_into is location_type and not descr.do_not_reorder_indexes:\n # go from fortran's 1-based indexing to python's 0-based indexing\n array = array - 1\n\n # remap indices\n missing_values = array == DEVICE_MISSING_VALUE\n array = rev_perm[array]\n array[missing_values] = DEVICE_MISSING_VALUE\n\n array = array + 1\n\n field[:] = array\n\n\ndef fix_hole(ncf, schema: GridScheme):\n for field_name, descr in schema.items():\n\n field = ncf.variables[field_name]\n array = np.copy(field[:])\n\n nc = ncf.dimensions[\"cell\"].size\n ne = ncf.dimensions[\"edge\"].size\n nv = ncf.dimensions[\"vertex\"].size\n\n # NOTE: this seems extremely brittle, but not sure how to improve\n if field_name == \"end_idx_c\":\n array[0, 8] = nc\n field[:] = array\n\n if field_name == \"end_idx_v\":\n array[0, 7] = nv\n field[:] = array\n\n if field_name == \"end_idx_e\":\n array[0, 13] = ne\n field[:] = array\n\n\ndef get_grf_ranges(grid: Grid, location_type: LocationType = LocationType.Cell):\n\n # returns the index ranges of the grid refinement valid_regions\n # region 0 is the compute domain.\n # all other regions are the lateral boundary layers starting from most outer\n # and going to most inner.\n\n if location_type is LocationType.Vertex:\n n = grid.nv\n start, end = grid.v_grf[:, 0], grid.v_grf[:, 1]\n elif location_type is LocationType.Edge:\n n = grid.ne\n start, end = grid.e_grf[:, 0], grid.e_grf[:, 1]\n elif location_type is LocationType.Cell:\n n = grid.nc\n start, end = grid.c_grf[:, 0], grid.c_grf[:, 1]\n else:\n raise ValueError\n\n valid_regions = start <= end\n start = start[valid_regions]\n end = end[valid_regions]\n end = end + 1 # end is exclusive\n\n assert np.min(start) == 0\n assert np.max(end) <= n\n\n # There's something very weird going on:\n # Some few vertices/edges/cells (at the end) aren't in any valid region,\n # but without them, there will be a hole in the compute domain.\n # We fix this by assigning them to region `0` by default.\n\n end[0] = n\n\n return list(zip(start, end))\n\n\ndef range_to_slice(range: typing.Tuple[typing.Optional[int], typing.Optional[int]]):\n return slice(range[0], range[1])\n\n\ndef normalize_angle(angle):\n return np.fmod(angle, 2 * np.pi)\n\n\ndef get_angle(p):\n return np.arctan2(p[:, 1], p[:, 0])\n\n\ndef rotate(points, angle, origin=np.array([[0, 0]])):\n points = points - origin\n rotation_matrix = np.array(\n [[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]\n )\n points = (rotation_matrix @ points.T).T\n return points + origin\n\n\nclass UnsupportedPentagonException(Exception):\n pass\n\n\ndef neighbor_array_to_set(array):\n nbhs = np.unique(array)\n return nbhs[nbhs != DEVICE_MISSING_VALUE]\n\n\n###############################################################################\n# Each vertex is the 
crossing point of 6 rays. Two of twos rays are defined as\n# the cartesian x- and y-axis (marked as double lines below).\n# With this, we can give each vertex a unique x/y coordinate, as shown below.\n#\n# 2 1\n# \\ //\n# \\ //\n# \\ //\n# [-1, 1] *-----------------------* [0, 1]\n# / \\ // \\\n# / \\ // \\\n# / \\ // \\\n# / \\ // \\\n# [-1, 0] / \\ // [0, 0] \\ [1, 0]\n# 3 ===========*======================*======================*=============== 0\n# \\ // \\ /\n# \\ // \\ /\n# \\ // \\ /\n# \\ // \\ /\n# \\ // \\ /\n# [0, -1] *-----------------------* [1, -1]\n# // \\\n# // \\\n# // \\\n# 4 5\n#\n###############################################################################\nstructured_v2v_offsets = np.array(\n [\n # neighbor id/ray 0\n [+1, +0],\n # neighbor id/ray 1\n [+0, +1],\n # neighbor id/ray 2\n [-1, +1],\n # neighbor id/ray 3\n [-1, +0],\n # neighbor id/ray 4\n [+0, -1],\n # neighbor id/ray 5\n [+1, -1],\n ],\n dtype=int,\n)\n\n###############################################################################\n# Once each vertex has a unique x/y coordinate, we use those to assign\n# to each edge & cell a x/y coordinate and a color. For each edge & cell\n# we look for the closest vertex in the bottom left direction. This vertex\n# determines the x/y coordinate of each edge & cell. Then the coloring is done\n# from left to right in a counter clock-wise direction.\n# (This is similar to dawn's `ICOChainSize`, but uses a slightly different ordering)\n#\n# / \\ / /\n# \\ / \\ / /\n# \\ / \\ / /\n# -*------------------------ [x, y+1] *==================================* [x+1, y+1]\n# / \\ // \\ // \\\n# \\ // \\ // \\\n# \\ // \\ [x, y, 1] // \\\n# \\ // \\ // \\\n# \\ // \\ // \\\n# \\ [x, y, 0] [x, y, 1] // \\\n# \\ // \\ // \\\n# \\ // \\ // \\\n# \\ // [x, y, 0] \\ //\n# \\ // \\ //\n# \\ // \\ //\n# ---- [x, y] *============[x, y, 2]=============* [x+1, y] ---------------------\n# / \\ / \\\n# / \\ / \\\n# / \\ / \\\n# / \\ / \\\n#\n###############################################################################\nstructured_v2e_offsets = np.array(\n [\n # neighbor id/ray 0\n [+0, +0, +2],\n # neighbor id/ray 1\n [+0, +0, +0],\n # neighbor id/ray 2\n [-1, +0, +1],\n # neighbor id/ray 3\n [-1, +0, +2],\n # neighbor id/ray 4\n [+0, -1, +0],\n # neighbor id/ray 5\n [+0, -1, +1],\n ],\n dtype=int,\n)\n# (for the cells, we shift the rays 15 degrees counter clock-wise)\nstructured_v2c_offsets = np.array(\n [\n # neighbor id/ray 0\n [+0, +0, +0],\n # neighbor id/ray 1\n [-1, +0, +1],\n # neighbor id/ray 2\n [-1, +0, +0],\n # neighbor id/ray 3\n [-1, -1, +1],\n # neighbor id/ray 4\n [+0, -1, +0],\n # neighbor id/ray 5\n [+0, -1, +1],\n ],\n dtype=int,\n)\n\n\[email protected]\nclass GridMapping:\n vertex_mapping: np.ndarray\n edge_mapping: np.ndarray\n cell_mapping: np.ndarray\n\n\ndef create_structured_grid_mapping(\n grid: Grid, right_direction_angle, start_vertex=None, angle_threshold=np.deg2rad(30)\n) -> GridMapping:\n # doesn't support pentagons!\n\n if start_vertex is None:\n start_vertex = 0\n\n if isinstance(right_direction_angle, np.ndarray):\n right_direction_angle = float(right_direction_angle)\n\n vertex_mapping = np.full((grid.nv, 2), NaN)\n edge_mapping = np.full((grid.ne, 3), NaN)\n cell_mapping = np.full((grid.nc, 3), NaN)\n\n vertex_mapping[start_vertex] = [0, 0]\n\n # This algorithms works as follows:\n #\n # * Carry out a breadth-first search starting from `start_vertex`.\n # * For each vertex:\n # * Determine for each neighbor edge, cell, vertex what is their relative 
id.\n # (see `structured_<...>_offsets`)\n # * For each neighbor edge, cell, vertex check which ones have no coordinates assigned yet:\n # * Assign new coordinates to neighbors if they don't have coordinates yet.\n # * Check if coordinates are consistent if they already have coordinates.\n # * Update the right direction angle based on the neighboring vertices of the vertex\n # (this way the algorithm can handle a small local curvature)\n # * Continue the bsf with the vertices that have newly assigned coordinates.\n\n def bfs(vertex_id, right_direction_angle):\n\n # neighbor cells, edges, vertices\n cell_ids = neighbor_array_to_set(grid.v2c[vertex_id])\n edge_ids = neighbor_array_to_set(grid.v2e[vertex_id])\n vertex_ids = neighbor_array_to_set(grid.e2v[edge_ids])\n vertex_ids = vertex_ids[vertex_ids != vertex_id]\n\n # some sanity checks\n if len(edge_ids) == 5 and len(cell_ids) == 5:\n raise UnsupportedPentagonException\n\n assert len(edge_ids) == len(cell_ids) == 6 or len(cell_ids) + 1 == len(edge_ids)\n assert len(vertex_ids) == len(edge_ids)\n assert 0 < len(cell_ids) <= len(edge_ids) <= 6\n\n # get the coordinates of this vertex\n x, y = vertex_mapping[vertex_id]\n\n assert not np.isnan(x) and not np.isnan(y)\n\n self_lon_lat = grid.v_lon_lat[vertex_id]\n\n # compute angles of neighbor vertices\n vertices_angle = normalize_angle(\n get_angle(grid.v_lon_lat[vertex_ids] - self_lon_lat) - right_direction_angle\n )\n vertices_nbh_ids = np.around(vertices_angle / (np.pi / 3)).astype(int)\n assert np.all(\n np.fabs(vertices_angle - vertices_nbh_ids * np.pi / 3) <= angle_threshold\n )\n\n # compute angles of neighbor edges\n edges_angle = normalize_angle(\n get_angle(grid.e_lon_lat[edge_ids] - self_lon_lat) - right_direction_angle\n )\n edges_nbh_ids = np.around(edges_angle / (np.pi / 3)).astype(int)\n assert np.all(\n np.fabs(edges_angle - edges_nbh_ids * np.pi / 3) <= angle_threshold\n )\n\n # compute angles of neighbor cells\n # (we rotate the cells by 30 degrees clock-wise (`-np.pi/6`) to get the angle id)\n cells_angle = normalize_angle(\n get_angle(grid.c_lon_lat[cell_ids] - self_lon_lat)\n - right_direction_angle\n - np.pi / 6\n )\n cells_nbh_ids = np.around(cells_angle / (np.pi / 3)).astype(int)\n assert np.all(\n np.fabs(cells_angle - cells_nbh_ids * np.pi / 3) <= angle_threshold\n )\n\n # update right direction angle\n self_right_direction_angle = (\n np.average(vertices_angle - vertices_nbh_ids * np.pi / 3)\n + right_direction_angle\n )\n\n # assign coordinates to vertex neighbors that don't have a coordinate yet\n vertices_nbh_structured_coords = structured_v2v_offsets[\n vertices_nbh_ids\n ] + np.array([[x, y]], dtype=int)\n new_vertex_ids = np.all(np.isnan(vertex_mapping[vertex_ids, :]), axis=-1)\n vertex_mapping[vertex_ids[new_vertex_ids], :] = vertices_nbh_structured_coords[\n new_vertex_ids\n ]\n # check vertex neighbors that already had a coordinate, that they are consistent with the ones we computed here\n assert np.all(vertex_mapping[vertex_ids, :] == vertices_nbh_structured_coords)\n\n # assign coordinates to edge neighbors that don't have a coordinate yet\n edges_nbh_structured_coords = structured_v2e_offsets[edges_nbh_ids] + np.array(\n [[x, y, 0]], dtype=int\n )\n new_edge_ids = np.all(np.isnan(edge_mapping[edge_ids, :]), axis=-1)\n edge_mapping[edge_ids[new_edge_ids], :] = edges_nbh_structured_coords[\n new_edge_ids\n ]\n # check edge neighbors that already had a coordinate, that they are consistent with the ones we computed here\n assert np.all(edge_mapping[edge_ids, 
:] == edges_nbh_structured_coords)\n\n # assign coordinates to cell neighbors that don't have a coordinate yet\n cells_nbh_structured_coords = structured_v2c_offsets[cells_nbh_ids] + np.array(\n [[x, y, 0]], dtype=int\n )\n new_cell_ids = np.all(np.isnan(cell_mapping[cell_ids, :]), axis=-1)\n cell_mapping[cell_ids[new_cell_ids], :] = cells_nbh_structured_coords[\n new_cell_ids\n ]\n # check cell neighbors that already had a coordinate, that they are consistent with the ones we computed here\n assert np.all(cell_mapping[cell_ids, :] == cells_nbh_structured_coords)\n\n # continue bfs with vertices that have newly assigned coordinates\n # (use the updated right direction angle for them)\n return {\n (int(next_vertex_id), self_right_direction_angle)\n for next_vertex_id in vertex_ids[new_vertex_ids]\n }\n\n current = set()\n next = {(start_vertex, right_direction_angle)}\n\n while 0 < len(next):\n # swap\n current, next = next, current\n next.clear()\n\n for vertex_args in current:\n next.update(bfs(*vertex_args))\n\n assert not np.any(np.isnan(vertex_mapping))\n assert not np.any(np.isnan(edge_mapping))\n assert not np.any(np.isnan(cell_mapping))\n\n return GridMapping(\n vertex_mapping=vertex_mapping,\n edge_mapping=edge_mapping,\n cell_mapping=cell_mapping,\n )\n\n\ndef argsort_simple(\n mapping: np.ndarray,\n cmp: typing.Callable[[typing.Any, typing.Any], int],\n idx_range: typing.Tuple[typing.Optional[int], typing.Optional[int]] = (None, None),\n) -> np.ndarray:\n # Sorts the first axis based on a `cmp` function within the range [start_idx:end_idx].\n # Returns the permutation array for the whole array.\n #\n # A permutation is an array `a` such that: `a[old_index] == new_index`\n\n total_end_idx = mapping.shape[0]\n start_idx, end_idx = idx_range\n\n if start_idx is None:\n start_idx = 0\n\n if end_idx is None:\n end_idx = total_end_idx\n\n ids = list(range(start_idx, end_idx))\n ids.sort(key=cmp_to_key(lambda a, b: cmp(mapping[a, :], mapping[b, :])))\n return np.concatenate(\n (np.arange(start_idx), np.array(ids), np.arange(end_idx, total_end_idx))\n )\n\n\ndef revert_permutation(perm: np.ndarray) -> np.ndarray:\n perm_rev = np.arange(perm.shape[0])\n perm_rev[perm] = np.copy(perm_rev)\n return perm_rev\n\n\nclass SimpleRowMajorSorting:\n # Provides comparison functions for mappings from `create_structured_grid_mapping`.\n\n @staticmethod\n def vertex_compare(a, b) -> int:\n return a[0] - b[0] if b[1] == a[1] else b[1] - a[1]\n\n @staticmethod\n def edge_compare(a, b) -> int:\n if a[2] == 2 and b[2] != 2:\n return b[1] - a[1] + 1 / 2\n if a[2] != 2 and b[2] == 2:\n return b[1] - a[1] - 1 / 2\n return (\n (a[2] - b[2] if a[0] == b[0] else a[0] - b[0])\n if b[1] == a[1]\n else b[1] - a[1]\n )\n\n @staticmethod\n def cell_compare(a, b) -> int:\n return (\n (a[2] - b[2] if a[0] == b[0] else a[0] - b[0])\n if b[1] == a[1]\n else b[1] - a[1]\n )\n\n\ndef reorder_pool_folder(grid_set: GridSet, fix_hole_in_grid: bool):\n grid_file = netCDF4.Dataset(grid_set.grid.fname + \".nc\")\n grid = Grid.from_netCDF4(grid_file)\n\n grid_set.make_data_sets(\"row-major\")\n\n # the line of the right direction angle for vertex #0:\n p1 = np.array([[0.18511014, 0.79054856]])\n p2 = np.array([[0.18593181, 0.79048109]])\n right_direction_angle = np.squeeze(get_angle(p2 - p1))\n\n mapping = create_structured_grid_mapping(\n grid, right_direction_angle, angle_threshold=np.deg2rad(15)\n )\n\n v_grf = get_grf_ranges(grid, LocationType.Vertex)\n e_grf = get_grf_ranges(grid, LocationType.Edge)\n c_grf = 
get_grf_ranges(grid, LocationType.Cell)\n\n v_perm = argsort_simple(\n mapping.vertex_mapping, SimpleRowMajorSorting.vertex_compare, v_grf[0]\n )\n e_perm = argsort_simple(\n mapping.edge_mapping, SimpleRowMajorSorting.edge_compare, e_grf[0]\n )\n c_perm = argsort_simple(\n mapping.cell_mapping, SimpleRowMajorSorting.cell_compare, c_grf[0]\n )\n\n for grid in grid_set:\n apply_permutation(grid.data_set, c_perm, grid.schema, LocationType.Cell)\n apply_permutation(grid.data_set, e_perm, grid.schema, LocationType.Edge)\n apply_permutation(grid.data_set, v_perm, grid.schema, LocationType.Vertex)\n\n if fix_hole_in_grid:\n fix_hole(grid_set.grid.data_set, grid_set.grid.schema)\n\n grid_set.sync_data_sets()\n" ]
[ [ "numpy.fmod", "numpy.take", "numpy.unique", "numpy.min", "numpy.arange", "numpy.isnan", "numpy.around", "numpy.cos", "numpy.full", "numpy.arctan2", "numpy.max", "numpy.deg2rad", "numpy.copy", "numpy.all", "numpy.sin", "numpy.average", "numpy.array", "numpy.fabs" ] ]
GuillaumeBalezo/SOD-python
[ "c54566d25e01b252815fbc613a8d0af1c77818b6" ]
[ "functions/get_Proposals.py" ]
[ "# Functions for generating the proposal set for optimization\n#\n# Jianming Zhang, Stan Sclaroff, Zhe Lin, Xiaohui Shen,\n# Brian Price and Radomír Mech. \"Unconstrained Salient\n# Object Detection via Proposal Subset Optimization.\"\n# CVPR, 2016.\n# Code written by Guillaume Balezo, 2020\n\nimport numpy as np\nfrom scipy import cluster\nimport cv2\nfrom tensorflow.keras.applications.vgg16 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import load_img\nfrom functions.utils import get_iou_float, get_roi_bbox\n\ndef get_proposals(I_batch, net, param):\n \"\"\" Generate the proposal set for optimization.\n Args:\n I_batch: Images of the batch.\n net: CNN model.\n param: parameters of the model.\n Returns: P_batch: Prediction over the batch.\n S_batch: Scores associated with the predictions:\n \"\"\"\n Ip = prepare_image(I_batch, param)\n scores_batch = net.predict(Ip)\n P_batch = []\n S_batch = []\n for idx_batch in range(len(I_batch)):\n scores = scores_batch[idx_batch]\n I = I_batch[idx_batch]\n imsz = np.array([I.shape[0], I.shape[1]])\n top_idxs = np.argsort(-scores)\n scores = np.take(scores, top_idxs)\n BB = param['center'][:, top_idxs]\n P = BB[:, : param['masterImgPropN']].copy()\n S = scores[: param['masterImgPropN']].copy()\n # extract ROIs\n ROI = BB[:, : param['roiN']].copy()\n ROI = post_proc(ROI, imsz, param)\n ROI = cluster_boxes(ROI, param) # merge some ROI if needed\n # process ROIs\n Ip = crop_imgs_and_prepare(I.copy(), ROI, param)\n if Ip.size == 0:\n P_batch.append(P)\n S_batch.append(S)\n continue\n scores = net.predict(Ip)\n top_idxs = np.argsort(-scores, axis = 1)\n scores = np.take_along_axis(scores, top_idxs, axis = 1)\n for i in range(Ip.shape[0]):\n B = param['center'][:, top_idxs[i, : param['subImgPropN']]]\n roi = ROI[:, i] / np.tile(np.roll(imsz, 1), 2)\n B = get_roi_bbox(B.copy(), roi)\n P = np.hstack((P, B))\n S = np.hstack((S, scores[i, : param['subImgPropN']]))\n P_batch.append(P)\n S_batch.append(S)\n return P_batch, S_batch\n\n\ndef prepare_image(I_batch, param):\n \"\"\" Preprocess the images before the CNN predictions.\n Args:\n I_batch: Images of the batch.\n param: parameters of the model.\n Returns: Ip: Images of the batch preprocessed\n \"\"\"\n Ip = np.zeros((len(I_batch), param['width'], param['height'], 3))\n for i in range(len(I_batch)):\n img = I_batch[i]\n img = preprocess_input(img, mode='caffe')\n Ip[i] = np.expand_dims(cv2.resize(img, (param['width'], param['height']), interpolation = cv2.INTER_LINEAR), axis = 0)\n return Ip\n\ndef cluster_boxes(BB, param):\n if BB.shape[1] < 2:\n ROI = np.copy(BB)\n return ROI\n\n D = []\n for i in range(BB.shape[1]):\n for j in range(i + 1, BB.shape[1]):\n D.append(1 - get_iou_float(BB[:, j].reshape(-1, 1).T, BB[:, i]))\n Z = cluster.hierarchy.linkage(D)\n T = cluster.hierarchy.fcluster(Z, param['roiClusterCutoff'], criterion = 'distance')\n ROI = np.vstack((BB[:2, T == 1].min(axis = 1, keepdims=True), BB[2:, T == 1].max(axis = 1, keepdims=True))) # initialisation for the for loop\n for i in range(2, T.max() + 1):\n ROI = np.hstack((ROI, np.vstack((BB[:2, T == i].min(axis = 1, keepdims=True), BB[2:, T == i].max(axis = 1, keepdims=True)))))\n return ROI\n\ndef post_proc(ROI, imsz, param):\n \"\"\" Post processing of the CNN predictions.\n Args:\n ROI: Region of interest.\n imsz: Image size.\n param: parameters of the model.\n Returns: ROI: Post-processed CNN predictions\n \"\"\"\n # expand\n w = ROI[2] - ROI[0]\n h = ROI[3] - ROI[1]\n ROI[0] = ROI[0] - 0.5 * w * param['roiExpand']\n 
ROI[1] = ROI[1] - 0.5 * h * param['roiExpand']\n ROI[2] = ROI[0] + w * (1 + param['roiExpand'])\n ROI[3] = ROI[1] + h * (1 + param['roiExpand'])\n\n ROI = ROI * np.tile(np.roll(imsz, 1), 2).reshape(-1, 1)\n ROI[:2] = np.maximum(ROI[:2], 0)\n ROI[2] = np.minimum(ROI[2], imsz[1])\n ROI[3] = np.minimum(ROI[3], imsz[0])\n\n\n # removing\n area = (ROI[2] - ROI[0] + 1) * (ROI[3] - ROI[1] + 1)\n\n ROI = ROI[:, area < (0.9 * imsz[0] * imsz[1])]\n return ROI\n\n\ndef crop_imgs_and_prepare(img, roilist, param):\n \"\"\" Crop the image on the region of interest and preprocess the crops before the\n CNN network. The function is used in get_proposals, during the refinement step.\n Args:\n img: image.\n roilist: Regions of interest.\n param: parameters of the model.\n Returns: Ip: Preprocessed crops of the image\n \"\"\"\n Ip = []\n if len(roilist.shape) == 1:\n roilist = roilist.reshape(-1, 1)\n for i in range(roilist.shape[1]):\n roi = roilist[:, i]\n img_cropped = img[int(roi[1]) : int(roi[3]) + 1, int(roi[0]) : int(roi[2]) + 1, :]\n img_cropped = preprocess_input(img_cropped, mode = 'caffe')\n Ip.append(cv2.resize(img_cropped, (param['height'], param['width']), interpolation = cv2.INTER_LINEAR))\n return np.array(Ip)\n" ]
[ [ "numpy.take_along_axis", "numpy.hstack", "numpy.maximum", "numpy.minimum", "numpy.take", "tensorflow.keras.applications.vgg16.preprocess_input", "numpy.copy", "scipy.cluster.hierarchy.linkage", "numpy.argsort", "numpy.array", "numpy.roll", "scipy.cluster.hierarchy.fcluster" ] ]
dkkim93/gumbel-rl-gridworld
[ "2ec86bc6cf7c16d5ef0368c9dc4a83062d3d86e3" ]
[ "policy/td3.py" ]
[ "\"\"\"Modified Twin Delayed Deep Deterministic Policy Gradients (TD3)\nTD3 Ref: https://github.com/sfujim/TD3\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom misc.utils import onehot_from_logits, gumbel_softmax\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Actor(nn.Module):\n def __init__(self, actor_input_dim, actor_output_dim, n_hidden, n_action, name):\n super(Actor, self).__init__()\n\n setattr(self, name + \"_l1\", nn.Linear(actor_input_dim, n_hidden))\n setattr(self, name + \"_l2\", nn.Linear(n_hidden, n_hidden))\n setattr(self, name + \"_l3\", nn.Linear(n_hidden, n_action))\n\n self.name = name\n\n def forward(self, x):\n x = F.relu(getattr(self, self.name + \"_l1\")(x))\n x = F.relu(getattr(self, self.name + \"_l2\")(x))\n x = getattr(self, self.name + \"_l3\")(x)\n\n return x\n\n\nclass Critic(nn.Module):\n def __init__(self, critic_input_dim, n_hidden, name):\n super(Critic, self).__init__()\n\n # Q1 architecture\n setattr(self, name + \"_l1\", nn.Linear(critic_input_dim, n_hidden))\n setattr(self, name + \"_l2\", nn.Linear(n_hidden, n_hidden))\n setattr(self, name + \"_l3\", nn.Linear(n_hidden, 1))\n\n # Q2 architecture\n setattr(self, name + \"_l4\", nn.Linear(critic_input_dim, n_hidden))\n setattr(self, name + \"_l5\", nn.Linear(n_hidden, n_hidden))\n setattr(self, name + \"_l6\", nn.Linear(n_hidden, 1))\n\n self.name = name\n\n def forward(self, x, u):\n xu = torch.cat([x, u], 1)\n\n x1 = F.relu(getattr(self, self.name + \"_l1\")(xu))\n x1 = F.relu(getattr(self, self.name + \"_l2\")(x1))\n x1 = getattr(self, self.name + \"_l3\")(x1)\n\n x2 = F.relu(getattr(self, self.name + \"_l4\")(xu))\n x2 = F.relu(getattr(self, self.name + \"_l5\")(x2))\n x2 = getattr(self, self.name + \"_l6\")(x2)\n\n return x1, x2\n\n def Q1(self, x, u):\n xu = torch.cat([x, u], 1)\n\n x1 = F.relu(getattr(self, self.name + \"_l1\")(xu))\n x1 = F.relu(getattr(self, self.name + \"_l2\")(x1))\n x1 = getattr(self, self.name + \"_l3\")(x1)\n\n return x1 \n\n\nclass TD3(object):\n def __init__(self, actor_input_dim, actor_output_dim, critic_input_dim, n_hidden, name, args):\n self.actor = Actor(\n actor_input_dim, actor_output_dim, n_hidden, args.n_action, name=name + \"_actor\").to(device)\n self.actor_target = Actor(\n actor_input_dim, actor_output_dim, n_hidden, args.n_action, name=name + \"_actor\").to(device)\n self.actor_target.load_state_dict(self.actor.state_dict())\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=args.actor_lr)\n\n self.critic = Critic(critic_input_dim, n_hidden, name=name + \"_critic\").to(device)\n self.critic_target = Critic(critic_input_dim, n_hidden, name=name + \"_critic\").to(device)\n self.critic_target.load_state_dict(self.critic.state_dict())\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=args.critic_lr)\n\n self.name = name\n self.args = args\n\n def select_action(self, state):\n state = torch.FloatTensor(state.reshape(1, -1)).to(device)\n\n action = self.actor(state)\n action = onehot_from_logits(logits=action)\n action = action.cpu().data.numpy()\n action = np.squeeze(action, axis=0)\n\n return action\n\n def train(self, replay_buffer, iterations, batch_size, discount, tau, policy_freq):\n debug = {}\n debug[\"critic_loss\"] = 0.\n debug[\"actor_loss\"] = 0.\n\n for it in range(iterations):\n # Sample replay buffer \n x, y, u, r, d = replay_buffer.sample(batch_size)\n state = torch.FloatTensor(x).to(device)\n action = 
torch.FloatTensor(u).to(device)\n next_state = torch.FloatTensor(y).to(device)\n done = torch.FloatTensor(1 - d).to(device)\n reward = torch.FloatTensor(r).to(device)\n\n # Select next action according to policy \n next_action = self.actor_target(next_state)\n next_action = onehot_from_logits(next_action)\n\n # Compute the target Q value\n target_Q1, target_Q2 = self.critic_target(next_state, next_action)\n target_Q = torch.min(target_Q1, target_Q2)\n target_Q = reward + (done * discount * target_Q).detach()\n\n # Get current Q estimates\n current_Q1, current_Q2 = self.critic(state, action)\n\n # Compute critic loss\n critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q) \n\n # Optimize the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)\n self.critic_optimizer.step()\n debug[\"critic_loss\"] += critic_loss.cpu().data.numpy().flatten()\n\n # Delayed policy updates\n if it % policy_freq == 0:\n # Compute actor loss\n action = self.actor(state)\n action = gumbel_softmax(action, hard=True)\n actor_loss = -self.critic.Q1(state, action).mean()\n\n # Optimize the actor \n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)\n self.actor_optimizer.step()\n debug[\"actor_loss\"] += actor_loss.cpu().data.numpy().flatten()\n\n # Update the frozen target models\n for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)\n\n for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\n target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)\n\n return debug\n\n def save(self, filename, directory):\n torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, filename))\n if \"worker\" not in self.name:\n torch.save(self.critic.state_dict(), '%s/%s_critic.pth' % (directory, filename))\n\n def load(self, filename, directory):\n from collections import OrderedDict\n\n actor_weight = torch.load('%s/%s_actor.pth' % (directory, filename), map_location='cpu')\n\n actor_weight_fixed = OrderedDict()\n for k, v in actor_weight.items():\n name_fixed = self.name\n for i_name, name in enumerate(k.split(\"_\")):\n if i_name > 0:\n name_fixed += \"_\" + name\n actor_weight_fixed[name_fixed] = v\n\n self.actor.load_state_dict(actor_weight_fixed)\n\n if \"worker\" not in self.name:\n critic_weight = torch.load('%s/%s_critic.pth' % (directory, filename), map_location='cpu')\n\n critic_weight_fixed = OrderedDict()\n for k, v in critic_weight.items():\n name_fixed = self.name\n for i_name, name in enumerate(k.split(\"_\")):\n if i_name > 0:\n name_fixed += \"_\" + name\n critic_weight_fixed[name_fixed] = v\n\n self.critic.load_state_dict(critic_weight_fixed)\n\n self.actor_target.load_state_dict(self.actor.state_dict())\n self.critic_target.load_state_dict(self.critic.state_dict())\n" ]
[ [ "torch.load", "torch.cat", "numpy.squeeze", "torch.min", "torch.nn.Linear", "torch.nn.functional.mse_loss", "torch.FloatTensor", "torch.cuda.is_available" ] ]
Hengstenberg11/RedesNeuronales
[ "62bda5c8a6fc7f0e4207f7b6eb47617800bb3ebf" ]
[ "librerias/redNeuronal.py" ]
[ "#Neural network utils\n#This code was not implemented by me\n\nimport numpy as np\nfrom functools import reduce\n\nflatten_list_of_arrays = lambda list_of_arrays: reduce(\n lambda acc, v: np.array([*acc.flatten(), *v.flatten()]),\n list_of_arrays\n)\n\ndef inflate_matrixes(flat_thetas, shapes):\n layers = len(shapes) + 1\n sizes = [shape[0] * shape[1] for shape in shapes]\n steps = np.zeros(layers, dtype=int)\n\n for i in range(layers - 1):\n steps[i + 1] = steps[i] + sizes[i]\n\n return [\n flat_thetas[steps[i]: steps[i + 1]].reshape(*shapes[i])\n for i in range(layers - 1)\n ]\n\ndef sigmoid(z):\n a = [(1 / (1 + np.exp(-x))) for x in z]\n return np.asarray(a).reshape(z.shape)\n\ndef back_propagation(flat_thetas, shapes, X, Y):\n m, layers = len(X), len(shapes) + 1\n thetas = inflate_matrixes(flat_thetas, shapes)\n \n a = feed_forward(thetas, X)\n\n deltas = [*range(layers - 1), a[-1] - Y]\n for i in range(layers - 2, 0, -1):\n deltas[i] = (deltas[i + 1] @ np.delete(thetas[i], 0, 1)) * (a[i] * (1 - a[i]))\n\n deltas_l = []\n for i in range(layers - 1):\n deltas_l.append(\n (deltas[i + 1].T\n @\n np.hstack((\n np.ones(len(a[i])).reshape(len(a[i]), 1),\n a[i]\n ))) / m\n )\n deltas_l = np.asarray(deltas_l)\n\n return flatten_list_of_arrays(\n deltas_l\n )\n\ndef cost_function(flat_thetas, shapes, X, Y):\n a = feed_forward(\n inflate_matrixes(flat_thetas, shapes),\n X\n )\n return -(Y * np.log(a[-1]) + (1 - Y) * np.log(1 - a[-1])).sum() / len(X)\n\n\ndef feed_forward(thetas, X):\n mt_a = [np.asarray(X)]\n\n for i in range(len(thetas)):\n mt_a.append(\n sigmoid(\n np.matmul(\n np.hstack((\n np.ones(len(X)).reshape(len(X), 1),\n mt_a[i]\n )), thetas[i].T\n )\n ) \n )\n return mt_a" ]
[ [ "numpy.log", "numpy.asarray", "numpy.delete", "numpy.exp", "numpy.zeros" ] ]
vinodsr/frigate
[ "3b04169c8b53b5653ad9b26d5bbe6313cbeff08d" ]
[ "frigate/object_processing.py" ]
[ "import json\nimport hashlib\nimport datetime\nimport time\nimport copy\nimport cv2\nimport threading\nimport queue\nimport copy\nimport numpy as np\nfrom collections import Counter, defaultdict\nimport itertools\nimport pyarrow.plasma as plasma\nimport matplotlib.pyplot as plt\nfrom frigate.util import draw_box_with_label, PlasmaFrameManager\nfrom frigate.edgetpu import load_labels\nimport base64\nfrom typing import Callable, Dict\nfrom statistics import mean, median\n\nPATH_TO_LABELS = '/labelmap.txt'\n\nLABELS = load_labels(PATH_TO_LABELS)\ncmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))\n\nCOLOR_MAP = {}\nfor key, val in LABELS.items():\n COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])\n\ndef zone_filtered(obj, object_config):\n object_name = obj['label']\n object_filters = object_config.get('filters', {})\n\n if object_name in object_filters:\n obj_settings = object_filters[object_name]\n\n # if the min area is larger than the\n # detected object, don't add it to detected objects\n if obj_settings.get('min_area',-1) > obj['area']:\n return True\n \n # if the detected object is larger than the\n # max area, don't add it to detected objects\n if obj_settings.get('max_area', 24000000) < obj['area']:\n return True\n\n # if the score is lower than the threshold, skip\n if obj_settings.get('threshold', 0) > obj['computed_score']:\n return True\n \n return False\n\n# Maintains the state of a camera\nclass CameraState():\n def __init__(self, name, config, frame_manager):\n self.name = name\n self.config = config\n self.frame_manager = frame_manager\n\n self.best_objects = {}\n self.object_status = defaultdict(lambda: 'OFF')\n self.tracked_objects = {}\n self.zone_objects = defaultdict(lambda: [])\n self.current_frame = np.zeros((720,1280,3), np.uint8)\n self.current_frame_time = 0.0\n self.previous_frame_id = None\n self.callbacks = defaultdict(lambda: [])\n\n def false_positive(self, obj):\n # once a true positive, always a true positive\n if not obj.get('false_positive', True):\n return False\n\n threshold = self.config['objects'].get('filters', {}).get(obj['label'], {}).get('threshold', 0.85)\n if obj['computed_score'] < threshold:\n return True\n return False\n\n def compute_score(self, obj):\n scores = obj['score_history'][:]\n # pad with zeros if you dont have at least 3 scores\n if len(scores) < 3:\n scores += [0.0]*(3 - len(scores))\n return median(scores)\n\n def on(self, event_type: str, callback: Callable[[Dict], None]):\n self.callbacks[event_type].append(callback)\n\n def update(self, frame_time, tracked_objects):\n self.current_frame_time = frame_time\n # get the new frame and delete the old frame\n frame_id = f\"{self.name}{frame_time}\"\n self.current_frame = self.frame_manager.get(frame_id)\n if not self.previous_frame_id is None:\n self.frame_manager.delete(self.previous_frame_id)\n self.previous_frame_id = frame_id\n\n current_ids = tracked_objects.keys()\n previous_ids = self.tracked_objects.keys()\n removed_ids = list(set(previous_ids).difference(current_ids))\n new_ids = list(set(current_ids).difference(previous_ids))\n updated_ids = list(set(current_ids).intersection(previous_ids))\n\n for id in new_ids:\n self.tracked_objects[id] = tracked_objects[id]\n self.tracked_objects[id]['zones'] = []\n\n # start the score history\n self.tracked_objects[id]['score_history'] = [self.tracked_objects[id]['score']]\n\n # calculate if this is a false positive\n self.tracked_objects[id]['computed_score'] = self.compute_score(self.tracked_objects[id])\n 
self.tracked_objects[id]['false_positive'] = self.false_positive(self.tracked_objects[id])\n\n # call event handlers\n for c in self.callbacks['start']:\n c(self.name, tracked_objects[id])\n \n for id in updated_ids:\n self.tracked_objects[id].update(tracked_objects[id])\n\n # if the object is not in the current frame, add a 0.0 to the score history\n if self.tracked_objects[id]['frame_time'] != self.current_frame_time:\n self.tracked_objects[id]['score_history'].append(0.0)\n else:\n self.tracked_objects[id]['score_history'].append(self.tracked_objects[id]['score'])\n # only keep the last 10 scores\n if len(self.tracked_objects[id]['score_history']) > 10:\n self.tracked_objects[id]['score_history'] = self.tracked_objects[id]['score_history'][-10:]\n\n # calculate if this is a false positive\n self.tracked_objects[id]['computed_score'] = self.compute_score(self.tracked_objects[id])\n self.tracked_objects[id]['false_positive'] = self.false_positive(self.tracked_objects[id])\n\n # call event handlers\n for c in self.callbacks['update']:\n c(self.name, self.tracked_objects[id])\n \n for id in removed_ids:\n # publish events to mqtt\n self.tracked_objects[id]['end_time'] = frame_time\n for c in self.callbacks['end']:\n c(self.name, self.tracked_objects[id])\n del self.tracked_objects[id]\n\n # check to see if the objects are in any zones\n for obj in self.tracked_objects.values():\n current_zones = []\n bottom_center = (obj['centroid'][0], obj['box'][3])\n # check each zone\n for name, zone in self.config['zones'].items():\n contour = zone['contour']\n # check if the object is in the zone and not filtered\n if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0 \n and not zone_filtered(obj, zone.get('filters', {}))):\n current_zones.append(name)\n obj['zones'] = current_zones\n \n # draw on the frame\n if not self.current_frame is None:\n # draw the bounding boxes on the frame\n for obj in self.tracked_objects.values():\n thickness = 2\n color = COLOR_MAP[obj['label']]\n \n if obj['frame_time'] != frame_time:\n thickness = 1\n color = (255,0,0)\n\n # draw the bounding boxes on the frame\n box = obj['box']\n draw_box_with_label(self.current_frame, box[0], box[1], box[2], box[3], obj['label'], f\"{int(obj['score']*100)}% {int(obj['area'])}\", thickness=thickness, color=color)\n # draw the regions on the frame\n region = obj['region']\n cv2.rectangle(self.current_frame, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)\n \n if self.config['snapshots']['show_timestamp']:\n time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime(\"%m/%d/%Y %H:%M:%S\")\n cv2.putText(self.current_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)\n\n if self.config['snapshots']['draw_zones']:\n for name, zone in self.config['zones'].items():\n thickness = 8 if any([name in obj['zones'] for obj in self.tracked_objects.values()]) else 2\n cv2.drawContours(self.current_frame, [zone['contour']], -1, zone['color'], thickness)\n\n # maintain best objects\n for obj in self.tracked_objects.values():\n object_type = obj['label']\n # if the object wasn't seen on the current frame, skip it\n if obj['frame_time'] != self.current_frame_time or obj['false_positive']:\n continue\n obj_copy = copy.deepcopy(obj)\n if object_type in self.best_objects:\n current_best = self.best_objects[object_type]\n now = datetime.datetime.now().timestamp()\n # if the object is a higher score than the current best score \n # or the current object is older than 
desired, use the new object\n if obj_copy['score'] > current_best['score'] or (now - current_best['frame_time']) > self.config.get('best_image_timeout', 60):\n obj_copy['frame'] = np.copy(self.current_frame)\n self.best_objects[object_type] = obj_copy\n for c in self.callbacks['snapshot']:\n c(self.name, self.best_objects[object_type],\"ON\")\n else:\n obj_copy['frame'] = np.copy(self.current_frame)\n self.best_objects[object_type] = obj_copy\n for c in self.callbacks['snapshot']:\n c(self.name, self.best_objects[object_type],\"ON\")\n \n # update overall camera state for each object type\n obj_counter = Counter()\n for obj in self.tracked_objects.values():\n if not obj['false_positive']:\n obj_counter[obj['label']] += 1\n \n # report on detected objects\n for obj_name, count in obj_counter.items():\n new_status = 'ON' if count > 0 else 'OFF'\n if new_status != self.object_status[obj_name]:\n self.object_status[obj_name] = new_status\n for c in self.callbacks['object_status']:\n c(self.name, obj_name, new_status)\n\n # expire any objects that are ON and no longer detected\n expired_objects = [obj_name for obj_name, status in self.object_status.items() if status == 'ON' and not obj_name in obj_counter]\n for obj_name in expired_objects:\n self.object_status[obj_name] = 'OFF'\n for c in self.callbacks['object_status']:\n c(self.name, obj_name, 'OFF')\n for c in self.callbacks['snapshot']:\n c(self.name, self.best_objects[obj_name],\"OFF\")\n\n\nclass TrackedObjectProcessor(threading.Thread):\n def __init__(self, camera_config, client, topic_prefix, tracked_objects_queue, event_queue, stop_event):\n threading.Thread.__init__(self)\n self.camera_config = camera_config\n self.client = client\n self.topic_prefix = topic_prefix\n self.tracked_objects_queue = tracked_objects_queue\n self.event_queue = event_queue\n self.stop_event = stop_event\n self.camera_states: Dict[str, CameraState] = {}\n self.plasma_client = PlasmaFrameManager(self.stop_event)\n\n def start(camera, obj):\n # publish events to mqtt\n self.client.publish(f\"{self.topic_prefix}/{camera}/events/start\", json.dumps(obj), retain=False)\n self.event_queue.put(('start', camera, obj))\n\n def update(camera, obj):\n pass\n\n def end(camera, obj):\n self.client.publish(f\"{self.topic_prefix}/{camera}/events/end\", json.dumps(obj), retain=False)\n self.event_queue.put(('end', camera, obj))\n \n def snapshot(camera, obj,status):\n if not 'frame' in obj:\n return\n best_frame = cv2.cvtColor(obj['frame'], cv2.COLOR_RGB2BGR)\n mqtt_config = self.camera_config[camera].get('mqtt', {'crop_to_region': False})\n if mqtt_config.get('crop_to_region'):\n region = obj['region']\n best_frame = best_frame[region[1]:region[3], region[0]:region[2]]\n if 'snapshot_height' in mqtt_config: \n height = int(mqtt_config['snapshot_height'])\n width = int(height*best_frame.shape[1]/best_frame.shape[0])\n best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)\n ret, jpg = cv2.imencode('.jpg', best_frame)\n if ret:\n jpg_bytes = jpg.tobytes()\n jpg_as_text = base64.b64encode(jpg).decode()\n payload={\n \"image\": jpg_as_text,\n \"status\": status,\n \"label\":obj[\"label\"],\n \"score\": obj[\"score\"],\n \"start_time\" : obj[\"start_time\"],\n \"id\": obj[\"id\"]\n }\n self.client.publish(f\"{self.topic_prefix}/{camera}/{obj['label']}/event\", json.dumps(payload), retain=True)\n self.client.publish(f\"{self.topic_prefix}/{camera}/{obj['label']}/snapshot\", jpg_bytes, retain=True)\n \n def object_status(camera, object_name, 
status):\n self.client.publish(f\"{self.topic_prefix}/{camera}/{object_name}\", status, retain=False)\n\n for camera in self.camera_config.keys():\n camera_state = CameraState(camera, self.camera_config[camera], self.plasma_client)\n camera_state.on('start', start)\n camera_state.on('update', update)\n camera_state.on('end', end)\n camera_state.on('snapshot', snapshot)\n camera_state.on('object_status', object_status)\n self.camera_states[camera] = camera_state\n\n self.camera_data = defaultdict(lambda: {\n 'best_objects': {},\n 'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),\n 'tracked_objects': {},\n 'current_frame': np.zeros((720,1280,3), np.uint8),\n 'current_frame_time': 0.0,\n 'object_id': None\n })\n # {\n # 'zone_name': {\n # 'person': ['camera_1', 'camera_2']\n # }\n # }\n self.zone_data = defaultdict(lambda: defaultdict(lambda: set()))\n\n # set colors for zones\n all_zone_names = set([zone for config in self.camera_config.values() for zone in config['zones'].keys()])\n zone_colors = {}\n colors = plt.cm.get_cmap('tab10', len(all_zone_names))\n for i, zone in enumerate(all_zone_names):\n zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])\n\n # create zone contours\n for camera_config in self.camera_config.values():\n for zone_name, zone_config in camera_config['zones'].items():\n zone_config['color'] = zone_colors[zone_name]\n coordinates = zone_config['coordinates']\n if isinstance(coordinates, list):\n zone_config['contour'] = np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in coordinates])\n elif isinstance(coordinates, str):\n points = coordinates.split(',')\n zone_config['contour'] = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])\n else:\n print(f\"Unable to parse zone coordinates for {zone_name} - {camera}\")\n \n def get_best(self, camera, label):\n best_objects = self.camera_states[camera].best_objects\n if label in best_objects:\n return best_objects[label]\n else:\n return {}\n \n def get_current_frame(self, camera):\n return self.camera_states[camera].current_frame\n\n def run(self):\n while True:\n if self.stop_event.is_set():\n print(f\"Exiting object processor...\")\n break\n\n try:\n camera, frame_time, current_tracked_objects = self.tracked_objects_queue.get(True, 10)\n except queue.Empty:\n continue\n\n camera_state = self.camera_states[camera]\n\n camera_state.update(frame_time, current_tracked_objects)\n\n # update zone status for each label\n for zone in camera_state.config['zones'].keys():\n # get labels for current camera and all labels in current zone\n labels_for_camera = set([obj['label'] for obj in camera_state.tracked_objects.values() if zone in obj['zones'] and not obj['false_positive']])\n labels_to_check = labels_for_camera | set(self.zone_data[zone].keys())\n # for each label in zone\n for label in labels_to_check:\n camera_list = self.zone_data[zone][label]\n # remove or add the camera to the list for the current label\n previous_state = len(camera_list) > 0\n if label in labels_for_camera:\n camera_list.add(camera_state.name)\n elif camera_state.name in camera_list:\n camera_list.remove(camera_state.name)\n new_state = len(camera_list) > 0\n # if the value is changing, send over MQTT\n if previous_state == False and new_state == True:\n self.client.publish(f\"{self.topic_prefix}/{zone}/{label}\", 'ON', retain=False)\n elif previous_state == True and new_state == False:\n self.client.publish(f\"{self.topic_prefix}/{zone}/{label}\", 'OFF', retain=False)\n" ]
[ [ "numpy.copy", "numpy.zeros" ] ]
qiaozhijian/PLReg3D
[ "35e7df28eb64abf6f6dc9f31c77e8042123242f3" ]
[ "models/model_factory.py" ]
[ "# Author: Jacek Komorowski\n# Warsaw University of Technology\nimport torch\nimport models.minkloc as minkloc\n\n\ndef model_factory(params):\n in_channels = 32 if params.use_unet else 1\n\n if 'MinkFPN' in params.model_params.model:\n model = minkloc.MinkLoc(params.model_params.model, in_channels=in_channels,\n feature_size=params.model_params.feature_size,\n output_dim=params.model_params.output_dim, planes=params.model_params.planes,\n layers=params.model_params.layers, num_top_down=params.model_params.num_top_down,\n conv0_kernel_size=params.model_params.conv0_kernel_size, use_unet = params.use_unet, fix_frontbone = params.fix_frontbone)\n else:\n raise NotImplementedError('Model not implemented: {}'.format(params.model_params.model))\n\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n\n if params.fix_frontbone and params.use_unet:\n model.frontend.load_state_dict(torch.load(params.frontbone_weight, map_location=device)[\"state_dict\"])\n\n\n return model\n" ]
[ [ "torch.cuda.is_available", "torch.load" ] ]
TuringApproved/Turing_Neural_Networks
[ "50101d20c8dc7cfa37db46e0f29bd6d79f66eaad" ]
[ "examples/context_free_grammar_simple_parser.py" ]
[ "__author__ = \"Giovanni Sirio Carmantini\"\n\n\"\"\"In this file we reproduce the simple parser from beim Graben, P.,\n & Potthast, R. (2014). Universal neural field computation. In Neural\n Fields (pp. 299-318).\n\nFirst, the Context Free Grammar is used to create a Generalized Shift,\nan NDA simulating the GS is then created, then the NDA-simulating\nR-ANN is constructed from the NDA.\n\nFinally, the R-ANN dynamics is simulated from given initial conditions\nand visualized.\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.patches import Rectangle\n\nfrom tnnpy import GodelEncoder, SimpleCFGeneralizedShift, NonlinearDynamicalAutomaton, NeuralTM, plot_symbologram\n\n# CFG description\ninput_symbols = [\"NP\", \"V\"]\nstack_symbols = [\"NP\", \"V\", \"VP\", \"S\"]\nparser_descr = {\n \"S\": [\"NP\", \"VP\"],\n \"VP\": [\"V\", \"NP\"],\n}\n\n# Godel Encoders\nge_s = GodelEncoder(stack_symbols)\nge_i = GodelEncoder(input_symbols)\n\n# CFG -> GS\ncfg_gs = SimpleCFGeneralizedShift(stack_symbols, input_symbols, parser_descr)\n# GS -> NDA\nnda = NonlinearDynamicalAutomaton(cfg_gs, ge_s, ge_i)\n\n# NDA -> R-ANN\ncfg_nn = NeuralTM(nda, cylinder_sets=True)\n\n# initial conditions\ninit_stack = ge_s.encode_cylinder(\"S\")\ninit_input = ge_i.encode_cylinder([\"NP\", \"V\", \"NP\"])\n\n# simulate NDA and R-ANN dynamics\nnda_states = nda.iterate(init_stack, init_input, 6)\ncfg_states = cfg_nn.run_net(init_x=init_stack, init_y=init_input, n_iterations=6)\n\n# and plot\nbase_z_order = 10\nplt.style.use(\"ggplot\")\nfig = plt.figure(figsize=[5, 5])\nax2 = plt.axes(aspect=\"equal\")\nax2.axis([0, 1, 0, 1])\n\nplot_symbologram(ax2, stack_symbols, input_symbols, ge_s, ge_i, TM=False)\n\nx_states, y_states = zip(*cfg_states)\nax2.set_xlabel(\"$c_x$ activation\", size=15)\nax2.set_ylabel(\"$c_y$ activation\", size=15)\nplt.tight_layout()\n\n# widths of x and y cells\nx_cell_w = nda.x_leftbounds[1] - nda.x_leftbounds[0]\ny_cell_w = nda.y_leftbounds[1] - nda.y_leftbounds[0]\n\ndef run():\n \"\"\"\n :param plot_cells: If True, plot the whole cell containing the current state,\n rather than the rectangle corresponding to its cylinder representation.\n \"\"\"\n states_old = None\n for i, cfg_state in enumerate(cfg_states):\n if np.array_equal(cfg_state, states_old):\n break\n\n x, y = cfg_state[0][0], cfg_state[1][0]\n w_x = cfg_state[0][1] - x\n w_y = cfg_state[1][1] - y\n\n rect = Rectangle((x, y), w_x, w_y, facecolor=\"orange\", edgecolor=\"black\", zorder=base_z_order + i)\n ax2.add_patch(rect)\n\n ax2.annotate(\n \"{}\".format(i + 1),\n xy=(x, y),\n xytext=(x + w_x / 2.0, y + w_y / 2.0),\n size=15,\n zorder=base_z_order + i,\n )\n\n states_old = cfg_state\n plt.draw()\n plt.pause(1)\n\n\nrun()\n\nprint(\n \"total number of neurons: {}\".format(\n cfg_nn.LTL.n_units\n + cfg_nn.BSLbx.n_units\n + cfg_nn.BSLby.n_units\n + cfg_nn.MCLx.n_units\n + cfg_nn.MCLy.n_units\n )\n)\nplt.show()" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.pause", "numpy.array_equal", "matplotlib.patches.Rectangle", "matplotlib.pyplot.draw", "matplotlib.pyplot.axes", "matplotlib.pyplot.show", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure" ] ]
ahv15/Mininet
[ "b5cdc5282a0b4324b6ece5f4a1756fc0232af8dd" ]
[ "botnet_detection.py" ]
[ "import numpy as np\r\nfrom netml.ndm.iforest import IF\r\nfrom netml.pparser.parser import PCAP\r\nfrom sklearn.model_selection import train_test_split\r\nfrom netml.ndm.model import MODEL\r\nfrom netml.utils.tool import dump_data, load_data\r\n\r\npcap = PCAP(\r\n \"D:\\\\BotnetDetection\\\\isot_app_and_botnet_dataset\\\\application_data\\\\dns_application_2017.pcap\",\r\n flow_ptks_thres=2,\r\n random_state=42,\r\n verbose=10,\r\n)\r\n\r\npcap1 = PCAP(\r\n \"D:\\\\BotnetDetection\\\\isot_app_and_botnet_dataset\\\\botnet_data\\\\init6.pcap\",\r\n flow_ptks_thres=2,\r\n random_state=42,\r\n verbose=10,\r\n)\r\n\r\n\r\npcap.pcap2flows(q_interval=0.9)\r\npcap.flow2features('IAT', fft=False, header=False)\r\npcap1.pcap2flows(q_interval=0.9)\r\npcap1.flow2features('IAT', fft=False, header=False)\r\ndump_data((pcap.features, pcap.labels), out_file='out/IAT-features.dat')\r\ndump_data((pcap1.features, pcap1.labels), out_file='out/IAT-features1.dat')\r\n(features, labels) = load_data('out/IAT-features.dat')\r\n(features1, labels1) = load_data('out/IAT-features1.dat')\r\nfinal_features = np.vstack([features, features1])\r\nfinal_labels = []\r\nfor i in range(len(features)):\r\n final_labels.append(0)\r\nfor i in range(len(features1)):\r\n final_labels.append(1)\r\n(features_train, features_test, labels_train, labels_test) = train_test_split(final_features, final_labels, test_size= 0.33, random_state=42, shuffle=True)\r\nmodel = IF(n_estimators=100, random_state=42)\r\nmodel.name = 'IF'\r\nndm = MODEL(model, score_metric='auc', verbose=10, random_state=42)\r\nndm.train(features_train)\r\nndm.test(features_test, labels_test)\r\nprint(pcap.features.shape, pcap.pcap2flows.tot_time, pcap.flow2features.tot_time)\r\nprint(ndm.train.tot_time, ndm.test.tot_time, ndm.score)" ]
[ [ "sklearn.model_selection.train_test_split", "numpy.vstack" ] ]
ErikGartner/love-letter
[ "36995788292ea6fdfc72a8b09adad01ba6683c26" ]
[ "loveletter/card.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nLove Letter Card tools\nFunctions and constants to facilitate working with cards, which are represented as integers.\n\"\"\"\n\nimport numpy as np\n\n\nclass Card():\n \"\"\"Static Card class\"\"\"\n noCard = 0\n guard = 1\n priest = 2\n baron = 3\n handmaid = 4\n prince = 5\n king = 6\n countess = 7\n princess = 8\n\n # 0 1 2 3\n names = ['', 'Guard', 'Priest', 'Baron',\n # 4 5 6 7 8\n 'Handmaid', 'Prince', 'King', 'Countess', 'Princess']\n # 0 1 2 3 4 5 6 7 8\n symbols = ['☁️', '⚔️', '🕌', '🎲', '🛡️', '⚜️', '👑', '👸', '❤️']\n\n descriptions = ['None', # None\n 'Guess a player\\'s hand', # Guard\n 'Look at a hand', # Priest\n 'Compare hands; lower hand is out.', # Baron\n 'Protection until your next turn', # Handmaid\n 'One player discards their hand', # Prince\n 'Trade hands with target player', # King\n 'Discard if caught with King or Prince', # Countess\n 'Lose if discarded'] # Princess\n\n counts = [5, # Guard\n 2, # Priest\n 2, # Baron\n 2, # Handmaid\n 2, # Prince\n 1, # King\n 1, # Countess\n 1] # Princess\n\n only_self = [4, 7, 8]\n only_other = [1, 2, 3, 6]\n\n @staticmethod\n def render_card_number(card):\n \"\"\"Render a card name with padded length\"\"\"\n numbered_names = [\"{} {} ({})\".format(name, symbol, idx)\n for idx, (name, symbol) in enumerate(zip(Card.names, Card.symbols))]\n max_length = max([len(i) for i in numbered_names])\n str_base = \"{0: >\" + str(max_length) + \"}\"\n return str_base.format(numbered_names[card])\n\n @staticmethod\n def shuffle_deck(seed=451):\n \"\"\"A numpy array of shuffled cards\"\"\"\n deck = []\n for card_number, card_count in enumerate(Card.counts):\n card_id = card_number + 1\n deck = deck + [card_id] * card_count\n deck_np = np.array(deck)\n np.random.seed(seed=seed)\n np.random.shuffle(deck_np)\n return deck_np\n" ]
[ [ "numpy.random.shuffle", "numpy.array", "numpy.random.seed" ] ]
smearle/pytorch-a2c-ppo-acktr-micropolis
[ "699a6f6e65e8bab5533074945cc9aa7827919a59" ]
[ "envs.py" ]
[ "import os\nimport sys\n\nimport gym\nimport numpy as np\nimport torch\nfrom gym.spaces.box import Box\n\nfrom baselines import bench\n#from baselines.common.atari_wrappers import make_atari, wrap_deepmind\nfrom baselines.common.vec_env import VecEnvWrapper\nfrom baselines.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom baselines.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom baselines.common.vec_env.vec_normalize import VecNormalize as VecNormalize_\n\nimport gym_micropolis\n\ntry:\n import dm_control2gym\nexcept ImportError:\n pass\n\ntry:\n import roboschool\nexcept ImportError:\n pass\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pass\n\n\ndef make_env(env_id, seed, rank, log_dir, add_timestep, allow_early_resets, map_width=20, render_gui=False, print_map=False, parallel_py2gui=False, noreward=False, max_step=None, args=None):\n def _thunk():\n if env_id.startswith(\"dm\"):\n _, domain, task = env_id.split('.')\n env = dm_control2gym.make(domain_name=domain, task_name=task)\n else:\n env = gym.make(env_id)\n if 'micropolis' in env_id.lower():\n print(\"ENV RANK: \", rank)\n if rank == 0:\n env.setMapSize(map_width, print_map=print_map, parallel_gui=parallel_py2gui, render_gui=render_gui, empty_start=True, noreward=noreward, max_step=max_step, rank=rank)\n else:\n env.setMapSize(map_width, rank=rank)\n\n is_atari = hasattr(gym.envs, 'atari') and isinstance(\n env.unwrapped, gym.envs.atari.atari_env.AtariEnv)\n if is_atari:\n env = make_atari(env_id)\n env.seed(seed + rank)\n\n obs_shape = env.observation_space.shape\n\n if add_timestep and len(\n obs_shape) == 1 and str(env).find('TimeLimit') > -1:\n env = AddTimestep(env)\n\n if log_dir is not None:\n env = bench.Monitor(env, os.path.join(log_dir, str(rank)),\n allow_early_resets=allow_early_resets)\n\n if is_atari:\n env = wrap_deepmind(env)\n\n # If the input has shape (W,H,3), wrap for PyTorch convolutions\n obs_shape = env.observation_space.shape\n if len(obs_shape) == 3 and obs_shape[2] in [1, 3]:\n env = TransposeImage(env)\n\n return env\n\n return _thunk\n\ndef make_vec_envs(env_name, seed, num_processes, gamma, log_dir, add_timestep,\n device, allow_early_resets, num_frame_stack=None, \n args=None, map_width=20, render_gui=False, print_map=False, \n parallel_py2gui=False, noreward=False, max_step=None):\n envs = [make_env(env_name, seed, i, log_dir, add_timestep, \n allow_early_resets, map_width=map_width, render_gui=render_gui, \n print_map=print_map, parallel_py2gui=parallel_py2gui, noreward=noreward, max_step=max_step, args=args)\n for i in range(num_processes)]\n\n if len(envs) > 1:\n envs = SubprocVecEnv(envs)\n else:\n if sys.version[0] =='2':\n envs = DummyVecEnv('DummyVecEnv', (), {1:envs})\n else:\n envs = DummyVecEnv(envs)\n\n if len(envs.observation_space.shape) == 1:\n if gamma is None:\n envs = VecNormalize(envs, ret=False)\n else:\n envs = VecNormalize(envs, gamma=gamma)\n\n envs = VecPyTorch(envs, device)\n\n if num_frame_stack is not None:\n print(num_frame_stack)\n envs = VecPyTorchFrameStack(envs, num_frame_stack, device)\n elif len(envs.observation_space.shape) == 3:\n envs = VecPyTorchFrameStack(envs, 1, device)\n\n return envs\n\n\n# Can be used to test recurrent policies for Reacher-v2\nclass MaskGoal(gym.ObservationWrapper):\n def observation(self, observation):\n if self.env._elapsed_steps > 0:\n observation[-2:0] = 0\n return observation\n\n\nclass AddTimestep(gym.ObservationWrapper):\n def __init__(self, env=None):\n super(AddTimestep, self).__init__(env)\n 
self.observation_space = Box(\n self.observation_space.low[0],\n self.observation_space.high[0],\n [self.observation_space.shape[0] + 1],\n dtype=self.observation_space.dtype)\n\n def observation(self, observation):\n return np.concatenate((observation, [self.env._elapsed_steps]))\n\n\nclass TransposeImage(gym.ObservationWrapper):\n def __init__(self, env=None):\n super(TransposeImage, self).__init__(env)\n obs_shape = self.observation_space.shape\n self.observation_space = Box(\n self.observation_space.low[0, 0, 0],\n self.observation_space.high[0, 0, 0],\n [obs_shape[2], obs_shape[1], obs_shape[0]],\n dtype=self.observation_space.dtype)\n\n def observation(self, observation):\n return observation.transpose(2, 0, 1)\n\n\nclass VecPyTorch(VecEnvWrapper):\n def __init__(self, venv, device):\n \"\"\"Return only every `skip`-th frame\"\"\"\n super(VecPyTorch, self).__init__(venv)\n self.device = device\n # TODO: Fix data types\n\n def reset(self):\n obs = self.venv.reset()\n ### micropolis ###\n obs = np.array(obs)\n ### ########## ###\n obs = torch.from_numpy(obs).int().to(self.device)\n return obs\n\n def step_async(self, actions):\n actions_async = actions.squeeze(1).cpu().numpy()\n self.venv.step_async(actions_async)\n\n def step_wait(self):\n obs, reward, done, info = self.venv.step_wait()\n ### micropolis ###\n obs = np.array(obs)\n ### ########## ###\n obs = torch.from_numpy(obs).int().to(self.device)\n reward = torch.from_numpy(reward).unsqueeze(dim=1).float()\n return obs, reward, done, info\n\n\nclass VecNormalize(VecNormalize_):\n\n def __init__(self, *args, **kwargs):\n super(VecNormalize, self).__init__(*args, **kwargs)\n self.training = True\n\n def _obfilt(self, obs):\n if self.ob_rms:\n if self.training:\n self.ob_rms.update(obs)\n obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)\n return obs\n else:\n return obs\n\n def train(self):\n self.training = True\n\n def eval(self):\n self.training = False\n\n\n# Derived from\n# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py\nclass VecPyTorchFrameStack(VecEnvWrapper):\n def __init__(self, venv, nstack, device=None):\n self.venv = venv\n self.nstack = nstack\n\n wos = venv.observation_space # wrapped ob space\n self.shape_dim0 = wos.shape[0]\n\n low = np.repeat(wos.low, self.nstack, axis=0)\n high = np.repeat(wos.high, self.nstack, axis=0)\n\n if device is None:\n device = torch.device('cpu')\n self.stacked_obs = torch.zeros((venv.num_envs,) + low.shape).to(device)\n\n observation_space = gym.spaces.Box(\n low=low, high=high, dtype=venv.observation_space.dtype)\n VecEnvWrapper.__init__(self, venv, observation_space=observation_space)\n\n def step_wait(self):\n obs, rews, news, infos = self.venv.step_wait()\n self.stacked_obs[:, :-self.shape_dim0] = \\\n self.stacked_obs[:, self.shape_dim0:]\n for (i, new) in enumerate(news):\n if new:\n self.stacked_obs[i] = 0\n self.stacked_obs[:, -self.shape_dim0:] = obs\n return self.stacked_obs, rews, news, infos\n\n def reset(self):\n obs = self.venv.reset()\n self.stacked_obs.zero_()\n self.stacked_obs[:, -self.shape_dim0:] = obs\n return self.stacked_obs\n\n def close(self):\n self.venv.close()\n" ]
[ [ "numpy.sqrt", "torch.zeros", "torch.from_numpy", "numpy.concatenate", "torch.device", "numpy.repeat", "numpy.array" ] ]
ling-cai/Time2Box
[ "7c6f7467a0341f7c979103121d54e2f911806a6d" ]
[ "codes/run.py" ]
[ "#!/usr/bin/python3\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom model import Query2box\nfrom dataloader import *\nfrom tensorboardX import SummaryWriter\nimport time\nimport pickle\nimport collections\nimport math\nimport torch.optim.lr_scheduler as lr_scheduler\n\nfrom utils import * \n\nimport ray\nfrom ray import tune\n\nimport torch.multiprocessing\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\n\ndef parse_time():\n return time.strftime(\"%Y.%m.%d-%H:%M:%S\", time.localtime())\n\ndef set_global_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic=True\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n description='Training and Testing Knowledge Graph Embedding Models',\n usage='train.py [<args>] [-h | --help]'\n )\n\n parser.add_argument('--cuda', action='store_true', help='use GPU')\n \n parser.add_argument('--do_train', action='store_true')\n parser.add_argument('--do_valid', action='store_true')\n parser.add_argument('--do_test', action='store_true')\n parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')\n \n parser.add_argument('--data_path', type=str, default=None)\n parser.add_argument('--model', default='TransE', type=str)\n \n parser.add_argument('-n', '--negative_sample_size', default=128, type=int)\n parser.add_argument('-d', '--hidden_dim', default=500, type=int)\n parser.add_argument('-g', '--gamma', default=12.0, type=float)\n parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')\n parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)\n parser.add_argument('-b', '--batch_size', default=1024, type=int)\n parser.add_argument('-r', '--regularization', default=0.0, type=float)\n parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')\n parser.add_argument('--uni_weight', action='store_true', \n help='Otherwise use subsampling weighting like in word2vec')\n \n parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)\n parser.add_argument('-cpu', '--cpu_num', default=10, type=int)\n parser.add_argument('-init', '--init_checkpoint', default=None, type=str)\n parser.add_argument('-save', '--save_path', default=None, type=str)\n parser.add_argument('--max_steps', default=100000, type=int)\n parser.add_argument('--warm_up_steps', default=100000, type=int)\n \n # parser.add_argument('--save_checkpoint_steps', default=5000, type=int)\n parser.add_argument('--valid_steps', default=10000, type=int)\n parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')\n parser.add_argument('--model_save_step', default=100, type=int, help='save model in every xx steps')\n parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')\n \n parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')\n parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')\n parser.add_argument('--ntimestamp', type=int, default=0, help='DO NOT MANUALLY SET')\n \n parser.add_argument('--geo', default='vec', type=str, help='vec or box')\n parser.add_argument('--print_on_screen', 
action='store_true')\n \n parser.add_argument('--task', default='1c.2c.3c.2i.3i', type=str)\n parser.add_argument('--stepsforpath', type=int, default=0)\n\n parser.add_argument('--offset_deepsets', default='vanilla', type=str, help='inductive or vanilla or min')\n parser.add_argument('--offset_use_center', action='store_true')\n parser.add_argument('--center_deepsets', default='vanilla', type=str, help='vanilla or attention or mean')\n parser.add_argument('--center_use_offset', action='store_true')\n parser.add_argument('--entity_use_offset', action='store_true')\n parser.add_argument('--att_reg', default=0.0, type=float)\n parser.add_argument('--off_reg', default=0.0, type=float)\n parser.add_argument('--att_tem', default=1.0, type=float)\n\n parser.add_argument('--seed', default=0, type=int)\n parser.add_argument('--gamma2', default=0, type=float)\n parser.add_argument('--train_onehop_only', action='store_true')\n parser.add_argument('--center_reg', default=0.0, type=float, help='alpha in the paper')\n parser.add_argument('--time_smooth_weight', default=0.0, type=float, help='weight for time smoother in the paper')\n parser.add_argument('--time_smoother', type=str, default='L2', help='regularizater used in the paper')\n parser.add_argument('--use_fixed_time_fun', action='store_true', help='pure learning or given the func form')\n parser.add_argument('--use_separate_relation_embedding', action='store_true', help='whether to use a separate relation embedding to consider time')\n parser.add_argument('--bn', default='no', type=str, help='no or before or after')\n parser.add_argument('--n_att', type=int, default=1)\n parser.add_argument('--activation', default='relu', type=str, help='relu or none or softplus')\n parser.add_argument('--act_time', default='none', type=str, help='periodical or non-periodical activation function')\n parser.add_argument('--label', default='test', type=str, help='checkpoint label-- label whether this is the best one')\n\n # whether to use samples for statements with full intervals\n parser.add_argument('--use_relation_time', action='store_true', help='use another relation when creating temporal statements')\n \n parser.add_argument('--use_one_sample', action='store_true', help='convert full interval to point-in-time, only sample one from the interval')\n parser.add_argument('--use_two_sample', action='store_true', help='still as full interval but the start and end is sampled from the true interval')\n parser.add_argument('--add_inverse', action='store_true', help='determine whether to add tuples with inverse relation')\n parser.add_argument('--add_hard_neg', action='store_true', help='determine whether to add hard negative samples')\n parser.add_argument('--double_point_in_time', action='store_true', help='determine whether to repeat time in point-in-time')\n parser.add_argument('--enumerate_time', action='store_true', help='determine whether to repeat time in point-in-time')\n parser.add_argument('--negative_sample_types', default='tail-batch', type=str)\n\n parser.add_argument('--time_score_weight', default=0.1, type=float)\n parser.add_argument('--num_time_negatives', default=0, type=int)\n\n parser.add_argument('--flag_use_weighted_partial_interval', action='store_true')\n\n parser.add_argument('--predict_o', action='store_true')\n parser.add_argument('--predict_t', action='store_true')\n parser.add_argument('--predict_r', action='store_true')\n\n return parser.parse_args(args)\n\ndef override_config(args): #! 
may update here\n '''s\n Override model and data configuration\n '''\n \n with open(os.path.join(args.init_checkpoint, args.data_path.split('/')[-2], args.model, args.label, 'config.json'), 'r') as fjson:\n argparse_dict = json.load(fjson)\n \n if args.data_path is None:\n args.data_path = argparse_dict['data_path']\n args.model = argparse_dict['model']\n args.hidden_dim = argparse_dict['hidden_dim']\n args.test_batch_size = argparse_dict['test_batch_size']\n # args.valid_steps = 20000\n # args.time_smooth_weight = 0.00001\n \ndef save_model(model, optimizer, save_variable_list, args, before_finetune=False, best_model=False):\n '''\n Save the parameters of the model and the optimizer,\n as well as some other variables such as step and learning_rate\n '''\n if best_model:\n if not os.path.exists(os.path.join(args.save_path, 'best_model')):\n os.makedirs(os.path.join(args.save_path, 'best_model'))\n save_path = os.path.join(args.save_path, 'best_model')\n else:\n save_path = args.save_path\n\n argparse_dict = vars(args)\n with open(os.path.join(save_path, 'config.json' if not before_finetune else 'config_before.json'), 'w') as fjson:\n json.dump(argparse_dict, fjson)\n\n torch.save({\n **save_variable_list,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()},\n os.path.join(save_path, 'checkpoint' if not before_finetune else 'checkpoint_before')\n )\n \n entity_embedding = model.entity_embedding.detach().cpu().numpy()\n np.save(\n os.path.join(save_path, 'entity_embedding' if not before_finetune else 'entity_embedding_before'), \n entity_embedding\n )\n \n relation_embedding = model.relation_embedding.detach().cpu().numpy()\n np.save(\n os.path.join(save_path, 'relation_embedding' if not before_finetune else 'relation_embedding_before'), \n relation_embedding\n )\n if model.use_fixed_time_fun:\n time_frequency = model.time_frequency.detach().cpu().numpy()\n np.save(\n os.path.join(save_path, 'time_frequency' if not before_finetune else 'time_frequency_before'), \n time_frequency\n )\n\n time_shift = model.time_shift.detach().cpu().numpy()\n np.save(\n os.path.join(save_path, 'time_shift' if not before_finetune else 'time_shift_before'), \n time_shift\n )\n else:\n time_embedding = model.time_embedding.detach().cpu().numpy()\n np.save(\n os.path.join(save_path, 'time_embedding' if not before_finetune else 'time_embedding_before'), \n time_embedding\n )\n\n\ndef set_logger(args):\n '''\n Write logs to checkpoint and console\n '''\n\n if args.do_train:\n log_file = os.path.join(args.save_path or args.init_checkpoint, 'train.log')\n else:\n tag = 'predict_o'\n if args.predict_t:\n tag = 'predict_t'\n elif args.predict_r:\n tag = 'predict_r'\n\n log_file = os.path.join(args.save_path or args.init_checkpoint, 'test%s.log' % (tag))\n\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=log_file,\n filemode='a'\n )\n if args.print_on_screen:\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\ndef log_metrics(mode, step, metrics):\n '''\n Print the evaluation logs\n '''\n for metric in metrics:\n logging.info('%s %s at step %d: %f' % (mode, metric, step, metrics[metric]))\n\nfrom argparse import Namespace\ndef parameter_tune_main(config):\n ## convert to namespace\n # print('get here')\n while True:\n 
main(args, mode='TUNE')\n\n \ndef main(args, mode=''):\n if isinstance(args, dict):\n args = Namespace(**args)\n\n set_global_seed(args.seed)\n # args.test_batch_size = 4\n assert args.bn in ['no', 'before', 'after']\n assert args.n_att >= 1 and args.n_att <= 3\n\n if args.geo == 'box':\n assert 'Box' in args.model\n elif args.geo == 'vec':\n assert 'Box' not in args.model\n \n if args.train_onehop_only:\n assert '1c' in args.task\n args.center_deepsets = 'mean'\n if args.geo == 'box':\n args.offset_deepsets = 'min'\n\n if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):\n raise ValueError('one of train/val/test mode must be choosed.')\n \n if args.init_checkpoint:\n override_config(args)\n elif args.data_path is None:\n raise ValueError('one of init_checkpoint/data_path must be choosed.')\n\n # if args.do_train and args.save_path is None:\n # raise ValueError('Where do you want to save your trained model?')\n\n cur_time = parse_time()\n print (\"overide save string.\")\n if args.task == '1c':\n args.stepsforpath = 0\n else:\n assert args.stepsforpath <= args.max_steps\n \n args.save_path = 'logs/%s%s/%s/%s/%s'%(args.data_path.split('/')[-1], args.geo, args.data_path.split('/')[-2], args.model, args.label)\n writer = SummaryWriter(args.save_path)\n if args.save_path and not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n \n set_logger(args)\n\n with open('%s/stats.txt'%args.data_path) as f:\n entrel = f.readlines()\n nentity = int(entrel[0].split(' ')[-1])\n nrelation = int(entrel[1].split(' ')[-1])\n\n if 'wikidata' in args.data_path.lower() or 'yago' in args.data_path.lower():\n ntimestamp = int(entrel[2].split(' ')[-1])\n args.ntimestamp = ntimestamp\n \n args.nentity = nentity\n if args.add_inverse:\n logging.info('add inverse: True')\n args.nrelation = nrelation*2\n args.ntimestamp = args.ntimestamp*2\n else:\n args.nrelation = nrelation\n logging.info('add inverse: False')\n\n # print('double_point_in_time')\n \n logging.info('Geo: %s' % args.geo)\n logging.info('Model: %s' % args.model)\n logging.info('Data Path: %s' % args.data_path)\n logging.info('#entity: %d' % args.nentity)\n logging.info('#relation: %d' % args.nrelation)\n logging.info('#timestamps: %d' % args.ntimestamp)\n logging.info('#max steps: %d' % args.max_steps)\n \n\n tasks = args.task.split('.')\n # args.negative_sample_types = args.negative_sample_types.split('.')\n\n # args.model_save_step = args.model_save_step if args.model_save_step >= args.valid_steps else args.valid_steps\n \n train_ans = dict()\n valid_ans = dict()\n valid_ans_hard = dict()\n test_ans = dict()\n test_ans_hard = dict()\n\n # args.data_path = args.data_path.replace('wikidata_toy_expanded', 'wikidata_toy_new')\n ## num_triples in an epoch\n num_triples_per_task = {}\n ## in some datasets, they do not have 1c\n ## in that case, we use 1c-expanded but do not include the result in the final result.\n if '1c' in tasks:\n ## always use 1c-expanded when training\n with open('%s/train_triples_1c.pkl'%args.data_path, 'rb') as handle:\n train_triples = pickle.load(handle)\n\n with open('%s/valid_triples_1c.pkl'%args.data_path, 'rb') as handle:\n valid_triples = pickle.load(handle)\n # if len(valid_triples) == 0: ## no atemporal statements existing in the KB; used for monitoring the loss\n # with open('%s/valid_triples_1c_expanded.pkl'%args.data_path, 'rb') as handle:\n # valid_triples = pickle.load(handle)\n \n with open('%s/test_triples_1c.pkl'%args.data_path, 'rb') as handle:\n 
test_triples = pickle.load(handle)\n\n with open('%s/train_ans_1c.pkl'%args.data_path, 'rb') as handle:\n train_ans_1 = pickle.load(handle)\n with open('%s/valid_ans_1c.pkl'%args.data_path, 'rb') as handle:\n valid_ans_1 = pickle.load(handle)\n with open('%s/test_ans_1c.pkl'%args.data_path, 'rb') as handle:\n test_ans_1 = pickle.load(handle)\n\n with open('%s/valid_ans_1c_hard.pkl'%args.data_path, 'rb') as handle:\n valid_ans_1_hard = pickle.load(handle)\n with open('%s/test_ans_1c_hard.pkl'%args.data_path, 'rb') as handle:\n test_ans_1_hard = pickle.load(handle)\n\n valid_ans_hard.update(valid_ans_1_hard)\n test_ans_hard.update(test_ans_1_hard)\n train_ans.update(train_ans_1)\n valid_ans.update(valid_ans_1)\n test_ans.update(test_ans_1)\n\n num_triples_per_task['1c'] = len(train_triples)\n\n if '2i' in tasks:\n with open('%s/train_triples_2i.pkl'%args.data_path, 'rb') as handle:\n train_triples_2i = pickle.load(handle)\n with open('%s/train_ans_2i.pkl'%args.data_path, 'rb') as handle:\n train_ans_2i = pickle.load(handle)\n\n with open('%s/valid_triples_2i_begin.pkl'%args.data_path, 'rb') as handle:\n valid_triples_2i_begin = pickle.load(handle)\n with open('%s/valid_triples_2i_end.pkl'%args.data_path, 'rb') as handle:\n valid_triples_2i_end = pickle.load(handle)\n with open('%s/valid_ans_2i.pkl'%args.data_path, 'rb') as handle:\n valid_ans_2i = pickle.load(handle)\n with open('%s/valid_ans_2i_hard.pkl'%args.data_path, 'rb') as handle:\n valid_ans_2i_hard = pickle.load(handle)\n\n with open('%s/test_triples_2i_begin.pkl'%args.data_path, 'rb') as handle:\n test_triples_2i_begin = pickle.load(handle)\n with open('%s/test_triples_2i_end.pkl'%args.data_path, 'rb') as handle:\n test_triples_2i_end = pickle.load(handle)\n with open('%s/test_ans_2i.pkl'%args.data_path, 'rb') as handle:\n test_ans_2i = pickle.load(handle) \n with open('%s/test_ans_2i_hard.pkl'%args.data_path, 'rb') as handle:\n test_ans_2i_hard = pickle.load(handle)\n\n num_triples_per_task['2i'] = len(train_triples_2i)\n\n with open('%s/train_ans_2i.pkl'%args.data_path, 'rb') as handle:\n train_ans_2i = pickle.load(handle)\n with open('%s/valid_ans_2i.pkl'%args.data_path, 'rb') as handle:\n valid_ans_2i = pickle.load(handle)\n with open('%s/valid_ans_2i_hard.pkl'%args.data_path, 'rb') as handle:\n valid_ans_2i_hard = pickle.load(handle)\n with open('%s/test_ans_2i.pkl'%args.data_path, 'rb') as handle:\n test_ans_2i = pickle.load(handle) \n with open('%s/test_ans_2i_hard.pkl'%args.data_path, 'rb') as handle:\n test_ans_2i_hard = pickle.load(handle)\n\n\n valid_ans_hard.update(valid_ans_2i_hard)\n test_ans_hard.update(test_ans_2i_hard)\n train_ans.update(train_ans_2i)\n valid_ans.update(valid_ans_2i)\n test_ans.update(test_ans_2i)\n\n \n\n if '3i-2i' in tasks: ## the answer set is in '2i'; begin_only interval (3)end_only_interval\n with open('%s/train_triples_3i_2i.pkl'%args.data_path, 'rb') as handle:\n train_triples_3i_2i = pickle.load(handle)\n num_triples_per_task['3i-2i'] = len(train_triples_3i_2i)\n\n with open('%s/test_triples_3i_2i.pkl'%args.data_path, 'rb') as handle:\n test_triples_3i_2i = pickle.load(handle)\n\n with open('%s/valid_triples_3i_2i.pkl'%args.data_path, 'rb') as handle:\n valid_triples_3i_2i = pickle.load(handle)\n\n num_triples_per_task['3i-2i'] = len(train_triples_3i_2i)\n\n if '3i' in tasks:\n '''\n one case: full intervals with accurate begin date and end date \n '''\n with open('%s/train_triples_3i.pkl'%args.data_path, 'rb') as handle:\n train_triples_3i = pickle.load(handle)\n 
num_triples_per_task['3i'] = len(train_triples_3i)\n\n if args.use_two_sample:\n with open('%s/train_ans_3i.pkl'%args.data_path, 'rb') as handle:\n train_ans_3i = pickle.load(handle)\n train_ans.update(train_ans_3i)\n\n with open('%s/valid_triples_3i.pkl'%args.data_path, 'rb') as handle:\n valid_triples_3i = pickle.load(handle)\n with open('%s/valid_ans_3i.pkl'%args.data_path, 'rb') as handle:\n valid_ans_3i = pickle.load(handle)\n with open('%s/valid_ans_3i_hard.pkl'%args.data_path, 'rb') as handle:\n valid_ans_3i_hard = pickle.load(handle)\n\n with open('%s/test_triples_3i.pkl'%args.data_path, 'rb') as handle:\n test_triples_3i = pickle.load(handle)\n with open('%s/test_ans_3i.pkl'%args.data_path, 'rb') as handle:\n test_ans_3i = pickle.load(handle)\n with open('%s/test_ans_3i_hard.pkl'%args.data_path, 'rb') as handle:\n test_ans_3i_hard = pickle.load(handle)\n\n valid_ans_hard.update(valid_ans_3i_hard)\n test_ans_hard.update(test_ans_3i_hard)\n \n valid_ans.update(valid_ans_3i)\n test_ans.update(test_ans_3i)\n\n if 'time-batch' in args.negative_sample_types:\n with open('%s/ans_t.pkl'%args.data_path, 'rb') as handle:\n ans_t = pickle.load(handle)\n else:\n ans_t = None\n\n if args.predict_t:\n with open('%s/test_ans_t.pkl'%args.data_path, 'rb') as handle:\n test_ans_t = pickle.load(handle) \n with open('%s/valid_ans_t.pkl'%args.data_path, 'rb') as handle:\n valid_ans_t = pickle.load(handle)\n else:\n test_ans_t = None\n valid_ans_t = None\n \n ## get the share of each task used in train\n num_triples_per_epoch = sum([num for num in num_triples_per_task.values()])\n\n if '1c' in tasks:\n logging.info('#train: %d' % len(train_triples))\n logging.info('#valid: %d' % len(valid_triples))\n logging.info('#test: %d' % len(test_triples))\n\n # if '1c-t' in tasks:\n # logging.info('#train-t: %d' % len(train_triples_t))\n # logging.info('#valid-t: %d' % len(valid_triples_t))\n # # logging.info('#test-t: %d' % len(test_triples_t))\n \n if '2i' in tasks:\n logging.info('#train_2i: %d' % len(train_triples_2i))\n logging.info('#valid_2i_begin: %d' % len(valid_triples_2i_begin))\n logging.info('#valid_2i_end: %d' % len(valid_triples_2i_end))\n logging.info('#test_2i_begin: %d' % len(test_triples_2i_begin))\n logging.info('#test_2i_end: %d' % len(test_triples_2i_end))\n\n if '3i-2i' in tasks:\n logging.info('#train_3i_2i: %d' % len(train_triples_3i_2i))\n logging.info('#valid_3i_2i: %d' % len(valid_triples_3i_2i))\n logging.info('#test_3i_2i: %d' % len(test_triples_3i_2i))\n \n if '3i' in tasks:\n logging.info('#train_3i: %d' % len(train_triples_3i))\n logging.info('#valid_3i: %d' % len(valid_triples_3i))\n logging.info('#test_3i: %d' % len(test_triples_3i))\n\n ## in order to make balance between different sub datasets; merge 2i to 3i or 3i-2i\n if args.double_point_in_time and args.use_two_sample:\n raise NotImplementedError\n elif args.double_point_in_time:\n logging.info('merge 2i to 3i')\n train_triples_3i.extend(train_triples_2i)\n else:\n logging.info('merge 2i and 3i-2i')\n train_triples_3i_2i.extend(train_triples_2i)\n\n if args.do_train:\n # Set training dataloader iterator\n if '1c' in tasks: ## here we do not have time information \n train_dataloader_tail = DataLoader(\n TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, train_ans_1, 'tail-batch'), \n batch_size=args.batch_size,\n shuffle=True, \n num_workers=max(1, args.cpu_num),\n collate_fn=TrainDataset.collate_fn\n )\n train_iterator = SingledirectionalOneShotIterator(train_dataloader_tail, 
train_triples[0][-1])\n\n # if '2i' in tasks:\n # train_dataloader_2i_tail = DataLoader(\n # TrainInterDataset(train_triples_2i, nentity, nrelation, args.negative_sample_size, train_ans_2i, 'tail-batch', add_hard_neg=args.add_hard_neg), \n # batch_size=args.batch_size,\n # shuffle=True, \n # num_workers=max(1, args.cpu_num),\n # collate_fn=TrainInterDataset.collate_fn\n # )\n \n # train_iterator_2i = SingledirectionalOneShotIterator(train_dataloader_2i_tail, train_triples_2i[0][-1])\n\n if '3i-2i' in tasks:\n # if 'tail-batch' in args.negative_sample_types:\n train_dataloader_3i_2i_tail = DataLoader(\n TrainInterDataset(train_triples_3i_2i, nentity, nrelation, ntimestamp, args.negative_sample_size, train_ans_2i, mode=args.negative_sample_types, ans_t=ans_t, use_one_sample=args.use_one_sample, use_two_sample=args.use_two_sample, add_hard_neg=args.add_hard_neg, double_point_in_time=args.double_point_in_time, num_time_negatives=args.num_time_negatives), \n batch_size=args.batch_size,\n shuffle=True, \n num_workers=max(1, args.cpu_num),\n collate_fn=TrainInterDataset.collate_fn\n )\n if args.double_point_in_time:\n qtype = '2-3-inter'\n else:\n qtype = train_triples_2i[0][-1]\n train_iterator_3i_2i = SingledirectionalOneShotIterator(train_dataloader_3i_2i_tail, qtype)\n\n # if 'time-batch' in args.negative_sample_types:\n # train_dataloader_3i_2i_time = DataLoader(\n # TrainInterDataset(train_triples_3i_2i, nentity, nrelation, ntimestamp, args.negative_sample_size//8, ans_t, 'time-batch', add_hard_neg=args.add_hard_neg, double_point_in_time=args.double_point_in_time), \n # batch_size=args.batch_size,\n # shuffle=True, \n # num_workers=max(1, args.cpu_num),\n # collate_fn=TrainInterDataset.collate_fn\n # )\n # if args.double_point_in_time:\n # qtype = '2-3-inter'\n # else:\n # qtype = train_triples_2i[0][-1]\n # train_iterator_3i_2i_time = SingledirectionalOneShotIterator(train_dataloader_3i_2i_time, qtype)\n\n\n if '3i' in tasks: # there are three datasets that should be considered\n if args.use_one_sample:\n qtype = '2-inter'\n # train_ans_here = train_ans_2i\n else:\n # train_ans_here = train_ans_3i\n qtype = train_triples_3i[0][-1]\n\n # if 'tail-batch' in args.negative_sample_types:\n train_dataloader_3i_tail = DataLoader(\n TrainInterDataset(train_triples_3i, nentity, nrelation, ntimestamp, args.negative_sample_size, train_ans_2i if args.use_one_sample else train_ans, mode = args.negative_sample_types, ans_t=ans_t, use_one_sample=args.use_one_sample, use_two_sample=args.use_two_sample, add_hard_neg=args.add_hard_neg, double_point_in_time=args.double_point_in_time, num_time_negatives=args.num_time_negatives), \n batch_size=args.batch_size,\n shuffle=True, \n num_workers=max(1, args.cpu_num),\n collate_fn=TrainInterDataset.collate_fn\n )\n train_iterator_3i = SingledirectionalOneShotIterator(train_dataloader_3i_tail, qtype)\n\n # if 'time-batch' in args.negative_sample_types:\n # train_dataloader_3i_time = DataLoader(\n # TrainInterDataset(train_triples_3i, nentity, nrelation, ntimestamp, args.negative_sample_size//8, ans_t, 'time-batch', args.use_one_sample, args.use_two_sample, args.add_hard_neg), \n # batch_size=args.batch_size,\n # shuffle=True, \n # num_workers=max(1, args.cpu_num),\n # collate_fn=TrainInterDataset.collate_fn\n # )\n # train_iterator_3i_time = SingledirectionalOneShotIterator(train_dataloader_3i_time, qtype)\n\n\n if args.time_smooth_weight != 0.0:\n if args.time_smoother == 'Lambda3':\n time_reg = Lambda3(args.time_smooth_weight)\n elif args.time_smoother == 'L2':\n 
time_reg = L2(args.time_smooth_weight)\n else:\n time_reg = None\n\n query2box = Query2box(\n model_name=args.model,\n nentity=args.nentity,\n nrelation=args.nrelation,\n ntimestamp=args.ntimestamp,\n hidden_dim=args.hidden_dim,\n gamma=args.gamma,\n writer=writer,\n geo=args.geo,\n cen=args.center_reg,\n offset_deepsets = args.offset_deepsets, ## method used for aggregating off_sets of boxes\n center_deepsets = args.center_deepsets, ## method used for aggregating centers of boxes\n offset_use_center = args.offset_use_center, ## whether to use center information when aggregating the offsets\n center_use_offset = args.center_use_offset, ## whether to use offset information when aggregating the centers\n att_reg = args.att_reg,\n off_reg = args.off_reg,\n att_tem = args.att_tem,\n euo = args.entity_use_offset, ## whether to treat entities as boxes as well\n gamma2 = args.gamma2,\n bn = args.bn, # when to use batch normalization; no, before, after\n nat = args.n_att, ## num of layers in an attention module\n activation = args.activation,\n act_time=args.act_time,\n use_fixed_time_fun = args.use_fixed_time_fun,\n time_reg = time_reg,\n use_separate_relation_embedding = args.use_separate_relation_embedding,\n use_relation_time=args.use_relation_time\n )\n \n logging.info('Model Parameter Configuration:')\n num_params = 0\n for name, param in query2box.named_parameters():\n logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))\n if param.requires_grad:\n num_params += np.prod(param.size())\n logging.info('Parameter Number: %d' % num_params)\n\n if args.cuda:\n query2box = query2box.cuda()\n \n # Set training configuration\n current_learning_rate = args.learning_rate\n optimizer = torch.optim.Adam(\n filter(lambda p: p.requires_grad, query2box.parameters()), \n lr=current_learning_rate\n )\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'max', factor=0.1, patience=2, verbose=True) # mrr tracking\n # if args.warm_up_steps:\n # warm_up_steps = args.warm_up_steps\n # else:\n # warm_up_steps = args.max_steps // 2\n # args.data_path = args.data_path.replace('wikidata_toy_new', 'wikidata_toy_expanded')\n if args.init_checkpoint:\n # Restore model from checkpoint directory\n \n try:\n checkpoint = torch.load(os.path.join(args.init_checkpoint, args.data_path.split('/')[-2], args.model, args.label, 'best_model', 'checkpoint'))\n logging.info('Loading checkpoint %s...' % os.path.join(args.init_checkpoint, args.data_path.split('/')[-2], args.model, args.label, 'best_model', 'checkpoint'))\n except:\n checkpoint = torch.load(os.path.join(args.init_checkpoint, args.data_path.split('/')[-2], args.model, args.label, 'checkpoint'))\n logging.info('Loading checkpoint %s...' 
% os.path.join(args.init_checkpoint, args.data_path.split('/')[-2], args.model, args.label, 'checkpoint'))\n\n init_step = checkpoint['step']\n if 'best_valid_mrrm' in checkpoint:\n best_valid_mrrm = checkpoint['best_valid_mrrm']\n last_valid_mrrm = checkpoint['curr_valid_mrrm']\n else:\n best_valid_mrrm = 0.0\n last_valid_mrrm = 0.0\n query2box.load_state_dict(checkpoint['model_state_dict'])\n if args.do_train:\n current_learning_rate = checkpoint['current_learning_rate']\n # current_learning_rate = 0.0001\n # warm_up_steps = checkpoint['warm_up_steps']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n ## change learning_rate\n for g in optimizer.param_groups:\n g['lr'] = 0.001\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'max', factor=0.1, patience=2, verbose=True)\n else:\n logging.info('Ramdomly Initializing %s Model...' % args.model)\n init_step = 0\n best_valid_mrrm = 0.0\n last_valid_mrrm = 0.0\n\n step = init_step \n\n logging.info('all param setting used in the model\\n %s' % args)\n logging.info('init_step = %d' % init_step)\n logging.info('best valid mrr = %f' % best_valid_mrrm)\n logging.info('last_valid_mrrm = %f' % last_valid_mrrm)\n # logging.info('number of triples in training = %d' % num_triples_per_epoch)\n if args.do_train:\n logging.info('Start Training...')\n logging.info('learning_rate = %f' % current_learning_rate)\n # logging.info('batch_size = %d' % args.batch_size)\n # logging.info('negative_adversarial_sampling = %d' % args.negative_adversarial_sampling)\n # logging.info('hidden_dim = %d' % args.hidden_dim)\n # logging.info('gamma = %f' % args.gamma)\n # logging.info('negative_adversarial_sampling = %s' % str(args.negative_adversarial_sampling))\n # if args.negative_adversarial_sampling:\n # logging.info('adversarial_temperature = %f' % args.adversarial_temperature)\n \n # Set valid dataloader as it would be evaluated during training\n \n def evaluate_test():\n average_metrics = collections.defaultdict(list)\n average_c_metrics = collections.defaultdict(list)\n average_c2_metrics = collections.defaultdict(list)\n average_i_metrics = collections.defaultdict(list)\n average_2i_metrics = collections.defaultdict(list)\n average_ex_metrics = collections.defaultdict(list)\n average_u_metrics = collections.defaultdict(list)\n total_number_triples = 0\n total_number_triples_i = 0\n total_number_triples_c = 0\n\n ## save rank results\n if not os.path.exists(args.save_path+'/rank_result/'):\n os.makedirs(args.save_path+'/rank_result/')\n\n checkpoint = torch.load(os.path.join(args.save_path, 'best_model', 'checkpoint'))\n\n query2box.load_state_dict(checkpoint['model_state_dict'])\n\n if '2i' in tasks:\n if len(test_triples_2i_begin)>0:\n metrics = query2box.test_step(query2box, test_triples_2i_begin, test_ans, test_ans_hard, args, '2i-begin', test_ans_t)\n # if args.predict_o:\n log_metrics('Test only-begin', step, metrics)\n num_triples = len(test_triples_2i_begin)\n total_number_triples += num_triples\n total_number_triples_i += num_triples\n for metric in metrics:\n writer.add_scalar('Test/Test_2i_begin_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric]*num_triples)\n average_i_metrics[metric].append(metrics[metric]*num_triples)\n average_2i_metrics[metric].append(metrics[metric]*num_triples)\n if len(test_triples_2i_end)>0:\n metrics = query2box.test_step(query2box, test_triples_2i_end, test_ans, test_ans_hard, args, '2i-end', test_ans_t)\n # if args.predict_o:\n log_metrics('Test only-end', step, metrics)\n 
num_triples = len(test_triples_2i_end)\n total_number_triples += num_triples\n total_number_triples_i += num_triples\n for metric in metrics:\n writer.add_scalar('Test/Test_2i_end_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric]*num_triples)\n average_i_metrics[metric].append(metrics[metric]*num_triples)\n average_2i_metrics[metric].append(metrics[metric]*num_triples)\n\n if '3i' in tasks:\n metrics = query2box.test_step(query2box, test_triples_3i, test_ans, test_ans_hard, args, '3i', test_ans_t)\n if args.predict_o:\n log_metrics('Test full time', step, metrics)\n num_triples = len(test_triples_3i)\n total_number_triples += num_triples\n total_number_triples_i += num_triples\n for metric in metrics:\n writer.add_scalar('Test/Test_3i_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric]*num_triples)\n average_i_metrics[metric].append(metrics[metric]*num_triples)\n\n if '3i-2i' in tasks:\n metrics = query2box.test_step(query2box, test_triples_3i_2i, test_ans, test_ans_hard, args, '3i-2i', test_ans_t)\n # if args.predict_o:\n log_metrics('Test point time', step, metrics)\n num_triples = len(test_triples_3i_2i)\n total_number_triples += num_triples\n total_number_triples_i += num_triples\n for metric in metrics:\n writer.add_scalar('Test/Test_3i_2i'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric]*num_triples)\n average_i_metrics[metric].append(metrics[metric]*num_triples)\n\n # if '2c' in tasks:\n # metrics = query2box.test_step(query2box, test_triples_2, test_ans, test_ans_hard, args)\n # log_metrics('Test 2c', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Test_2c_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_c_metrics[metric].append(metrics[metric])\n # average_c2_metrics[metric].append(metrics[metric])\n # if '3c' in tasks:\n # metrics = query2box.test_step(query2box, test_triples_3, test_ans, test_ans_hard, args)\n # log_metrics('Test 3c', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Test_3c_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_c_metrics[metric].append(metrics[metric])\n # average_c2_metrics[metric].append(metrics[metric])\n \n if '1c' in tasks and args.predict_o:\n if len(test_triples) != 0:\n metrics = query2box.test_step(query2box, test_triples, test_ans, test_ans_hard, args, '1c')\n log_metrics('Test no time', step, metrics)\n num_triples = len(test_triples)\n total_number_triples += num_triples\n total_number_triples_c += num_triples\n for metric in metrics:\n writer.add_scalar('Test/Test_1c_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric]*num_triples)\n average_c_metrics[metric].append(metrics[metric]*num_triples)\n\n # if 'ci' in tasks:\n # metrics = query2box.test_step(query2box, test_triples_ci, test_ans, test_ans_hard, args)\n # log_metrics('Test ci', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Test_ci_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_ex_metrics[metric].append(metrics[metric])\n # if 'ic' in tasks:\n # metrics = query2box.test_step(query2box, test_triples_ic, test_ans, test_ans_hard, args)\n # log_metrics('Test ic', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Test_ic_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # 
average_ex_metrics[metric].append(metrics[metric])\n # if '2u' in tasks:\n # metrics = query2box.test_step(query2box, test_triples_2u, test_ans, test_ans_hard, args)\n # log_metrics('Test 2u', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Test_2u_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_u_metrics[metric].append(metrics[metric])\n # if 'uc' in tasks:\n # metrics = query2box.test_step(query2box, test_triples_uc, test_ans, test_ans_hard, args)\n # log_metrics('Test uc', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Test_uc_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_u_metrics[metric].append(metrics[metric])\n for metric in average_metrics:\n writer.add_scalar('Test/Test_average_'+metric, np.sum(average_metrics[metric])/total_number_triples, step)\n log_metrics('Test average_metrics_', step, {metric: np.sum(average_metrics[metric])/total_number_triples})\n for metric in average_c_metrics:\n writer.add_scalar('Test/Test_average_no_time_'+metric, np.sum(average_c_metrics[metric])/total_number_triples_c, step)\n log_metrics('Test average_no_time_metrics_', step, {metric: np.sum(average_c_metrics[metric])/total_number_triples_c})\n # for metric in average_c2_metrics:\n # writer.add_scalar('Test/Test_average_c2_'+metric, np.sum(average_c2_metrics[metric]/tot), step)\n for metric in average_i_metrics:\n writer.add_scalar('Test/Test_average_i_'+metric, np.sum(average_i_metrics[metric])/total_number_triples_i, step)\n log_metrics('Test average_with_time_metrics_', step, {metric: np.sum(average_i_metrics[metric])/total_number_triples_i})\n\n for metric in average_2i_metrics:\n writer.add_scalar('Test/Test_average_partial_'+metric, np.sum(average_2i_metrics[metric])/(len(test_triples_2i_begin)+len(test_triples_2i_end)), step)\n log_metrics('Test average_partial_time_metrics_', step, {metric: np.sum(average_2i_metrics[metric])/(len(test_triples_2i_begin)+len(test_triples_2i_end))})\n # for metric in average_u_metrics:\n # writer.add_scalar('Test/Test_average_u_'+metric, np.mean(average_u_metrics[metric]), step)\n # for metric in average_ex_metrics:\n # writer.add_scalar('Test/Test_average_ex_'+metric, np.mean(average_ex_metrics[metric]), step)\n\n def print_named_parameters():\n for k, v in query2box.named_parameters():\n print(k, v[:10])\n\n def evaluate_val(tasks):\n average_metrics = collections.defaultdict(list)\n average_c_metrics = collections.defaultdict(list)\n average_c2_metrics = collections.defaultdict(list)\n average_i_metrics = collections.defaultdict(list)\n average_2i_metrics = collections.defaultdict(list)\n average_ex_metrics = collections.defaultdict(list)\n average_u_metrics = collections.defaultdict(list)\n\n total_number_triples = 0\n total_number_triples_i = 0\n total_number_triples_c = 0\n\n # ## save rank results\n if not os.path.exists(args.save_path+'/rank_result/'):\n os.makedirs(args.save_path+'/rank_result/')\n\n # checkpoint = torch.load(os.path.join(args.save_path, 'best_model', 'checkpoint'))\n\n # query2box.load_state_dict(checkpoint['model_state_dict'])\n\n # if '2i-1c' in tasks:\n # metrics = query2box.test_step(query2box, valid_triples_2i_1c, valid_ans, valid_ans_hard, args)\n # log_metrics('Valid 2i 1c', step, metrics)\n # num_triples = len(valid_triples_2i_1c)\n # total_number_triples += num_triples\n # total_number_triples_c += num_triples\n # for metric in metrics:\n # writer.add_scalar('Valid_2i_1c_'+metric, 
metrics[metric], step)\n # average_metrics[metric].append(metrics[metric]*num_triples)\n # average_i_metrics[metric].append(metrics[metric]*num_triples)\n if '2i' in tasks: \n if len(valid_triples_2i_begin) > 0: \n metrics = query2box.test_step(query2box, valid_triples_2i_begin, valid_ans, valid_ans_hard, args, '2i-begin-valid' if args.predict_t else '', valid_ans_t)\n log_metrics('Valid only-begin time', step, metrics)\n num_triples = len(valid_triples_2i_begin)\n total_number_triples += num_triples\n total_number_triples_i += num_triples\n for metric in metrics:\n writer.add_scalar('valid/Valid_2i_begin_'+metric, metrics[metric], step) \n average_metrics[metric].append(metrics[metric]*num_triples)\n average_i_metrics[metric].append(metrics[metric]*num_triples)\n average_2i_metrics[metric].append(metrics[metric]*num_triples)\n if len(valid_triples_2i_end) > 0:\n metrics = query2box.test_step(query2box, valid_triples_2i_end, valid_ans, valid_ans_hard, args, '2i-end-valid' if args.predict_t else '', valid_ans_t)\n log_metrics('Valid only-end time', step, metrics)\n num_triples = len(valid_triples_2i_end)\n total_number_triples += num_triples\n total_number_triples_i += num_triples\n for metric in metrics:\n writer.add_scalar('valid/Valid_2i_end_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric]*num_triples)\n average_i_metrics[metric].append(metrics[metric]*num_triples)\n average_2i_metrics[metric].append(metrics[metric]*num_triples)\n\n if '3i-2i' in tasks: \n metrics = query2box.test_step(query2box, valid_triples_3i_2i, valid_ans, valid_ans_hard, args, '3i-2i-valid' if args.predict_t else '', valid_ans_t)\n log_metrics('Valid point time', step, metrics)\n num_triples = len(valid_triples_3i_2i)\n total_number_triples += num_triples\n total_number_triples_i += num_triples\n for metric in metrics:\n writer.add_scalar('valid/Valid_3i_2i_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric]*num_triples)\n average_i_metrics[metric].append(metrics[metric]*num_triples)\n\n # if '3i-1c' in tasks:\n # metrics = query2box.test_step(query2box, valid_triples_3i_1c, valid_ans, valid_ans_hard, args)\n # log_metrics('Valid 3i 1c', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('valid/Valid_3i_1c_'+metric, metrics[metric]*num_triples, step)\n # average_metrics[metric].append(metrics[metric])\n # average_i_metrics[metric].append(metrics[metric])\n\n if '3i' in tasks:\n metrics = query2box.test_step(query2box, valid_triples_3i, valid_ans, valid_ans_hard, args, '3i-valid' if args.predict_t else '', valid_ans_t)\n log_metrics('Valid full time', step, metrics)\n num_triples = len(valid_triples_3i)\n total_number_triples += num_triples\n total_number_triples_i += num_triples\n for metric in metrics:\n writer.add_scalar('valid/Valid_3i'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric]*num_triples)\n average_i_metrics[metric].append(metrics[metric]*num_triples)\n\n # if '2c' in tasks:\n # metrics = query2box.test_step(query2box, valid_triples_2, valid_ans, valid_ans_hard, args)\n # log_metrics('Valid 2c', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Valid_2c_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_c_metrics[metric].append(metrics[metric])\n # average_c2_metrics[metric].append(metrics[metric])\n\n # if '3c' in tasks:\n # metrics = query2box.test_step(query2box, valid_triples_3, valid_ans, valid_ans_hard, args)\n # log_metrics('Valid 
3c', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Valid_3c_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_c_metrics[metric].append(metrics[metric])\n # average_c2_metrics[metric].append(metrics[metric])\n \n if '1c' in tasks:\n if len(valid_triples) != 0 and args.predict_o:\n metrics = query2box.test_step(query2box, valid_triples, valid_ans, valid_ans_hard, args)\n # assert len(valid_triples) != 0 ## always\n # tag = 'expanded' if len(test_triples) == 0 else ''\n log_metrics('Valid no time', step, metrics)\n num_triples = len(valid_triples)\n total_number_triples += num_triples\n total_number_triples_c += num_triples\n\n for metric in metrics:\n writer.add_scalar('valid/Valid_1c_' + metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric]*num_triples)\n average_c_metrics[metric].append(metrics[metric]*num_triples)\n\n # if '1c-t' in tasks:\n # metrics = query2box.test_step(query2box, valid_triples_t, valid_ans, valid_ans_hard, args)\n # log_metrics('Valid 1c t', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Valid_1c_t_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_c_metrics[metric].append(metrics[metric])\n\n # if 'ci' in tasks:\n # metrics = query2box.test_step(query2box, valid_triples_ci, valid_ans, valid_ans_hard, args)\n # log_metrics('Valid ci', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Valid_ci_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_ex_metrics[metric].append(metrics[metric])\n # if 'ic' in tasks:\n # metrics = query2box.test_step(query2box, valid_triples_ic, valid_ans, valid_ans_hard, args)\n # log_metrics('Valid ic', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Valid_ic_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_ex_metrics[metric].append(metrics[metric])\n # if '2u' in tasks:\n # metrics = query2box.test_step(query2box, valid_triples_2u, valid_ans, valid_ans_hard, args)\n # log_metrics('Valid 2u', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Valid_2u_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_u_metrics[metric].append(metrics[metric])\n # if 'uc' in tasks:\n # metrics = query2box.test_step(query2box, valid_triples_uc, valid_ans, valid_ans_hard, args)\n # log_metrics('Valid uc', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('Valid_uc_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_u_metrics[metric].append(metrics[metric])\n for metric in average_metrics:\n writer.add_scalar('valid/Valid_average_'+metric, np.sum(average_metrics[metric])/total_number_triples, step)\n for metric in average_c_metrics:\n writer.add_scalar('valid/Valid_average_c_'+metric, np.sum(average_c_metrics[metric])/total_number_triples_c, step)\n # for metric in average_c2_metrics:\n # writer.add_scalar('Valid_average_c2_'+metric, np.mean(average_c2_metrics[metric]), step)\n for metric in average_i_metrics:\n writer.add_scalar('valid/Valid_average_i_'+metric, np.sum(average_i_metrics[metric])/total_number_triples_i, step)\n # for metric in average_u_metrics:\n # writer.add_scalar('Valid_average_u_'+metric, np.mean(average_u_metrics[metric]), step)\n # for metric in average_ex_metrics:\n # writer.add_scalar('Valid_average_ex_'+metric, 
np.mean(average_ex_metrics[metric]), step)\n for metric in average_2i_metrics:\n writer.add_scalar('valid/Valid_average_partial_'+metric, np.sum(average_2i_metrics[metric])/(len(valid_triples_2i_begin)+len(valid_triples_2i_end)), step)\n\n return np.sum(average_metrics['MRRm_new'])/total_number_triples\n \n def evaluate_train():\n average_metrics = collections.defaultdict(list)\n average_c_metrics = collections.defaultdict(list)\n average_c2_metrics = collections.defaultdict(list)\n average_i_metrics = collections.defaultdict(list)\n if '2i' in tasks:\n metrics = query2box.test_step(query2box, train_triples_2i, train_ans, train_ans, args)\n log_metrics('train 2i', step, metrics)\n for metric in metrics:\n writer.add_scalar('train/train_2i_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric])\n average_i_metrics[metric].append(metrics[metric])\n\n if '3i' in tasks:\n # metrics = query2box.test_step(query2box, train_triples_3i, train_ans, train_ans, args)\n # log_metrics('train 3i', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('train_3i_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_i_metrics[metric].append(metrics[metric])\n\n metrics = query2box.test_step(query2box, train_triples_3i_1c, train_ans, train_ans, args)\n log_metrics('train 3i 1c', step, metrics)\n for metric in metrics:\n writer.add_scalar('train_3i_1c_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric])\n average_i_metrics[metric].append(metrics[metric])\n\n # metrics = query2box.test_step(query2box, train_triples_3i_begin, train_ans, train_ans, args, interval_type='begin-only')\n # log_metrics('train 3i begin', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('train_3i_begin_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_i_metrics[metric].append(metrics[metric])\n\n # metrics = query2box.test_step(query2box, train_triples_3i_end, train_ans, train_ans, args, interval_type='end-only')\n # log_metrics('train 3i end', step, metrics)\n # for metric in metrics:\n # writer.add_scalar('train_3i_end_'+metric, metrics[metric], step)\n # average_metrics[metric].append(metrics[metric])\n # average_i_metrics[metric].append(metrics[metric])\n\n if '2c' in tasks:\n metrics = query2box.test_step(query2box, train_triples_2, train_ans, train_ans, args)\n log_metrics('train 2c', step, metrics)\n for metric in metrics:\n writer.add_scalar('train_2c_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric])\n average_c_metrics[metric].append(metrics[metric])\n average_c2_metrics[metric].append(metrics[metric])\n if '3c' in tasks:\n metrics = query2box.test_step(query2box, train_triples_3, train_ans, train_ans, args)\n log_metrics('train 3c', step, metrics)\n for metric in metrics:\n writer.add_scalar('train_3c_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric])\n average_c_metrics[metric].append(metrics[metric])\n average_c2_metrics[metric].append(metrics[metric])\n if '1c' in tasks:\n metrics = query2box.test_step(query2box, train_triples, train_ans, train_ans, args)\n log_metrics('train 1c', step, metrics)\n for metric in metrics:\n writer.add_scalar('train_1c_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric])\n average_c_metrics[metric].append(metrics[metric])\n\n if '1c-t' in tasks:\n metrics = query2box.test_step(query2box, train_triples_t, train_ans, train_ans, 
args)\n log_metrics('train 1c-t', step, metrics)\n for metric in metrics:\n writer.add_scalar('train_1c_t_'+metric, metrics[metric], step)\n average_metrics[metric].append(metrics[metric])\n average_c_metrics[metric].append(metrics[metric])\n\n for metric in average_metrics:\n writer.add_scalar('train_average_'+metric, np.mean(average_metrics[metric]), step)\n for metric in average_c_metrics:\n writer.add_scalar('train_average_c_'+metric, np.mean(average_c_metrics[metric]), step)\n for metric in average_c2_metrics:\n writer.add_scalar('train_average_c2_'+metric, np.mean(average_c2_metrics[metric]), step)\n for metric in average_i_metrics:\n writer.add_scalar('train_average_i_'+metric, np.mean(average_i_metrics[metric]), step)\n\n def get_learning_rate(learning_rate, hidden_dim, learning_rate_warmup_steps, step):\n learning_rate *= (hidden_dim ** -0.5)\n # Apply linear warmup\n learning_rate *= min(1.0, step / learning_rate_warmup_steps)\n # Apply rsqrt decay\n learning_rate *= (max(step, learning_rate_warmup_steps))**-0.5\n\n return learning_rate\n\n\n ################################ TRAINING STARTING####################################\n # def Do_Train(args, mode=''):\n if args.do_train:\n training_logs = []\n training_logs_mean = []\n if args.task == '1c':\n begin_pq_step = args.max_steps\n else:\n begin_pq_step = args.max_steps - args.stepsforpath\n\n# Training Loop\n total_trained_num_triples = 0\n # _ = evaluate_val(tasks)\n for step in range(init_step, args.max_steps+1):\n # if step == 3000:\n # # print('setting regularization to 0')\n # args.regularization = 0.0\n\n if '1c' in tasks:\n # print(\"1c\")\n log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator, args, step)\n for metric in log:\n writer.add_scalar('train/1c_'+metric, log[metric], step)\n training_logs.append(log)\n\n total_trained_num_triples += num_triples_per_step\n\n # if (step % args.log_steps == 0):\n # logging.info('current learning_rate: %f' % (current_learning_rate))\n # metrics = {}\n # for metric in training_logs[0].keys():\n # metrics[metric] = sum([log[metric] for log in training_logs])/len(training_logs)\n\n # log_metrics('Training average 1c', step, metrics)\n # training_logs = []\n\n # ## decide which temporal task will be trained\n # task_selector = np.random.rand(1)\n # task_index = np.nonzero(task_selector < ratio_temporal_triples)[0][0]\n # selected_task = temporal_tasks[task_index]\n\n ## combine the datasets of 2i and 3i-2i\n # if '2i' in tasks:\n # # start = time.time()\n # log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator_2i, args, step, use_time=True)\n # # end = time.time()\n # # print('time used in training in total in 2i', end - start)\n\n # for metric in log:\n # writer.add_scalar('train/2i_'+metric, log[metric], step)\n # training_logs.append(log)\n\n # total_trained_num_triples += num_triples_per_step\n \n\n # if (step % args.log_steps == 0):\n # logging.info('current learning_rate: %f' % (current_learning_rate))\n # metrics = {}\n # for metric in training_logs[0].keys():\n # if metric == 'inter_loss':\n # continue\n # metrics[metric] = sum([log[metric] for log in training_logs])/len(training_logs)\n\n # if step % args.valid_steps == 0 and step > 0 and args.evaluate_train:\n # evaluate_train()\n # inter_loss_sum = 0.\n # inter_loss_num = 0.\n # for log in training_logs:\n # if 'inter_loss' in log:\n # inter_loss_sum += log['inter_loss']\n # inter_loss_num += 1\n # if inter_loss_num != 0:\n # metrics['inter_loss'] = 
inter_loss_sum / inter_loss_num\n # log_metrics('Training average 2i', step, metrics)\n # training_logs = []\n\n # if '3i-2i' in tasks:\n # start = time.time()\n # log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator_3i_2i, args, step, use_time=True)\n # end = time.time()\n # print('time used in training in total in 3i-2i', end - start)\n\n # for metric in log:\n # writer.add_scalar('train/3i_2i_'+metric, log[metric], step)\n # training_logs.append(log)\n\n # total_trained_num_triples += num_triples_per_step\n\n # if (step % args.log_steps == 0):\n # logging.info('current learning_rate: %f' % (current_learning_rate))\n # metrics = {}\n # for metric in training_logs[0].keys():\n # if metric == 'inter_loss':\n # continue\n # metrics[metric] = sum([log[metric] for log in training_logs])/len(training_logs)\n\n # if step % args.valid_steps == 0 and step > 0 and args.evaluate_train:\n # evaluate_train()\n # inter_loss_sum = 0.\n # inter_loss_num = 0.\n # for log in training_logs:\n # if 'inter_loss' in log:\n # inter_loss_sum += log['inter_loss']\n # inter_loss_num += 1\n # if inter_loss_num != 0:\n # metrics['inter_loss'] = inter_loss_sum / inter_loss_num\n # log_metrics('Training average 3i-2i', step, metrics)\n # training_logs = []\n\n\n # also contain three parts: train_iterator_3i, train_iterator_3i_begin, train_iterator_3i_end\n # start = time.time()\n if len(args.negative_sample_types)==2:\n if '3i' in tasks:\n # if step %2 == 0:\n log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator_3i, args, step, use_time=True)\n # else:\n # log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator_3i_time, args, step, use_time=True)\n\n for metric in log:\n writer.add_scalar('train/3i_'+metric, log[metric], step)\n training_logs.append(log)\n\n total_trained_num_triples += num_triples_per_step\n\n # print('next...')\n if '3i-2i' in tasks:\n # if step%2 != 0:\n log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator_3i_2i, args, step, use_time=True)\n # else:\n # log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator_3i_2i_time, args, step, use_time=True)\n # end = time.time()\n # print('time used in training in total in 2i', end - start)\n\n for metric in log:\n writer.add_scalar('train/3i_2i_'+metric, log[metric], step)\n training_logs.append(log)\n\n total_trained_num_triples += num_triples_per_step\n # end = time.time()\n # print('time used in training in total in 3i', end - start)\n elif 'tail-batch' in args.negative_sample_types:\n if '3i' in tasks:\n log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator_3i, args, step, use_time=True)\n\n for metric in log:\n writer.add_scalar('train/3i_'+metric, log[metric], step)\n training_logs.append(log)\n\n total_trained_num_triples += num_triples_per_step\n\n if '3i-2i' in tasks:\n # start = time.time()\n log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator_3i_2i, args, step, use_time=True)\n # end = time.time()\n # print('time used in training in total in 2i', end - start)\n\n for metric in log:\n writer.add_scalar('train/3i_2i_'+metric, log[metric], step)\n training_logs.append(log)\n\n total_trained_num_triples += num_triples_per_step\n\n elif 'time-batch' in args.negative_sample_types:\n if '3i' in tasks:\n log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator_3i_time, args, step, use_time=True)\n\n for 
metric in log:\n writer.add_scalar('train/3i_'+metric, log[metric], step)\n training_logs.append(log)\n\n total_trained_num_triples += num_triples_per_step\n\n if '3i-2i' in tasks:\n # start = time.time()\n log, num_triples_per_step = query2box.train_step(query2box, optimizer, train_iterator_3i_2i_time, args, step, use_time=True)\n # end = time.time()\n # print('time used in training in total in 2i', end - start)\n\n for metric in log:\n writer.add_scalar('train/3i_2i_'+metric, log[metric], step)\n training_logs.append(log)\n\n total_trained_num_triples += num_triples_per_step\n else:\n raise NotImplementedError\n \n\n if (step % args.log_steps == 0):\n logging.info('current learning_rate: %f' % (current_learning_rate))\n metrics = {}\n for metric in training_logs[-1].keys():\n # if metric == 'inter_loss':\n # continue\n metrics[metric] = sum([log[metric] for log in training_logs])/len(training_logs)\n\n if step % args.valid_steps == 0 and step > 0 and args.evaluate_train:\n evaluate_train()\n # inter_loss_sum = 0.\n # inter_loss_num = 0.\n # for log in training_logs:\n # if 'inter_loss' in log:\n # inter_loss_sum += log['inter_loss']\n # inter_loss_num += 1\n # if inter_loss_num != 0:\n # metrics['inter_loss'] = inter_loss_sum / inter_loss_num\n log_metrics('Training average loss', step, metrics)\n writer.add_scalar('train/average_loss', metrics['loss'], step)\n training_logs = []\n\n # if '2c' in tasks:\n # log = query2box.train_step(query2box, optimizer, train_iterator_2, args, step)\n # for metric in log:\n # writer.add_scalar('2c_'+metric, log[metric], step)\n # training_logs.append(log)\n \n # if '3c' in tasks:\n # log = query2box.train_step(query2box, optimizer, train_iterator_3, args, step)\n # for metric in log:\n # writer.add_scalar('3c_'+metric, log[metric], step)\n # training_logs.append(log)\n\n # writer.add_embedding(query2box.entity_embedding, metadata=meta)\n # writer.add_embedding(query2box.relation_embedding, metadata=meta)\n # writer.add_embedding(query2box.time_embedding, metadata=meta)\n\n # if training_logs == []:\n # raise Exception(\"No tasks are trained!!\")\n\n # if step >= warm_up_steps:\n # logging.info('warm up step: %d' % (warm_up_steps))\n # current_learning_rate = current_learning_rate / 10\n # logging.info('Change learning_rate to %f at step %d' % (current_learning_rate, step))\n # optimizer = torch.optim.Adam(\n # filter(lambda p: p.requires_grad, query2box.parameters()), \n # lr=current_learning_rate\n # )\n # warm_up_steps = 2*warm_up_steps \n\n \n # current_learning_rate = get_learning_rate(args.learning_rate, args.hidden_dim, warm_up_steps, step)\n # logging.info('Change learning_rate to %f at step %d' % (current_learning_rate, step))\n # optimizer = torch.optim.Adam(\n # filter(lambda p: p.requires_grad, query2box.parameters()), \n # lr=current_learning_rate\n # )\n\n if step%args.model_save_step==0 and step > 0:\n save_variable_list = {\n 'curr_valid_mrrm':last_valid_mrrm,\n 'best_valid_mrrm':best_valid_mrrm,\n 'step': step, \n 'current_learning_rate': current_learning_rate,\n # 'warm_up_steps': warm_up_steps\n }\n save_model(query2box, optimizer, save_variable_list, args)\n \n if args.do_valid and step % args.valid_steps == 0 and (step > 0):\n logging.info('Evaluating on Valid Dataset: %f epochs:', total_trained_num_triples/num_triples_per_epoch)\n last_valid_mrrm = evaluate_val(tasks)\n \n if last_valid_mrrm > best_valid_mrrm:\n save_variable_list = {\n 'best_valid_mrrm':last_valid_mrrm,\n 'curr_valid_mrrm':last_valid_mrrm,\n 'step': step, \n 
'current_learning_rate': current_learning_rate,\n # 'warm_up_steps': warm_up_steps\n }\n save_model(query2box, optimizer, save_variable_list, args, best_model=True)\n logging.info('Update checkpoints to a better valid mrr: %3.3f', last_valid_mrrm)\n best_valid_mrrm = last_valid_mrrm\n\n # if step >= 3000:\n scheduler.step(best_valid_mrrm)\n # tune.report(iterations=step, valid_mrr=best_valid_mrrm)\n ## tune\n # if mode == 'TUNE':\n \n \n \n # try:`\n # print(step)\n # except:\n # step = 0\n\n if args.do_valid:\n logging.info('Evaluating on Valid Dataset...')\n evaluate_val(tasks)\n\n if args.do_test:\n logging.info('Evaluating on Test Dataset...')\n evaluate_test()\n\n # if args.evaluate_train:\n # logging.info('Evaluating on Training Dataset...')\n # evaluate_train()\n\n print ('Training finished!!')\n logging.info(\"training finished!!\")\n\n # Do_Train(args, mode)\n\nif __name__ == '__main__':\n main(parse_args())" ]
[ [ "torch.optim.lr_scheduler.ReduceLROnPlateau", "numpy.random.seed", "torch.cuda.manual_seed", "torch.manual_seed", "numpy.mean", "numpy.sum", "torch.multiprocessing.set_sharing_strategy" ] ]
AmirAlavi/scipr
[ "5807b8648526e41c2db413783c76fa33331cc710" ]
[ "api_tests/test_rigid.py" ]
[ "import numpy as np\n\nimport scipr\nfrom scipr.matching import Closest, Hungarian, Greedy, MNN\nfrom scipr.transform import Affine, Rigid, StackedAutoEncoder\n\nnp.random.seed(1817)\nA = np.random.random((10, 100))\nB = np.random.random((20, 100))\n\nmatchers = [Closest(), Greedy(), Hungarian(), MNN()]\nfor match in matchers:\n transform = Rigid()\n\n model = scipr.SCIPR(match, transform, n_iter=2)\n\n model.fit(A, B)\n\n A_new = model.transform(A)\n\n assert(np.all(np.not_equal(A, A_new)))" ]
[ [ "numpy.not_equal", "numpy.random.random", "numpy.random.seed" ] ]
Rathore25/Sapiens-QA
[ "0bf794784c3a3b26b541dbc2fa894756c979e5c4" ]
[ "Main App/app/main.py" ]
[ "from flask import Flask, request, jsonify\nfrom flask_cors import CORS\n\nimport os\nimport pandas as pd\nimport requests\nimport wget\nimport json\nimport wget\n\nfrom cdqa.pipeline import QAPipeline\n\napp = Flask(__name__)\nCORS(app)\n\nprint(\"Started main.py !!!\")\n\nresponse = requests.get('https://docs.google.com/uc?export=download&id=1oSUFKZMao_gQxGDpCZuRXV6EULjFjmoZ')\nsapiens_original = response.json()\n\nprint(\"Fetched sapiens original\")\n\nresponse = requests.get('https://docs.google.com/uc?export=download&id=1b5xy1Z4EuFVXkMOQIupl27a1kRNPguVr')\nsapiens_annotated = response.json()\nwith open('./sapiens_annotated.json', 'w') as file:\n json.dump(sapiens_annotated, file)\n\nprint(\"Fetched sapiens annotated\")\n\ndictionary_df = []\n\nfor item in sapiens_original['data']:\n title = item['title']\n paragraphs = []\n\n for paragraph in item['paragraphs']:\n paragraphs.append(paragraph['context'])\n \n dictionary_df.append({'title':title, 'paragraphs':paragraphs})\n\ndf = pd.DataFrame(dictionary_df)\n\n# Get original Bert_qa and then train on our annotated dataset\nwget.download(url='https://github.com/cdqa-suite/cdQA/releases/download/bert_qa/bert_qa.joblib', out='./')\ncdqa_pipeline = QAPipeline(reader='./bert_qa.joblib')\ncdqa_pipeline.fit_retriever(df=df)\ncdqa_pipeline.fit_reader('./sapiens_annotated.json')\n\n# Use the pretrained annotated Distilbert file\n#wget.download(url='https://github.com/Rathore25/Sapiens-QA/raw/main/Pretrained Data/sapiens_distilbert.joblib', out='./')\n#cdqa_pipeline = QAPipeline(reader='./sapiens_distilbert.joblib')\n#cdqa_pipeline.fit_retriever(df=df)\n\n# Use the pretrained annotated Bert file\n#wget.download(url='https://github.com/Rathore25/Sapiens-QA/raw/main/Pretrained Data/sapiens_bert.joblib', out='./')\n#cdqa_pipeline = QAPipeline(reader='./sapiens_bert.joblib')\n#cdqa_pipeline.fit_retriever(df=df)\n\[email protected](\"/api\", methods=[\"GET\"])\ndef api():\n\n query = request.args.get(\"query\")\n prediction = cdqa_pipeline.predict(query=query)\n\n return jsonify(\n query=query, answer=prediction[0], title=prediction[1], paragraph=prediction[2], score=prediction[3]\n )\n\n# A welcome message to test our server\[email protected]('/')\ndef index():\n return \"<h1>Welcome to Sapiens AI server !!</h1>\"" ]
[ [ "pandas.DataFrame" ] ]
iGEMDarmstadt/HoloPyGuy
[ "160bcf551efcc152d9b74cc38726647c0a7c1d2e" ]
[ "picprocessing.py" ]
[ "import os\nfrom PIL import Image\nimport numpy as np\n\n#Takes all pics in \"raw pics\" directory, convert to grey scale,\n#cutting into square images, and same to \"ready pics\" directory\n\n\ndef create_temp_pictures(filenames):\n filelist = []\n cnt = 0\n for filename in filenames:\n im = Image.open(filename)\n\n half_the_width = im.size[0] / 2\n half_the_height = im.size[1] / 2\n\n cut = np.minimum(half_the_height, half_the_width)\n\n im = im.crop(\n (\n half_the_width - cut,\n half_the_height - cut,\n half_the_width + cut,\n half_the_height + cut\n )\n )\n im = im.convert('L')\n im.save(\".{}.jpeg\".format(cnt))\n filelist.append(\".{}.jpeg\".format(cnt))\n cnt = cnt + 1\n\n print(filelist)\n return filelist\n\n# dir_pic = os.path.join(os.getcwd(), 'raw pics')\n# out_dir = os.path.join(os.getcwd(), 'ready pics')\n# dir_pic = os.path.join(os.getcwd(), 'new_pic_raw')\n# out_dir = os.path.join(os.getcwd(), 'new_pic_ready')\n\n\nif __name__ == \"__main__\":\n # dir_pic = os.path.join(os.getcwd(), 'raw pics')\n # out_dir = os.path.join(os.getcwd(), 'ready pics')\n # dir_pic = os.path.join(os.getcwd(), 'new_pic_raw')\n # out_dir = os.path.join(os.getcwd(), 'new_pic_ready')\n dir_pic = os.path.join(os.getcwd(), '10_26')\n out_dir = os.path.join(os.getcwd(), '10_26_processed')\n\n # os.chdir(dir_pic)\n\n for filename in os.listdir(dir_pic):\n print(filename)\n im = Image.open(dir_pic + '/' + filename)\n\n print(im.format, im.size, im.mode)\n\n half_the_width = im.size[0] / 2\n half_the_height = im.size[1] / 2\n\n cut = np.minimum(half_the_height, half_the_width)\n\n im1 = im.crop(\n (\n half_the_width - cut,\n half_the_height - cut,\n half_the_width + cut,\n half_the_height + cut\n )\n )\n\n print(im1.format, im1.size, im1.mode)\n im1.save(os.path.join(out_dir, filename))\n os.chdir(out_dir)\n os.system(\"convert \" + str(filename) + \" -set colorspace Gray -separate -average \" + str(filename))\n\n" ]
[ [ "numpy.minimum" ] ]
KanwarKelide/football
[ "149c03dfd90aaf652a61c656e40cefa5dc9e0454" ]
[ "gfootball/env/football_env_test.py" ]
[ "# coding=utf-8\n# Copyright 2019 Google LLC\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Football environment E2E test.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom absl.testing import parameterized\nfrom collections import Iterable\nfrom multiprocessing import pool\nfrom multiprocessing import Queue\nimport gfootball\nimport os\nimport random\nimport threading\nimport zlib\n\nfrom gfootball.env import config\nfrom gfootball.env import football_action_set\nfrom gfootball.env import football_env\nfrom gfootball.env import wrappers\nfrom gfootball.env import scenario_builder\nimport numpy as np\nimport psutil\nfrom six.moves import range\nimport unittest\n\nfast_run = False\n\n\ndef observation_hash(observation, hash_value = 0):\n for obs in observation:\n for key, value in sorted(obs.items()):\n hash_value = zlib.adler32(key.encode(), hash_value)\n hash_value = zlib.adler32(np.ascontiguousarray(value), hash_value)\n return hash_value\n\n\ndef compute_hash(env, actions, extensive=False):\n \"\"\"Computes hash of observations returned by environment for a given scenario.\n\n Args:\n env: environment\n actions: number of actions\n extensive: whether to run full episode\n\n Returns:\n hash\n \"\"\"\n o = env.reset()\n hash_value = observation_hash(o)\n done = False\n step = 0\n while not done:\n o, _, done, _ = env.step(step % actions)\n hash_value = observation_hash(o, hash_value)\n step += 1\n if not extensive and step >= 200:\n break\n return hash_value\n\n\ndef run_scenario(cfg, queue, actions, render=False, validation=True):\n env = football_env.FootballEnv(cfg)\n if render:\n env.render()\n obs = env.reset()\n queue.put(obs)\n if validation:\n env.tracker_setup(0, 999999999999999)\n done = False\n step = 0\n while True:\n if isinstance(actions, Iterable):\n if step >= len(actions):\n break\n action = actions[step]\n else:\n action = actions.get()\n if action is None:\n break\n step += 1\n if isinstance(action, Iterable):\n obs, _, done, _ = env.step(action)\n else:\n obs, _, done, _ = env.step([action, action])\n queue.put(obs)\n if done:\n break\n queue.put(None)\n env.close()\n\n\ndef normalize_observation(o):\n if o['ball'][0] == -0:\n o['ball'][0] = 0\n if o['ball'][1] == -0:\n o['ball'][1] = 0\n if o['ball_direction'][0] == -0:\n o['ball_direction'][0] = 0\n if o['ball_direction'][1] == -0:\n o['ball_direction'][1] = 0\n\n\nclass FootballEnvTest(parameterized.TestCase):\n\n def compare_observations(self, l1, l2):\n for o1, o2 in zip(l1, l2):\n if 'frame' in o1 and 'frame' not in o2:\n del o1['frame']\n elif 'frame' in o2 and 'frame' not in o1:\n del o2['frame']\n normalize_observation(o1)\n normalize_observation(o2)\n o1 = str(tuple(sorted(o1.items())))\n o2 = str(tuple(sorted(o2.items())))\n self.assertEqual(o1, o2)\n\n def check_determinism(self, extensive=False):\n \"\"\"Check that environment is deterministic.\"\"\"\n if 'UNITTEST_IN_DOCKER' in os.environ:\n return\n cfg = 
config.Config({\n 'level': 'tests.11_vs_11_hard_deterministic'\n })\n env = football_env.FootballEnv(cfg)\n actions = len(football_action_set.get_action_set(cfg))\n for episode in range(1 if extensive else 2):\n hash_value = compute_hash(env, actions, extensive)\n if extensive:\n\n if hash_value != 1174966789:\n self.assertEqual(hash_value, 29082684)\n elif episode % 2 == 0:\n\n if hash_value != 2275067030:\n self.assertEqual(hash_value, 2143616507)\n else:\n\n if hash_value != 2045063811:\n self.assertEqual(hash_value, 1264083657)\n env.close()\n\n def test___control_all_players(self):\n \"\"\"Validate MultiAgentToSingleAgent wrapper and control_all_players flag.\"\"\"\n try:\n gfootball.env.create_environment(\n env_name='tests.multiagent_wrapper',\n rewards='checkpoints,scoring',\n number_of_left_players_agent_controls=2)\n except AssertionError:\n pass\n else:\n self.assertTrue(False)\n\n env = gfootball.env.create_environment(\n env_name='tests.multiagent_wrapper',\n rewards='checkpoints,scoring',\n representation='simple115v2',\n number_of_left_players_agent_controls=11,\n number_of_right_players_agent_controls=11)\n obs = env.reset()\n self.assertLen(obs, 22)\n self.assertIn(obs, env.observation_space)\n\n env = gfootball.env.create_environment(\n env_name='tests.multiagent_wrapper',\n rewards='checkpoints,scoring',\n number_of_left_players_agent_controls=11,\n number_of_right_players_agent_controls=0)\n obs = env.reset()\n self.assertLen(obs, 11)\n self.assertIn(obs, env.observation_space)\n\n env = gfootball.env.create_environment(\n env_name='tests.multiagent_wrapper',\n rewards='checkpoints,scoring',\n representation='simple115v2',\n number_of_left_players_agent_controls=0,\n number_of_right_players_agent_controls=11)\n obs = env.reset()\n self.assertLen(obs, 11)\n self.assertIn(obs, env.observation_space)\n\n env = gfootball.env.create_environment(\n env_name='tests.multiagent_wrapper',\n rewards='checkpoints,scoring',\n number_of_left_players_agent_controls=1,\n number_of_right_players_agent_controls=1)\n obs = env.reset()\n self.assertLen(obs, 2)\n self.assertIn(obs, env.observation_space)\n\n env = gfootball.env.create_environment(\n env_name='tests.multiagent_wrapper',\n rewards='checkpoints,scoring',\n number_of_left_players_agent_controls=1)\n obs = env.reset()\n self.assertEqual(np.shape(obs), (72, 96, 4))\n self.assertIn(obs, env.observation_space)\n obs, _, _, _ = env.step([football_action_set.action_left])\n self.assertEqual(np.shape(obs), (72, 96, 4))\n env = gfootball.env.create_environment(\n env_name='tests.multiagent_wrapper',\n rewards='checkpoints,scoring',\n representation='raw',\n number_of_left_players_agent_controls=1,\n number_of_right_players_agent_controls=1)\n obs = env.reset()\n self.assertLen(obs, 2)\n self.assertEqual(obs[0]['sticky_actions'][0], 0)\n self.assertEqual(obs[1]['sticky_actions'][4], 0)\n obs, _, _, _ = env.step(\n [football_action_set.action_idle, football_action_set.action_idle])\n obs, _, _, _ = env.step(\n [football_action_set.action_left, football_action_set.action_right])\n self.assertLen(obs, 2)\n self.assertEqual(obs[0]['sticky_actions'][0], 1)\n self.assertEqual(obs[1]['sticky_actions'][4], 1)\n\n def test_score_empty_goal(self):\n \"\"\"Score on an empty goal.\"\"\"\n cfg = config.Config()\n\n env = football_env.FootballEnv(cfg)\n cfg['level'] = 'academy_empty_goal'\n last_o = env.reset()[0]\n for _ in range(120):\n o, reward, done, _ = env.step(football_action_set.action_right)\n o = o[0]\n if done:\n self.assertEqual(reward, 
1)\n break\n self.assertFalse(done)\n self.assertGreaterEqual(o['ball'][0], last_o['ball'][0] - 0.01)\n self.assertGreaterEqual(\n o['left_team'][o['active']][0],\n last_o['left_team'][last_o['active']][0] - 0.01)\n last_o = o\n self.assertTrue(done)\n env.close()\n\n def test_second_half(self):\n \"\"\"Test second half feature.\"\"\"\n cfg = config.Config()\n cfg['level'] = 'tests.second_half'\n env = football_env.FootballEnv(cfg)\n for _ in range(5):\n o, _, done, _ = env.step(football_action_set.action_idle)\n self.assertFalse(done)\n self.assertAlmostEqual(o[0]['left_team'][o[0]['active']][0], 0, delta=0.1)\n for _ in range(6):\n self.assertFalse(done)\n o, _, done, _ = env.step(football_action_set.action_idle)\n self.assertAlmostEqual(\n o[0]['left_team'][o[0]['active']][0], -0.5, delta=0.1)\n self.assertTrue(done)\n env.close()\n\n def test___render(self):\n \"\"\"Make sure rendering is not broken.\"\"\"\n if 'UNITTEST_IN_DOCKER' in os.environ:\n # Rendering is not supported.\n return\n cfg = config.Config({\n 'level': 'tests.11_vs_11_hard_deterministic',\n })\n env = football_env.FootballEnv(cfg)\n env.render()\n o = env.reset()\n hash_value = observation_hash(o)\n for _ in range(10):\n o, _, _, _ = env.step(football_action_set.action_right)\n hash_value = observation_hash(o, hash_value)\n self.assertEqual(hash_value, 18699114)\n env.close()\n\n def test_dynamic_render(self):\n \"\"\"Verifies dynamic render support.\"\"\"\n if 'UNITTEST_IN_DOCKER' in os.environ:\n # Rendering is not supported.\n return\n cfg = config.Config({\n 'level': 'tests.11_vs_11_hard_deterministic',\n })\n env = football_env.FootballEnv(cfg)\n o = env.reset()\n for _ in range(10):\n o, _, _, _ = env.step(football_action_set.action_right)\n self.assertNotIn('frame', o[0])\n env.render()\n self.assertIn('frame', env.observation()[0])\n self.compare_observations(o, env.observation())\n o, _, _, _ = env.step(football_action_set.action_right)\n self.assertIn('frame', env.observation()[0])\n env.disable_render()\n self.compare_observations(o, env.observation())\n env.close()\n\n def test_different_action_formats(self):\n \"\"\"Verify different action formats are accepted.\"\"\"\n cfg = config.Config()\n env = football_env.FootballEnv(cfg)\n env.reset()\n env.step(football_action_set.action_right)\n env.step([football_action_set.action_right])\n env.step(np.array([football_action_set.action_right]))\n env.step(np.array(football_action_set.action_right))\n env.close()\n\n def test_determinism_extensive(self):\n self.check_determinism(extensive=True)\n\n def test_determinism(self):\n self.check_determinism()\n\n def test_multi_instance(self):\n \"\"\"Validates that two instances of the env can run in the same thread.\"\"\"\n tpool = pool.ThreadPool(processes=2)\n run1 = tpool.apply_async(self.check_determinism)\n run2 = tpool.apply_async(self.check_determinism)\n run1.get()\n run2.get()\n\n def test_multi_render(self):\n \"\"\"Only one rendering instance allowed at a time.\"\"\"\n if 'UNITTEST_IN_DOCKER' in os.environ:\n # Rendering is not supported.\n return\n cfg = config.Config({})\n env1 = football_env.FootballEnv(cfg)\n env1.render()\n env1.reset()\n\n env2 = football_env.FootballEnv(cfg)\n try:\n env2.render()\n except AssertionError:\n env1.close()\n env2.close()\n # It is still possible to render.\n env3 = football_env.FootballEnv(cfg)\n env3.reset()\n env3.close()\n return\n assert False, 'Exception expected'\n\n def test_scenarios_are_at_least_loading(self):\n cfg = config.Config()\n for l in 
scenario_builder.all_scenarios():\n cfg['level'] = l\n unused_game_cfg = cfg.ScenarioConfig()\n\n def memory_usage(self):\n process = psutil.Process(os.getpid())\n return process.memory_info().rss\n\n def test__memory_usage(self):\n \"\"\"Make sure memory usage is low when not recording videos.\"\"\"\n # This test has to go first, so that memory usage is not affected.\n if 'UNITTEST_IN_DOCKER' in os.environ:\n # Forge doesn't support rendering.\n return\n cfg = config.Config({'write_video': False})\n env = football_env.FootballEnv(cfg)\n env.render()\n env.reset()\n initial_memory = self.memory_usage()\n for _ in range(100):\n _, _, _, _ = env.step(football_action_set.action_right)\n memory_usage = self.memory_usage() - initial_memory\n env.close()\n self.assertGreaterEqual(10000000, memory_usage)\n\n def test_player_order_invariant(self):\n \"\"\"Checks that environment behaves the same regardless of players order.\"\"\"\n players = ['agent:right_players=1', 'lazy:left_players=11']\n cfg = config.Config({\n 'level': 'tests.11_vs_11_hard_deterministic',\n 'players': players\n })\n env = football_env.FootballEnv(cfg)\n actions = len(football_action_set.get_action_set(cfg))\n hash_value1 = compute_hash(env, actions)\n players = [players[1], players[0]]\n cfg = config.Config({\n 'level': 'tests.11_vs_11_hard_deterministic',\n 'players': players\n })\n env = football_env.FootballEnv(cfg)\n hash_value2 = compute_hash(env, actions)\n self.assertEqual(hash_value1, hash_value2)\n env.close()\n\n @parameterized.parameters(range(1))\n def test_setstate(self, seed):\n \"\"\"Checks setState functionality.\"\"\"\n cfg1 = config.Config({\n 'level': 'tests.symmetric',\n 'game_engine_random_seed': seed,\n 'reverse_team_processing' : False\n })\n cfg2 = config.Config({\n 'level': 'tests.symmetric',\n 'game_engine_random_seed': seed + 10,\n 'reverse_team_processing' : False\n })\n env1 = football_env.FootballEnv(cfg1)\n env2 = football_env.FootballEnv(cfg2)\n initial_obs = env1.reset()\n env2.reset()\n initial_state = env1.get_state()\n env2.set_state(initial_state)\n random.seed(seed)\n actions = len(football_action_set.get_action_set(cfg1))\n first_action = random.randint(0, actions - 1)\n first_obs, _, _, _ = env1.step(first_action)\n _, _, _, _ = env2.step(first_action)\n step = 0\n limit = 10 if fast_run else 3000\n while step < limit:\n step += 1\n action = random.randint(0, actions - 1)\n if step % 10 == 0:\n env2.set_state(initial_state)\n self.compare_observations(initial_obs, env2.observation())\n env2.step(first_action)\n self.compare_observations(first_obs, env2.observation())\n env2.set_state(env1.get_state())\n self.compare_observations(env1.observation(), env2.observation())\n _, _, done1, _ = env1.step(action)\n _, _, done2, _ = env2.step(action)\n self.assertEqual(done1, done2)\n if done1:\n break\n env1.close()\n env2.close()\n\n @parameterized.parameters(range(1))\n def test_symmetry(self, seed):\n \"\"\"Checks game symmetry.\"\"\"\n processes = []\n cfg1 = config.Config({\n 'level': 'tests.symmetric',\n 'game_engine_random_seed': seed,\n 'players': ['agent:left_players=1,right_players=1'],\n 'reverse_team_processing': False,\n })\n cfg2 = config.Config({\n 'level': 'tests.symmetric',\n 'game_engine_random_seed': seed,\n 'players': ['agent:left_players=1,right_players=1'],\n 'reverse_team_processing': True,\n })\n random.seed(seed)\n action_cnt = len(football_action_set.get_action_set(cfg1))\n actions = [random.randint(0, action_cnt - 1) for _ in range(10 if fast_run else 3000)]\n queue1 
= Queue()\n thread1 = threading.Thread(\n target=run_scenario, args=(cfg1, queue1, actions))\n thread1.start()\n queue2 = Queue()\n thread2 = threading.Thread(\n target=run_scenario, args=(cfg2, queue2, actions))\n thread2.start()\n while True:\n o1 = queue1.get()\n o2 = queue2.get()\n if not o1 or not o2:\n self.assertEqual(o1, o2)\n break\n self.compare_observations(o1[:1], o2[1:])\n self.compare_observations(o2[:1], o1[1:])\n thread1.join()\n thread2.join()\n\n @parameterized.parameters((1, 'left', True), (0, 'right', True),\n (1, 'left', False), (0, 'right', False))\n def test_offside(self, episode, team2, reverse):\n cfg = config.Config({\n 'level': 'tests.offside_test',\n 'players': ['agent:{}_players=1'.format(team2)],\n 'episode_number': episode,\n 'reverse_team_processing': reverse,\n })\n env = football_env.FootballEnv(cfg)\n env.reset()\n o, _, done, _ = env.step(football_action_set.action_long_pass)\n done = False\n while not done and o[0]['right_team'][1][0] == 0:\n o, _, done, _ = env.step(football_action_set.action_idle)\n self.assertAlmostEqual(o[0]['ball'][0], 0.6, delta=0.4)\n self.assertAlmostEqual(o[0]['right_team'][0][0], 0.6, delta=0.4)\n self.assertAlmostEqual(o[0]['right_team'][1][0], 0.6, delta=0.4)\n self.assertAlmostEqual(o[0]['left_team'][0][0], -0.6, delta=0.4)\n self.assertAlmostEqual(o[0]['left_team'][1][0], -0.6, delta=0.4)\n env.close()\n\n @parameterized.parameters((0, 1, True), (1, -1, True), (0, 1, False),\n (1, -1, False))\n def test_corner(self, episode, factor, reverse):\n cfg = config.Config({\n 'level': 'tests.corner_test',\n 'players': ['agent:left_players=1,right_players=1'],\n 'episode_number': episode,\n 'reverse_team_processing': reverse,\n })\n env = football_env.FootballEnv(cfg)\n o = env.reset()\n done = False\n while not done:\n o, _, done, _ = env.step([football_action_set.action_left,\n football_action_set.action_left])\n self.assertAlmostEqual(o[0]['ball'][0], -0.95 * factor, delta=0.1)\n self.assertAlmostEqual(o[0]['ball'][1], 0.4 * factor, delta=0.1)\n self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)\n self.assertAlmostEqual(o[0]['right_team'][1][0], -0.95 * factor, delta=0.1)\n self.assertAlmostEqual(o[0]['left_team'][0][0], -0.95, delta=0.1)\n self.assertAlmostEqual(o[0]['left_team'][1][0], -0.9 * factor, delta=0.2)\n env.close()\n\n def test_penalty(self):\n cfg = config.Config({\n 'level': 'tests.penalty',\n 'players': ['agent:left_players=1'],\n })\n env = football_env.FootballEnv(cfg)\n o = env.reset()\n done = False\n while not done:\n o, _, done, _ = env.step([football_action_set.action_sliding])\n self.assertAlmostEqual(o[0]['ball'][0], -0.809, delta=0.01)\n self.assertAlmostEqual(o[0]['ball'][1], 0.0, delta=0.01)\n self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)\n self.assertAlmostEqual(o[0]['right_team'][1][0], -0.75, delta=0.1)\n self.assertAlmostEqual(o[0]['left_team'][0][0], -0.95, delta=0.1)\n self.assertAlmostEqual(o[0]['left_team'][1][0], -0.70, delta=0.1)\n env.close()\n\n @parameterized.parameters((0, -1, True), (1, 1, True), (0, -1, False),\n (1, 1, False))\n def test_keeper_ball(self, episode, factor, reverse):\n cfg = config.Config({\n 'level': 'tests.keeper_test',\n 'players': ['agent:left_players=1,right_players=1'],\n 'episode_number': episode,\n 'reverse_team_processing': reverse,\n })\n env = football_env.FootballEnv(cfg)\n o = env.reset()\n done = False\n while not done:\n o, _, done, _ = env.step([football_action_set.action_right,\n football_action_set.action_right])\n 
self.assertAlmostEqual(o[0]['ball'][0], -1.0 * factor, delta=0.1)\n self.assertAlmostEqual(o[0]['ball'][1], 0.0, delta=0.1)\n self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)\n self.assertAlmostEqual(o[0]['right_team'][1][0], 0.4, delta=0.1)\n self.assertAlmostEqual(o[0]['left_team'][0][0], -0.9, delta=0.1)\n self.assertAlmostEqual(o[0]['left_team'][1][0], -0.33, delta=0.1)\n env.close()\n\n @parameterized.parameters((0, True), (1, True), (0, False), (1, False))\n def test_goal(self, episode, reverse):\n cfg = config.Config({\n 'level': 'tests.goal_test',\n 'players': ['agent:left_players=1,right_players=1'],\n 'episode_number': episode,\n 'reverse_team_processing': reverse,\n })\n env = football_env.FootballEnv(cfg)\n o = env.reset()\n done = False\n while not done:\n o, _, done, _ = env.step(\n [football_action_set.action_right, football_action_set.action_right])\n self.assertAlmostEqual(o[0]['ball'][0], 0.0, delta=0.1)\n self.assertEqual(o[0]['score'][episode], 1)\n self.assertEqual(o[0]['score'][1 - episode], 0)\n env.close()\n\n @parameterized.parameters(range(1))\n def test_render_state_equals_norender(self, seed):\n \"\"\"Checks that rendering game state is the same as non-rendering.\"\"\"\n if 'UNITTEST_IN_DOCKER' in os.environ:\n # Forge doesn't support rendering.\n return\n cfg1 = config.Config({\n 'level': 'tests.symmetric',\n 'game_engine_random_seed': seed,\n 'players': ['agent:left_players=1,right_players=1'],\n 'reverse_team_processing': False,\n })\n cfg2 = config.Config({\n 'level': 'tests.symmetric',\n 'game_engine_random_seed': seed,\n 'players': ['agent:left_players=1,right_players=1'],\n 'reverse_team_processing': False,\n })\n random.seed(seed)\n action_cnt = len(football_action_set.get_action_set(cfg1))\n actions = [random.randint(0, action_cnt - 1) for _ in range(50)]\n queue1 = Queue()\n thread1 = threading.Thread(\n target=run_scenario, args=(cfg1, queue1, actions, False, False))\n thread1.start()\n queue2 = Queue()\n thread2 = threading.Thread(\n target=run_scenario, args=(cfg2, queue2, actions, True, False))\n thread2.start()\n while True:\n o1 = queue1.get()\n o2 = queue2.get()\n if not o1 or not o2:\n self.assertEqual(o1, o2)\n break\n self.compare_observations(o1, o2)\n thread1.join()\n thread2.join()\n\n def test_get_state_wrapper(self):\n env = gfootball.env.create_environment(\n stacked=True,\n env_name='academy_empty_goal',\n rewards='checkpoints,scoring')\n o = env.reset()\n state = env.get_state()\n reward1 = 0\n hash1 = 0\n while reward1 < 0.9:\n o, r, _, _ = env.step(football_action_set.action_right)\n reward1 += r\n hash1 = zlib.adler32(o, hash1)\n self.assertAlmostEqual(reward1, 0.9, delta=0.01)\n env.set_state(state)\n hash2 = 0\n reward2 = 0\n while reward2 < 0.9:\n o, r, _, _ = env.step(football_action_set.action_right)\n reward2 += r\n hash2 = zlib.adler32(o, hash2)\n self.assertAlmostEqual(reward2, 0.9, delta=0.01)\n self.assertEqual(hash1, hash2)\n\n def test_restore_after_reset(self):\n cfg = config.Config({\n 'level': '11_vs_11_competition',\n })\n env = football_env.FootballEnv(cfg)\n obs = env.reset()\n state = env.get_state()\n env.reset()\n env.set_state(state)\n obs_ = env.observation()\n state_ = env.get_state()\n env.step(0) # Test if can take step\n self.compare_observations(obs, obs_)\n self.assertEqual(state, state_)\n\n def test_restore_after_done(self):\n cfg = config.Config({\n 'level': 'academy_empty_goal_close',\n })\n env = football_env.FootballEnv(cfg)\n env.reset()\n state = env.get_state()\n # Go right until 
reaching the goal.\n done = False\n while not done:\n _, _, done, _ = env.step(5)\n env.set_state(state)\n env.step(0) # Test if can take step\n\n\nif __name__ == '__main__':\n unittest.main(failfast=True)\n" ]
[ [ "numpy.ascontiguousarray", "numpy.array", "numpy.shape" ] ]
aswart/PSDA
[ "2bdd071e6a3dee89827900553185a98a38292843" ]
[ "psda/demo.py" ]
[ "import numpy as np\nfrom numpy.random import randn, randint\nimport matplotlib.pyplot as plt\n\nfrom psda import VMF, PSDA, decompose, atleast2\nfrom pyllr import quick_eval\n\n\"\"\"\nThis demo uses a quick-and-dirty data simulator, using Gaussians, not VMF.\nIt does not work for high dimensions. But you can play with dim = 2 or 3\nif you like.\n\"\"\"\ndim = 20\nb, w = 10, 50 # within, between concentrations\n\nns = 100 # number of training speakers\nn = 1000 # numer of training examples\n\n\n# set up model to sample from\nnorm, mu = decompose(randn(dim))\nmodel0 = PSDA(w, VMF(mu, b))\n\nZ = model0.sample_speakers(ns)\nlabels = randint(ns,size=(n,))\nuu, labels, counts = np.unique(labels, return_inverse=True, return_counts=True)\n\n# sample training data\nXtrain = model0.sample(Z, labels)\n\nif dim == 2:\n plt.figure()\n plt.scatter(Xtrain[:,0],Xtrain[:,1])\n plt.axis('square')\n plt.xlim(-1.2,1.2)\n plt.ylim(-1.2,1.2)\n plt.grid()\n plt.title('Embeddings')\n plt.show()\n\n\n# one hot label matrix\nL = np.full((n,len(counts)),False) # (n, ns)\nL[np.arange(n),labels] = True\n\n# these are the 1st-order stats required by the em traning\nmeans = (L.T @ Xtrain) / counts.reshape(-1,1)\n\n# filter out singleton speakers\nmeans, counts = atleast2(means, counts)\n\n# train the model!\nmodel, obj = PSDA.em(means, counts, niters=10)\n\nplt.figure()\nplt.plot(obj,'-*')\nplt.grid()\nplt.title('PSDA EM algorithm')\nplt.xlabel('iteration')\nplt.ylabel('marginal likelihood')\nplt.show()\n\n# generate some test data\nnt = 10000\nZ1 = model0.sample_speakers(nt)\nZ2 = model0.sample_speakers(nt)\nEnroll = model0.sample(Z1, np.arange(nt)) # enrollment embeddings\nTest1 = model0.sample(Z1, np.arange(nt)) # target test embeddings\nTest2 = model0.sample(Z2, np.arange(nt)) # nnotar test embeddings\n\n# compute PSDA scores\nE = model.prep(Enroll)\nT1 = model.prep(Test1)\nT2 = model.prep(Test2)\n\ntar = E.llr_vector(T1)\nnon = E.llr_vector(T2)\n\n# compute cosine scores\ntarc = (Enroll*Test1).sum(axis=-1)\nnonc = (Enroll*Test2).sum(axis=-1)\n\n\nplt.figure()\nplt.plot(non,nonc,'.',label='non')\nplt.plot(tar,tarc,'.',label='tar')\nplt.grid()\nplt.xlabel('PSDA score')\nplt.ylabel('cosine score')\nplt.legend()\nplt.show()\n\n\n\n# compute double-enroll PSDA scores\nEnroll2 = model0.sample(Z1, np.arange(nt)) # 2nd enrollment embeddings\nE2 = model.prep(Enroll + Enroll2)\ntar2 = E2.llr_vector(T1)\nnon2 = E2.llr_vector(T2)\n\n# compute double-enroll cosine scores\nE2c = decompose(Enroll + Enroll2)[1]\ntar2c = (E2c*Test1).sum(axis=-1)\nnon2c = (E2c*Test2).sum(axis=-1)\n\n\ntar12 = np.hstack([tar,tar2])\nnon12 = np.hstack([non,non2])\n\ntar12c = np.hstack([tarc,tar2c])\nnon12c = np.hstack([nonc,non2c])\n\n\neer_p, cllr_p, mincllr_p = quick_eval.tarnon_2_eer_cllr_mincllr(tar, non)\neer_p2, cllr_p2, mincllr_p2 = quick_eval.tarnon_2_eer_cllr_mincllr(tar2, non2)\n\neer_c, cllr_c, mincllr_c = quick_eval.tarnon_2_eer_cllr_mincllr(tarc, nonc)\neer_c2, cllr_c2, mincllr_c2 = quick_eval.tarnon_2_eer_cllr_mincllr(tar2c, non2c)\n\neer_p12, cllr_p12, mincllr_p12 = quick_eval.tarnon_2_eer_cllr_mincllr(tar12, non12)\neer_c12, cllr_c12, mincllr_c12 = quick_eval.tarnon_2_eer_cllr_mincllr(tar12c, non12c)\n\n\nprint(\"\\n\\nCosine scoring, single enroll:\")\nprint(f\" EER: {eer_c*100:.1f}%\")\nprint(f\" Cllr: {cllr_c:.3f}\")\nprint(f\" minCllr: {mincllr_c:.3f}\")\n\nprint(\"\\nPSDA scoring, single enroll:\")\nprint(f\" EER: {eer_p*100:.1f}%\")\nprint(f\" Cllr: {cllr_p:.3f}\")\nprint(f\" minCllr: {mincllr_p:.3f}\")\n\nprint(\"\\nCosine 
scoring, double enroll:\")\nprint(f\" EER: {eer_c2*100:.1f}%\")\nprint(f\" Cllr: {cllr_c2:.3f}\")\nprint(f\" minCllr: {mincllr_c2:.3f}\")\n\nprint(\"\\nPSDA scoring, double enroll:\")\nprint(f\" EER: {eer_p2*100:.1f}%\")\nprint(f\" Cllr: {cllr_p2:.3f}\")\nprint(f\" minCllr: {mincllr_p2:.3f}\")\n\nprint(\"\\nCosine scoring, mixed enroll:\")\nprint(f\" EER: {eer_c12*100:.1f}%\")\nprint(f\" Cllr: {cllr_c12:.3f}\")\nprint(f\" minCllr: {mincllr_c12:.3f}\")\n\nprint(\"\\nPSDA scoring, mixed enroll:\")\nprint(f\" EER: {eer_p12*100:.1f}%\")\nprint(f\" Cllr: {cllr_p12:.3f}\")\nprint(f\" minCllr: {mincllr_p12:.3f}\")\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.hstack", "matplotlib.pyplot.title", "numpy.unique", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylim", "numpy.arange", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlim", "numpy.random.randint", "numpy.random.randn", "matplotlib.pyplot.grid", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
Raudcu/pyrodash
[ "3671086ef57c097fa055a908a65401eb6648c69a" ]
[ "pyrodash/geometrics/parallelepiped.py" ]
[ "import numpy as np\nfrom itertools import product\nimport plotly.graph_objects as go\n\n\nclass Parallelepiped:\n \"\"\"\n Class to build and draw a Parallelepiped.\n\n ...\n\n Attributes\n ----------\n L : float or numpy array\n x, y, z lengths of the parallelepiped sides. If float, a cube of \n side L is built\n initial_vertex_position : numpy array\n x, y, z coordinates of the initial vertex position.\n vertices : numpy array\n vertices coordinates of the parallelepiped.\n face_vertices : numpy array\n vertices coordinates of each face.\n faces : list of plotly go\n scatter 3d plotly objects of the parallelepiped faces.\n \"\"\"\n\n def __init__(\n self,\n L,\n initial_vertex_position=[0, 0, 0],\n edge_color=\"black\",\n edge_width=2,\n face_opacity=0,\n ):\n \"\"\"\n Parameters\n ----------\n L : float or list of float or numpy array\n x, y, z lengths of the parallelepiped sides. If float, a cube \n of side L is built.\n initial_vertex_position : list of float or numpy array, optional\n x, y, z coordinates of the initial vertex position, \n by default [0, 0, 0].\n edge_color : str, optional\n rgb, rgba, hex, hsl, hsv, or named color string for the edge\n color, by default \"black\".\n edge_width : float, optional\n edge width, by default 1.5.\n face_opacity : int between or equal to 0 and 1, optional\n opacity of the faces, by default 0.\n \"\"\"\n\n if isinstance(L, (list, np.ndarray)):\n self.L = np.array(L)\n else:\n self.L = L\n self.initial_vertex_position = np.array(initial_vertex_position)\n\n if isinstance(self.L, np.ndarray):\n self.vertices = self.initial_vertex_position + np.array(\n list(product([0, self.L[0]], [0, self.L[1]], [0, self.L[2]]))\n )\n else:\n self.vertices = self.initial_vertex_position + np.array(\n list(product([0, self.L], [0, self.L], [0, self.L]))\n )\n\n self.face_vertices = self._face_vertices_calculation()\n\n self.faces = self._draw_faces(edge_color, edge_width, face_opacity)\n\n def _face_vertices_calculation(self):\n \"\"\"Calculates the vertices coordinates of each parallelepiped face.\n\n Returns\n -------\n face_vertices : numpy array\n vertices coordinates of each face.\n \"\"\"\n\n face_vertices = []\n\n # The six ways of grabbing four points between the eight\n # parallelepiped vertices.\n faces = [\n (2, 0, 1, 3),\n (4, 6, 7, 5),\n (6, 2, 3, 7),\n (0, 4, 5, 1),\n (0, 4, 6, 2),\n (1, 5, 7, 3),\n ]\n\n for face in faces:\n # The x,y,z coordinates of each of the four face vertex.\n vert_x = self.vertices[face, 0]\n vert_y = self.vertices[face, 1]\n vert_z = self.vertices[face, 2]\n\n face_vertices.append(\n [np.array(vert) for vert in zip(vert_x, vert_y, vert_z)]\n )\n\n return np.array(face_vertices)\n\n def _draw_faces(self, edge_color, edge_width, face_opacity):\n \"\"\"Generates the plotly scatter 3d for the parallelepiped faces.\n\n It builds each face from the face vertices by generating two \n scatters: one to ensure the edges with the proper color and width, \n and the other one for the faces.\n\n Parameters\n ----------\n edge_color : str\n rgb, rgba, hex, hsl, hsv, or named color string for the edge\n color.\n edge_width : float.\n edge width.\n face_opacity : int between or equal to 0 and 1\n opacity of the faces.\n\n Returns\n -------\n faces : list of plotly go\n scatter 3d plotly objects of the parallelepiped faces.\n \"\"\"\n\n faces = []\n\n for i, vert in enumerate(self.face_vertices):\n\n faces.append(\n go.Scatter3d(\n x=vert[:, 0],\n y=vert[:, 1],\n z=vert[:, 2],\n mode=\"lines\",\n line=dict(color=edge_color, 
width=edge_width),\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n\n faces.append(\n go.Scatter3d(\n x=vert[:, 0],\n y=vert[:, 1],\n z=vert[:, 2],\n mode=\"lines\",\n line=dict(color=\"gray\", width=0),\n opacity=face_opacity,\n surfaceaxis=i // 2,\n surfacecolor=\"gray\",\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n\n return faces\n\n\nif __name__ == \"__main__\":\n\n import dash\n import dash_core_components as dcc\n import dash_html_components as html\n\n import plotly.graph_objects as go\n\n p = Parallelepiped([1, 2, 3], [1, 1, 1], \"orange\", 10, 0.2)\n\n fig = go.Figure(data=p.faces)\n\n external_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"]\n\n app = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n app.layout = html.Div(dcc.Graph(figure=fig))\n\n app.run_server(debug=True)\n" ]
[ [ "numpy.array" ] ]
dongwang218/spark
[ "257236c3e17906098f801cbc2059e7a9054e8cab" ]
[ "examples/src/main/python/als.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nThis is an example implementation of ALS for learning how to use Spark. Please refer to\nALS in pyspark.mllib.recommendation for more conventional use.\n\nThis example requires numpy (http://www.numpy.org/)\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nimport numpy as np\nfrom numpy.random import rand\nfrom numpy import matrix\nfrom pyspark import SparkContext\n\nLAMBDA = 0.01 # regularization\nnp.random.seed(42)\n\n\ndef rmse(R, ms, us):\n diff = R - ms * us.T\n return np.sqrt(np.sum(np.power(diff, 2)) / M * U)\n\n\ndef update(i, vec, mat, ratings):\n uu = mat.shape[0]\n ff = mat.shape[1]\n\n XtX = mat.T * mat\n Xty = mat.T * ratings[i, :].T\n\n for j in range(ff):\n XtX[j, j] += LAMBDA * uu\n\n return np.linalg.solve(XtX, Xty)\n\n\nif __name__ == \"__main__\":\n\n \"\"\"\n Usage: als [M] [U] [F] [iterations] [partitions]\"\n \"\"\"\n\n print(\"\"\"WARN: This is a naive implementation of ALS and is given as an\n example. Please use the ALS method found in pyspark.mllib.recommendation for more\n conventional use.\"\"\", file=sys.stderr)\n\n sc = SparkContext(appName=\"PythonALS\")\n M = int(sys.argv[1]) if len(sys.argv) > 1 else 100\n U = int(sys.argv[2]) if len(sys.argv) > 2 else 500\n F = int(sys.argv[3]) if len(sys.argv) > 3 else 10\n ITERATIONS = int(sys.argv[4]) if len(sys.argv) > 4 else 5\n partitions = int(sys.argv[5]) if len(sys.argv) > 5 else 2\n\n print(\"Running ALS with M=%d, U=%d, F=%d, iters=%d, partitions=%d\\n\" %\n (M, U, F, ITERATIONS, partitions))\n\n R = matrix(rand(M, F)) * matrix(rand(U, F).T)\n ms = matrix(rand(M, F))\n us = matrix(rand(U, F))\n\n Rb = sc.broadcast(R)\n msb = sc.broadcast(ms)\n usb = sc.broadcast(us)\n\n for i in range(ITERATIONS):\n ms = sc.parallelize(range(M), partitions) \\\n .map(lambda x: update(x, msb.value[x, :], usb.value, Rb.value)) \\\n .collect()\n # collect() returns a list, so array ends up being\n # a 3-d array, we take the first 2 dims for the matrix\n ms = matrix(np.array(ms)[:, :, 0])\n msb = sc.broadcast(ms)\n\n us = sc.parallelize(range(U), partitions) \\\n .map(lambda x: update(x, usb.value[x, :], msb.value, Rb.value.T)) \\\n .collect()\n us = matrix(np.array(us)[:, :, 0])\n usb = sc.broadcast(us)\n\n error = rmse(R, ms, us)\n print(\"Iteration %d:\" % i)\n print(\"\\nRMSE: %5.4f\\n\" % error)\n\n sc.stop()\n" ]
[ [ "numpy.linalg.solve", "numpy.random.seed", "numpy.power", "numpy.random.rand", "numpy.array" ] ]
LiuxyEric/dscc202-402-spring2022
[ "f3877c2dde64656f9d84e3f913340f3fcefdc11b" ]
[ "project3-mlops/06-Model-Registry.py" ]
[ "# Databricks notebook source\n# MAGIC %md\n# MAGIC # Model Registry\n# MAGIC \n# MAGIC MLflow Model Registry is a collaborative hub where teams can share ML models, work together from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. This lesson explores how to manage models using the MLflow model registry.\n# MAGIC \n# MAGIC ## ![Spark Logo Tiny](https://files.training.databricks.com/images/105/logo_spark_tiny.png) In this lesson you:<br>\n# MAGIC - Register a model using MLflow\n# MAGIC - Deploy that model into production\n# MAGIC - Update a model in production to new version including a staging phase for testing\n# MAGIC - Archive and delete models\n# MAGIC \n# MAGIC ## Prerequisites\n# MAGIC - Web browser: Chrome\n# MAGIC - A cluster configured with **8 cores** and **DBR 7.0 ML**\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## ![Spark Logo Tiny](https://files.training.databricks.com/images/105/logo_spark_tiny.png) Classroom-Setup\n# MAGIC \n# MAGIC For each lesson to execute correctly, please make sure to run the **`Classroom-Setup`** cell at the<br/>\n# MAGIC start of each lesson (see the next cell) and the **`Classroom-Cleanup`** cell at the end of each lesson.\n\n# COMMAND ----------\n\n# MAGIC %run \"./Includes/Classroom-Setup\"\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC ### Model Registry\n# MAGIC \n# MAGIC The MLflow Model Registry component is a centralized model store, set of APIs, and UI, to collaboratively manage the full lifecycle of an MLflow Model. It provides model lineage (which MLflow Experiment and Run produced the model), model versioning, stage transitions (e.g. from staging to production), annotations (e.g. with comments, tags), and deployment management (e.g. which production jobs have requested a specific model version).\n# MAGIC \n# MAGIC Model registry has the following features:<br><br>\n# MAGIC \n# MAGIC * **Central Repository:** Register MLflow models with the MLflow Model Registry. A registered model has a unique name, version, stage, and other metadata.\n# MAGIC * **Model Versioning:** Automatically keep track of versions for registered models when updated.\n# MAGIC * **Model Stage:** Assigned preset or custom stages to each model version, like “Staging” and “Production” to represent the lifecycle of a model.\n# MAGIC * **Model Stage Transitions:** Record new registration events or changes as activities that automatically log users, changes, and additional metadata such as comments.\n# MAGIC * **CI/CD Workflow Integration:** Record stage transitions, request, review and approve changes as part of CI/CD pipelines for better control and governance.\n# MAGIC \n# MAGIC <div><img src=\"https://files.training.databricks.com/images/eLearning/ML-Part-4/model-registry.png\" style=\"height: 400px; margin: 20px\"/></div>\n# MAGIC \n# MAGIC <img alt=\"Side Note\" title=\"Side Note\" style=\"vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)\" src=\"https://files.training.databricks.com/static/images/icon-note.webp\"/> See <a href=\"https://mlflow.org/docs/latest/registry.html\" target=\"_blank\">the MLflow docs</a> for more details on the model registry.\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC ### Registering a Model\n# MAGIC \n# MAGIC The following workflow will work with either the UI or in pure Python. 
This notebook will use pure Python.\n# MAGIC \n# MAGIC <img alt=\"Side Note\" title=\"Side Note\" style=\"vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)\" src=\"https://files.training.databricks.com/static/images/icon-note.webp\"/> Explore the UI throughout this lesson by clicking the \"Models\" tab on the left-hand side of the screen.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Confirm that you are using `mlflow` version 1.7 or higher.\n\n# COMMAND ----------\n\nfrom distutils.version import LooseVersion, StrictVersion\nimport mlflow \n\nassert StrictVersion(mlflow.__version__) >= StrictVersion(\"1.7.0\"), \"Update MLflow to version 1.7.0+\"\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Train a model and log it to MLflow.\n\n# COMMAND ----------\n\nimport mlflow\nimport mlflow.sklearn\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv(\"/dbfs/mnt/training/airbnb/sf-listings/airbnb-cleaned-mlflow.csv\")\nX_train, X_test, y_train, y_test = train_test_split(df.drop([\"price\"], axis=1), df[[\"price\"]].values.ravel(), random_state=42)\n\nrf = RandomForestRegressor(n_estimators=100, max_depth=5)\nrf.fit(X_train, y_train)\n\nwith mlflow.start_run(run_name=\"RF Model\") as run:\n mlflow.sklearn.log_model(rf, \"model\")\n mlflow.log_metric(\"mse\", mean_squared_error(y_test, rf.predict(X_test)))\n\n runID = run.info.run_uuid\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Create a unique model name so you don't clash with other workspace users.\n\n# COMMAND ----------\n\nimport uuid\n\nmodel_name = f\"airbnb_rf_model_{uuid.uuid4().hex[:10]}\"\nmodel_name\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Register the model.\n\n# COMMAND ----------\n\nmodel_uri = \"runs:/{run_id}/model\".format(run_id=runID)\n\nmodel_details = mlflow.register_model(model_uri=model_uri, name=model_name)\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC **Open the *Models* tab on the left of the screen to explore the registered model.** Note the following:<br><br>\n# MAGIC \n# MAGIC * It logged who trained the model and what code was used\n# MAGIC * It logged a history of actions taken on this model\n# MAGIC * It logged this model as a first version\n# MAGIC \n# MAGIC <div><img src=\"https://files.training.databricks.com/images/eLearning/ML-Part-4/model-registry-1.png\" style=\"height: 400px; margin: 20px\"/></div>\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Check the status. 
It will initially be in `PENDING_REGISTRATION` status.\n\n# COMMAND ----------\n\nfrom mlflow.tracking.client import MlflowClient\n\nclient = MlflowClient()\nmodel_version_details = client.get_model_version(name=model_name, version=1)\n\nmodel_version_details.status\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Now add a model description\n\n# COMMAND ----------\n\nclient.update_registered_model(\n name=model_details.name,\n description=\"This model forecasts Airbnb housing list prices based on various listing inputs.\"\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Add a version-specific description.\n\n# COMMAND ----------\n\nclient.update_model_version(\n name=model_details.name,\n version=model_details.version,\n description=\"This model version was built using sklearn.\"\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Deploying a Model\n# MAGIC \n# MAGIC The MLflow Model Registry defines several model stages: `None`, `Staging`, `Production`, and `Archived`. Each stage has a unique meaning. For example, `Staging` is meant for model testing, while `Production` is for models that have completed the testing or review processes and have been deployed to applications. \n# MAGIC \n# MAGIC Users with appropriate permissions can transition models between stages. In private preview, any user can transition a model to any stage. In the near future, administrators in your organization will be able to control these permissions on a per-user and per-model basis.\n# MAGIC \n# MAGIC If you have permission to transition a model to a particular stage, you can make the transition directly by using the `MlflowClient.update_model_version()` function. If you do not have permission, you can request a stage transition using the REST API; for example: ```%sh curl -i -X POST -H \"X-Databricks-Org-Id: <YOUR_ORG_ID>\" -H \"Authorization: Bearer <YOUR_ACCESS_TOKEN>\" https://<YOUR_DATABRICKS_WORKSPACE_URL>/api/2.0/preview/mlflow/transition-requests/create -d '{\"comment\": \"Please move this model into production!\", \"model_version\": {\"version\": 1, \"registered_model\": {\"name\": \"power-forecasting-model\"}}, \"stage\": \"Production\"}'\n# MAGIC ```\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Now that you've learned about stage transitions, transition the model to the `Production` stage.\n\n# COMMAND ----------\n\nimport time\n\ntime.sleep(10) # In case the registration is still pending\n\n# COMMAND ----------\n\nclient.transition_model_version_stage(\n name=model_details.name,\n version=model_details.version,\n stage='Production',\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Fetch the model's current status.\n\n# COMMAND ----------\n\nmodel_version_details = client.get_model_version(\n name=model_details.name,\n version=model_details.version,\n)\nprint(\"The current model stage is: '{stage}'\".format(stage=model_version_details.current_stage))\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC Fetch the latest model using a `pyfunc`. 
Loading the model in this way allows us to use the model regardless of the package that was used to train it.\n# MAGIC \n# MAGIC <img alt=\"Side Note\" title=\"Side Note\" style=\"vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)\" src=\"https://files.training.databricks.com/static/images/icon-note.webp\"/> You can load a specific version of the model too.\n\n# COMMAND ----------\n\nimport mlflow.pyfunc\n\nmodel_version_uri = \"models:/{model_name}/1\".format(model_name=model_name)\n\nprint(\"Loading registered model version from URI: '{model_uri}'\".format(model_uri=model_version_uri))\nmodel_version_1 = mlflow.pyfunc.load_model(model_version_uri)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Apply the model.\n\n# COMMAND ----------\n\nmodel_version_1.predict(X_test)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Deploying a New Model Version\n# MAGIC \n# MAGIC The MLflow Model Registry enables you to create multiple model versions corresponding to a single registered model. By performing stage transitions, you can seamlessly integrate new model versions into your staging or production environments.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Create a new model version and register that model when it's logged.\n\n# COMMAND ----------\n\nimport mlflow\nimport mlflow.sklearn\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv(\"/dbfs/mnt/training/airbnb/sf-listings/airbnb-cleaned-mlflow.csv\")\nX_train, X_test, y_train, y_test = train_test_split(df.drop([\"price\"], axis=1), df[[\"price\"]].values.ravel(), random_state=42)\n\nrf = RandomForestRegressor(n_estimators=300, max_depth=10)\nrf.fit(X_train, y_train)\n\nwith mlflow.start_run(run_name=\"RF Model\") as run:\n # Specify the `registered_model_name` parameter of the `mlflow.sklearn.log_model()`\n # function to register the model with the MLflow Model Registry. 
This automatically\n # creates a new model version\n mlflow.sklearn.log_model(\n sk_model=rf,\n artifact_path=\"sklearn-model\",\n registered_model_name=model_name,\n )\n mlflow.log_metric(\"mse\", mean_squared_error(y_test, rf.predict(X_test)))\n\n runID = run.info.run_uuid\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC Check the UI to see the new model version.\n# MAGIC \n# MAGIC <div><img src=\"https://files.training.databricks.com/images/eLearning/ML-Part-4/model-registry-2.png\" style=\"height: 400px; margin: 20px\"/></div>\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Use the search functionality to grab the latest model version.\n\n# COMMAND ----------\n\nmodel_version_infos = client.search_model_versions(f\"name = '{model_name}'\")\nnew_model_version = max([model_version_info.version for model_version_info in model_version_infos])\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Add a description to this new version.\n\n# COMMAND ----------\n\nclient.update_model_version(\n name=model_name,\n version=new_model_version,\n description=\"This model version is a random forest containing 300 decision trees and a max depth of 10 that was trained in scikit-learn.\"\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Put this new model version into `Staging`\n\n# COMMAND ----------\n\nimport time\n\ntime.sleep(10) # In case the registration is still pending\n\n# COMMAND ----------\n\nclient.transition_model_version_stage(\n name=model_name,\n version=new_model_version,\n stage=\"Staging\",\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Sicne this model is now in staging, you can execute an automated CI/CD pipeline against it to test it before going into production. Once that is completed, you can push that model into production.\n\n# COMMAND ----------\n\nclient.transition_model_version_stage(\n name=model_name,\n version=new_model_version,\n stage=\"Production\",\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Archiving and Deleting\n# MAGIC \n# MAGIC You can now archive and delete old versions of the model.\n\n# COMMAND ----------\n\nclient.transition_model_version_stage(\n name=model_name,\n version=1,\n stage=\"Archived\",\n)\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC Delete version 1.\n# MAGIC \n# MAGIC <img alt=\"Side Note\" title=\"Side Note\" style=\"vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)\" src=\"https://files.training.databricks.com/static/images/icon-note.webp\"/> You cannot delete a model that is not first archived.\n\n# COMMAND ----------\n\nclient.delete_model_version(\n name=model_name,\n version=1\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Archive version 2 of the model too.\n\n# COMMAND ----------\n\nclient.transition_model_version_stage(\n name=model_name,\n version=2,\n stage=\"Archived\",\n)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Now delete the entire registered model.\n\n# COMMAND ----------\n\nclient.delete_registered_model(model_name)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Review\n# MAGIC **Question:** How does MLflow tracking differ from the model registry? \n# MAGIC **Answer:** Tracking is meant for experimentation and development. The model registry is designed to take a model from tracking and put it through staging and into production. This is often the point that a data engineer or a machine learning engineer takes responsibility for the depoloyment process.\n# MAGIC \n# MAGIC **Question:** Why do I need a model registry? 
\n# MAGIC **Answer:** Just as MLflow tracking provides end-to-end reproducibility for the machine learning training process, a model registry provides reproducibility and governance for the deployment process. Since production systems are mission critical, components can be isolated with ACL's so only specific individuals can alter production models. Version control and CI/CD workflow integration is also a critical dimension of deploying models into production.\n# MAGIC \n# MAGIC **Question:** What can I do programatically versus using the UI? \n# MAGIC **Answer:** Most operations can be done using the UI or in pure Python. A model must be tracked using Python, but from that point on everything can be done either way. For instance, a model logged using the MLflow tracking API can then be registered using the UI and can then be pushed into production.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## ![Spark Logo Tiny](https://files.training.databricks.com/images/105/logo_spark_tiny.png) Classroom-Cleanup<br>\n# MAGIC \n# MAGIC Run the **`Classroom-Cleanup`** cell below to remove any artifacts created by this lesson.\n\n# COMMAND ----------\n\n# MAGIC %run \"./Includes/Classroom-Cleanup\"\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## ![Spark Logo Tiny](https://files.training.databricks.com/images/105/logo_spark_tiny.png) Next Steps\n# MAGIC \n# MAGIC Start the labs for this lesson, [Model Management Lab]($./Labs/05-Lab) \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Additional Topics & Resources\n# MAGIC \n# MAGIC **Q:** Where can I find out more information on MLflow Model Registry? \n# MAGIC **A:** Check out <a href=\"https://mlflow.org/docs/latest/registry.html\" target=\"_blank\">the MLflow documentation</a>\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC &copy; 2020 Databricks, Inc. All rights reserved.<br/>\n# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href=\"http://www.apache.org/\">Apache Software Foundation</a>.<br/>\n# MAGIC <br/>\n# MAGIC <a href=\"https://databricks.com/privacy-policy\">Privacy Policy</a> | <a href=\"https://databricks.com/terms-of-use\">Terms of Use</a> | <a href=\"http://help.databricks.com/\">Support</a>\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "pandas.read_csv" ] ]
Remorax/COMET
[ "e0a9c4116edb58fd2ddd2078329e06978e15b3b2" ]
[ "tests/integration/models/test_ranking_metric.py" ]
[ "# -*- coding: utf-8 -*-\nimport multiprocessing\nimport os\nimport shutil\nimport unittest\n\nimport torch\nfrom comet.models import RankingMetric\nfrom pytorch_lightning import seed_everything\nfrom pytorch_lightning.trainer.trainer import Trainer\nfrom scipy.stats import pearsonr\nfrom tests.data import DATA_PATH\nfrom torch.utils.data import DataLoader\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n\nclass TestRankingMetric(unittest.TestCase):\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(os.path.join(DATA_PATH, \"checkpoints\"))\n\n def test_training(self):\n seed_everything(12)\n trainer = Trainer(\n gpus=0,\n max_epochs=4,\n deterministic=True,\n checkpoint_callback=True,\n default_root_dir=DATA_PATH,\n logger=False,\n weights_summary=None,\n progress_bar_refresh_rate=0,\n )\n model = RankingMetric(\n encoder_model=\"BERT\",\n pretrained_model=\"google/bert_uncased_L-2_H-128_A-2\",\n train_data=os.path.join(DATA_PATH, \"test_ranking_data.csv\"),\n validation_data=os.path.join(DATA_PATH, \"test_ranking_data.csv\"),\n layerwise_decay=0.95,\n batch_size=32,\n learning_rate=1e-04,\n encoder_learning_rate=1e-04,\n )\n trainer.fit(model)\n self.assertTrue(\n os.path.exists(\n os.path.join(DATA_PATH, \"checkpoints\", \"epoch=3-step=15.ckpt\")\n )\n )\n saved_model = RankingMetric.load_from_checkpoint(\n os.path.join(DATA_PATH, \"checkpoints\", \"epoch=3-step=15.ckpt\")\n )\n dataset = saved_model.read_csv(\n os.path.join(DATA_PATH, \"test_regression_data.csv\"), regression=True\n )\n y = [s[\"score\"] for s in dataset]\n dataloader = DataLoader(\n dataset=dataset,\n batch_size=256,\n collate_fn=lambda x: saved_model.prepare_sample(x, inference=True),\n num_workers=multiprocessing.cpu_count(),\n )\n y_hat = (\n torch.cat(\n trainer.predict(dataloaders=dataloader, return_predictions=True), dim=0\n )\n .cpu()\n .tolist()\n )\n # This shouldn't break!\n pearsonr(y_hat, y)[0]\n" ]
[ [ "scipy.stats.pearsonr" ] ]
rahatsantosh/ipf_severity_detection
[ "e08f72db344a6dd54868c83a2484c78f7ec7a6fe" ]
[ "models/xray_train.py" ]
[ "import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\nimport torchvision\nimport matplotlib.pyplot as plt\nfrom shallow_autoenc import Autoencoder\nfrom autoencoder_dataset import Dataset\n\n# CUDA for PyTorch\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\nif use_cuda:\n print(torch.cuda.get_device_name())\ncudnn.benchmark = True\n\ntf = torchvision.transforms.Compose([\n torchvision.transforms.RandomHorizontalFlip(p=0.5),\n torchvision.transforms.RandomRotation(45),\n torchvision.transforms.RandomVerticalFlip(p=0.5),\n torchvision.transforms.Resize((1024, 1024)),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Lambda(lambda x:torch.reshape(x, (-1, x.shape[0], x.shape[1], x.shape[2])))\n])\n\nroot = \"../../data/external/chest_xray/img\"\n\ntraining_set = torchvision.datasets.ImageFolder(root, transform=tf)\ntraining_generator = DataLoader(training_set, batch_size = 32)\n\nmodel = Autoencoder()\nif use_cuda:\n\tmodel.to(device)\n\ndef train_model(model,train_loader,optimizer,n_epochs=10,gpu=True):\n loss_list=[]\n for epoch in range(n_epochs):\n for x, _ in train_loader:\n if gpu:\n # Transfer to GPU\n x = x.to(device)\n\n model.train()\n optimizer.zero_grad()\n y = model(x)\n loss = criterion(y, x)\n loss.backward()\n optimizer.step()\n loss_list.append(loss.data)\n print('Epoch : ',epoch,' Loss : ',loss.data)\n torch.save(model, '')\n\n return loss_list\n\ncriterion = nn.CosineSimilarity()\noptimizer = torch.optim.Adam(model.parameters())\n\nloss_list = train_model(\n model=model,\n train_loader=training_generator,\n optimizer=optimizer,\n n_epochs=100,\n gpu=True\n)\nprint(\"-------------Done--------------\")\n\nplt.plot(np.arange(len(loss_list)),loss_list)\nplt.savefig('../../reports/figures/xray_autoenc_loss.png')\n\nmodel_path = \"../../models/autoenc.pt\"\ntorch.save(model, model_path)\n" ]
[ [ "torch.reshape", "torch.utils.data.DataLoader", "matplotlib.pyplot.savefig", "torch.nn.CosineSimilarity", "torch.cuda.is_available", "torch.cuda.get_device_name", "torch.device", "torch.save" ] ]
nielsota/GANs
[ "7c4043022ba0fdd2d1f163abf70b7bd3f06be908" ]
[ "utils.py" ]
[ "from torchvision.utils import make_grid\nimport matplotlib.pyplot as plt\nimport argparse\nimport os\nimport torch\nfrom Data import *\n\n\n################################################################################\n############################## UTILITY FUNCTIONS ###############################\n################################################################################\n\n\n# Show images\ndef show_tensor_images(image_tensor, num_images=25, size=(1, 28, 28)):\n # The asterisk (*) can be used in python to unpack a list into its individual elements,\n # thus passing to view the correct form of input arguments it expects.\n # .detach(): cannot call view on a variable that requires grad\n # .cpu() because stores in CUDA\n\n image_tensor_unflat = image_tensor.detach().cpu().view(-1, *size)\n image_grid = make_grid(image_tensor_unflat[:num_images], nrow=5)\n plt.imshow(image_grid.permute(1, 2, 0).squeeze())\n #plt.show()\n\n\n# Show time series plots\ndef make_timeseries_plots(time_series_tensor, num_plots: int = 10):\n fig, axs = plt.subplots(num_plots)\n fig.set_size_inches(18.5, num_plots * 2)\n colors = [\"blue\", \"red\", \"green\", \"purple\"]\n # fig.suptitle('Time series plot', fontsize=12)\n for i in range(num_plots):\n axs[i].plot(time_series_tensor.detach().view(len(time_series_tensor), -1)[i], color=colors[i % len(colors)])\n axs[i].grid()\n return fig\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef makedirectory(dir_name):\n parent_dir = os.getcwd()\n directory = dir_name\n models_path = os.path.join(str(parent_dir), directory)\n if not os.path.exists(models_path):\n os.mkdir(models_path)\n print(\"Directory '% s' created\" % directory)\n else:\n pass\n\n\ndef combine_vectors(z, y):\n return torch.cat((z.float(),y.float()), 1)\n\n\ndef combine_noise_and_labels(data, labels):\n \"\"\"\n Combine [32,100] and [32,C] into [32,C+1,100]\n where each element [32,C] is repeated over entire channel\n \"\"\"\n # shape -> [32, 6 , 1]\n labels = labels[:, :, None]\n\n # shape -> [32, 6 , 100]\n repeated_labels = labels.repeat(1, 1, 100)\n\n # Combine; data[:, None, :] has shape [32, 1, 100]\n data_and_labels = combine_vectors(data[:, None, :], repeated_labels)\n\n return data_and_labels\n\n\n################################################################################\n################################################################################\n\n\nif __name__ == '__main__':\n print(\"Building test dataset...\")\n test_dataloader = load_arima_data(batch_size=128, dgp = 'arma_11_variable')\n X, y = next(iter(test_dataloader))\n combined = combine_noise_and_labels(X, y)\n print(\"Output shape: {}\".format(X.shape))\n print(\"Labels shape: {}\".format(y.shape))\n print(\"Combined shape: {}\".format(combined.shape))\n print(combined[:, 0, :].shape)\n print(y.shape[1])\n make_timeseries_plots(combined[:, 0, :])\n" ]
[ [ "matplotlib.pyplot.subplots" ] ]
geek-guild/async-rl
[ "b208b023541cae468ca4c9eceec590b9bfd71abd" ]
[ "atari_environment.py" ]
[ "import tensorflow as tf\nfrom skimage.transform import resize\nfrom skimage.color import rgb2gray\nimport numpy as np\nfrom collections import deque\n\nclass AtariEnvironment(object):\n \"\"\"\n Small wrapper for gym atari environments.\n Responsible for preprocessing screens and holding on to a screen buffer \n of size agent_history_length from which environment state\n is constructed.\n \"\"\"\n def __init__(self, gym_env, resized_width, resized_height, agent_history_length):\n self.env = gym_env\n self.resized_width = resized_width\n self.resized_height = resized_height\n self.agent_history_length = agent_history_length\n\n self.gym_actions = range(gym_env.action_space.n)\n if (gym_env.spec.id == \"Pong-v0\" or gym_env.spec.id == \"Breakout-v0\"):\n print(\"Doing workaround for pong or breakout\")\n # Gym returns 6 possible actions for breakout and pong.\n # Only three are used, the rest are no-ops. This just lets us\n # pick from a simplified \"LEFT\", \"RIGHT\", \"NOOP\" action space.\n self.gym_actions = [1,2,3]\n\n # Screen buffer of size AGENT_HISTORY_LENGTH to be able\n # to build state arrays of size [1, AGENT_HISTORY_LENGTH, width, height]\n self.state_buffer = deque()\n\n def get_initial_state(self):\n \"\"\"\n Resets the atari game, clears the state buffer\n \"\"\"\n # Clear the state buffer\n self.state_buffer = deque()\n\n x_t = self.env.reset()\n x_t = self.get_preprocessed_frame(x_t)\n s_t = np.stack((x_t, x_t, x_t, x_t), axis = 0)\n \n for i in range(self.agent_history_length-1):\n self.state_buffer.append(x_t)\n return s_t\n\n def get_preprocessed_frame(self, observation):\n \"\"\"\n See Methods->Preprocessing in Mnih et al.\n 1) Get image grayscale\n 2) Rescale image\n \"\"\"\n return resize(rgb2gray(observation), (self.resized_width, self.resized_height))\n\n def step(self, action_index):\n \"\"\"\n Excecutes an action in the gym environment.\n Builds current state (concatenation of agent_history_length-1 previous frames and current one).\n Pops oldest frame, adds current frame to the state buffer.\n Returns current state.\n \"\"\"\n\n x_t1, r_t, terminal, info = self.env.step(self.gym_actions[action_index])\n x_t1 = self.get_preprocessed_frame(x_t1)\n\n previous_frames = np.array(self.state_buffer)\n s_t1 = np.empty((self.agent_history_length, self.resized_height, self.resized_width))\n s_t1[:self.agent_history_length-1, ...] = previous_frames\n s_t1[self.agent_history_length-1] = x_t1\n\n # Pop the oldest frame, add the current frame to the queue\n self.state_buffer.popleft()\n self.state_buffer.append(x_t1)\n\n return s_t1, r_t, terminal, info\n" ]
[ [ "numpy.empty", "numpy.array", "numpy.stack" ] ]
jbrowarczyk/jb-masters-thesis
[ "c345f43b32126d16f10c3706f5f798fde0665ee0" ]
[ "src/experiment3.py" ]
[ "from sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nfrom global_settings import TRAIN_VERBOSE\nfrom utils import make_train_data, make_test_data, save_txt\nimport numpy as np\nimport joblib\nimport os\n\n\nEXPERIMENT_NAME = \"experiment3\"\n\nFEATURES = [\"ar_16\",\"ar_24\",\"dwt\",\"dwt_stat\",\"welch_16\",\"welch_32\",\"welch_64\"]\nC_VALUES = [0.01,0.1,1,10,100]\nGAMMA_VALUES = [0.1, 1, 10]\n\nSKIP_COMBINATIONS = set([('dwt',10,0.1),('dwt',10,1),('dwt',10,10),\n\t ('dwt',100,0.1),('dwt',100,1),('dwt',100,10),\n\t ('dwt_stat',10,10),\n\t ('ar_24',100,0.1),('ar_24',100,1),('ar_24',100,10)])\n\nSAVE_RESULTS = True # saves results in single file using joblib library\nSAVE_RESULTS_TXT = True # saves results in .txt file\nSAVE_MODEL = False # saves trained model\n\ndef experiment_svm_rbf(train_data,train_data_classes,test_data,test_data_classes,c,gamma,verbose):\n\ttry:\n\t\tsvm = SVC(C=c,kernel='rbf',gamma=gamma,verbose=verbose)\n\t\tsvm.fit(train_data,train_data_classes)\n\n\t\tresults = svm.predict(test_data)\n\t\tscore = accuracy_score(test_data_classes,results)\n\t\treport = classification_report(test_data_classes,results,digits=4,output_dict=False)\n\t\treport_dict = classification_report(test_data_classes,results,output_dict=True)\n\t\tcm = confusion_matrix(test_data_classes,results)\n\n\t\tres = {}\n\t\tres['results'] = results\n\t\tres['accuracy_score'] = score\n\t\tres['classification_report'] = report\n\t\tres['classification_report_dict'] = report_dict\n\t\tres['confusion_matrix'] = cm\n\n\t\treturn res,svm\n\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn None,None\n\ndef main():\n\tif(EXPERIMENT_NAME not in os.listdir()):\n\t\tos.mkdir(EXPERIMENT_NAME)\n\n\tfor feature in FEATURES:\n\t\ttry:\n\t\t\tdata = np.load(feature + \"_stats.npy\",allow_pickle=True).item()\n\t\t\tpca = joblib.load(\"pca_\" + feature + \"_stats\")\n\t\t\ttrain_data, train_data_classes = make_train_data(data,True)\n\t\t\ttest_data, test_data_classes = make_test_data(data)\n\t\t\ttrain_data_pca = np.array(pca.transform(train_data))\n\t\t\ttest_data_pca = np.array(pca.transform(test_data))\n\n\t\t\tfor c in C_VALUES:\n\t\t\t\tfor gamma in GAMMA_VALUES:\n\t\t\t\t\tif (feature,c,gamma) in SKIP_COMBINATIONS:\n\t\t\t\t\t\tprint(\"Skipping \" + feature + \" SVM-rbf C = \" + str(c) + \" gamma = \" + str(gamma))\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tprint(\"Computing \" + feature + \" SVM-rbf C = \" + str(c) + \" gamma = \" + str(gamma))\n\t\t\t\t\tres,model = experiment_svm_rbf(train_data_pca,train_data_classes,test_data_pca,test_data_classes,c,gamma,TRAIN_VERBOSE)\n\n\t\t\t\t\tif res != None:\n\t\t\t\t\t\tif SAVE_RESULTS:\n\t\t\t\t\t\t\tfilename = EXPERIMENT_NAME + \"_\" + feature + \" svm_rbf_c_\" + str(c) + \"_gamma_\" + str(gamma) + \"_results\"\n\t\t\t\t\t\t\tpath = os.path.join(EXPERIMENT_NAME,filename)\n\t\t\t\t\t\t\tjoblib.dump(res,path)\n\n\t\t\t\t\t\tif SAVE_RESULTS_TXT:\n\t\t\t\t\t\t\tfilename = EXPERIMENT_NAME + \"_\" + feature + \" svm_rbf_c_\" + str(c) + \"_gamma_\" + str(gamma) + \"_results.txt\"\n\t\t\t\t\t\t\tpath = os.path.join(EXPERIMENT_NAME,filename)\n\t\t\t\t\t\t\tsave_txt(res,path)\n\n\t\t\t\t\t\tif SAVE_MODEL:\n\t\t\t\t\t\t\tfilename = EXPERIMENT_NAME + \"_\" + feature + \" svm_rbf_c_\" + str(c) + \"_gamma_\" + str(gamma) + \"_model\"\n\t\t\t\t\t\t\tpath = os.path.join(EXPERIMENT_NAME,filename)\n\t\t\t\t\t\t\tjoblib.dump(model,path)\n\n\t\texcept Exception as e:\n\t\t\tprint(\"Error during \" + EXPERIMENT_NAME + \" \" + 
feature + \" SVM-RBF C = \" + str(c) + \" gamma = \" + str(gamma))\n\t\t\tprint(e)\n\t\t\tpass\n\nif __name__ == \"__main__\":\n\tmain()\n" ]
[ [ "sklearn.metrics.confusion_matrix", "sklearn.svm.SVC", "numpy.load", "sklearn.metrics.classification_report", "sklearn.metrics.accuracy_score" ] ]
JOHNKYON/Kaggle_Learn
[ "6a45931e4ec1e189b95c61e27e90499347840180" ]
[ "hot_encoding/hot_encoding.py" ]
[ "\"\"\"Python script for kaggle house price predict practice\"\"\"\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.preprocessing import Imputer\nimport pandas as pd\n\n\ndef main():\n \"\"\"Main script\"\"\"\n # Load data\n train_data = pd.read_csv('data/train.csv')\n test_data = pd.read_csv('data/test.csv')\n\n y_train = train_data.SalePrice\n x_train = train_data.drop(['SalePrice'], axis=1)\n x_test = test_data\n\n # Encoding data\n x_train = pd.get_dummies(x_train)\n x_test = pd.get_dummies(x_test)\n x_train, x_test = x_train.align(x_test, join='left', axis=1)\n\n # Impute data\n my_imputer = Imputer()\n x_train = my_imputer.fit_transform(x_train)\n x_test = my_imputer.transform(x_test)\n print(x_train)\n\n # Get model\n model = RandomForestRegressor()\n model.fit(x_train, y_train)\n pred = model.predict(x_test)\n\n # Output\n submission = pd.DataFrame({'Id': test_data.Id, 'SalePrice': pred})\n submission.to_csv(\"hot_encoding/submission.csv\", index=False)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "pandas.read_csv", "pandas.DataFrame", "sklearn.preprocessing.Imputer", "pandas.get_dummies" ] ]
nivethakesavan2203/haiku-generation
[ "ef66c0aa5a5ffcfcfa26b8e993d3efdfcc1be804" ]
[ "haiku_generation/src/models/embedding.py" ]
[ "import torch\n\n'''\none method:\nload RoBERTa from torch.hub\nimport torch\n\nroberta_torch = torch.hub.load('pytorch/fairseq', 'roberta.large')\nroberta_torch.eval()\n\nsentence = \"I Love RoBERTa!!! I Love Pytorch!!!\"\nApply Byte-Pair Encoding to input text, tokens should be a tensor\ntokens_torch = roberta_torch.encode(sentence)\n\nExtract features from RoBERTa using BPE text\nembedding_torch = roberta_torch.extract_features(tokens_torch, return_all_hiddens=True)[0]\n'''\n\n'''\nanother method:\nload RoBERTa from transformers, note it does not have .encode(), therefore we need RobertaTokenizer\nimport torch\nfrom transformers import RobertaModel, RobertaTokenizer\n\ntokenizer = RobertaTokenizer.from_pretrained(\"roberta-large\")\nroberta_trans = RobertaModel.from_pretrained(\"roberta-large\")\n\nsentence = \"I Love RoBERTa!!! I Love Pytorch!!!\"\nApply Byte-Pair Encoding to input text with RobertaTokenizer, note that tokenizer.encode() returns to you a list, but we need our tokens to be a tensor\ntokens_trans = torch.tensor([tokenizer.encode(sentence)])\n\nExtract features from RobertaModel using BPE text\nembedding_trans = roberta_trans.embeddings(tokens_trans)[0]\n'''\n\n\nclass RobertaModel():\n def __init__(self):\n self.model = torch.hub.load('pytorch/fairseq', 'roberta.large')\n self.model.eval().cuda()\n\n def __call__(self, content):\n tokens = self.model.encode(content)\n embed = self.model.extract_features(tokens, return_all_hiddens=True)[0]\n return embed\n\n\nif __name__ == '__main__':\n # example usage\n roberta = RobertaModel()\n encoding = roberta('trees')\n encoding2 = roberta('test')\n encoding3 = roberta('go')\n encoding4 = roberta('sandwich')\n print(encoding.shape)\n print(encoding)\n print(encoding2.shape)\n print(encoding3.shape)\n\n" ]
[ [ "torch.hub.load" ] ]
ghbtest/deep-learning-coursera
[ "95d343f2136e20f285963a2605739dc966d82b09" ]
[ "Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/Initialization.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Initialization\n# \n# Welcome to the first assignment of \"Improving Deep Neural Networks\". \n# \n# Training your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning. \n# \n# If you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results. \n# \n# A well chosen initialization can:\n# - Speed up the convergence of gradient descent\n# - Increase the odds of gradient descent converging to a lower training (and generalization) error \n# \n# To get started, run the following cell to load the packages and the planar dataset you will try to classify.\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn\nimport sklearn.datasets\nfrom init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation\nfrom init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nplt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# load image dataset: blue/red dots in circles\ntrain_X, train_Y, test_X, test_Y = load_dataset()\n\n\n# You would like a classifier to separate the blue dots from the red dots.\n\n# ## 1 - Neural Network model \n\n# You will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with: \n# - *Zeros initialization* -- setting `initialization = \"zeros\"` in the input argument.\n# - *Random initialization* -- setting `initialization = \"random\"` in the input argument. This initializes the weights to large random values. \n# - *He initialization* -- setting `initialization = \"he\"` in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015. \n# \n# **Instructions**: Please quickly read over the code below, and run it. 
In the next part you will implement the three initialization methods that this `model()` calls.\n\n# In[2]:\n\n\ndef model(X, Y, learning_rate=0.01, num_iterations=15000, print_cost=True, initialization=\"he\"):\n \"\"\"\n Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.\n \n Arguments:\n X -- input data, of shape (2, number of examples)\n Y -- true \"label\" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)\n learning_rate -- learning rate for gradient descent \n num_iterations -- number of iterations to run gradient descent\n print_cost -- if True, print the cost every 1000 iterations\n initialization -- flag to choose which initialization to use (\"zeros\",\"random\" or \"he\")\n \n Returns:\n parameters -- parameters learnt by the model\n \"\"\"\n \n grads = {}\n costs = [] # to keep track of the loss\n m = X.shape[1] # number of examples\n layers_dims = [X.shape[0], 10, 5, 1]\n \n # Initialize parameters dictionary.\n if initialization == \"zeros\":\n parameters = initialize_parameters_zeros(layers_dims)\n elif initialization == \"random\":\n parameters = initialize_parameters_random(layers_dims)\n elif initialization == \"he\":\n parameters = initialize_parameters_he(layers_dims)\n\n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n\n # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.\n a3, cache = forward_propagation(X, parameters)\n \n # Loss\n cost = compute_loss(a3, Y)\n\n # Backward propagation.\n grads = backward_propagation(X, Y, cache)\n \n # Update parameters.\n parameters = update_parameters(parameters, grads, learning_rate)\n \n # Print the loss every 1000 iterations\n if print_cost and i % 1000 == 0:\n print(\"Cost after iteration {}: {}\".format(i, cost))\n costs.append(cost)\n \n # plot the loss\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('iterations (per hundreds)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n \n return parameters\n\n\n# ## 2 - Zero initialization\n# \n# There are two types of parameters to initialize in a neural network:\n# - the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$\n# - the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$\n# \n# **Exercise**: Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to \"break symmetry\", but lets try it anyway and see what happens. 
Use np.zeros((..,..)) with the correct shapes.\n\n# In[3]:\n\n\n# GRADED FUNCTION: initialize_parameters_zeros \n\ndef initialize_parameters_zeros(layers_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the size of each layer.\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])\n b1 -- bias vector of shape (layers_dims[1], 1)\n ...\n WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])\n bL -- bias vector of shape (layers_dims[L], 1)\n \"\"\"\n \n parameters = {}\n L = len(layers_dims) # number of layers in the network\n \n for l in range(1, L):\n ### START CODE HERE ### (≈ 2 lines of code)\n parameters['W' + str(l)] = np.zeros((layers_dims[l], layers_dims[l - 1]))\n parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))\n ### END CODE HERE ###\n return parameters\n\n\n# In[4]:\n\n\nparameters = initialize_parameters_zeros([3,2,1])\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\n\n\n\n\n# Run the following code to train your model on 15,000 iterations using zeros initialization.\n\n# In[5]:\n\n\nparameters = model(train_X, train_Y, initialization = \"zeros\")\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)\n\n\n# The performance is really bad, and the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Lets look at the details of the predictions and the decision boundary:\n\n# In[6]:\n\n\nprint(\"predictions_train = \" + str(predictions_train))\nprint(\"predictions_test = \" + str(predictions_test))\n\n\n# In[7]:\n\n\nplt.title(\"Model with Zeros initialization\")\naxes = plt.gca()\naxes.set_xlim([-1.5, 1.5])\naxes.set_ylim([-1.5, 1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)\n\n\n# The model is predicting 0 for every example. \n# \n# In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression. \n\n# <font color='blue'>\n# **What you should remember**:\n# - The weights $W^{[l]}$ should be initialized randomly to break symmetry. \n# - It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly. \n# \n\n# ## 3 - Random initialization\n# \n# To break symmetry, lets intialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are intialized randomly, but to very large values. \n# \n# **Exercise**: Implement the following function to initialize your weights to large random values (scaled by \\*10) and your biases to zeros. Use `np.random.randn(..,..) * 10` for weights and `np.zeros((.., ..))` for biases. We are using a fixed `np.random.seed(..)` to make sure your \"random\" weights match ours, so don't worry if running several times your code gives you always the same initial values for the parameters. 
\n\n# In[59]:\n\n\n# GRADED FUNCTION: initialize_parameters_random\n\ndef initialize_parameters_random(layers_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the size of each layer.\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])\n b1 -- bias vector of shape (layers_dims[1], 1)\n ...\n WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])\n bL -- bias vector of shape (layers_dims[L], 1)\n \"\"\"\n \n np.random.seed(3) # This seed makes sure your \"random\" numbers will be the as ours\n parameters = {}\n L = len(layers_dims) # integer representing the number of layers\n \n for l in range(1, L):\n ### START CODE HERE ### (≈ 2 lines of code)\n parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * 3\n parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))\n ### END CODE HERE ###\n\n return parameters\n\n\n# In[51]:\n\n\nparameters = initialize_parameters_random([3, 2, 1])\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\n\n\n\n\n# Run the following code to train your model on 15,000 iterations using random initialization.\n\n# In[52]:\n\n\nparameters = model(train_X, train_Y, initialization = \"random\")\nprint(\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint(\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)\n\n\n# If you see \"inf\" as the cost after the iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes. \n# \n# Anyway, it looks like you have broken symmetry, and this gives better results. than before. The model is no longer outputting all 0s. \n\n# In[21]:\n\n\nprint(predictions_train)\nprint(predictions_test)\n\n\n# In[22]:\n\n\nplt.title(\"Model with large random initialization\")\naxes = plt.gca()\naxes.set_xlim([-1.5, 1.5])\naxes.set_ylim([-1.5, 1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)\n\n\n# **Observations**:\n# - The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\\log(a^{[3]}) = \\log(0)$, the loss goes to infinity.\n# - Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm. \n# - If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.\n# \n# <font color='blue'>\n# **In summary**:\n# - Initializing weights to very large random values does not work well. \n# - Hopefully intializing with small random values does better. The important question is: how small should be these random values be? Lets find out in the next part! 
\n\n# In[31]:\n\n\n#compare X and parameter values\nfrom scipy import stats\nstats.describe(train_X.flatten())\n\n# xmean=np.mean(train_X)\n# print (xmean)\n\n\n# In[28]:\n\n\nprint (parameters)\n\n\n# In[57]:\n\n\nallw=np.concatenate((parameters['W1'].flatten(), parameters['W2'].flatten(), parameters['W3'].flatten())) \n\n\n# In[58]:\n\n\nstats.describe(allw) \n\n\n# ## 4 - He initialization\n# \n# Finally, try \"He Initialization\"; this is named for the first author of He et al., 2015. (If you have heard of \"Xavier initialization\", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of `sqrt(1./layers_dims[l-1])` where He initialization would use `sqrt(2./layers_dims[l-1])`.)\n# \n# **Exercise**: Implement the following function to initialize your parameters with He initialization.\n# \n# **Hint**: This function is similar to the previous `initialize_parameters_random(...)`. The only difference is that instead of multiplying `np.random.randn(..,..)` by 10, you will multiply it by $\\sqrt{\\frac{2}{\\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation. \n\n# In[53]:\n\n\n# GRADED FUNCTION: initialize_parameters_he\n\ndef initialize_parameters_he(layers_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the size of each layer.\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])\n b1 -- bias vector of shape (layers_dims[1], 1)\n ...\n WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])\n bL -- bias vector of shape (layers_dims[L], 1)\n \"\"\"\n \n np.random.seed(3)\n parameters = {}\n L = len(layers_dims) - 1 # integer representing the number of layers\n \n for l in range(1, L + 1):\n ### START CODE HERE ### (≈ 2 lines of code)\n parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * np.sqrt(2 / layers_dims[l - 1])\n parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))\n ### END CODE HERE ###\n \n return parameters\n\n\n# In[54]:\n\n\nparameters = initialize_parameters_he([2, 4, 1])\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\n\n\n# **Expected Output**:\n# \n# <table> \n# <tr>\n# <td>\n# **W1**\n# </td>\n# <td>\n# [[ 1.78862847 0.43650985]\n# [ 0.09649747 -1.8634927 ]\n# [-0.2773882 -0.35475898]\n# [-0.08274148 -0.62700068]]\n# </td>\n# </tr>\n# <tr>\n# <td>\n# **b1**\n# </td>\n# <td>\n# [[ 0.]\n# [ 0.]\n# [ 0.]\n# [ 0.]]\n# </td>\n# </tr>\n# <tr>\n# <td>\n# **W2**\n# </td>\n# <td>\n# [[-0.03098412 -0.33744411 -0.92904268 0.62552248]]\n# </td>\n# </tr>\n# <tr>\n# <td>\n# **b2**\n# </td>\n# <td>\n# [[ 0.]]\n# </td>\n# </tr>\n# \n# </table> \n\n# Run the following code to train your model on 15,000 iterations using He initialization.\n\n# In[55]:\n\n\nparameters = model(train_X, train_Y, initialization = \"he\")\nprint(\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint(\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)\n\n\n# In[56]:\n\n\nplt.title(\"Model with He initialization\")\naxes = plt.gca()\naxes.set_xlim([-1.5, 1.5])\naxes.set_ylim([-1.5, 1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)\n\n\n# **Observations**:\n# - The model with He initialization 
separates the blue and the red dots very well in a small number of iterations.\n# \n\n# ## 5 - Conclusions\n\n# You have seen three different types of initializations. For the same number of iterations and same hyperparameters the comparison is:\n# \n# <table> \n# <tr>\n# <td>\n# **Model**\n# </td>\n# <td>\n# **Train accuracy**\n# </td>\n# <td>\n# **Problem/Comment**\n# </td>\n# \n# </tr>\n# <td>\n# 3-layer NN with zeros initialization\n# </td>\n# <td>\n# 50%\n# </td>\n# <td>\n# fails to break symmetry\n# </td>\n# <tr>\n# <td>\n# 3-layer NN with large random initialization\n# </td>\n# <td>\n# 83%\n# </td>\n# <td>\n# too large weights \n# </td>\n# </tr>\n# <tr>\n# <td>\n# 3-layer NN with He initialization\n# </td>\n# <td>\n# 99%\n# </td>\n# <td>\n# recommended method\n# </td>\n# </tr>\n# </table> \n\n# <font color='blue'>\n# **What you should remember from this notebook**:\n# - Different initializations lead to different results\n# - Random initialization is used to break symmetry and make sure different hidden units can learn different things\n# - Don't intialize to values that are too large\n# - He initialization works well for networks with ReLU activations. \n" ]
[ [ "matplotlib.pyplot.gca", "numpy.sqrt", "matplotlib.pyplot.title", "numpy.random.seed", "scipy.stats.describe", "matplotlib.pyplot.plot", "numpy.random.randn", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
Unity-Technologies/lightning-hydra-template
[ "4bdf4e62c6f93021d7fae86a51c5d706990a933d" ]
[ "src/callbacks/wandb_callbacks.py" ]
[ "import glob\nimport os\nfrom typing import List\n\nimport matplotlib.pyplot as plt\nimport seaborn as sn\nimport torch\nimport wandb\nfrom pytorch_lightning import Callback, Trainer\nfrom pytorch_lightning.loggers import LoggerCollection, WandbLogger\nfrom sklearn import metrics\nfrom sklearn.metrics import f1_score, precision_score, recall_score\n\n\ndef get_wandb_logger(trainer: Trainer) -> WandbLogger:\n if isinstance(trainer.logger, WandbLogger):\n return trainer.logger\n\n if isinstance(trainer.logger, LoggerCollection):\n for logger in trainer.logger:\n if isinstance(logger, WandbLogger):\n return logger\n\n raise Exception(\n \"You are using wandb related callback, but WandbLogger was not found for some reason...\"\n )\n\n\nclass WatchModelWithWandb(Callback):\n \"\"\"Make WandbLogger watch model at the beginning of the run.\"\"\"\n\n def __init__(self, log: str = \"gradients\", log_freq: int = 100):\n self.log = log\n self.log_freq = log_freq\n\n def on_train_start(self, trainer, pl_module):\n logger = get_wandb_logger(trainer=trainer)\n logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)\n\n\nclass UploadCodeToWandbAsArtifact(Callback):\n \"\"\"Upload all *.py files to wandb as an artifact, at the beginning of the run.\"\"\"\n\n def __init__(self, code_dir: str):\n self.code_dir = code_dir\n\n def on_train_start(self, trainer, pl_module):\n logger = get_wandb_logger(trainer=trainer)\n experiment = logger.experiment\n\n code = wandb.Artifact(\"project-source\", type=\"code\")\n for path in glob.glob(os.path.join(self.code_dir, \"**/*.py\"), recursive=True):\n code.add_file(path)\n\n experiment.use_artifact(code)\n\n\nclass UploadCheckpointsToWandbAsArtifact(Callback):\n \"\"\"Upload checkpoints to wandb as an artifact, at the end of run.\"\"\"\n\n def __init__(self, ckpt_dir: str = \"checkpoints/\", upload_best_only: bool = False):\n self.ckpt_dir = ckpt_dir\n self.upload_best_only = upload_best_only\n\n def on_train_end(self, trainer, pl_module):\n logger = get_wandb_logger(trainer=trainer)\n experiment = logger.experiment\n\n ckpts = wandb.Artifact(\"experiment-ckpts\", type=\"checkpoints\")\n\n if self.upload_best_only:\n ckpts.add_file(trainer.checkpoint_callback.best_model_path)\n else:\n for path in glob.glob(os.path.join(self.ckpt_dir, \"**/*.ckpt\"), recursive=True):\n ckpts.add_file(path)\n\n experiment.use_artifact(ckpts)\n\n\nclass LogConfusionMatrixToWandb(Callback):\n \"\"\"Generate confusion matrix every epoch and send it to wandb.\n Expects validation step to return predictions and targets.\n \"\"\"\n\n def __init__(self):\n self.preds = []\n self.targets = []\n self.ready = True\n\n def on_sanity_check_start(self, trainer, pl_module) -> None:\n self.ready = False\n\n def on_sanity_check_end(self, trainer, pl_module):\n \"\"\"Start executing this callback only after all validation sanity checks end.\"\"\"\n self.ready = True\n\n def on_validation_batch_end(\n self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx\n ):\n \"\"\"Gather data from single batch.\"\"\"\n if self.ready:\n self.preds.append(outputs[\"preds\"])\n self.targets.append(outputs[\"targets\"])\n\n def on_validation_epoch_end(self, trainer, pl_module):\n \"\"\"Generate confusion matrix.\"\"\"\n if self.ready:\n logger = get_wandb_logger(trainer)\n experiment = logger.experiment\n\n preds = torch.cat(self.preds).cpu().numpy()\n targets = torch.cat(self.targets).cpu().numpy()\n\n confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)\n\n # set 
figure size\n plt.figure(figsize=(14, 8))\n\n # set labels size\n sn.set(font_scale=1.4)\n\n # set font size\n sn.heatmap(confusion_matrix, annot=True, annot_kws={\"size\": 8}, fmt=\"g\")\n\n # names should be uniqe or else charts from different experiments in wandb will overlap\n experiment.log({f\"confusion_matrix/{experiment.name}\": wandb.Image(plt)}, commit=False)\n\n # according to wandb docs this should also work but it crashes\n # experiment.log(f{\"confusion_matrix/{experiment.name}\": plt})\n\n # reset plot\n plt.clf()\n\n self.preds.clear()\n self.targets.clear()\n\n\nclass LogF1PrecRecHeatmapToWandb(Callback):\n \"\"\"Generate f1, precision, recall heatmap every epoch and send it to wandb.\n Expects validation step to return predictions and targets.\n \"\"\"\n\n def __init__(self, class_names: List[str] = None):\n self.preds = []\n self.targets = []\n self.ready = True\n\n def on_sanity_check_start(self, trainer, pl_module):\n self.ready = False\n\n def on_sanity_check_end(self, trainer, pl_module):\n \"\"\"Start executing this callback only after all validation sanity checks end.\"\"\"\n self.ready = True\n\n def on_validation_batch_end(\n self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx\n ):\n \"\"\"Gather data from single batch.\"\"\"\n if self.ready:\n self.preds.append(outputs[\"preds\"])\n self.targets.append(outputs[\"targets\"])\n\n def on_validation_epoch_end(self, trainer, pl_module):\n \"\"\"Generate f1, precision and recall heatmap.\"\"\"\n if self.ready:\n logger = get_wandb_logger(trainer=trainer)\n experiment = logger.experiment\n\n preds = torch.cat(self.preds).cpu().numpy()\n targets = torch.cat(self.targets).cpu().numpy()\n f1 = f1_score(preds, targets, average=None)\n r = recall_score(preds, targets, average=None)\n p = precision_score(preds, targets, average=None)\n data = [f1, p, r]\n\n # set figure size\n plt.figure(figsize=(14, 3))\n\n # set labels size\n sn.set(font_scale=1.2)\n\n # set font size\n sn.heatmap(\n data,\n annot=True,\n annot_kws={\"size\": 10},\n fmt=\".3f\",\n yticklabels=[\"F1\", \"Precision\", \"Recall\"],\n )\n\n # names should be uniqe or else charts from different experiments in wandb will overlap\n experiment.log({f\"f1_p_r_heatmap/{experiment.name}\": wandb.Image(plt)}, commit=False)\n\n # reset plot\n plt.clf()\n\n self.preds.clear()\n self.targets.clear()\n\n\nclass ImagePredictionLogger(Callback):\n \"\"\"Logs a validation batch and their predictions to wandb.\n Example adapted from:\n https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY\n \"\"\"\n\n def __init__(self, num_samples: int = 8):\n super().__init__()\n self.num_samples = num_samples\n self.ready = True\n\n def on_sanity_check_start(self, trainer, pl_module):\n self.ready = False\n\n def on_sanity_check_end(self, trainer, pl_module):\n \"\"\"Start executing this callback only after all validation sanity checks end.\"\"\"\n self.ready = True\n\n def on_validation_epoch_end(self, trainer, pl_module):\n if self.ready:\n logger = get_wandb_logger(trainer=trainer)\n experiment = logger.experiment\n\n # get a validation batch from the validation dat loader\n val_samples = next(iter(trainer.datamodule.val_dataloader()))\n val_imgs, val_labels = val_samples\n\n # run the batch through the network\n val_imgs = val_imgs.to(device=pl_module.device)\n logits = pl_module(val_imgs)\n preds = torch.argmax(logits, axis=-1)\n\n # log the images as wandb Image\n experiment.log(\n {\n 
f\"Images/{experiment.name}\": [\n wandb.Image(x, caption=f\"Pred:{pred}, Label:{y}\")\n for x, pred, y in zip(\n val_imgs[: self.num_samples],\n preds[: self.num_samples],\n val_labels[: self.num_samples],\n )\n ]\n }\n )\n" ]
[ [ "torch.cat", "sklearn.metrics.precision_score", "torch.argmax", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.clf", "sklearn.metrics.f1_score", "sklearn.metrics.recall_score", "matplotlib.pyplot.figure" ] ]
zheang01/FACT
[ "a877cc86acc4d29fb7589c8ac571c8aef09e5fd8" ]
[ "data/data_utils.py" ]
[ "from torchvision import transforms\r\nimport random\r\nimport torch\r\nimport numpy as np\r\nfrom math import sqrt\r\n\r\ndef dataset_info(filepath):\r\n with open(filepath, 'r') as f:\r\n images_list = f.readlines()\r\n\r\n file_names = []\r\n labels = []\r\n for row in images_list:\r\n row = row.strip().split(' ')\r\n file_names.append(row[0])\r\n labels.append(int(row[1]))\r\n\r\n return file_names, labels\r\n\r\n\r\ndef get_img_transform(train=False, image_size=224, crop=False, jitter=0):\r\n mean = [0.485, 0.456, 0.406]\r\n std = [0.229, 0.224, 0.225]\r\n if train:\r\n if crop:\r\n img_transform = [transforms.RandomResizedCrop(image_size, scale=[0.8, 1.0])]\r\n else:\r\n img_transform = [transforms.Resize((image_size, image_size))]\r\n if jitter > 0:\r\n img_transform.append(transforms.ColorJitter(brightness=jitter,\r\n contrast=jitter,\r\n saturation=jitter,\r\n hue=min(0.5, jitter)))\r\n img_transform += [transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean, std)]\r\n img_transform = transforms.Compose(img_transform)\r\n else:\r\n img_transform = transforms.Compose([\r\n transforms.Resize((image_size, image_size)),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean, std)\r\n ])\r\n return img_transform\r\n\r\n\r\ndef get_pre_transform(image_size=224, crop=False, jitter=0):\r\n if crop:\r\n img_transform = [transforms.RandomResizedCrop(image_size, scale=[0.8, 1.0])]\r\n else:\r\n img_transform = [transforms.Resize((image_size, image_size))]\r\n if jitter > 0:\r\n img_transform.append(transforms.ColorJitter(brightness=jitter,\r\n contrast=jitter,\r\n saturation=jitter,\r\n hue=min(0.5, jitter)))\r\n img_transform += [transforms.RandomHorizontalFlip(), lambda x: np.asarray(x)]\r\n img_transform = transforms.Compose(img_transform)\r\n return img_transform\r\n\r\n\r\ndef get_post_transform(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\r\n img_transform = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean, std)\r\n ])\r\n return img_transform\r\n\r\n\r\ndef get_spectrum(img):\r\n img_fft = np.fft.fft2(img)\r\n img_abs = np.abs(img_fft)\r\n img_pha = np.angle(img_fft)\r\n return img_abs, img_pha\r\n\r\ndef get_centralized_spectrum(img):\r\n img_fft = np.fft.fft2(img)\r\n img_fft = np.fft.fftshift(img_fft)\r\n img_abs = np.abs(img_fft)\r\n img_pha = np.angle(img_fft)\r\n return img_abs, img_pha\r\n\r\n\r\ndef colorful_spectrum_mix(img1, img2, alpha, ratio=1.0):\r\n \"\"\"Input image size: ndarray of [H, W, C]\"\"\"\r\n lam = np.random.uniform(0, alpha)\r\n\r\n assert img1.shape == img2.shape\r\n h, w, c = img1.shape\r\n h_crop = int(h * sqrt(ratio))\r\n w_crop = int(w * sqrt(ratio))\r\n h_start = h // 2 - h_crop // 2\r\n w_start = w // 2 - w_crop // 2\r\n\r\n img1_fft = np.fft.fft2(img1, axes=(0, 1))\r\n img2_fft = np.fft.fft2(img2, axes=(0, 1))\r\n img1_abs, img1_pha = np.abs(img1_fft), np.angle(img1_fft)\r\n img2_abs, img2_pha = np.abs(img2_fft), np.angle(img2_fft)\r\n\r\n img1_abs = np.fft.fftshift(img1_abs, axes=(0, 1))\r\n img2_abs = np.fft.fftshift(img2_abs, axes=(0, 1))\r\n\r\n img1_abs_ = np.copy(img1_abs)\r\n img2_abs_ = np.copy(img2_abs)\r\n img1_abs[h_start:h_start + h_crop, w_start:w_start + w_crop] = \\\r\n lam * img2_abs_[h_start:h_start + h_crop, w_start:w_start + w_crop] + (1 - lam) * img1_abs_[\r\n h_start:h_start + h_crop,\r\n w_start:w_start + w_crop]\r\n img2_abs[h_start:h_start + h_crop, w_start:w_start + w_crop] = \\\r\n lam * img1_abs_[h_start:h_start + h_crop, w_start:w_start + 
w_crop] + (1 - lam) * img2_abs_[\r\n h_start:h_start + h_crop,\r\n w_start:w_start + w_crop]\r\n\r\n img1_abs = np.fft.ifftshift(img1_abs, axes=(0, 1))\r\n img2_abs = np.fft.ifftshift(img2_abs, axes=(0, 1))\r\n\r\n img21 = img1_abs * (np.e ** (1j * img1_pha))\r\n img12 = img2_abs * (np.e ** (1j * img2_pha))\r\n img21 = np.real(np.fft.ifft2(img21, axes=(0, 1)))\r\n img12 = np.real(np.fft.ifft2(img12, axes=(0, 1)))\r\n img21 = np.uint8(np.clip(img21, 0, 255))\r\n img12 = np.uint8(np.clip(img12, 0, 255))\r\n\r\n return img21, img12" ]
[ [ "numpy.fft.fft2", "numpy.fft.ifft2", "numpy.abs", "numpy.clip", "numpy.asarray", "numpy.fft.fftshift", "numpy.copy", "numpy.fft.ifftshift", "numpy.random.uniform", "numpy.angle" ] ]
kazarinov/cfdr
[ "bf93428614af15440b60fb894097e94fa4efd168" ]
[ "hccf/utils/mathematics.py" ]
[ "# -*- coding: utf-8 -*-\nimport math\nimport scipy as sp\n\n\ndef sigmoid(z):\n s = 1.0 / (1.0 + math.exp(-z))\n return s\n\n\ndef log_loss(act, pred):\n epsilon = 1e-15\n pred = sp.maximum(epsilon, pred)\n pred = sp.minimum(1 - epsilon, pred)\n ll = sum(act * sp.log(pred.astype(float)) + sp.subtract(1, act.astype(float)) * sp.log(\n sp.subtract(1, pred.astype(float))))\n ll = ll * -1.0 / len(act)\n return ll\n\n\ndef loglikelihood(shows, clicks):\n if clicks == 0 or shows == 0 or clicks == shows:\n return 0\n\n ctr = float(clicks) / shows\n return -1 * (clicks * math.log(ctr) + (shows - clicks) * math.log(1 - ctr))\n" ]
[ [ "scipy.minimum", "scipy.maximum" ] ]
alexliniger/AdversarialRoadModel
[ "14157760687c22acc8b91c39128875005ada7563" ]
[ "LearningSafeSets/Validation/Validation.py" ]
[ "## Copyright 2020 Alexander Liniger\n\n## Licensed under the Apache License, Version 2.0 (the \"License\");\n## you may not use this file except in compliance with the License.\n## You may obtain a copy of the License at\n\n## http://www.apache.org/licenses/LICENSE-2.0\n\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS,\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n## See the License for the specific language governing permissions and\n## limitations under the License.\n###########################################################################\n###########################################################################\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport json\n\n\nclass Validation:\n def __init__(self,config):\n self.config = config\n self.cut_off = config[\"cut_off\"]\n self.data = {}\n self.result_dir = config[\"result_dir\"]\n\n def validate(self,model,data):\n # model.to(device)\n criterion_bce = nn.BCELoss()\n # criterion = nn.BCEWithLogitsLoss()\n criterion_mse = nn.MSELoss()\n model.eval()\n\n correct = 0\n # false_safe = 0\n under_approx = 0\n over_approx = 0\n total = 0\n metric_mse = []\n metric_bce = []\n\n for i in range(data.n_all_batches): \n state, safe = data.giveBatch(i)\n\n safe_model = model(state).view(-1)\n safe_model_max = (safe_model >= self.cut_off).type(torch.FloatTensor)\n\n metric_mse.append(criterion_mse(safe_model, safe).item())\n metric_bce.append(criterion_bce(safe_model, safe).item())\n total += safe.size(0)\n\n correct += (safe_model_max == safe).sum().item()\n under_approx += (safe_model_max < safe).sum().item()\n over_approx += (safe_model_max > safe).sum().item()\n\n print('\\tMSE: %.4f, BCE: %.4f, Acc: %.4f, UnderApprox: %.4f, OverApprox: %.4f'\n % (np.mean(metric_mse), np.mean(metric_bce), correct / total, under_approx / total, over_approx / total))\n\n self.data['full_set'] = []\n self.data['full_set'].append({\n 'acc': correct / total,\n 'under': under_approx / total,\n 'over': over_approx / total,\n 'total': total,\n 'correct': correct,\n 'mse': np.mean(metric_mse),\n 'bce': np.mean(metric_bce)\n })\n\n\n def validateTest(self,model,data):\n\n criterion_bce = nn.BCELoss()\n criterion_mse = nn.MSELoss()\n model.eval()\n\n correct = 0\n # false_safe = 0\n under_approx = 0\n over_approx = 0\n total = 0\n metric_mse = []\n metric_bce = []\n\n for i in range(data.n_train_batches,data.n_all_batches): \n state, safe = data.giveBatch(i)\n\n safe_model = model(state).view(-1)\n safe_model_max = (safe_model >= self.cut_off).type(torch.FloatTensor)\n\n metric_mse.append(criterion_mse(safe_model, safe).item())\n metric_bce.append(criterion_bce(safe_model, safe).item())\n total += safe.size(0)\n \n correct += (safe_model_max == safe).sum().item()\n under_approx += (safe_model_max < safe).sum().item()\n over_approx += (safe_model_max > safe).sum().item()\n\n print('\\tMSE: %.4f, BCE: %.4f, Acc: %.4f, UnderApprox: %.4f, OverApprox: %.4f'\n % (np.mean(metric_mse), np.mean(metric_bce), correct / total, under_approx / total, over_approx / total))\n\n self.data['val_set'] = []\n self.data['val_set'].append({\n 'acc': correct / total,\n 'under': under_approx / total,\n 'over': over_approx / total,\n 'total': total,\n 'correct': correct,\n 'mse': np.mean(metric_mse),\n 'bce': np.mean(metric_bce)\n })\n\n def validateTestUnseen(self,model,data):\n\n criterion_bce = nn.BCELoss()\n 
criterion_mse = nn.MSELoss()\n model.eval()\n\n correct = 0\n false_safe = 0\n under_approx = 0\n over_approx = 0\n total = 0\n metric_mse = []\n metric_bce = []\n\n for i in range(self.config['NGKAPPA_T']):\n state, safe = data.giveTest(i)\n\n safe_model = model(state).view(-1)\n safe_model_max = (safe_model >= self.cut_off).type(torch.FloatTensor)\n\n metric_mse.append(criterion_mse(safe_model, safe).item())\n metric_bce.append(criterion_bce(safe_model, safe).item())\n total += safe.size(0)\n\n correct += (safe_model_max == safe).sum().item()\n under_approx += (safe_model_max < safe).sum().item()\n over_approx += (safe_model_max > safe).sum().item()\n\n name = self.result_dir+\"/RobustInv-Pred-\"+str(i)+\".bin\"\n fh = open(name, \"bw\")\n safe_model_max.detach().numpy().astype(bool).tofile(fh)\n\n\n print('\\tMSE: %.4f, BCE: %.4f, Acc: %.4f, UnderApprox: %.4f, OverApprox: %.4f'\n % (np.mean(metric_mse), np.mean(metric_bce),correct/total,under_approx/total,over_approx/total))\n\n self.data['test_set'] = []\n self.data['test_set'].append({\n 'acc': correct / total,\n 'under': under_approx / total,\n 'over': over_approx / total,\n 'total': total,\n 'correct': correct,\n 'mse': np.mean(metric_mse),\n 'bce': np.mean(metric_bce)\n })\n\n def save_val(self):\n with open(self.result_dir + '/val.txt', 'w') as outfile:\n json.dump(self.data, outfile, indent=4)\n\n def save_model(self,model):\n model_dict = {}\n k = 0\n for i in range(len(model._modules['model']._modules)):\n if len(model._modules['model']._modules[str(i)]._parameters) > 0:\n W = model._modules['model']._modules[str(i)]._parameters['weight'].data.detach().numpy().tolist()\n b = model._modules['model']._modules[str(i)]._parameters['bias'].data.detach().numpy().tolist()\n model_dict[str(k)] = []\n model_dict[str(k)].append({\n 'W': W,\n 'b': b })\n k+=1\n\n model_dict[\"length\"] = k\n\n with open(self.result_dir+'/model.txt', 'w') as outfile:\n json.dump(model_dict, outfile, indent=4)\n" ]
[ [ "numpy.mean", "torch.nn.MSELoss", "torch.nn.BCELoss" ] ]
XDZhelheim/TrafficDataAnalysis
[ "a73dde10f91fb88af3a7b2edd7a04adaa5ea57f5" ]
[ "Supersegment/temp.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport geopandas as gp\nfrom shapely.geometry import Polygon, MultiLineString, Point\nimport shapely.wkt as wkt\nimport supersegment\n\n# def randrange(n, vmin, vmax):\n# '''\n# Helper function to make an array of random numbers having shape (n, )\n# with each number distributed Uniform(vmin, vmax).\n# '''\n# return (vmax - vmin)*np.random.rand(n) + vmin\n\n# fig = plt.figure()\n# ax = fig.add_subplot(111, projection='3d')\n\n# n = 100\n\n# # For each set of style and range settings, plot n random points in the box\n# # defined by x in [23, 32], y in [0, 100], z in [zlow, zhigh].\n# for m, zlow, zhigh in [('o', -50, -25), ('^', -30, -5)]:\n# xs = randrange(n, 23, 32)\n# ys = randrange(n, 0, 100)\n# zs = randrange(n, zlow, zhigh)\n# ax.scatter(xs, ys, zs, marker=m)\n\n# print(type(zs[0]))\n\n# ax.set_xlabel('X Label')\n# ax.set_ylabel('Y Label')\n# ax.set_zlabel('Z Label')\n\n# plt.show()\n\n\n# df=pd.read_table(\"./TrafficDataAnalysis/boundary.txt\", nrows=10000)\n# df['tti']=[0]*len(df)\n# df['speed']=[0]*len(df)\n# # print(df.loc[df['obj_id']==841])\n\n# # 2018-1-1\n# df2=pd.read_table(\"./TrafficDataAnalysis/city_district.txt\", nrows=2736)\n# # print(df2)\n# for index, row in df2.iterrows():\n# df.loc[df['obj_id']==row['obj_id'], 'tti']=row['tti']\n# df.loc[df['obj_id']==row['obj_id'], 'speed']=row['speed']\n# print(df)\n\n# df1=pd.read_table(\"./TrafficDataAnalysis/res2.txt\", header=None, sep=' ')\n# print(df1[0])\n# x = np.linspace(200, 3500, 2000)\n# plt.plot(x, x, '-r')\n# # plt.plot(x, 0.15*x+750, '-r')\n# plt.scatter(df1[0], df1[1])\n# plt.show()\n\n# df=pd.read_csv(\"./TrafficDataAnalysis/chengdushi_1001_1010.csv\", nrows=1, header=0, names=[\"track\"], usecols=[2])\n# track=[]\n# for temp in df[\"track\"]:\n# temp=temp.lstrip(\"[\").rstrip(\"]\")\n# # print(temp)\n# # temp=temp.replace(\", \", \";\")\n# temp=temp.split(\", \")\n# for i in range(len(temp)):\n# temp[i]=temp[i].split(\" \")\n# for item in temp:\n# item[0]=float(item[0])\n# item[1]=float(item[1])\n# item[2]=int(item[2])\n# track.append(temp)\n# print(track)\n\n# with open(\"./TrafficDataAnalysis/chengdushi_1001_1010.csv\") as f:\n# temp=f.readline()\n# print(temp)\n\ndf=pd.read_table(\"../boundary.txt\", nrows=10000)\ndf['geometry']=df['geometry'].apply(lambda z: wkt.loads(z))\ndf=gp.GeoDataFrame(df)\ndf.crs={'init':'epsg:4326'}\n\nroads=df.loc[(df[\"obj_id\"]==283504) | (df[\"obj_id\"]==283505) | (df[\"obj_id\"]==283506), \"geometry\"].apply(lambda x: x.buffer(distance=0.0001))\n# b=roads.iloc[0].bounds\n# print(b)\n\n# minx, miny, maxx, maxy=roads.total_bounds\n# print(minx, miny, maxx, maxy)\n\n# a=np.array([1, 2, 3, 4, 5, 6])\n# b=np.array([0, 0, 1, 1, 2, 3])\n# c=a[b==2]\n\n# print(c)\n\n# print(type(roads.iloc[0]))\n\nroads=roads.to_crs(\"epsg:2432\")\nprint(roads.length)\n\n# temp=[]\n# tracks=get_tracks(2000)\n# for i in tracks:\n# for j in i:\n# temp.append(Point(j[0], j[1]))\n# show_geom(gp.GeoSeries(temp), \"black\", \"allpoints\")" ]
[ [ "pandas.read_table" ] ]
Priyashbhugra/yolact
[ "ef871057f2768dcb13e6d9636d49402c9862fcd4" ]
[ "layers/output_utils.py" ]
[ "\"\"\" Contains functions used to sanitize and prepare the output of Yolact. \"\"\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport cv2\n\nfrom data import cfg, mask_type, MEANS, STD, activation_func\nfrom utils.augmentations import Resize\nfrom utils import timer\nfrom .box_utils import crop, sanitize_coordinates\n\ndef postprocess(det_output, w, h, batch_idx=0, interpolation_mode='bilinear',\n visualize_lincomb=False, crop_masks=True, score_threshold=0):\n \"\"\"\n Postprocesses the output of Yolact on testing mode into a format that makes sense,\n accounting for all the possible configuration settings.\n\n Args:\n - det_output: The lost of dicts that Detect outputs.\n - w: The real with of the image.\n - h: The real height of the image.\n - batch_idx: If you have multiple images for this batch, the image's index in the batch.\n - interpolation_mode: Can be 'nearest' | 'area' | 'bilinear' (see torch.nn.functional.interpolate)\n\n Returns 4 torch Tensors (in the following order):\n - classes [num_det]: The class idx for each detection.\n - scores [num_det]: The confidence score for each detection.\n - boxes [num_det, 4]: The bounding box for each detection in absolute point form.\n - masks [num_det, h, w]: Full image masks for each detection.\n \"\"\"\n \n dets = det_output[batch_idx]\n net = dets['net']\n dets = dets['detection']\n\n if dets is None:\n return [torch.Tensor()] * 4 # Warning, this is 4 copies of the same thing\n\n if score_threshold > 0:\n keep = dets['score'] > score_threshold\n\n for k in dets:\n if k != 'proto':\n dets[k] = dets[k][keep]\n \n if dets['score'].size(0) == 0:\n return [torch.Tensor()] * 4\n \n # Actually extract everything from dets now\n classes = dets['class']\n boxes = dets['box']\n scores = dets['score']\n masks = dets['mask']\n \n # for car detection and masking\n \n\n if cfg.mask_type == mask_type.lincomb and cfg.eval_mask_branch:\n # At this points masks is only the coefficients\n proto_data = dets['proto']\n \n # Test flag, do not upvote\n if cfg.mask_proto_debug:\n np.save('scripts/proto.npy', proto_data.cpu().numpy())\n \n if visualize_lincomb:\n display_lincomb(proto_data, masks)\n\n masks = proto_data @ masks.t()\n masks = cfg.mask_proto_mask_activation(masks)\n\n # Crop masks before upsampling because you know why\n if crop_masks:\n masks = crop(masks, boxes)\n\n # Permute into the correct output shape [num_dets, proto_h, proto_w]\n masks = masks.permute(2, 0, 1).contiguous()\n\n if cfg.use_maskiou:\n with timer.env('maskiou_net'): \n with torch.no_grad():\n maskiou_p = net.maskiou_net(masks.unsqueeze(1))\n maskiou_p = torch.gather(maskiou_p, dim=1, index=classes.unsqueeze(1)).squeeze(1)\n if cfg.rescore_mask:\n if cfg.rescore_bbox:\n scores = scores * maskiou_p\n else:\n scores = [scores, scores * maskiou_p]\n\n # Scale masks up to the full image\n masks = F.interpolate(masks.unsqueeze(0), (h, w), mode=interpolation_mode, align_corners=False).squeeze(0)\n\n # Binarize the masks\n masks.gt_(0.5)\n\n \n boxes[:, 0], boxes[:, 2] = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, cast=False)\n boxes[:, 1], boxes[:, 3] = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, cast=False)\n boxes = boxes.long()\n\n if cfg.mask_type == mask_type.direct and cfg.eval_mask_branch:\n # Upscale masks\n full_masks = torch.zeros(masks.size(0), h, w)\n\n for jdx in range(masks.size(0)):\n x1, y1, x2, y2 = boxes[jdx, :]\n\n mask_w = x2 - x1\n mask_h = y2 - y1\n\n # Just in case\n if mask_w * mask_h <= 0 or 
mask_w < 0:\n continue\n \n mask = masks[jdx, :].view(1, 1, cfg.mask_size, cfg.mask_size)\n mask = F.interpolate(mask, (mask_h, mask_w), mode=interpolation_mode, align_corners=False)\n mask = mask.gt(0.5).float()\n full_masks[jdx, y1:y2, x1:x2] = mask\n \n masks = full_masks\n\n return classes, scores, boxes, masks\n\n\n \n\n\ndef undo_image_transformation(img, w, h):\n \"\"\"\n Takes a transformed image tensor and returns a numpy ndarray that is untransformed.\n Arguments w and h are the original height and width of the image.\n \"\"\"\n img_numpy = img.permute(1, 2, 0).cpu().numpy()\n img_numpy = img_numpy[:, :, (2, 1, 0)] # To BRG\n\n if cfg.backbone.transform.normalize:\n img_numpy = (img_numpy * np.array(STD) + np.array(MEANS)) / 255.0\n elif cfg.backbone.transform.subtract_means:\n img_numpy = (img_numpy / 255.0 + np.array(MEANS) / 255.0).astype(np.float32)\n \n img_numpy = img_numpy[:, :, (2, 1, 0)] # To RGB\n img_numpy = np.clip(img_numpy, 0, 1)\n\n return cv2.resize(img_numpy, (w,h))\n\n\ndef display_lincomb(proto_data, masks):\n out_masks = torch.matmul(proto_data, masks.t())\n # out_masks = cfg.mask_proto_mask_activation(out_masks)\n\n for kdx in range(1):\n jdx = kdx + 0\n import matplotlib.pyplot as plt\n coeffs = masks[jdx, :].cpu().numpy()\n idx = np.argsort(-np.abs(coeffs))\n # plt.bar(list(range(idx.shape[0])), coeffs[idx])\n # plt.show()\n \n coeffs_sort = coeffs[idx]\n arr_h, arr_w = (4,8)\n proto_h, proto_w, _ = proto_data.size()\n arr_img = np.zeros([proto_h*arr_h, proto_w*arr_w])\n arr_run = np.zeros([proto_h*arr_h, proto_w*arr_w])\n test = torch.sum(proto_data, -1).cpu().numpy()\n\n for y in range(arr_h):\n for x in range(arr_w):\n i = arr_w * y + x\n\n if i == 0:\n running_total = proto_data[:, :, idx[i]].cpu().numpy() * coeffs_sort[i]\n else:\n running_total += proto_data[:, :, idx[i]].cpu().numpy() * coeffs_sort[i]\n\n running_total_nonlin = running_total\n if cfg.mask_proto_mask_activation == activation_func.sigmoid:\n running_total_nonlin = (1/(1+np.exp(-running_total_nonlin)))\n\n arr_img[y*proto_h:(y+1)*proto_h, x*proto_w:(x+1)*proto_w] = (proto_data[:, :, idx[i]] / torch.max(proto_data[:, :, idx[i]])).cpu().numpy() * coeffs_sort[i]\n arr_run[y*proto_h:(y+1)*proto_h, x*proto_w:(x+1)*proto_w] = (running_total_nonlin > 0.5).astype(np.float)\n plt.imshow(arr_img)\n plt.show()\n # plt.imshow(arr_run)\n # plt.show()\n # plt.imshow(test)\n # plt.show()\n plt.imshow(out_masks[:, :, jdx].cpu().numpy())\n plt.show()\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.abs", "torch.Tensor", "numpy.clip", "torch.max", "torch.sum", "torch.no_grad", "torch.nn.functional.interpolate", "numpy.exp", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros" ] ]
bipinupd/beam
[ "fffb85a35df6ae3bdb2934c077856f6b27559aa7" ]
[ "sdks/python/apache_beam/dataframe/expressions.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport contextlib\nimport random\nimport threading\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Iterable\nfrom typing import Optional\nfrom typing import TypeVar\n\nfrom apache_beam.dataframe import partitionings\n\n\nclass Session(object):\n \"\"\"A session represents a mapping of expressions to concrete values.\n\n The bindings typically include required placeholders, but may be any\n intermediate expression as well.\n \"\"\"\n def __init__(self, bindings=None):\n self._bindings = dict(bindings or {})\n\n def evaluate(self, expr): # type: (Expression) -> Any\n if expr not in self._bindings:\n self._bindings[expr] = expr.evaluate_at(self)\n return self._bindings[expr]\n\n def lookup(self, expr): # type: (Expression) -> Any\n return self._bindings[expr]\n\n\nclass PartitioningSession(Session):\n \"\"\"An extension of Session that enforces actual partitioning of inputs.\n\n Each expression is evaluated multiple times for various supported\n partitionings determined by its `requires_partition_by` specification. For\n each tested partitioning, the input is partitioned and the expression is\n evaluated on each partition separately, as if this were actually executed in\n a parallel manner.\n\n For each input partitioning, the results are verified to be partitioned\n appropriately according to the expression's `preserves_partition_by`\n specification.\n\n For testing only.\n \"\"\"\n def evaluate(self, expr):\n import pandas as pd\n import collections\n\n def is_scalar(expr):\n return not isinstance(expr.proxy(), pd.core.generic.NDFrame)\n\n def difficulty(partitioning):\n \"\"\"Imposes an ordering on partitionings where the largest schemes are the\n most likely to reveal an error. This order is different from the one\n defined by is_subpartitioning_of:\n\n Nothing() > Index() > ... 
> Index([i,j]) > Index([j]) > Singleton()\n \"\"\"\n if isinstance(partitioning, partitionings.Singleton):\n return -float('inf')\n elif isinstance(partitioning, partitionings.Index):\n if partitioning._levels is None:\n return 1_000_000\n else:\n return len(partitioning._levels)\n elif isinstance(partitioning, partitionings.Nothing):\n return float('inf')\n\n if expr not in self._bindings:\n if is_scalar(expr) or not expr.args():\n result = super(PartitioningSession, self).evaluate(expr)\n else:\n scaler_args = [arg for arg in expr.args() if is_scalar(arg)]\n\n def evaluate_with(input_partitioning):\n parts = collections.defaultdict(\n lambda: Session({arg: self.evaluate(arg)\n for arg in scaler_args}))\n for arg in expr.args():\n if not is_scalar(arg):\n input = self.evaluate(arg)\n for key, part in input_partitioning.test_partition_fn(input):\n parts[key]._bindings[arg] = part\n if not parts:\n parts[None] # Create at least one entry.\n\n results = []\n for session in parts.values():\n if any(len(session.lookup(arg)) for arg in expr.args()\n if not is_scalar(arg)):\n results.append(session.evaluate(expr))\n\n expected_output_partitioning = expr.preserves_partition_by(\n ) if input_partitioning.is_subpartitioning_of(\n expr.preserves_partition_by()) else input_partitioning\n\n if not expected_output_partitioning.check(results):\n raise AssertionError(\n f\"\"\"Expression does not preserve partitioning!\n Expression: {expr}\n Requires: {expr.requires_partition_by()}\n Preserves: {expr.preserves_partition_by()}\n Input partitioning: {input_partitioning}\n Expected output partitioning: {expected_output_partitioning}\n \"\"\")\n\n if results:\n return pd.concat(results)\n else:\n # Choose any single session.\n return next(iter(parts.values())).evaluate(expr)\n\n # Store random state so it can be re-used for each execution, in case\n # the expression is part of a test that relies on the random seed.\n random_state = random.getstate()\n\n # Run with all supported partitionings in order of ascending\n # \"difficulty\". This way the final result is computed with the\n # most challenging partitioning. 
Avoids heisenbugs where sometimes\n # the result is computed trivially with Singleton partitioning and\n # passes.\n for input_partitioning in sorted(set([expr.requires_partition_by(),\n partitionings.Nothing(),\n partitionings.Index(),\n partitionings.Singleton()]),\n key=difficulty):\n if not input_partitioning.is_subpartitioning_of(\n expr.requires_partition_by()):\n continue\n\n random.setstate(random_state)\n\n result = evaluate_with(input_partitioning)\n\n self._bindings[expr] = result\n return self._bindings[expr]\n\n\n# The return type of an Expression\nT = TypeVar('T')\n\n\nclass Expression(object):\n \"\"\"An expression is an operation bound to a set of arguments.\n\n An expression represents a deferred tree of operations, which can be\n evaluated at a specific bindings of root expressions to values.\n \"\"\"\n def __init__(\n self,\n name, # type: str\n proxy, # type: T\n _id=None # type: Optional[str]\n ):\n self._name = name\n self._proxy = proxy\n # Store for preservation through pickling.\n self._id = _id or '%s_%s_%s' % (name, type(proxy).__name__, id(self))\n\n def proxy(self): # type: () -> T\n return self._proxy\n\n def __hash__(self):\n return hash(self._id)\n\n def __eq__(self, other):\n return self._id == other._id\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self):\n return '%s[%s]' % (self.__class__.__name__, self._id)\n\n def placeholders(self):\n \"\"\"Returns all the placeholders that self depends on.\"\"\"\n raise NotImplementedError(type(self))\n\n def evaluate_at(self, session): # type: (Session) -> T\n \"\"\"Returns the result of self with the bindings given in session.\"\"\"\n raise NotImplementedError(type(self))\n\n def requires_partition_by(self): # type: () -> partitionings.Partitioning\n \"\"\"Returns the partitioning, if any, require to evaluate this expression.\n\n Returns partitioning.Nothing() to require no partitioning is required.\n \"\"\"\n raise NotImplementedError(type(self))\n\n def preserves_partition_by(self): # type: () -> partitionings.Partitioning\n \"\"\"Returns the partitioning, if any, preserved by this expression.\n\n This gives an upper bound on the partitioning of its ouput. The actual\n partitioning of the output may be less strict (e.g. if the input was\n less partitioned).\n \"\"\"\n raise NotImplementedError(type(self))\n\n\nclass PlaceholderExpression(Expression):\n \"\"\"An expression whose value must be explicitly bound in the session.\"\"\"\n def __init__(\n self, # type: PlaceholderExpression\n proxy, # type: T\n reference=None, # type: Any\n ):\n \"\"\"Initialize a placeholder expression.\n\n Args:\n proxy: A proxy object with the type expected to be bound to this\n expression. 
Used for type checking at pipeline construction time.\n \"\"\"\n super(PlaceholderExpression, self).__init__('placeholder', proxy)\n self._reference = reference\n\n def placeholders(self):\n return frozenset([self])\n\n def args(self):\n return ()\n\n def evaluate_at(self, session):\n return session.lookup(self)\n\n def requires_partition_by(self):\n return partitionings.Nothing()\n\n def preserves_partition_by(self):\n return partitionings.Nothing()\n\n\nclass ConstantExpression(Expression):\n \"\"\"An expression whose value is known at pipeline construction time.\"\"\"\n def __init__(\n self, # type: ConstantExpression\n value, # type: T\n proxy=None # type: Optional[T]\n ):\n \"\"\"Initialize a constant expression.\n\n Args:\n value: The constant value to be produced by this expression.\n proxy: (Optional) a proxy object with same type as `value` to use for\n rapid type checking at pipeline construction time. If not provided,\n `value` will be used directly.\n \"\"\"\n if proxy is None:\n proxy = value\n super(ConstantExpression, self).__init__('constant', proxy)\n self._value = value\n\n def placeholders(self):\n return frozenset()\n\n def args(self):\n return ()\n\n def evaluate_at(self, session):\n return self._value\n\n def requires_partition_by(self):\n return partitionings.Nothing()\n\n def preserves_partition_by(self):\n return partitionings.Nothing()\n\n\nclass ComputedExpression(Expression):\n \"\"\"An expression whose value must be computed at pipeline execution time.\"\"\"\n def __init__(\n self, # type: ComputedExpression\n name, # type: str\n func, # type: Callable[...,T]\n args, # type: Iterable[Expression]\n proxy=None, # type: Optional[T]\n _id=None, # type: Optional[str]\n requires_partition_by=partitionings.Index(), # type: partitionings.Partitioning\n preserves_partition_by=partitionings.Nothing(), # type: partitionings.Partitioning\n ):\n \"\"\"Initialize a computed expression.\n\n Args:\n name: The name of this expression.\n func: The function that will be used to compute the value of this\n expression. Should accept arguments of the types returned when\n evaluating the `args` expressions.\n args: The list of expressions that will be used to produce inputs to\n `func`.\n proxy: (Optional) a proxy object with same type as the objects that this\n ComputedExpression will produce at execution time. 
If not provided, a\n proxy will be generated using `func` and the proxies of `args`.\n _id: (Optional) a string to uniquely identify this expression.\n requires_partition_by: The required (common) partitioning of the args.\n preserves_partition_by: The level of partitioning preserved.\n \"\"\"\n if (not _get_allow_non_parallel() and\n requires_partition_by == partitionings.Singleton()):\n raise NonParallelOperation(\n \"Using non-parallel form of %s \"\n \"outside of allow_non_parallel_operations block.\" % name)\n args = tuple(args)\n if proxy is None:\n proxy = func(*(arg.proxy() for arg in args))\n super(ComputedExpression, self).__init__(name, proxy, _id)\n self._func = func\n self._args = args\n self._requires_partition_by = requires_partition_by\n self._preserves_partition_by = preserves_partition_by\n\n def placeholders(self):\n return frozenset.union(\n frozenset(), *[arg.placeholders() for arg in self.args()])\n\n def args(self):\n return self._args\n\n def evaluate_at(self, session):\n return self._func(*(session.evaluate(arg) for arg in self._args))\n\n def requires_partition_by(self):\n return self._requires_partition_by\n\n def preserves_partition_by(self):\n return self._preserves_partition_by\n\n\ndef elementwise_expression(name, func, args):\n return ComputedExpression(\n name,\n func,\n args,\n requires_partition_by=partitionings.Nothing(),\n preserves_partition_by=partitionings.Singleton())\n\n\n_ALLOW_NON_PARALLEL = threading.local()\n_ALLOW_NON_PARALLEL.value = False\n\n\ndef _get_allow_non_parallel():\n return _ALLOW_NON_PARALLEL.value\n\n\[email protected]\ndef allow_non_parallel_operations(allow=True):\n if allow is None:\n yield\n else:\n old_value, _ALLOW_NON_PARALLEL.value = _ALLOW_NON_PARALLEL.value, allow\n yield\n _ALLOW_NON_PARALLEL.value = old_value\n\n\nclass NonParallelOperation(Exception):\n pass\n" ]
[ [ "pandas.concat" ] ]
teguhkhg/3dv_tutorial_py
[ "5e7bc614c5a71cd9d125b1bd8767b0b502ef9241" ]
[ "python/sfm_global.py" ]
[ "import numpy as np\nimport g2o\nimport cv2\nimport glob\n\nfrom bundle_adjustment import MonoBA\n\n# def makeNoisyPoints(Xs, xs, )\n\nclass Frame(object):\n def __init__(self):\n pass\n\nclass Mappoint(object):\n def __init__(self):\n pass\n\nclass Measurement(object):\n def __init__(self):\n pass\n\nclass CovisibilityGraph(object):\n def __init__(self):\n pass\n\ndef main():\n img_resize = 0.25\n f_init = 500\n cx_init = -1\n cy_init = -1\n Z_init = 2\n Z_limit = 100\n ba_loss_width = 9\n min_inlier_num = 200\n ba_inlier_num = 200\n show_match = False\n\n fdetector = cv2.BRISK_create()\n img_keypoint = []\n img_set = []\n img_descriptor = []\n\n files = sorted(glob.glob(\"../bin/data/relief/*.jpg\"))\n for filename in files:\n image = cv2.imread(filename)\n if img_resize != 1:\n width = int(image.shape[1] * img_resize)\n height = int(image.shape[0] * img_resize)\n dim = (width, height)\n image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\n\n keypoint, descriptor = fdetector.detectAndCompute(image, None)\n img_set.append(image)\n img_keypoint.append(keypoint)\n img_descriptor.append(descriptor)\n \n if len(img_set) < 2:\n return\n if cx_init < 0:\n cx_init = int(img_set[0].shape[1]/2)\n if cy_init < 0:\n cy_init = int(img_set[0].shape[0]/2)\n print(cx_init, cy_init)\n\n fmatcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)\n match_pair = []\n match_inlier = []\n for i in range(len(img_set)):\n for j in range(i + 1, len(img_set)):\n matches = fmatcher.match(img_descriptor[i], img_descriptor[j])\n inlier = []\n src = []\n dst = []\n for itr in matches:\n src.append(img_keypoint[i][itr.queryIdx].pt)\n dst.append(img_keypoint[j][itr.trainIdx].pt)\n src = np.asarray(src)\n dst = np.asarray(dst)\n F, inlier_mask = cv2.findFundamentalMat(src, dst, cv2.RANSAC)\n for k in range(len(inlier_mask)):\n if inlier_mask[k]:\n inlier.append(matches[k])\n print(\"3DV Tutorial: Image %d - %d are matched (%d / %d).\\n\"\n % (i, j, len(inlier), len(inlier_mask)))\n\n if len(inlier) < min_inlier_num:\n continue\n print(\"3DV Tutorial: Image %d - %d are selected.\\n\" % (i, j))\n match_pair.append((i, j))\n match_inlier.append(inlier)\n if show_match:\n match_image = cv2.drawMatches(\n img_set[i], img_keypoint[i], img_set[j], img_keypoint[j], matches, None, None, None, inlier_mask)\n cv2.imshow(\"3DV Tutorial: Structure-from-Motion\", match_image)\n cv2.waitKey()\n if len(match_pair) < 1:\n return\n\n ba = MonoBA()\n ba.set_camera(float(f_init), np.array([cx_init, cy_init]).astype(float))\n\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.asarray", "numpy.array" ] ]
diabloxenon/dautils
[ "064307b0fd9bbca2adcc7df5c6a0289954c74d58" ]
[ "dautils/nb.py" ]
[ "\"\"\" IPython/Jupyter notebook widgets and utilities. \"\"\"\nfrom IPython.display import display\nfrom IPython.display import Math\n# from IPython.html import widgets : DEPRECATED OPTION\nimport ipywidgets as widgets\nfrom dautils import collect\nfrom dautils import conf\nfrom dautils import ts\nfrom dautils import log_api\nimport matplotlib as mpl\nfrom matplotlib.colors import rgb2hex\nimport pprint\nfrom matplotlib.colors import ColorConverter\n\n\ndef create_month_widget(month, *args, **kwargs):\n \"\"\" Creates a dropdown wiget with short month names\n as labels.\n\n :param month: The month to select by default.\n\n :returns: The configured month widget.\n \"\"\"\n return widgets.Dropdown(options=ts.short_months(),\n selected_label=month, *args, **kwargs)\n\n\nclass WidgetFactory():\n ''' A factory for IPython widgets part of the \\\n `RcWidget` GUI.\n\n :ivar rc_widget: A `RcWidget` instance.\n '''\n def __init__(self, rcw):\n self.rc_widget = rcw\n\n # TODO allow ‘rgbcmykw’\n # TODO allow standard names like 'aqua'\n def color_chooser(self, property):\n \"\"\" Creates a box with widgets related to choosing a color.\n\n :param property: A color related key in matplotlib.rcParams.\n\n :returns: A box with widgets.\n \"\"\"\n cc = ColorConverter()\n rgb = cc.to_rgb(mpl.rcParams[property])\n logger = log_api.env_logger()\n logger.debug('{0} {1}'.format(property, rgb))\n\n r = widgets.FloatSlider(min=0, max=1, value=rgb[0], description='Red')\n r.border_color = 'red'\n\n g = widgets.FloatSlider(min=0, max=1, value=rgb[1],\n description='Green')\n g.border_color = 'green'\n\n b = widgets.FloatSlider(min=0, max=1, value=rgb[2], description='Blue')\n b.border_color = 'blue'\n\n h = widgets.widget_string.HTML(property)\n # TODO put this in a func\n hex = rgb2hex((rgb[0], rgb[1], rgb[2]))\n h.value = '<p style=\"background-color: {0};\">{0}</p>'.format(hex)\n\n def update(name, value):\n hex = rgb2hex((r.value, g.value, b.value))\n h.value = '<p style=\"background-color: {0};\">{0}</p>'.format(hex)\n self.rc_widget.process(property, hex)\n\n r.on_trait_change(update, 'value')\n g.on_trait_change(update, 'value')\n b.on_trait_change(update, 'value')\n\n box = widgets.VBox(children=(r, g, b, h))\n box.border_style = 'dotted'\n box.description = property\n\n return box\n\n\nclass PageBuilder():\n \"\"\" Creates a page with widgets for the `RcWidget`.\n\n :ivar widgets: A dictionary containing widgets.\n :ivar prefix: The prefix for the widget, for instance 'axes.'.\n :ivar factory: A `WidgetFactory`.\n 'ivar keys: A list of matplotlib properties.\n \"\"\"\n def __init__(self, prefix, factory):\n self.widgets = {}\n self.prefix = prefix\n self.factory = factory\n self.keys = collect.filter_dict_keys(\n lambda x: x.startswith(self.prefix), mpl.rcParams)\n self.add_color_choosers()\n\n def add(self, widget):\n \"\"\" Adds a new widget to the internal dictionary.\n\n :param widget: An IPython HTML widget.\n \"\"\"\n self.widgets[widget.description] = widget\n\n def add_color_choosers(self):\n \"\"\" Adds color choosers for relevant properties starting\n with a prefix such as axes.\n \"\"\"\n logger = log_api.env_logger()\n logger.debug('self.keys {}'.format(self.keys))\n color_keys = collect.filter_list(lambda x: x.endswith('color'),\n self.keys)\n logger.debug('Color keys {}'.format(color_keys))\n\n for key in color_keys:\n self.widgets[key] = self.factory.color_chooser(key)\n\n def build(self):\n \"\"\" Builds an `Accordion` containing widgets.\n\n :returns: The `Accordion` with widgets 
sorted by descriptions.\n \"\"\"\n self.widgets = collect.sort_dict_by_keys(self.widgets)\n box = widgets.Accordion()\n box.children = [self.widgets[k] for k in self.widgets.keys()]\n\n for i, k in enumerate(self.widgets.keys()):\n box.set_title(i, k)\n\n return box\n\n\n# TODO mix DIY widgets with WidgetFactory calls\nclass RcWidget():\n \"\"\" This widget configures the\n `matplotlib.rcParams` global settings.\n\n :ivar context: A `Context` instance.\n :ivar factory: A `WidgetFactory` instance.\n \"\"\"\n def __init__(self, context=None):\n self.context = context\n self.factory = WidgetFactory(self)\n\n if self.context:\n rc = self.context.read_rc()\n\n if rc:\n mpl.rcParams.update(rc)\n\n self.old = mpl.rcParams.copy()\n\n tab = widgets.Tab(children=[self.axes_page(), self.figure_page(),\n self.font_page(), self.grid_page(),\n self.lines_page()])\n tab.set_title(0, 'Axes')\n tab.set_title(1, 'Figure')\n tab.set_title(2, 'Font')\n tab.set_title(3, 'Grid')\n tab.set_title(4, 'Lines')\n display(tab)\n\n self.updates_text = widgets.HTML()\n display(self.updates_text)\n\n self.params_text = widgets.HTML()\n display(self.params_text)\n\n self.show_params = widgets.widget_button.Button()\n self.show_params.description = 'Show rcParams'\n self.show_params.on_click(self.print_params)\n display(self.show_params)\n\n def print_params(self, button_instance):\n \"\"\" Prints the current matplotlib.rcParams in a textarea.\n\n :param button_instance: The button to click on.\n \"\"\"\n html = '<textarea rows=\"5\" cols=\"50\" readonly>{}</textarea>'\n self.params_text.value = html.format(pprint.pformat(mpl.rcParams))\n\n def process(self, param, value):\n \"\"\" Processes changes to the GUI and updates `matplotlib.rcParams`.\n\n :param param: A key in the `matplotlib.rcParams` dictionary.\n :param value: A value in the `matplotlib.rcParams` dictionary.\n \"\"\"\n logger = log_api.env_logger()\n logger.debug('name={0}, value={1}'.format(param, value))\n self.params_text.value = ''\n mpl.rcParams[param] = value\n updates = collect.dict_updates(self.old, mpl.rcParams)\n\n if self.context:\n self.context.update_rc(updates)\n\n self.updates_text.value = ('<p>mpl.RcParams updates {}</p>'.\n format(updates))\n\n def axes_page(self):\n \"\"\" Creates a tab page for the `matplotlib.rcParams`\n keys which start with **axes.**\"\"\"\n linewidth = create_linewidth_slider('axes.linewidth')\n\n titlesize = create_size_slider('axes.titlesize')\n\n def update_axes_linewidth(name, value):\n self.process(linewidth.description, value)\n\n def update_titlesize(name, value):\n self.process(titlesize.description, value)\n\n linewidth.on_trait_change(update_axes_linewidth, 'value')\n titlesize.on_trait_change(update_titlesize, 'value')\n\n page = PageBuilder('axes.', self.factory)\n page.add(linewidth)\n page.add(titlesize)\n\n return page.build()\n\n def font_page(self):\n \"\"\" Creates a tab page for the `matplotlib.rcParams`\n keys which start with **font.**\"\"\"\n size = create_size_slider('font.size')\n\n def update_font_size(name, value):\n self.process(size.description, value)\n\n size.on_trait_change(update_font_size, 'value')\n\n page = PageBuilder('font.', self.factory)\n page.add(size)\n\n return page.build()\n\n def figure_page(self):\n \"\"\" Creates a tab page for the `matplotlib.rcParams`\n keys which start with **figure.**\"\"\"\n figsize = widgets.Box()\n figsize.description = 'figure.figsize'\n figsize_val = mpl.rcParams[figsize.description]\n height = widgets.FloatSlider(min=0, max=16,\n 
value=figsize_val[0],\n description='Height')\n width = widgets.FloatSlider(\n min=0, max=12, value=figsize_val[1],\n description='Width')\n figsize.children = [height, width]\n\n def update_fig_size(name, value):\n self.process(figsize.description, (height.value, width.value))\n\n height.on_trait_change(update_fig_size, 'value')\n width.on_trait_change(update_fig_size, 'value')\n page = PageBuilder('figure.', self.factory)\n page.add(figsize)\n\n return page.build()\n\n def grid_page(self):\n \"\"\" Creates a tab page for the `matplotlib.rcParams`\n keys which start with **grid.**\"\"\"\n logger = log_api.env_logger()\n logger.debug('Created grid page')\n linewidth = create_linewidth_slider('grid.linewidth')\n\n def update_linewidth(name, value):\n self.process(linewidth.description, value)\n\n linewidth.on_trait_change(update_linewidth, 'value')\n\n page = PageBuilder('grid.', self.factory)\n page.add(linewidth)\n\n return page.build()\n\n def lines_page(self):\n \"\"\" Creates a tab page for the `matplotlib.rcParams`\n keys which start with **lines.**\"\"\"\n linewidth = create_linewidth_slider('lines.linewidth')\n\n def update_linewidth(name, value):\n self.process(linewidth.description, value)\n\n linewidth.on_trait_change(update_linewidth, 'value')\n\n page = PageBuilder('lines.', self.factory)\n page.add(linewidth)\n\n return page.build()\n\n\ndef create_linewidth_slider(desc, *args, **kwargs):\n \"\"\" Creates a slider for linewidth-type settings\n in `matplotlib.rcParams`.\n\n :param desc: The description label of the widget.\n\n :returns: The configured slider.\n \"\"\"\n from_rc = mpl.rcParams[desc]\n\n val = 0\n\n # TODO deal with strings\n if not isinstance(from_rc, str):\n val = from_rc\n\n return widgets.IntSlider(min=0, max=9, value=val,\n description=desc, *args, **kwargs)\n\n\ndef create_size_slider(desc, *args, **kwargs):\n \"\"\" Creates a slider for size-type settings\n in `matplotlib.rcParams`.\n\n :param desc: The description label of the widget.\n\n :returns: The configured slider.\n \"\"\"\n from_rc = mpl.rcParams[desc]\n\n val = 0\n\n # TODO deal with strings\n if not isinstance(from_rc, str):\n val = from_rc\n\n return widgets.FloatSlider(min=0, value=val,\n description=desc, *args, **kwargs)\n\n\nclass LatexRenderer():\n \"\"\" Utility class which helps number and render Latex\n in a IPython/Jupyter notebook.\n\n :ivar chapter: Chapter number.\n :ivar curr: Current equation number.\n :ivar numbers: List of used equation numbers.\n :ivar context: A `Context` instance.\n\n .. 
code-block:: python\n\n import dautils as dl\n lr = dl.nb.LatexRenderer(chapter=6, start=6, context=context)\n lr.render(r'Y_j= \\sum _{i=-(m-1)/2}')\n \"\"\"\n def __init__(self, chapter=None, start=1, context=None):\n self.chapter = chapter\n self.curr = start\n self.numbers = []\n self.context = context\n\n if self.context:\n from_context = self.context.read_latex()\n\n if from_context:\n log_api.Printer().print(from_context)\n eqn_list = list(from_context.values())\n assert start not in collect.flatten(eqn_list), from_context\n\n # DIY numbering because IPython doesn't\n # support numbering\n def number_equation(self):\n \"\"\" Creates a Latex string relating\n to the numbering of equations.\n\n :returns: A Latex string with the correct equation number.\n \"\"\"\n number = '('\n\n if self.chapter:\n number += str(self.chapter) + '.'\n\n number += str(self.curr) + ')\\hspace{1cm}'\n\n return number\n\n def render(self, equation):\n \"\"\" Renders an equation.\n\n :param equation: A string containing the equation.\n \"\"\"\n number = self.number_equation()\n self.numbers.append(self.curr)\n logger = log_api.env_logger()\n\n if self.context:\n logger.debug(self.numbers)\n self.context.update_latex(self.numbers)\n\n display(Math(r'%s' % (number + equation)))\n self.curr += 1\n\n\n# TODO store key/id information in sql lite db in CONF_DIR\n# with key mnemonic, creation time etc\nclass Context():\n \"\"\" A mediator for the storing and retrieving\n of configuration settings.\n\n :ivar fname: Name of the context, this should be unique \\\n such as the name of a notebook\n \"\"\"\n def __init__(self, fname):\n self.fname = fname\n self.labels = fname + '.labels'\n self.latex = fname + '.latex'\n\n def read_rc(self):\n \"\"\" Reads the current configuration\n settings related to `matplotlib.rcParams`,\n which are used by `RcWidget`.\n\n :returns: The current configuration settings or \\\n an empty dict.\n \"\"\"\n config = conf.read_rc()\n\n if config:\n config = config.get(self.fname, {})\n\n logger = log_api.env_logger()\n logger.debug('config %s', config)\n\n return config\n\n def update_rc(self, updates):\n \"\"\" Updates the configuration settings related to\n `matplotlib.rcParams` used by `RcWidget`.\n\n :param updates: Changes to the configuration.\n \"\"\"\n conf.update_rc(self.fname, updates)\n\n def read_labels(self):\n \"\"\" Reads the current configuration settings related\n to the `matplotlib.rcParams` used by `LabelWidget`.\n\n :returns: The current configuration settings\\\n or None if no settings are found.\n \"\"\"\n config = conf.read_rc()\n\n if config:\n config = config.get(self.labels, None)\n\n return config\n\n def update_labels(self, updates):\n \"\"\" Updates the configuration settings related to\n `matplotlib.rcParams` used by `LabelWidget`.\n\n :param updates: Changes to the configuration.\n \"\"\"\n conf.update_rc(self.labels, updates)\n\n def read_latex(self):\n \"\"\" Reads the current configuration settings related\n to the `LatexRenderer`.\n\n :returns: The current configuration settings\\\n or None if no settings are found.\n \"\"\"\n config = conf.read_rc()\n\n if config:\n keys = collect.filter_dict_keys(lambda x: x.endswith('.latex'),\n config)\n config = collect.dict_from_keys(config, keys)\n config.pop(self.latex, None)\n\n return config\n\n def update_latex(self, updates):\n \"\"\" Updates the configuration settings related to\n `LatexRenderer`.\n\n :param updates: Changes to the configuration.\n \"\"\"\n conf.update_rc(self.latex, updates)\n\n\nclass 
NullContext(Context):\n \"\"\" A context following the Null Object Pattern\n which does nothing \"\"\"\n def __init__(self):\n pass\n\n def __bool__(self):\n return False\n\n def read_rc(self):\n pass\n\n def update_rc(self, updates):\n pass\n\n def read_labels(self):\n pass\n\n def update_labels(self, updates):\n pass\n\n def read_latex(self):\n pass\n\n def update_latex(self, updates):\n pass\n\n\nclass LabelWidget():\n \"\"\" A widget you can use to easily fill\n in strings for titles, xlabels and ylabels\n of matplotlib subplots.\n\n :ivar context: A `Context` instance.\n :ivar labels: A grid of labels.\n\n .. code-block:: python\n\n import dautils as dl\n dl.nb.LabelWidget(2, 2, context)\n \"\"\"\n def __init__(self, nrows=1, ncols=1, context=NullContext()):\n assert context, 'Define context'\n self.context = context\n self.labels = collect.GridList(nrows, ncols, {})\n self.read_old_labels()\n\n for i in range(nrows):\n children = []\n\n for j in range(ncols):\n labels_box = self.create_mpl_labels_box(i, j,\n self.labels.grid[i][j])\n children.append(labels_box)\n\n display(widgets.HBox(children=children))\n display(widgets.HTML('<br/>'))\n\n def read_old_labels(self):\n \"\"\" Reads the labels from a configuration file. \"\"\"\n old = self.context.read_labels()\n\n if old:\n self.labels.fill(old)\n\n def update(self, name, value, row, col):\n \"\"\" Updates an internal data structure and\n related configuration file.\n\n :param name: title, xlabel, legend or ylabel.\n :param value: A string representing a label. \\\n If needed use curly braces as used by the Python \\\n format() string method.\n :param row: The number of the row.\n :param col: The number of the col.\n \"\"\"\n self.labels.update(row, col, {name: value})\n self.context.update_labels(self.labels.grid)\n\n def create_mpl_labels_box(self, row, col, old):\n \"\"\" Creates a box with the widgets for a single\n subplot (cell).\n\n :param row: The row number of the subplot.\n :param col: The column number of the subplot.\n :param old: The setting for this subplot from a configuration file.\n\n :returns: The box with widgets.\n \"\"\"\n box = widgets.VBox()\n box.border_color = 'red'\n coord = ' [{0}][{1}]'.format(row, col)\n title = widgets.widget_string.Text(old.get('title', ''),\n description='title' + coord)\n xlabel = widgets.widget_string.Text(old.get('xlabel', ''),\n description='xlabel' + coord)\n ylabel = widgets.widget_string.Text(old.get('ylabel', ''),\n description='ylabel' + coord)\n legend = widgets.Dropdown(options=['No Legend', 'loc=best',\n 'loc=upper right',\n 'loc=upper left'],\n selected_label=old.get('legend',\n 'No Legend'))\n box.children = [title, xlabel, ylabel, legend]\n\n def update_title(name, value):\n self.update('title', value, row, col)\n\n title.on_trait_change(update_title, 'value')\n\n def update_xlabel(name, value):\n self.update('xlabel', value, row, col)\n\n xlabel.on_trait_change(update_xlabel, 'value')\n\n def update_ylabel(name, value):\n self.update('ylabel', value, row, col)\n\n ylabel.on_trait_change(update_ylabel, 'value')\n\n def update_legend(name, value):\n self.update('legend', value, row, col)\n\n legend.on_trait_change(update_legend, 'value')\n\n return box\n" ]
[ [ "matplotlib.rcParams.copy", "matplotlib.rcParams.update", "matplotlib.colors.ColorConverter", "matplotlib.colors.rgb2hex" ] ]
luckyzflQ/py4fix
[ "bbf7b41d375e4f7b0344bc9b1e97d7910ad1e6ec" ]
[ "python36/dxa/sn_random_numbers.py" ]
[ "import numpy as np\n\ndef sn_random_numbers(shape, antithetic=True, moment_matching=True,\n fixed_seed=False):\n ''' Returns an array of shape shape with (pseudo)random numbers\n that are standard normally distributed.\n \n Parameters\n ==========\n shape : tuple (o, n, m)\n generation of array with shape (o, n, m)\n antithetic : Boolean\n generation of antithetic variates\n moment_matching : Boolean\n matching of first and second moments\n fixed_seed : Boolean\n flag to fix the seed\n \n Results\n =======\n ran : (o, n, m) array of (pseudo)random numbers\n '''\n if fixed_seed:\n np.random.seed(1000)\n if antithetic:\n ran = np.random.standard_normal((shape[0], shape[1], int(shape[2] / 2)))\n ran = np.concatenate((ran, -ran), axis=2)\n else:\n ran = np.random.standard_normal(shape)\n if moment_matching:\n ran = ran - np.mean(ran)\n ran = ran / np.std(ran)\n if shape[0] == 1:\n return ran[0]\n else:\n return ran" ]
[ [ "numpy.random.seed", "numpy.random.standard_normal", "numpy.concatenate", "numpy.std", "numpy.mean" ] ]
avantikasharma/HackerRank-Solutions
[ "a980859ac352688853fcbcf3c7ec6d95685f99ea" ]
[ "Practice/Python/EyeAndIdentity.py" ]
[ "import numpy\nN,M=map(int,input().split())\nprint(numpy.eye(N,M,k=0))\n" ]
[ [ "numpy.eye" ] ]
dblenkus/resolwe-bio
[ "5077a162f454576dbe1bc41e97923bde49420261" ]
[ "resolwe_bio/tools/samplehcluster.py" ]
[ "#!/usr/bin/env python3\n\"\"\"Hierarchical clustering of samples.\"\"\"\n\nimport argparse\nimport json\n\nimport numpy as np\nimport pandas as pd\nimport resdk\nfrom resolwe_runtime_utils import error, warning\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom scipy.stats import spearmanr, zscore\n\n\ndef parse_args():\n \"\"\"Parse command-line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"Hierarchical clustering of samples\")\n parser.add_argument(\n \"-f\", \"--sample-files\", nargs=\"+\", help=\"Sample files\", required=True\n )\n parser.add_argument(\n \"-i\", \"--sample-ids\", nargs=\"+\", help=\"Sample IDs\", type=int, required=True\n )\n parser.add_argument(\n \"-n\", \"--sample-names\", nargs=\"+\", help=\"Sample names\", required=True\n )\n parser.add_argument(\"-s\", \"--source\", help=\"Source\", required=True)\n parser.add_argument(\"-p\", \"--species\", help=\"Species\", required=True)\n parser.add_argument(\n \"-g\", \"--gene-labels\", nargs=\"+\", default=[], help=\"Subset of gene labels\"\n )\n parser.add_argument(\"-t\", \"--log2\", action=\"store_true\", help=\"Log2 transformation\")\n parser.add_argument(\n \"-z\", \"--z-score\", action=\"store_true\", help=\"Z-score normalization\"\n )\n parser.add_argument(\n \"-r\",\n \"--remove-const\",\n action=\"store_true\",\n help=\"Remove samples with constant expression\",\n )\n parser.add_argument(\n \"-d\", \"--distance-metric\", default=\"euclidean\", help=\"Distance metric\"\n )\n parser.add_argument(\n \"-l\", \"--linkage-method\", default=\"average\", help=\"Linkage method\"\n )\n parser.add_argument(\"-o\", \"--order\", action=\"store_true\", help=\"Optimal ordering\")\n parser.add_argument(\"--output\", help=\"Output JSON filename\")\n return parser.parse_args()\n\n\ndef get_expression(fname, sep=\"\\t\", gene_set=[]):\n \"\"\"Read expressions from file and return only expressions of genes in gene_set.\"\"\"\n df = pd.read_csv(\n filepath_or_buffer=fname,\n sep=sep,\n header=0,\n index_col=0,\n compression=\"gzip\",\n dtype={0: str, 1: float,},\n keep_default_na=False,\n )\n df.index = df.index.map(str)\n if not gene_set:\n return df\n intersection = [gene for gene in gene_set if gene in df.index]\n return df.loc[intersection]\n\n\ndef get_expressions(fnames, sep=\"\\t\", gene_set=[]):\n \"\"\"Read expressions from files.\n\n Return only expressions of genes that are listed in all samples and in gene_set.\n\n \"\"\"\n dfs = [get_expression(fname, sep=sep, gene_set=gene_set) for fname in fnames]\n inner = pd.concat(dfs, axis=1, join=\"inner\")\n outer = pd.concat(dfs, axis=1, join=\"outer\", sort=True)\n if gene_set:\n excluded = sorted(set(gene_set).difference(set(inner.index)))\n else:\n excluded = sorted(outer.index.difference(inner.index))\n return inner, excluded\n\n\ndef transform(expressions, log2=False, const=1.0, z_score=False, ddof=1):\n \"\"\"Compute log2 and normalize expression values.\n\n Parameters:\n - log2: use log2(x+const) transformation\n - const: an additive constant used in computation of log2\n - z_score: use Z-score normalization\n - ddof: degrees of freedom used in computation of Z-score\n\n \"\"\"\n if log2:\n expressions = expressions.applymap(lambda x: np.log2(x + const))\n if expressions.isnull().values.any():\n msg = \"Cannot apply log2 to expression values.\"\n set_error(msg)\n if z_score:\n expressions = expressions.apply(\n lambda x: zscore(x, ddof=ddof), axis=1, result_type=\"broadcast\"\n )\n expressions.fillna(value=0.0, inplace=True)\n return 
expressions\n\n\ndef get_distance_metric(distance_metric):\n \"\"\"Get distance metric.\"\"\"\n if distance_metric == \"spearman\":\n return lambda x, y: 1.0 - spearmanr(x, y).correlation\n elif distance_metric == \"pearson\":\n return \"correlation\"\n return distance_metric\n\n\ndef is_const(values):\n \"\"\"Return True, if all values are approximately equal, otherwise return False.\"\"\"\n mn = np.min(values)\n mx = np.max(values)\n if mn + mx == 0.0:\n return mn == mx\n else:\n return (mx - mn) / abs(mx + mn) < 1.0e-6\n\n\ndef remove_const_samples(expressions):\n \"\"\"Remove samples with constant expression profile across genes.\"\"\"\n matches = expressions.apply(lambda x: not is_const(x), axis=0)\n return expressions.loc[:, matches], matches.values.tolist()\n\n\ndef get_clustering(\n expressions, distance_metric=\"euclidean\", linkage_method=\"average\", order=False\n):\n \"\"\"Compute linkage, order, and produce a dendrogram.\"\"\"\n try:\n link = linkage(\n y=expressions.transpose(),\n method=linkage_method,\n metric=distance_metric,\n optimal_ordering=order,\n )\n except Exception:\n msg = \"Cannot compute linkage.\"\n set_error(msg)\n try:\n dend = dendrogram(link, no_plot=True)\n except Exception:\n msg = \"Cannot compute dendrogram.\"\n set_error(msg)\n return link, dend\n\n\ndef output_json(result=dict(), fname=None):\n \"\"\"Print json if fname=None else write json to file 'fname'.\"\"\"\n if fname:\n with open(fname, \"w\") as f:\n json.dump(result, f)\n else:\n print(json.dumps({\"cluster\": result}, separators=(\",\", \":\")))\n\n\ndef set_error(msg):\n \"\"\"Print error message and raise ValueError.\"\"\"\n print(error(msg))\n raise ValueError(msg)\n\n\ndef get_gene_names(feature_ids, source, species):\n \"\"\"Map feature IDs to gene names.\"\"\"\n res = resdk.Resolwe()\n features = res.feature.filter(\n feature_id__in=feature_ids, source=source, species=species\n )\n return [feature.name for feature in features]\n\n\ndef main():\n \"\"\"Compute sample hierarchical clustering.\"\"\"\n args = parse_args()\n\n if len(args.sample_files) != len(args.sample_ids):\n msg = \"The number of sample files does not match the number of sample IDs.\"\n set_error(msg)\n\n if len(args.sample_files) != len(args.sample_names):\n msg = \"The number of sample files does not match the number of sample names.\"\n set_error(msg)\n\n if len(args.sample_files) < 2:\n msg = (\n \"Select at least two samples to compute hierarchical clustering of samples.\"\n )\n set_error(msg)\n\n if len(args.gene_labels) == 1 and args.distance_metric != \"euclidean\":\n msg = (\n \"Select at least two genes to compute hierarchical clustering of samples with \"\n \"correlation distance metric or use Euclidean distance metric.\"\n )\n set_error(msg)\n\n expressions, excluded = get_expressions(\n fnames=args.sample_files, gene_set=args.gene_labels\n )\n\n if len(expressions.index) == 0:\n if not args.gene_labels:\n msg = \"The selected samples do not have any common genes.\"\n else:\n msg = \"None of the selected genes are present in all samples.\"\n set_error(msg)\n\n if len(expressions.index) == 1 and args.distance_metric != \"euclidean\":\n if not args.gene_labels:\n msg = (\n \"The selected samples contain only one common gene ({}). At least two common \"\n \"genes are required to compute hierarchical clustering of samples with \"\n \"correlation distance metric. 
Select a different set of samples or use Euclidean \"\n \"distance metric.\".format(\n get_gene_names(list(expressions.index), args.source, args.species)[\n 0\n ]\n )\n )\n else:\n msg = (\n \"Only one of the selected genes ({}) is present in all samples but at least two \"\n \"such genes are required to compute hierarchical clustering of samples with \"\n \"correlation distance metric. Select more genes or use Euclidean distance \"\n \"metric.\".format(\n get_gene_names(list(expressions.index), args.source, args.species)[\n 0\n ]\n )\n )\n set_error(msg)\n\n expressions = transform(expressions, log2=args.log2, z_score=args.z_score)\n\n if args.remove_const:\n expressions, matches = remove_const_samples(expressions)\n if len(expressions.columns) == 0:\n msg = (\n \"All of the selected samples have constant expression across genes. Hierarchical \"\n \"clustering of samples cannot be computed.\"\n )\n set_error(msg)\n if len(expressions.columns) == 1:\n sample_name = [id for i, id in enumerate(args.sample_names) if matches[i]][\n 0\n ]\n msg = (\n \"Only one of the selected samples ({}) has a non-constant expression across \"\n \"genes. However, hierarchical clustering of samples cannot be computed with \"\n \"just one sample.\".format(sample_name)\n )\n set_error(msg)\n removed = [name for i, name in enumerate(args.sample_names) if not matches[i]]\n suffix = \"\" if len(removed) <= 3 else \", ...\"\n if removed:\n msg = (\n \"{} of the selected samples ({}) have constant expression across genes. \"\n \"Those samples are excluded from the computation of hierarchical clustering of \"\n \"samples with correlation distance \"\n \"metric.\".format(len(removed), \", \".join(removed[:3]) + suffix)\n )\n print(warning(msg))\n else:\n matches = [True] * len(args.sample_files)\n\n suffix = \"\" if len(excluded) <= 3 else \", ...\"\n if excluded:\n excluded_names = get_gene_names(excluded[:3], args.source, args.species)\n if len(excluded) == 1:\n if not args.gene_labels:\n msg = (\n \"Gene {} is present in some but not all of the selected samples. This \"\n \"gene is excluded from the computation of hierarchical clustering of \"\n \"samples.\".format(len(excluded), \", \".join(excluded_names))\n )\n else:\n msg = (\n \"{} of the selected genes ({}) is missing in at least one of the selected \"\n \"samples. This gene is excluded from the computation of hierarchical \"\n \"clustering of samples.\".format(\n len(excluded), \", \".join(excluded_names)\n )\n )\n print(warning(msg))\n if len(excluded) > 1:\n if not args.gene_labels:\n msg = (\n \"{} genes ({}) are present in some but not all of the selected samples. Those \"\n \"genes are excluded from the computation of hierarchical clustering of \"\n \"samples.\".format(len(excluded), \", \".join(excluded_names))\n )\n else:\n msg = (\n \"{} of the selected genes ({}) are missing in at least one of the selected \"\n \"samples. 
Those genes are excluded from the computation of hierarchical \"\n \"clustering of samples.\".format(\n len(excluded), \", \".join(excluded_names)\n )\n )\n print(warning(msg))\n\n linkage, dendrogram = get_clustering(\n expressions,\n distance_metric=get_distance_metric(args.distance_metric),\n linkage_method=args.linkage_method,\n order=args.order,\n )\n\n sample_ids = [\n sample_id for i, sample_id in enumerate(args.sample_ids) if matches[i]\n ]\n result = {\n \"sample_ids\": {i: {\"id\": sample_id} for i, sample_id in enumerate(sample_ids)},\n \"linkage\": linkage.tolist(),\n \"order\": dendrogram[\"leaves\"],\n }\n output_json(result, args.output)\n\n\nmain()\n" ]
[ [ "pandas.concat", "pandas.read_csv", "numpy.log2", "numpy.min", "scipy.stats.zscore", "numpy.max", "scipy.cluster.hierarchy.linkage.tolist", "scipy.cluster.hierarchy.dendrogram", "scipy.stats.spearmanr" ] ]
starcroce/tf_dl_cookbook
[ "65c0cb9c9df230e551df5f04c5e2345dcbe53552" ]
[ "house_price_mlp.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pd\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\n\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\nboston = datasets.load_boston()\ndf = pd.DataFrame(boston.data, columns=boston.feature_names)\ndf[\"target\"] = boston.target\n\nX_train, X_test, y_train, y_test = train_test_split(\n df[[\"RM\", \"LSTAT\", \"PTRATIO\"]], df[[\"target\"]], test_size=0.3, random_state=0\n)\nX_train = MinMaxScaler().fit_transform(X_train)\nX_test = MinMaxScaler().fit_transform(X_test)\ny_train = MinMaxScaler().fit_transform(y_train)\ny_test = MinMaxScaler().fit_transform(y_test)\n\nm = len(X_train)\nn = 3 # number of features\nn_hidden = 20 # number of hidden neurons\nbatch_size = 200\neta = 0.01\nmax_epoch = 1000\n\n\ndef multilayer_perceptron(x):\n fcl = layers.fully_connected(x, n_hidden, activation_fn=tf.nn.relu, scope=\"fcl\")\n out = layers.fully_connected(fcl, 1, activation_fn=tf.nn.sigmoid, scope=\"out\")\n return out\n\n\n# build model, loss and train op\nx = tf.placeholder(tf.float32, name=\"X\", shape=[m, n])\ny = tf.placeholder(tf.float32, name=\"Y\")\ny_hat = multilayer_perceptron(x)\ncorrect_prediction = tf.square(y - y_hat)\nmse = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\ntrain = tf.train.AdamOptimizer(learning_rate=eta).minimize(mse)\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n writer = tf.summary.FileWriter(\"graphs\", sess.graph)\n for i in range(max_epoch):\n _, l, p = sess.run([train, mse, y_hat], feed_dict={x: X_train, y: y_train})\n if i % 100 == 0:\n print(f\"Epoch {i}: Loss {l}\")\n\n print(\"Training Done!\")\n correct_prediction = tf.square(y - y_hat)\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"Mean square error:\", accuracy.eval({x: X_train, y: y_train}))\n plt.scatter(y_train, p)\n plt.show()\n writer.close()\n" ]
[ [ "tensorflow.summary.FileWriter", "matplotlib.pyplot.scatter", "tensorflow.cast", "tensorflow.placeholder", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "tensorflow.contrib.layers.fully_connected", "tensorflow.global_variables_initializer", "tensorflow.square", "sklearn.datasets.load_boston", "tensorflow.Session", "tensorflow.train.AdamOptimizer", "matplotlib.pyplot.show", "sklearn.preprocessing.MinMaxScaler" ] ]
ncassereau-idris/stylebank
[ "2884d5eb8175622a03684ee621fd44736a431e82" ]
[ "stylebank/datasets.py" ]
[ "# /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport logging\nfrom hydra.utils import to_absolute_path\nimport torch\nimport torch.distributed as dist\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nimport torchvision.transforms.functional as TF\nfrom torchvision.io import read_image\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport numpy as np\nimport glob\nimport os\nfrom . import tools\nfrom .plasma import PlasmaStorage\n\n\nlog = logging.getLogger(__name__)\n\n\nclass PhotoDataset(Dataset):\n\n def __init__(\n self, path, transform, quantity=-1,\n store_transformed=False, preload=False\n ):\n assert store_transformed or not preload\n self.store_transformed = store_transformed\n self.filenames = glob.glob(\n to_absolute_path(os.path.join(path, \"*.jpg\"))\n )\n self.filenames.sort()\n if 0 < quantity <= len(self.filenames):\n self.filenames = self.filenames[:quantity]\n\n self.transform = transform\n\n if preload:\n log.info(f\"Preloading data ({len(self.filenames)} files)\")\n self.files = self.preload()\n log.info(f\"{len(self.filenames)} files have been preloaded!\")\n else:\n self.files = PlasmaStorage(autocuda=True)\n\n def preload(self):\n files = PlasmaStorage(autocuda=True)\n for i, filename in enumerate(self.filenames):\n if (i - tools.rank) % tools.size == 0:\n files[i] = self.load_image(filename)\n dist.barrier()\n\n # pooling across all tasks\n return files.merge()\n\n def load_image(self, filename):\n image = read_image(filename)\n image = TF.to_pil_image(image)\n return self.transform(image).cuda()\n\n def get_image_from_filename(self, filename):\n return self.get_image_from_idx(self.filenames.index(filename))\n\n def get_image_from_idx(self, idx):\n img = self.files[idx]\n if img is None:\n img = self.load_image(self.filenames[idx])\n if self.store_transformed:\n self.files[idx] = img\n return img\n\n def __len__(self):\n return len(self.filenames)\n\n def get_image(self, fileId):\n if isinstance(fileId, int): # that's an index\n return self.get_image_from_idx(fileId)\n elif isinstance(fileId, str): # that's a filename\n return self.get_image_from_filename(fileId)\n\n def get_names(self, indices):\n return [\n os.path.splitext(\n os.path.basename(self.filenames[idx])\n )[0]\n for idx in indices\n ]\n\n def __getitem__(self, idx):\n if isinstance(idx, int):\n return idx, self.get_image(idx)\n return idx, torch.stack([self.get_image(i) for i in idx])\n\n\nclass TrainingDataset(Dataset):\n\n def __init__(self, cfg, content_dataset, style_dataset):\n self.cfg = cfg\n self.content_dataset = content_dataset\n self.style_dataset = style_dataset\n\n def __len__(self):\n return self.cfg.training.repeat * len(self.style_dataset)\n\n def __getitem__(self, idx):\n return (\n self.content_dataset[np.random.randint(len(self.content_dataset))],\n self.style_dataset[idx % len(self.style_dataset)]\n )\n\n\nclass Resize(object):\n \"\"\"\n Resize with aspect ratio preserved.\n \"\"\"\n def __init__(self, size):\n self.size = size\n\n def __call__(self, img):\n m = min(img.size)\n new_size = (\n int(img.size[0] / m * self.size),\n int(img.size[1] / m * self.size)\n )\n return img.resize(new_size, resample=Image.BILINEAR)\n\n\nclass DataManager:\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.transform = transforms.Compose([\n Resize(513),\n transforms.CenterCrop([513, 513]),\n transforms.ToTensor(),\n ])\n self.load_datasets()\n if self.cfg.training.train:\n 
self.make_training_dataloader()\n\n def load_datasets(self):\n log.info(\"Loading real pictures dataset\")\n self.content_dataset = PhotoDataset(\n path=self.cfg.data.photo,\n transform=self.transform,\n store_transformed=self.cfg.data.store_transformed,\n preload=self.cfg.data.preload_transformed\n )\n\n log.info(\n f\"Real pictures dataset has {len(self.content_dataset)} samples\"\n )\n\n log.info(\"Loading monet paintings dataset\")\n self.style_dataset = PhotoDataset(\n path=self.cfg.data.monet,\n transform=self.transform,\n quantity=self.cfg.data.style_quantity,\n store_transformed=self.cfg.data.store_transformed,\n preload=self.cfg.data.preload_transformed\n )\n log.info(f\"Paintings dataset has {len(self.style_dataset)} samples\")\n\n def _distributed_sampler(self, dataset, **kwargs):\n sampler = DistributedSampler(\n dataset,\n num_replicas=tools.size,\n rank=tools.rank,\n shuffle=False,\n **kwargs\n )\n return sampler\n\n def _dataloader(self, dataset, sampler=None, **kwargs):\n if sampler is None:\n sampler = self._distributed_sampler(dataset)\n dataloader = DataLoader(\n dataset,\n batch_size=self.cfg.training.batch_size,\n sampler=sampler,\n **kwargs\n )\n return dataloader\n\n def make_training_dataloader(self):\n training_dataset = TrainingDataset(\n self.cfg, self.content_dataset, self.style_dataset\n )\n return self._dataloader(training_dataset)\n\n def make_preload_dataloaders(self):\n content_dataloader = self._dataloader(self.content_dataset)\n style_dataloader = self._dataloader(self.style_dataset)\n return content_dataloader, style_dataloader\n\n def cycle(self, iterable):\n # This version of cycle shuffles the dataset between\n # each epoch unlike itertools' version\n while True:\n for x in iterable:\n yield x\n\n def make_generation_dataloader(self):\n combined_dataset = TrainingDataset(\n self.cfg, self.content_dataset, self.style_dataset\n )\n dataloader = self._dataloader(combined_dataset)\n return self.cycle(dataloader)\n" ]
[ [ "torch.utils.data.DataLoader", "torch.utils.data.distributed.DistributedSampler", "torch.distributed.barrier" ] ]
caj380/lifx-lan-gui
[ "610f1cea7c915dd6d9c2d5108a1c5a19309527f2" ]
[ "lights.py" ]
[ "#!/usr/bin/env python3\n\nimport sys\nif sys.version_info < (3, 3):\n sys.stdout.write(\"Sorry, This module requires Python 3.3 (or higher), not Python 2.x. You are using Python {0}.{1}\\n\".format(sys.version_info[0],sys.version_info[1]))\n sys.exit(1)\n \nfrom appJar import gui\nimport os\nimport time\nimport binascii\nimport lifxlan\nimport colorsys\nfrom colour import Color\nimport math\nimport sys\nfrom time import sleep\nfrom lifxlan import BLUE, CYAN, GREEN, ORANGE, PINK, PURPLE, RED, YELLOW\nfrom configobj import ConfigObj\nimport pickle as pkl\nfrom random import randint\nfrom platform import system\nfrom PIL import Image\nimport appJar as aJ\nimport numpy as np\nimport cv2\nfrom scipy.stats import itemfreq\nfrom mss import mss\n\nmyos = system()\nif (myos == 'Windows') or (myos == 'Darwin'):\n from PIL import ImageGrab\nelif (myos == 'Linux'):\n import pyscreenshot as ImageGrab\n\nif (myos == 'Windows'):\n mygreen = 'lime'\nelif (myos == 'Darwin') or (myos == 'Linux') :\n mygreen = 'green'\n\ndef resource_path(relative_path):\n if (myos == 'Windows'):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n \n return os.path.join(base_path, relative_path)\n\n elif (myos == 'Darwin') or (myos == 'Linux') :\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)\n\nDECIMATE = 1 # skip every DECIMATE number of pixels to speed up calculation\nTRANSIENT_TIP = \"If selected, return to the original color after the specified number of cycles. If not selected, set light to specified color\"\nPERIOD_TIP = \"Period is the length of one cycle in milliseconds\"\nCYCLES_TIP = \"Cycles is the number of times to repeat the waveform\"\nDUTY_CYCLE_TIP = \"Duty Cycle is an integer between -32768 and 32767. Its effect is most obvious with the Pulse waveform. Set Duty Cycle to 0 to spend an equal amount of time on the original color and the new color. Set Duty Cycle to positive to spend more time on the original color. Set Duty Cycle to negative to spend more time on the new color\"\nEXPECTED_TIP = \"Select 0 to find all available bulbs. 
Select any number to look for exactly that number of bulbs\"\nTRANSITION_TIME_TIP = \"The time (in ms) that a color transition takes\"\nFOLLOW_DESKTOP_TIP = \"Make your bulbs' color match your desktop\"\nDESKTOP_MODE_TIP = \"Select between following the whole desktop screen or just a small portion of it (useful for letterbox movies)\"\nEXPECTED_BULBS = 0\nTRANSITION_TIME_DEFAULT = 400\nCONFIG = resource_path(\"lights.ini\")\nPICKLE = resource_path(\"lifxList.pkl\")\nSCENE1_C = resource_path(\"scene1_c.pkl\")\nSCENE1_P = resource_path(\"scene1_p.pkl\")\nSCENE2_C = resource_path(\"scene2_c.pkl\")\nSCENE2_P = resource_path(\"scene2_p.pkl\")\nSCENE3_C = resource_path(\"scene3_c.pkl\")\nSCENE3_P = resource_path(\"scene3_p.pkl\")\nCYCLES = \"Cycles\"\nTRANSITION_TIME = \"Transition Time(ms)\"\nFOLLOW_DESKTOP = \"Follow Desktop\"\nDESKTOP_MODE = \"Desktop Mode\"\nREGION_COLOR = \"regioncolor\"\nMAX_SATURATION = \"Max Saturation\"\nMAX_BRIGHTNESS = \"Max Brightness\"\n\nalreadyDone = False\nconfig = {}\nbulbs = []\nselected_bulb = 0\ndetails = str(0)\ngSelectAll = False\nlan = 0\ngExpectedBulbs = EXPECTED_BULBS\nlifxList = []\nlifxDict = {}\ngwaveformcolor = \"#FF0000\"\nis_follow = False\ntest_string = \"\"\"\n\n\"\"\"\noriginal_colors1 = {}\noriginal_powers1 = {}\noriginal_colors2 = {}\noriginal_powers2 = {}\noriginal_colors3 = {}\noriginal_powers3 = {}\nr = None\nselectedMode = \"Whole Screen\"\nmaxSaturation = False\nmaxBrightness = False\n\nclass App(aJ.gui):\n def __init__(self, *args, **kwargs):\n aJ.gui.__init__(self, *args, **kwargs)\n\n def winfo_screenheight(self):\n # shortcut to height\n # alternatively return self.topLevel.winfo_screenheight() since topLevel is Tk (root) instance!\n return self.appWindow.winfo_screenheight()\n\n def winfo_screenwidth(self):\n # shortcut to width\n # alternatively return self.topLevel.winfo_screenwidth() since topLevel is Tk (root) instance!\n return self.appWindow.winfo_screenwidth()\n\n\n\ndef SceneNameChanged(name):\n #print(name, \"Entry changed\")\n config[name] = app.getEntry(name)\n config.write()\n\n\n\ndef Scene(name):\n global original_colors1\n global original_powers1\n global original_colors2\n global original_powers2\n global original_colors3\n global original_powers3\n global lan\n global config\n\n print(name, \"button pressed\")\n if len(bulbs) < 1:\n app.errorBox(\"Error\", \"Error. No bulbs were found yet. 
Please click the 'Find Bulbs' button and try again.\")\n return\n try:\n\n if name == 'Save Scene 1':\n print(\"Saving Scene 1\")\n original_colors1 = lan.get_color_all_lights()\n original_powers1 = lan.get_power_all_lights()\n #print(\"colors:\",original_colors)\n #print(type(original_colors1))\n pkl.dump(original_colors1, open(SCENE1_C, \"wb\" ))\n pkl.dump(original_powers1, open(SCENE1_P, \"wb\" ))\n\n\n elif name == 'Restore Scene 1':\n print(\"Restoring Scene 1\")\n if (os.path.exists(SCENE1_C) and os.path.exists(SCENE1_P) ):\n original_colors1 = pkl.load(open(SCENE1_C, \"rb\"))\n original_powers1 = pkl.load(open(SCENE1_P, \"rb\"))\n\n if ( (len(original_colors1) == 0) or (len(original_powers1) == 0) ):\n print(\"Nothing saved yet.\")\n return\n\n print(\"Restoring original color to all lights...\")\n #print(\"colors:\",original_colors)\n for light in original_colors1:\n light.set_color(original_colors1[light])\n\n sleep(1)\n\n print(\"Restoring original power to all lights...\")\n for light in original_powers1:\n light.set_power(original_powers1[light])\n elif name == 'Save Scene 2':\n print(\"Saving Scene 2\")\n original_colors2 = lan.get_color_all_lights()\n original_powers2 = lan.get_power_all_lights()\n #print(\"colors:\",original_colors)\n pkl.dump(original_colors2, open(SCENE2_C, \"wb\" ))\n pkl.dump(original_powers2, open(SCENE2_P, \"wb\" ))\n\n\n elif name == 'Restore Scene 2':\n print(\"Restoring Scene 2\")\n if (os.path.exists(SCENE2_C) and os.path.exists(SCENE2_P) ):\n original_colors2 = pkl.load(open(SCENE2_C, \"rb\"))\n original_powers2 = pkl.load(open(SCENE2_P, \"rb\"))\n\n if ( (len(original_colors2) == 0) or (len(original_powers2) == 0) ):\n print(\"Nothing saved yet.\")\n return\n\n print(\"Restoring original color to all lights...\")\n #print(\"colors:\",original_colors)\n for light in original_colors2:\n light.set_color(original_colors2[light])\n\n sleep(1)\n\n print(\"Restoring original power to all lights...\")\n for light in original_powers2:\n light.set_power(original_powers2[light])\n elif name == 'Save Scene 3':\n print(\"Saving Scene 3\")\n original_colors3 = lan.get_color_all_lights()\n original_powers3 = lan.get_power_all_lights()\n #print(\"colors:\",original_colors)\n pkl.dump(original_colors3, open(SCENE3_C, \"wb\" ))\n pkl.dump(original_powers3, open(SCENE3_P, \"wb\" ))\n\n elif name == 'Restore Scene 3':\n print(\"Restoring Scene 3\")\n if (os.path.exists(SCENE3_C) and os.path.exists(SCENE3_P) ):\n original_colors3 = pkl.load(open(SCENE3_C, \"rb\"))\n original_powers3 = pkl.load(open(SCENE3_P, \"rb\"))\n\n if ( (len(original_colors3) == 0) or (len(original_powers3) == 0) ):\n print(\"Nothing saved yet.\")\n return\n\n print(\"Restoring original color to all lights...\")\n #print(\"colors:\",original_colors)\n for light in original_colors3:\n light.set_color(original_colors3[light])\n\n sleep(1)\n\n print(\"Restoring original power to all lights...\")\n for light in original_powers3:\n light.set_power(original_powers3[light])\n except Exception as e:\n print (\"Ignoring error: \", str(e))\n app.errorBox(\"Error\", str(e) + \"\\n\\n Scene Operation failed. This feature is buggy and only works about 50% of the time. Sometimes, you can still save and restore a scene despite this error. 
If you keep getting this error and can not perform a 'Restore', try restarting the app then try again.\")\n return\n\n\n\ndef updateSliders(hsbk):\n #print(\"h:\",hsbk[0])\n #print(\"s:\",hsbk[1])\n #print(\"b:\",hsbk[2])\n #print(\"k:\",hsbk[3])\n\n app.setSpinBox(\"hueSpin\", int(hsbk[0]), callFunction=False)\n app.setSpinBox(\"satSpin\", int(hsbk[1]), callFunction=False)\n app.setSpinBox(\"briSpin\", int(hsbk[2]), callFunction=False)\n app.setSpinBox(\"kelSpin\", int(hsbk[3]), callFunction=False)\n app.setScale(\"hueScale\", int(hsbk[0]), callFunction=False)\n app.setScale(\"satScale\", int(hsbk[1]), callFunction=False)\n app.setScale(\"briScale\", int(hsbk[2]), callFunction=False)\n app.setScale(\"kelScale\", int(hsbk[3]), callFunction=False)\n\ndef RGBtoHSBK (RGB, temperature = 3500):\n cmax = max(RGB)\n cmin = min(RGB)\n cdel = cmax - cmin\n\n brightness = int((cmax/255) * 65535)\n\n if cdel != 0:\n saturation = int(((cdel) / cmax) * 65535)\n\n redc = (cmax - RGB[0]) / (cdel)\n greenc = (cmax - RGB[1]) / (cdel)\n bluec = (cmax - RGB[2]) / (cdel)\n\n if RGB[0] == cmax:\n hue = bluec - greenc\n else:\n if RGB[1] == cmax:\n hue = 2 + redc - bluec\n else:\n hue = 4 + greenc - redc\n\n hue = hue / 6\n if hue < 0:\n hue = hue + 1\n\n hue = int(hue*65535)\n else:\n saturation = 0\n hue = 0\n\n return (hue, saturation, brightness, temperature)\n\n\n\n# function to convert the scale values to an RGB hex code\ndef getHSB():\n H = app.getScale(\"hueScale\")\n S = app.getScale(\"satScale\")\n B = app.getScale(\"briScale\")\n K = app.getScale(\"kelScale\")\n\n #RGB = \"#\"+str(R)+str(G)+str(B)\n\n return {'H':H, 'S':S,'B':B, 'K':K }\n\n\n# funciton to update widgets\ndef updateHSB(name):\n # this stops the changes in slider/spin from constantly calling each other\n #print (\"name:\",name)\n global alreadyDone\n if alreadyDone:\n alreadyDone = False\n return\n else:\n alreadyDone = True\n\n # split the widget's name into the type & colour\n colour = name[0:3]\n widg = name[3:]\n\n # get the current RGB value\n HSB = getHSB()\n #print(\"HSB:\",HSB,\"type(HSB)\",type(HSB))\n #print(\"H\",HSB[\"H\"])\n #print(\"S\",HSB[\"S\"])\n #print(\"B\",HSB[\"B\"])\n\n # depending on the type, get & set...\n if widg == \"Scale\":\n value = app.getScale(name)\n app.setSpinBox(colour + \"Spin\", value)\n elif widg == \"Spin\":\n value = app.getSpinBox(name)\n app.setScale(colour + \"Scale\", value)\n\n # update the label\n h = HSB[\"H\"] / 65535.0;#print(\"h:\",h)\n s = HSB[\"S\"] / 65535.0;#print(\"s:\",s)\n v = HSB[\"B\"] / 65535.0;#print(\"v:\",v)\n k = HSB[\"K\"];#print(\"v:\",v)\n\n rgb1 = hsv_to_rgb(h, s, v);#print(\"rgb1:\",rgb1)\n c = Color(rgb=(rgb1[0], rgb1[1], rgb1[2]))\n #print(\"c:\",c)\n app.setLabelBg(\"bulbcolor\", c.hex_l)\n\n global selected_bulb\n bulbHSBK = [HSB[\"H\"],HSB[\"S\"],HSB[\"B\"],k]\n #print (\"bulbHSBK:\",bulbHSBK)\n\n if gSelectAll:\n lan.set_color_all_lights(bulbHSBK, duration=0, rapid=False)\n\n elif selected_bulb:\n #print(\"sending color\",hsv)\n selected_bulb.set_color(bulbHSBK, duration=0, rapid=False)\n\n #app.setEntry(\"colCode\", RGB)\n\n\ndef selectAllPressed (name):\n global bulbs\n if len(bulbs) < 1:\n app.errorBox(\"Error\", \"Error. No bulbs were found yet. 
Please click the 'Find Bulbs' button and try again.\")\n app.setCheckBox(\"Select All\", ticked=False, callFunction=False)\n return\n\n global gSelectAll\n gSelectAll = app.getCheckBox(\"Select All\")\n #print(\"gSelectAll:\",gSelectAll)\n\ndef expectedPressed (name):\n global gExpectedBulbs\n global config\n gExpectedBulbs = int(app.getSpinBox(\"Expected Bulbs\"))\n config['expectedbulbs'] = gExpectedBulbs\n config.write()\n #print(\"gExpectedBulbs:\",gExpectedBulbs)\n\n\ndef rgb_to_hsv(r, g, b):\n r = float(r)\n g = float(g)\n b = float(b)\n high = max(r, g, b)\n low = min(r, g, b)\n h, s, v = high, high, high\n\n d = high - low\n s = 0 if high == 0 else d / high\n\n if high == low:\n h = 0.0\n else:\n h = {\n r: (g - b) / d + (6 if g < b else 0),\n g: (b - r) / d + 2,\n b: (r - g) / d + 4,\n }[high]\n h /= 6\n\n return h, s, v\n\n\ndef hsv_to_rgb(h, s, v):\n i = math.floor(h * 6)\n f = h * 6 - i\n p = v * (1 - s)\n q = v * (1 - f * s)\n t = v * (1 - (1 - f) * s)\n\n r, g, b = [\n (v, t, p),\n (q, v, p),\n (p, v, t),\n (p, q, v),\n (t, p, v),\n (v, p, q),\n ][int(i % 6)]\n\n return r, g, b\n\ndef modeChanged():\n global selectedMode\n selectedMode = (app.getOptionBox(\"Desktop Mode\"))#;print(\"selectedMode: \",selectedMode)\n\ndef listChanged():\n app.clearTextArea(\"Result\"); # TODO. Put this in another thread\n app.setTextArea(\"Result\", \"Loading bulb details\") # TODO. Put this in another thread\n selected = (app.getOptionBox(\"LIFX Bulbs\"))#;print(\"selected: \",selected)\n global bulbs\n global selected_bulb\n global details\n try:\n for bulb in bulbs:\n if (bulb.label == selected):\n #print(\"Found selected bulb\")\n selected_bulb = bulb\n details = str(selected_bulb)\n #print(\"type(bulb)\",type(bulb))\n #print(bulb)\n #print(\"breaking\")\n break\n except Exception as e:\n print (\"Ignoring error: \", str(e))\n app.errorBox(\"Error\", str(e))\n app.clearTextArea(\"Result\");\n app.setTextArea(\"Result\", str(e))\n\n return\n\n\n app.clearTextArea(\"Result\")\n app.setTextArea(\"Result\", details)\n\n try:\n if \"Power: On\" in details:\n #print (\"BULB is ON\")\n app.setButtonImage(\"Light\", resource_path(\"bulb_on.gif\"))\n elif \"Power: Off\" in details:\n #print (\"BULB is OFF \")\n app.setButtonImage(\"Light\", resource_path(\"bulb_off.gif\"))\n except Exception as e:\n print (\"Ignoring error:\", str(e))\n\n app.setButton ( \"Light\", \"Toggle \" + selected )\n app.showButton(\"Light\")\n color = bulb.get_color();#print(color[0],color[1],color[2]);\n h = color[0] / 65535.0;#print(\"h:\",h)\n s = color[1] / 65535.0;#print(\"s:\",s)\n v = color[2] / 65535.0;#print(\"v:\",v)\n\n rgb1 = hsv_to_rgb(h, s, v);#print(\"rgb1:\",rgb1)\n c = Color(rgb=(rgb1[0], rgb1[1], rgb1[2]))\n #print(\"c:\",c)\n app.setLabelBg(\"bulbcolor\", c.hex_l)\n updateSliders(color)\n\n\ndef finder():\n global bulbList\n global lan\n global gExpectedBulbs\n global config\n global lifxList\n global lifxDict\n global config\n bulbList.clear()\n bulbList.append(\"-Select Bulb-\")\n try:\n global bulbs\n #print(\"finder().gExpectedBulbs:\",gExpectedBulbs)\n lan = lifxlan.LifxLAN(int(gExpectedBulbs) if int(gExpectedBulbs) != 0 else None)\n bulbs = lan.get_lights()\n #print(type(bulbs))\n #print(bulbs[0].label)\n if len(bulbs) < 1:\n app.errorBox(\"Error\", \"No bulbs found. Please try again. 
If you switched WiFi networks, please re-start the app and try again.\")\n app.setLabelBg(\"lbl2\", \"red\")\n app.setLabel(\"lbl2\", \"Found 0 bulbs\")\n return\n else:\n app.setLabelBg(\"lbl2\", mygreen)\n app.hideLabel(\"f1\")\n\n app.setLabel(\"lbl2\", \"Found \" + str(len(bulbs)) + \" bulbs\")\n app.setCheckBox(\"Select All\")\n #app.setSpinBox(\"Expected Bulbs\", str(len(bulbs)))\n del lifxList[:]\n for bulb in bulbs:\n #print(\".get_label()\",bulb.get_label()) # this gets the actual label\n #print(\".label:\",bulb.label) # this returns None\n label = bulb.get_label()\n ip = bulb.ip_addr\n mac = bulb.mac_addr\n #print (label,ip,mac)\n lifxDict['label'] = label\n lifxDict['mac'] = mac\n lifxDict['ip'] = ip\n lifxList.append(lifxDict.copy())\n bulbList.append(label)\n app.changeOptionBox(\"LIFX Bulbs\", bulbList, callFunction=False)\n app.showButton ( \"Pick Color\" )\n #print(lifxList)\n #config['bulbs'] = lifxList\n pkl.dump(lifxList, open(PICKLE, \"wb\" )) #this pickles\n#exit(0)\n #config.write()\n\n\n except Exception as e:\n print (\"Ignoring error:\", str(e))\n app.setLabelBg(\"lbl2\", \"gray\")\n app.setLabel(\"lbl2\", \"Found 0 bulbs\")\n app.errorBox(\"Error\", str(e) + \"\\n\\nPlease try again. If you keep getting this error, check/toggle your WiFi, ensure that 'Expected Bulbs' is either 0 or the number of bulbs you have and finally, try restarting the app\")\n\n# config['bulbs'] = bulbs\n# config.write()\n print (\"finder() Ended\")\n\ndef press(name):\n global bulbs\n global details\n global gSelectAll\n global lan\n global gwaveformcolor\n global selected_bulb\n\n #print(name, \"button pressed\")\n\n if (name == \"Find Bulbs\"):\n finder()\n elif (name == \"All Off\"):\n if len(bulbs) < 1:\n return\n lan.set_power_all_lights(False, rapid=True)\n elif (name == \"All Random\"):\n if len(bulbs) < 1:\n return\n selected = (app.getOptionBox(\"LIFX Bulbs\"))\n for bulb in bulbs:\n hue = (randint(0, 65535))\n sat = (randint(40000, 65535))\n bulb.set_color([hue, sat, 65535, 3500], duration=0, rapid=True)\n if (bulb.label == selected):\n h = hue / 65535.0;#print(\"h:\",h)\n s = sat / 65535.0;#print(\"s:\",s)\n v = 1;#print(\"v:\",v)\n rgb1 = hsv_to_rgb(h, s, v);#print(\"rgb1:\",rgb1)\n c = Color(rgb=(rgb1[0], rgb1[1], rgb1[2]))\n app.setLabelBg(\"bulbcolor\", c.hex_l)\n updateSliders([hue,sat,65535,3500])\n\n elif (name == \"All On\"):\n if len(bulbs) < 1:\n return\n lan.set_power_all_lights(True, rapid=True)\n elif (name == \"All White\"):\n if len(bulbs) < 1:\n return\n lan.set_color_all_lights([0,0,65535,3500], duration=0, rapid=True)\n updateSliders([0,0,65535,3500])\n app.setLabelBg(\"bulbcolor\", \"#FFFFFF\")\n\n elif (name == \"Execute\"):\n waveform = app.getRadioButton(\"waveform\")\n config['waveform'] = waveform\n if waveform == \"Saw\":\n waveform = 0\n elif waveform == \"Sine\":\n waveform = 1\n elif waveform == \"HalfSine\":\n waveform = 2\n elif waveform == \"Triangle\":\n waveform = 3\n elif waveform == \"Pulse (Strobe)\":\n waveform = 4\n #print (\"waveform:\",waveform)\n is_transient = app.getCheckBox(\"Transient\")\n config['transient'] = is_transient\n \n if (is_transient):\n is_transient = 1\n else:\n is_transient = 0\n\n #print(\"is_transient:\",is_transient)\n #pickedColor = app.getLabelBg(\"lblwaveformcolor\")\n #print(\"gwaveformcolor:\",gwaveformcolor)\n config['secondary_color'] = gwaveformcolor\n c = Color(str(gwaveformcolor))\n hsv = rgb_to_hsv(c.red, c.green, c.blue)\n #print(\"hsv:\",hsv)\n bulbHSBK = [hsv[0] * 65535.0,hsv[1] * 65535.0,hsv[2] * 
65535.0,3500]\n #print (bulbHSBK)\n period = app.getEntry(\"Period(ms)\")\n cycles = app.getEntry(CYCLES)\n duty_cycle = app.getEntry(\"Duty Cycle\")\n config['period'] = period\n config['cycles'] = cycles\n config['duty_cycle'] = duty_cycle\n config.write()\n \n #print(\"period:\",period)\n #print(\"cycles:\",cycles)\n #print(\"duty_cycle:\",duty_cycle)\n\n if gSelectAll:\n lan.set_waveform_all_lights(is_transient, bulbHSBK, period, cycles, duty_cycle, waveform, [1])\n\n elif selected_bulb:\n #print(\"sending color\",hsv)\n selected_bulb.set_waveform(is_transient, bulbHSBK, period, cycles, duty_cycle, waveform)\n else:\n app.errorBox(\"Error\", \"Error. No bulb was selected. Please select a bulb from the pull-down menu (or tick the 'Select All' checkbox) and try again.\")\n return\n\n elif (name == \"Secondary Color\"):\n pickedColor = app.colourBox(colour=\"#FF0000\")\n app.setLabelBg(\"lblwaveformcolor\", pickedColor)\n gwaveformcolor = pickedColor\n elif (name == \"Pick Color\"):\n pickedColor = app.colourBox(colour=\"#FFFFFF\")\n app.setLabelBg(\"bulbcolor\", pickedColor)\n #print(\"pickedColor:\",pickedColor)\n if pickedColor == None:\n return\n c = Color(str(pickedColor))\n hsv = rgb_to_hsv(c.red, c.green, c.blue)\n #print(\"hsv:\",hsv)\n bulbHSBK = [hsv[0] * 65535.0,hsv[1] * 65535.0,hsv[2] * 65535.0,3500]\n #print (\"bulbHSBK:\",bulbHSBK)\n if gSelectAll:\n lan.set_color_all_lights(bulbHSBK, duration=0, rapid=False)\n elif selected_bulb:\n #print(\"sending color\",hsv)\n selected_bulb.set_color(bulbHSBK, duration=0, rapid=False)\n else:\n app.errorBox(\"Error\", \"Error. No bulb was selected. Please select a bulb from the pull-down menu (or tick the 'Select All' checkbox) and try again.\")\n return\n\n updateSliders(bulbHSBK)\n\n\n elif (name == \"Light\"):\n #print(\"selected: \",selected_bulb.label)\n #print(\"Power is Currently: {}\".format(selected_bulb.power_level))\n try:\n onOff = selected_bulb.power_level;\n except Exception as e:\n print (\"Ignoring error:\", str(e))\n app.errorBox(\"Error\", str(e) + \"\\n\\nTry selecting a bulb from the list first.\")\n return\n\n #selected_bulb.set_power(not selected_bulb.get_power(), duration=0, rapid=True)\n\n if \"Power: Off\" in details:\n selected_bulb.set_power(65535, duration=0, rapid=False)\n try:\n app.setButtonImage(\"Light\", resource_path(\"bulb_on.gif\"));#print(\"PowerOn\");\n except Exception as e:\n print (\"Ignoring error:\", str(e))\n details = details.replace(\"Power: Off\", \"Power: On\");\n app.clearTextArea(\"Result\")\n app.setTextArea(\"Result\", details)\n\n else:\n selected_bulb.set_power(0, duration=0, rapid=False)\n try:\n app.setButtonImage(\"Light\", resource_path(\"bulb_off.gif\"));#print(\"PowerOff\");\n except Exception as e:\n print (\"Ignoring error:\", str(e))\n details = details.replace(\"Power: On\", \"Power: Off\"); #print(\"details:\\n\",details)\n app.clearTextArea(\"Result\")\n app.setTextArea(\"Result\", details)\n\n app.setButton ( \"Light\", \"Toggle \" + (app.getOptionBox(\"LIFX Bulbs\")) )\n app.showButton(\"Light\")\n\n\n #listChanged()\n\ndef rainbow_press(name):\n global gExpectedBulbs\n global bulbs\n global lan\n #print (\"len(bulbs):\",len(bulbs)) \n try:\n print(\"Discovering lights...\")\n lan = lifxlan.LifxLAN(int(gExpectedBulbs) if int(gExpectedBulbs) != 0 else None)\n if lan is None:\n print(\"Error finding bulbs\")\n return\n bulbs = lan.get_lights()\n if len(bulbs) < 1:\n print(\"No bulbs found. 
Exiting.\")\n return\n \n #print(\"lan:\",lan,\"type(lan):\",type(lan))\n original_colors = lan.get_color_all_lights()\n original_powers = lan.get_power_all_lights()\n\n print(\"Turning on all lights...\")\n lan.set_power_all_lights(True)\n sleep(1)\n\n print(\"Flashy fast rainbow\")\n rainbow(lan, 0.4)\n\n #print(\"Smooth slow rainbow\")\n #rainbow(lan, 1, smooth=True)\n print(\"Restoring original color to all lights...\")\n for light in original_colors:\n light.set_color(original_colors[light])\n\n sleep(1)\n\n print(\"Restoring original power to all lights...\")\n for light in original_powers:\n light.set_power(original_powers[light])\n except Exception as e:\n print (\"Ignoring error:\", str(e))\n\ndef rainbow(lan, duration_secs=0.5, smooth=False):\n colors = [RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, PURPLE, PINK]\n transition_time_ms = duration_secs * 1000 if smooth else 500\n rapid = True if duration_secs < 1 else False\n for i in range(0, 3):\n for color in colors:\n lan.set_color_all_lights(color, transition_time_ms, rapid)\n sleep(duration_secs)\n\ndef maxPressed(name):\n global maxSaturation\n global maxBrightness\n \n if (name == MAX_SATURATION):\n maxSaturation = app.getCheckBox(MAX_SATURATION)\n print(name, \" is \", maxSaturation)\n config['maxSaturation'] = maxSaturation\n elif (name == MAX_BRIGHTNESS):\n maxBrightness = app.getCheckBox(MAX_BRIGHTNESS)\n print(name, \" is \", maxBrightness)\n config['maxBrightness']=maxBrightness\n \n config.write()\n \n \ndef followDesktop():\n global gSelectAll\n global lan\n global is_follow\n global selected_bulb\n global r\n global maxSaturation\n global maxBrightness\n screen_width = app.winfo_screenwidth()\n screen_height = app.winfo_screenheight()\n print(\"screen_width:\", screen_width, \" screen_height:\", screen_height)\n print(\"Follow:\", is_follow)\n duration = app.getEntry(TRANSITION_TIME)\n is_evening = app.getCheckBox(\"Evening Mode\")\n config['transtime'] = duration\n config['is_evening'] = is_evening\n config.write()\n\n print(\"r:\", r)\n print(\"Starting Loop\")\n\n left = r[0] # The x-offset of where your crop box starts\n top = r[1] # The y-offset of where your crop box starts\n width = r[2] # The width of crop box\n height = r[3] # The height of crop box\n box = (left, top, left + width, top + height)\n\n if (is_follow):\n app.hideEntry(TRANSITION_TIME)\n app.hideOptionBox(DESKTOP_MODE)\n app.showLabel(REGION_COLOR)\n app.hideCheckBox(\"Evening Mode\")\n\n sct = mss()\n while (is_follow):\n start = time.time()\n try:\n # fast screenshot with mss module\n sct_img = sct.grab(box)\n image = Image.frombytes('RGB', sct_img.size, sct_img.rgb)\n except Exception as e:\n print (\"Ignoring error:\", str(e))\n\n try:\n # downsample to 1/10th and calculate average RGB color\n pixels = np.array(image, dtype=np.float32)\n pixels = pixels[::10,::10,:]\n pixels = np.transpose(pixels)\n dominant_color = [np.mean(channel) for channel in pixels]\n c = Color(rgb=(dominant_color[0]/255, dominant_color[1]/255, dominant_color[2]/255))\n app.setLabelBg(REGION_COLOR, c.hex_l)\n # get HSVK color from RGB color\n # during evenings, kelvin is 3500 (default value returned above)\n # during the daytime, saturated colors are still 3500 K,\n # but the whiter the color, the cooler, up to 5000 K\n (h, s, v, k) = RGBtoHSBK(dominant_color)\n if not is_evening:\n k = int(5000 - (s/65535 * 1500))\n if (maxSaturation) and (s > 6553):\n s = 65535\n if (maxBrightness) and (True):\n v = 65535\n bulbHSBK = [h, s, v, k]\n try:\n if gSelectAll:\n 
lan.set_color_all_lights(bulbHSBK, duration=duration, rapid=True)\n elif selected_bulb:\n selected_bulb.set_color(bulbHSBK, duration=duration, rapid=True)\n else:\n app.errorBox(\"Error\", \"Error. No bulb was selected. Please select a bulb from the pull-down menu (or tick the 'Select All' checkbox) and try again.\")\n app.setCheckBox(\"FOLLOW_DESKTOP\", False)\n is_follow = False\n return\n except Exception as e:\n print (\"Ignoring error: \", str(e))\n except Exception as e:\n print(\"Ignoring error: \", str(e))\n\n # rate limit to prevent from spamming bulbs\n # the theoretical max speed that the bulbs can handle is one packet\n # every 0.05 seconds, but empirically I found that 0.1 sec looked better\n max_speed_sec = 0.1\n elapsed_time = time.time() - start\n wait_time = max_speed_sec - elapsed_time\n if wait_time > 0:\n sleep(wait_time)\n #print(elapsed_time, time.time()-start)\n print(\"Exiting loop\")\n\n\ndef followDesktopPressed(name):\n global is_follow\n global r\n global selectedMode\n is_follow = app.getCheckBox(FOLLOW_DESKTOP)\n app.showEntry(TRANSITION_TIME)\n app.showOptionBox(DESKTOP_MODE)\n app.showCheckBox(\"Evening Mode\")\n app.hideLabel(REGION_COLOR)\n\n if (is_follow):\n print(\"Pressed:\", name, \" Follow:\", is_follow)\n if (selectedMode == \"Whole Screen\"):\n print(\"Doing Whole Screen processing\")\n screen_width = app.winfo_screenwidth()\n screen_height = app.winfo_screenheight()\n r = (0, 0, screen_width, screen_height)\n else:\n print(\"Doing Partial Screen processing\")\n\n app.setTransparency(0)\n app.infoBox(\"Select Region\", \"A new window entitled \\\"Screenshot\\\" will pop up. Drag a rectangle around the region of interest and press ENTER . This region's dominant color will be sent to the bulbs to match. To Cancel, press c .\", parent=None)\n myos = system()\n image = ImageGrab.grab()\n if (myos == 'Linux') or (myos == 'Darwin'):\n print(\"Mac OS detected.\")\n open_cv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)\n elif (myos == 'Windows'):\n print(\"Windows OS detected.\")\n open_cv_image = np.array(image)\n\n # Convert RGB to BGR\n im = open_cv_image[:,:,::-1].copy()\n\n if (myos == 'Linux') or (myos == 'Darwin'):\n screen_width = app.winfo_screenwidth()\n screen_height = app.winfo_screenheight()\n im = cv2.resize(im, (int(screen_width * 0.9), int(screen_height * 0.9)))\n cv2.namedWindow(\"Screenshot\", cv2.WINDOW_AUTOSIZE)\n cv2.moveWindow(\"Screenshot\", 0, 0)\n cv2.imshow(\"Screenshot\", im)\n elif (myos == 'Windows'):\n cv2.namedWindow(\"Screenshot\", cv2.WINDOW_NORMAL)\n\n r = cv2.selectROI(\"Screenshot\", im, False)\n #cv2.waitKey()\n print (\"r type:\", type(r))\n print(\"r is\", r)\n if not any(r):\n print(\"No region selected. 
Exiting\")\n cv2.destroyAllWindows()\n app.setCheckBox(FOLLOW_DESKTOP, False)\n is_follow = False\n app.setTransparency(1)\n return\n #cv2.waitKey(0)\n cv2.destroyAllWindows()\n app.setTransparency(1)\n\n app.thread(followDesktop)\n\n\nbulbList = [\"-None- \"]\n\napp = App(\"LIFX Controller\")\n#app = gui(\"LIFX Controller\")\napp.setStretch(\"both\")\napp.setResizable(True)\n#app.setFont(12)\napp.setFont(size=12, family=\"Arial\")\n\n\napp.setSticky(\"new\")\n\napp.startLabelFrame(\"\", 0, 0)\napp.setSticky(\"new\")\napp.startLabelFrame(\"Find\", 0, 0)\napp.setSticky(\"new\")\napp.setPadding(1)\napp.addFlashLabel(\"f1\", \"Start here --->\", 0, 0)\napp.addButton(\"Find Bulbs\", press, 0, 1)\nexpected_range = list(range(1, 20))\napp.addLabelSpinBox ( \"Expected Bulbs\", list(reversed(range(20))), 0, 2 )\napp.setSpinBox(\"Expected Bulbs\", EXPECTED_BULBS)\ngExpecteBulbs = app.getSpinBox(\"Expected Bulbs\")\napp.setSpinBoxChangeFunction(\"Expected Bulbs\", expectedPressed)\napp.setSpinBoxWidth(\"Expected Bulbs\", 2)\n\napp.setSpinBoxTooltip(\"Expected Bulbs\", EXPECTED_TIP)\napp.setLabelTooltip(\"Expected Bulbs\", EXPECTED_TIP)\n\napp.addLabel(\"lbl2\", \" \", 1, 0)\napp.setLabelBg(\"lbl2\", \"white\")\napp.addNamedCheckBox(\"Select All Bulbs\", \"Select All\", 1, 2)\napp.setCheckBoxChangeFunction(\"Select All\", selectAllPressed)\n\n\napp.addOptionBox(\"LIFX Bulbs\", bulbList, 1, 1)\napp.setOptionBoxChangeFunction(\"LIFX Bulbs\", listChanged)\napp.setSticky(\"n\")\ntry:\n app.addImageButton(\"Light\", press, resource_path(\"bulb_off.gif\"), 2, 2)\nexcept Exception as e:\n print (\"Ignoring error:\", str(e))\n #app.errorBox(\"Error\", str(e)+\"\\n\\nTry selecting a bulb from the list first.\")\n #return\napp.setButton( \"Light\", \"Toggle Selected\" )\n#app.setButtonHeight ( \"Light\", 40 )\n\n#app.hideButton(\"Light\")\n\napp.stopLabelFrame()\n#-------------------------------------------------------------------------------\napp.startLabelFrame(\"Scenes\", 0, 1)\napp.setSticky(\"news\")\napp.addEntry(\"Scene 1\", 0, 0)\napp.setEntryChangeFunction(\"Scene 1\", SceneNameChanged)\napp.addNamedButton(\"Save\", \"Save Scene 1\", Scene, 0, 1)\napp.addNamedButton(\"Restore\", \"Restore Scene 1\", Scene, 0, 2)\napp.addEntry(\"Scene 2\", 1, 0)\napp.setEntryChangeFunction(\"Scene 2\", SceneNameChanged)\napp.addNamedButton(\"Save\", \"Save Scene 2\", Scene, 1, 1)\napp.addNamedButton(\"Restore\", \"Restore Scene 2\", Scene, 1, 2)\napp.addEntry(\"Scene 3\", 2, 0)\napp.setEntryChangeFunction(\"Scene 3\", SceneNameChanged)\napp.addNamedButton(\"Save\", \"Save Scene 3\", Scene, 2, 1)\napp.addNamedButton(\"Restore\", \"Restore Scene 3\", Scene, 2, 2)\napp.stopLabelFrame()\n#-------------------------------------------------------------------------------\n#app.setButtonImage(\"picker\", resource_path(\"colorpicker.gif\"), align=None)\n###\napp.setSticky(\"ne\")\napp.startLabelFrame(\"All LAN Bulbs\", 0, 2)\napp.setSticky(\"new\")\napp.addButton(\"All Off\", press, 2, 2)\napp.addButton(\"All On\", press, 3, 2)\napp.addButton(\"All White\", press, 4, 2)\napp.addButton(\"All Rainbow\", rainbow_press, 5, 2)\napp.addButton(\"All Random\", press, 6, 2)\n#app.addButton(\"All Waveform\", rainbow_press,6,2)\napp.stopLabelFrame()\n\n#-------------------------------------------\napp.setSticky(\"sew\")\napp.startLabelFrame(\"HSBK Values\", 1, 0)\napp.setSticky(\"news\")\napp.setPadding(5, 5)\n\napp.addButton(\"Pick Color\", press, 3, 3)\n#app.hideButton ( \"Pick Color\" )\n\n\napp.addLabel(\"hueLab\", \"Hue (H):\", 0, 
0)\napp.addLabel(\"satLab\", \"Saturation (S):\", 1, 0)\napp.addLabel(\"briLab\", \"Brightness (B):\", 2, 0)\napp.addLabel(\"kelLab\", \"Kelvin (K) Warmth:\", 3, 0)\n\napp.setLabelAlign(\"hueLab\", \"left\")\napp.setLabelAlign(\"satLab\", \"left\")\napp.setLabelAlign(\"briLab\", \"left\")\napp.setLabelAlign(\"kelLab\", \"left\")\n\napp.addSpinBox(\"hueSpin\", list(reversed(range(65536))), 0, 1)\napp.addSpinBox(\"satSpin\", list(reversed(range(65536))), 1, 1)\napp.addSpinBox(\"briSpin\", list(reversed(range(65536))), 2, 1)\napp.addSpinBox(\"kelSpin\", list(reversed(range(2500, 9001, 1))), 3, 1)\n\napp.setSpinBox(\"hueSpin\", 0)\napp.setSpinBox(\"satSpin\", 0)\napp.setSpinBox(\"briSpin\", 0)\napp.setSpinBox(\"kelSpin\", 3500)\n\napp.setSpinBoxWidth(\"hueSpin\", 5)\napp.setSpinBoxWidth(\"satSpin\", 5)\napp.setSpinBoxWidth(\"briSpin\", 5)\napp.setSpinBoxWidth(\"kelSpin\", 5)\n\napp.setSpinBoxChangeFunction(\"hueSpin\", updateHSB)\napp.setSpinBoxChangeFunction(\"satSpin\", updateHSB)\napp.setSpinBoxChangeFunction(\"briSpin\", updateHSB)\napp.setSpinBoxChangeFunction(\"kelSpin\", updateHSB)\n\n\napp.addScale(\"hueScale\", 0, 2)\napp.addScale(\"satScale\", 1, 2)\napp.addScale(\"briScale\", 2, 2)\napp.addScale(\"kelScale\", 3, 2)\n\napp.setScaleRange(\"hueScale\", 0, 65535)\napp.setScaleRange(\"satScale\", 0, 65535)\napp.setScaleRange(\"briScale\", 0, 65535)\napp.setScaleRange(\"kelScale\", 2500, 9000)\n\napp.setScaleChangeFunction(\"hueScale\", updateHSB)\napp.setScaleChangeFunction(\"satScale\", updateHSB)\napp.setScaleChangeFunction(\"briScale\", updateHSB)\napp.setScaleChangeFunction(\"kelScale\", updateHSB)\n\napp.startLabelFrame(\"Bulb Color\", 0, 3, 3, 3)\napp.setSticky(\"news\")\napp.addLabel(\"bulbcolor\", \"\", 0, 3, 3, 3)\napp.setLabel(\"bulbcolor\", \" \")\napp.setLabelHeight(\"bulbcolor\", 5)\napp.setLabelWidth(\"bulbcolor\", 10)\napp.setLabelBg(\"bulbcolor\", \"gray\")\napp.stopLabelFrame()\n\napp.stopLabelFrame()\n#-------------------------------------------\napp.startLabelFrame(\"Waveform\", 1, 1, 5, 1)\n#app.setFrameWidth(\"Waveform\",20)\n#app.setSticky(\"news\")\napp.setSticky(\"w\")\n\napp.addRadioButton(\"waveform\", \"Saw\")\napp.addRadioButton(\"waveform\", \"Sine\")\napp.addRadioButton(\"waveform\", \"HalfSine\")\napp.addRadioButton(\"waveform\", \"Triangle\")\napp.addRadioButton(\"waveform\", \"Pulse (Strobe)\")\n\napp.setSticky(\"e\")\napp.addCheckBox(\"Transient\", 0, 2)\napp.setCheckBox(\"Transient\")\napp.addButton(\"Secondary Color\", press, 1, 1)\napp.addLabel(\"lblwaveformcolor\", \" \", 1, 2)\napp.setLabelBg(\"lblwaveformcolor\", \"#FF0000\")\napp.setLabelWidth(\"lblwaveformcolor\", 20)\napp.addLabelEntry(\"Period(ms)\", 2, 2)\napp.setEntryWidth(\"Period(ms)\", 6)\napp.setEntry(\"Period(ms)\", \"500\")\n\napp.addLabelEntry(CYCLES, 3, 2)\napp.setEntryWidth(CYCLES, 6)\napp.setEntry(CYCLES, \"5\")\n\napp.addLabelEntry(\"Duty Cycle\", 4, 2)\napp.setEntryWidth(\"Duty Cycle\", 6)\napp.setEntry(\"Duty Cycle\", \"0\")\n\napp.setEntryTooltip(\"Duty Cycle\", DUTY_CYCLE_TIP)\napp.setLabelTooltip(\"Duty Cycle\", DUTY_CYCLE_TIP)\napp.setEntryTooltip(\"Cycles\", CYCLES_TIP)\napp.setLabelTooltip(CYCLES, CYCLES_TIP)\napp.setEntryTooltip(\"Period(ms)\", PERIOD_TIP)\napp.setLabelTooltip(\"Period(ms)\", PERIOD_TIP)\napp.setCheckBoxTooltip(\"Transient\", TRANSIENT_TIP)\napp.setSticky(\"ew\")\n\napp.addButton(\"Execute\", press, 5, 0, colspan=3)\napp.setButtonBg(\"Execute\", 
\"cyan\")\n\napp.stopLabelFrame()\n#-------------------------------------------\n\n\n\napp.stopLabelFrame()\n\n#----------------------------------------------------\n#app.setSticky(\"news\")\napp.startLabelFrame(\"Bulb Details\", 5, 0)\napp.setSticky(\"ew\")\napp.addScrolledTextArea(\"Result\", 0, 0)\n#app.setTextAreaWidth(\"Result\", 45)\napp.setTextAreaHeight(\"Result\", 25)\napp.setTextArea(\"Result\", test_string)\napp.stopLabelFrame()\n#-----------------------------------------------------\n\n#-------------------------------------------\napp.startLabelFrame(FOLLOW_DESKTOP, 2, 0)\n#app.setSticky(\"n\")\nmodeList = [\"-Select Region- \"]\nmodeList.append(\"Whole Screen\")\nmodeList.append(\"Rectangular Region\")\napp.setSticky(\"w\")\napp.addCheckBox(FOLLOW_DESKTOP, 0, 0)\napp.setCheckBoxChangeFunction(FOLLOW_DESKTOP, followDesktopPressed)\napp.addOptionBox(DESKTOP_MODE, modeList, 0, 1)\napp.setOptionBoxChangeFunction(DESKTOP_MODE, modeChanged)\napp.setOptionBox(DESKTOP_MODE, \"Whole Screen\", callFunction=False)\napp.addLabelEntry(TRANSITION_TIME, 0, 2)\napp.setEntryWidth(TRANSITION_TIME, 6)\napp.setEntry(TRANSITION_TIME, TRANSITION_TIME_DEFAULT)\n#app.startLabelFrame(\"Region Color\", 0, 3)\napp.addLabel(REGION_COLOR, \"\", 1, 0, colspan=5)\napp.setLabel(REGION_COLOR, \" Desktop Region's Dominant Color\")\napp.setLabelHeight(REGION_COLOR, 1)\napp.setLabelBg(REGION_COLOR, \"gray\")\napp.hideLabel(REGION_COLOR)\napp.setSticky(\"e\")\napp.addCheckBox(MAX_SATURATION, 0, 3)\napp.addCheckBox(MAX_BRIGHTNESS, 0, 4)\napp.setCheckBoxChangeFunction(MAX_SATURATION, maxPressed)\napp.setCheckBoxChangeFunction(MAX_BRIGHTNESS, maxPressed)\napp.addCheckBox(\"Evening Mode\",0,5)\n#app.hideCheckBox(MAX_SATURATION)\n#app.hideCheckBox(MAX_BRIGHTNESS)\n\napp.setEntryTooltip(TRANSITION_TIME, TRANSITION_TIME_TIP)\napp.setLabelTooltip(TRANSITION_TIME, TRANSITION_TIME_TIP)\napp.setCheckBoxTooltip(FOLLOW_DESKTOP, FOLLOW_DESKTOP_TIP)\napp.setOptionBoxTooltip(DESKTOP_MODE, DESKTOP_MODE_TIP)\n\napp.stopLabelFrame()\n#-------------------------------------------\n\nif not os.path.exists(CONFIG):\n print(\"Creating .ini file\")\n open(CONFIG, 'w').close()\n config = ConfigObj(CONFIG)\n config['expectedbulbs'] = 0\n config['Scene 1'] = \"Scene 1\"\n config['Scene 2'] = \"Scene 2\"\n config['Scene 3'] = \"Scene 3\"\n config['transtime'] = 200\n config['waveform'] = 'Saw'\n config['transient'] = True \n config['period'] = 500\n config['cycles'] = 5\n config['duty_cycle'] = 0\n config['secondary_color'] = \"#FF0000\"\n config['maxSaturation'] = False\n config['maxBrightness'] = False\n config['is_evening'] = False\n config.write()\n\n\n#print(\".ini file exists\")\nconfig = ConfigObj(CONFIG)\nprint(\"config:\", config)\nif 'maxSaturation' in config:\n maxSaturation = (config['maxSaturation']=='True')\n app.setCheckBox(MAX_SATURATION,ticked=(config['maxSaturation']=='True'),callFunction=False)\nif 'maxBrightness' in config:\n maxBrightness = (config['maxBrightness']=='True')\n app.setCheckBox(MAX_BRIGHTNESS,ticked=(config['maxBrightness']=='True'),callFunction=False)\nif 'is_evening' in config:\n app.setCheckBox(\"Evening Mode\",ticked=(config['is_evening']=='True'),callFunction=False)\nif 'waveform' in config:\n app.setRadioButton(\"waveform\",config['waveform'])\nif 'transient' in config:\n app.setCheckBox(\"Transient\",config['transient'])\nif 'period' in config:\n app.setEntry(\"Period(ms)\",config['period'])\nif 'cycles' in config:\n app.setEntry(CYCLES,config['cycles'])\nif 'duty_cycle' in config:\n 
app.setEntry(\"Duty Cycle\",config['duty_cycle'])\nif 'secondary_color' in config:\n app.setLabelBg(\"lblwaveformcolor\", config['secondary_color'])\nif 'expectedbulbs' in config:\n app.setSpinBox(\"Expected Bulbs\", config['expectedbulbs'])\nif 'transtime' in config:\n app.setEntry(TRANSITION_TIME, config['transtime'])\nif 'Scene 1' in config: \n app.setEntry(\"Scene 1\", config[\"Scene 1\"], callFunction=False)\nif 'Scene 2' in config: \n app.setEntry(\"Scene 2\", config[\"Scene 2\"], callFunction=False)\nif 'Scene 3' in config: \n app.setEntry(\"Scene 3\", config[\"Scene 3\"], callFunction=False)\n#print(\"config['bulbs']:\",config['bulbs'])\n#print(\"type(config['bulbs']):\",type(config['bulbs']))\nif os.path.exists(PICKLE):\n bulbPickle = pkl.load(open(PICKLE, \"rb\")) #this reads the pickle\n #print (bulbPickle)\n bulbList.clear()\n bulbList.append(\"-Select Bulb-\")\n\n for i, bulb in enumerate(bulbPickle):\n #print (\"mac:\",bulb['mac']);\n light = lifxlan.Light(bulb['mac'], bulb['ip'])\n light.label = bulb['label']\n bulbs.append(light)\n bulbList.append(bulb['label'])\n\n if len(bulbs) > 0:\n app.clearOptionBox(\"LIFX Bulbs\", callFunction=False)\n app.changeOptionBox(\"LIFX Bulbs\", bulbList, callFunction=False)\n app.setLabelBg(\"lbl2\", mygreen)\n app.hideLabel(\"f1\")\n app.setLabel(\"lbl2\", \"Recalled \" + str(len(bulbs)) + \" bulbs\")\n app.setCheckBox(\"Select All\")\n\n\n#light = Light(\"12:34:56:78:9a:bc\", \"192.168.1.42\")\n#print(\"bulbs:\",bulbs)\nlan = lifxlan.LifxLAN()\n\napp.go()\n" ]
[ [ "numpy.array", "numpy.mean", "numpy.transpose" ] ]
hustcxl/tensorflow_cookbook
[ "4f57ea4ad79c8111fb29bad3da5d151858c6a050" ]
[ "04_Support_Vector_Machines/02_Working_with_Linear_SVMs/02_linear_svm.py" ]
[ "# Linear Support Vector Machine: Soft Margin\n# ----------------------------------\n#\n# This function shows how to use TensorFlow to\n# create a soft margin SVM\n#\n# We will use the iris data, specifically:\n# x1 = Sepal Length\n# x2 = Petal Width\n# Class 1 : I. setosa\n# Class -1: not I. setosa\n#\n# We know here that x and y are linearly seperable\n# for I. setosa classification.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn import datasets\nfrom tensorflow.python.framework import ops\nops.reset_default_graph()\n\n# Set random seeds\nnp.random.seed(7)\ntf.set_random_seed(7)\n\n# Create graph\nsess = tf.Session()\n\n# Load the data\n# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]\niris = datasets.load_iris()\nx_vals = np.array([[x[0], x[3]] for x in iris.data])\ny_vals = np.array([1 if y == 0 else -1 for y in iris.target])\n\n# Split data into train/test sets\ntrain_indices = np.random.choice(len(x_vals),\n round(len(x_vals)*0.9),\n replace=False)\ntest_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))\nx_vals_train = x_vals[train_indices]\nx_vals_test = x_vals[test_indices]\ny_vals_train = y_vals[train_indices]\ny_vals_test = y_vals[test_indices]\n\n# Declare batch size\nbatch_size = 135\n\n# Initialize placeholders\nx_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)\ny_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)\n\n# Create variables for linear regression\nA = tf.Variable(tf.random_normal(shape=[2, 1]))\nb = tf.Variable(tf.random_normal(shape=[1, 1]))\n\n# Declare model operations\nmodel_output = tf.subtract(tf.matmul(x_data, A), b)\n\n# Declare vector L2 'norm' function squared\nl2_norm = tf.reduce_sum(tf.square(A))\n\n# Declare loss function\n# Loss = max(0, 1-pred*actual) + alpha * L2_norm(A)^2\n# L2 regularization parameter, alpha\nalpha = tf.constant([0.01])\n# Margin term in loss\nclassification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))\n# Put terms together\nloss = tf.add(classification_term, tf.multiply(alpha, l2_norm))\n\n# Declare prediction function\nprediction = tf.sign(model_output)\naccuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))\n\n# Declare optimizer\nmy_opt = tf.train.GradientDescentOptimizer(0.01)\ntrain_step = my_opt.minimize(loss)\n\n# Initialize variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Training loop\nloss_vec = []\ntrain_accuracy = []\ntest_accuracy = []\nfor i in range(500):\n rand_index = np.random.choice(len(x_vals_train), size=batch_size)\n rand_x = x_vals_train[rand_index]\n rand_y = np.transpose([y_vals_train[rand_index]])\n sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})\n\n temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})\n loss_vec.append(temp_loss)\n\n train_acc_temp = sess.run(accuracy, feed_dict={\n x_data: x_vals_train,\n y_target: np.transpose([y_vals_train])})\n train_accuracy.append(train_acc_temp)\n\n test_acc_temp = sess.run(accuracy, feed_dict={\n x_data: x_vals_test,\n y_target: np.transpose([y_vals_test])})\n test_accuracy.append(test_acc_temp)\n\n if (i + 1) % 100 == 0:\n print('Step #{} A = {}, b = {}'.format(\n str(i+1),\n str(sess.run(A)),\n str(sess.run(b))\n ))\n print('Loss = ' + str(temp_loss))\n\n# Extract coefficients\n[[a1], [a2]] = sess.run(A)\n[[b]] = sess.run(b)\nslope = -a2/a1\ny_intercept = b/a1\n\n# Extract x1 and x2 vals\nx1_vals = [d[1] for d in 
x_vals]\n\n# Get best fit line\nbest_fit = []\nfor i in x1_vals:\n best_fit.append(slope*i+y_intercept)\n\n# Separate I. setosa\nsetosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == 1]\nsetosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == 1]\nnot_setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == -1]\nnot_setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == -1]\n\n# Plot data and line\nplt.plot(setosa_x, setosa_y, 'o', label='I. setosa')\nplt.plot(not_setosa_x, not_setosa_y, 'x', label='Non-setosa')\nplt.plot(x1_vals, best_fit, 'r-', label='Linear Separator', linewidth=3)\nplt.ylim([0, 10])\nplt.legend(loc='lower right')\nplt.title('Sepal Length vs Pedal Width')\nplt.xlabel('Pedal Width')\nplt.ylabel('Sepal Length')\nplt.show()\n\n# Plot train/test accuracies\nplt.plot(train_accuracy, 'k-', label='Training Accuracy')\nplt.plot(test_accuracy, 'r--', label='Test Accuracy')\nplt.title('Train and Test Set Accuracies')\nplt.xlabel('Generation')\nplt.ylabel('Accuracy')\nplt.legend(loc='lower right')\nplt.show()\n\n# Plot loss over time\nplt.plot(loss_vec, 'k-')\nplt.title('Loss per Generation')\nplt.xlabel('Generation')\nplt.ylabel('Loss')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.sign", "tensorflow.equal", "matplotlib.pyplot.plot", "tensorflow.Session", "tensorflow.square", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.matmul", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "sklearn.datasets.load_iris", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "numpy.transpose", "tensorflow.set_random_seed", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "tensorflow.multiply", "tensorflow.constant", "numpy.random.seed", "matplotlib.pyplot.xlabel", "tensorflow.random_normal" ] ]
ruppysuppy/numpy
[ "a89f3ebaec7441f4ba5e30eb07206c2a7269778e" ]
[ "numpy/__init__.py" ]
[ "\"\"\"\nNumPy\n=====\n\nProvides\n 1. An array object of arbitrary homogeneous items\n 2. Fast mathematical operations over arrays\n 3. Linear Algebra, Fourier Transforms, Random Number Generation\n\nHow to use the documentation\n----------------------------\nDocumentation is available in two forms: docstrings provided\nwith the code, and a loose standing reference guide, available from\n`the NumPy homepage <https://www.scipy.org>`_.\n\nWe recommend exploring the docstrings using\n`IPython <https://ipython.org>`_, an advanced Python shell with\nTAB-completion and introspection capabilities. See below for further\ninstructions.\n\nThe docstring examples assume that `numpy` has been imported as `np`::\n\n >>> import numpy as np\n\nCode snippets are indicated by three greater-than signs::\n\n >>> x = 42\n >>> x = x + 1\n\nUse the built-in ``help`` function to view a function's docstring::\n\n >>> help(np.sort)\n ... # doctest: +SKIP\n\nFor some objects, ``np.info(obj)`` may provide additional help. This is\nparticularly true if you see the line \"Help on ufunc object:\" at the top\nof the help() page. Ufuncs are implemented in C, not Python, for speed.\nThe native Python help() does not know how to view their help, but our\nnp.info() function does.\n\nTo search for documents containing a keyword, do::\n\n >>> np.lookfor('keyword')\n ... # doctest: +SKIP\n\nGeneral-purpose documents like a glossary and help on the basic concepts\nof numpy are available under the ``doc`` sub-module::\n\n >>> from numpy import doc\n >>> help(doc)\n ... # doctest: +SKIP\n\nAvailable subpackages\n---------------------\ndoc\n Topical documentation on broadcasting, indexing, etc.\nlib\n Basic functions used by several sub-packages.\nrandom\n Core Random Tools\nlinalg\n Core Linear Algebra Tools\nfft\n Core FFT routines\npolynomial\n Polynomial tools\ntesting\n NumPy testing tools\nf2py\n Fortran to Python Interface Generator.\ndistutils\n Enhancements to distutils with support for\n Fortran compilers support and more.\n\nUtilities\n---------\ntest\n Run numpy unittests\nshow_config\n Show numpy build configuration\ndual\n Overwrite certain functions with high-performance SciPy tools.\n Note: `numpy.dual` is deprecated. Use the functions from NumPy or Scipy\n directly instead of importing them from `numpy.dual`.\nmatlib\n Make everything matrices.\n__version__\n NumPy version string\n\nViewing documentation using IPython\n-----------------------------------\nStart IPython with the NumPy profile (``ipython -p numpy``), which will\nimport `numpy` under the alias `np`. Then, use the ``cpaste`` command to\npaste examples into the shell. To see which functions are available in\n`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use\n``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow\ndown the list. To view the docstring for a function, use\n``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view\nthe source code).\n\nCopies vs. in-place operation\n-----------------------------\nMost of the functions in `numpy` return a copy of the array argument\n(e.g., `np.sort`). In-place versions of these functions are often\navailable as array methods, i.e. 
``x = np.array([1,2,3]); x.sort()``.\nExceptions to this rule are documented.\n\n\"\"\"\nimport sys\nimport warnings\n\nfrom ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning\nfrom ._globals import _NoValue\n\n# We first need to detect if we're being called as part of the numpy setup\n# procedure itself in a reliable manner.\ntry:\n __NUMPY_SETUP__\nexcept NameError:\n __NUMPY_SETUP__ = False\n\nif __NUMPY_SETUP__:\n sys.stderr.write('Running from numpy source directory.\\n')\nelse:\n try:\n from numpy.__config__ import show as show_config\n except ImportError:\n msg = \"\"\"Error importing numpy: you should not try to import numpy from\n its source directory; please exit the numpy source tree, and relaunch\n your python interpreter from there.\"\"\"\n raise ImportError(msg)\n\n from .version import git_revision as __git_revision__\n from .version import version as __version__\n\n __all__ = ['ModuleDeprecationWarning',\n 'VisibleDeprecationWarning']\n\n # Allow distributors to run custom init code\n from . import _distributor_init\n\n from . import core\n from .core import *\n from . import compat\n from . import lib\n # NOTE: to be revisited following future namespace cleanup.\n # See gh-14454 and gh-15672 for discussion.\n from .lib import *\n\n from . import linalg\n from . import fft\n from . import polynomial\n from . import random\n from . import ctypeslib\n from . import ma\n from . import matrixlib as _mat\n from .matrixlib import *\n\n # Make these accessible from numpy name-space\n # but not imported in from numpy import *\n # TODO[gh-6103]: Deprecate these\n from builtins import bool, int, float, complex, object, str\n from .compat import long, unicode\n\n from .core import round, abs, max, min\n # now that numpy modules are imported, can initialize limits\n core.getlimits._register_known_types()\n\n __all__.extend(['__version__', 'show_config'])\n __all__.extend(core.__all__)\n __all__.extend(_mat.__all__)\n __all__.extend(lib.__all__)\n __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])\n\n # These are added by `from .core import *` and `core.__all__`, but we\n # overwrite them above with builtins we do _not_ want to export.\n __all__.remove('long')\n __all__.remove('unicode')\n\n # Remove things that are in the numpy.lib but not in the numpy namespace\n # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)\n # that prevents adding more things to the main namespace by accident.\n # The list below will grow until the `from .lib import *` fixme above is\n # taken care of\n __all__.remove('Arrayterator')\n del Arrayterator\n\n # Filter out Cython harmless warnings\n warnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ndarray size changed\")\n\n # oldnumeric and numarray were removed in 1.9. 
In case some packages import\n # but do not use them, we define them here for backward compatibility.\n oldnumeric = 'removed'\n numarray = 'removed'\n\n if sys.version_info[:2] >= (3, 7):\n # Importing Tester requires importing all of UnitTest which is not a\n # cheap import Since it is mainly used in test suits, we lazy import it\n # here to save on the order of 10 ms of import time for most users\n #\n # The previous way Tester was imported also had a side effect of adding\n # the full `numpy.testing` namespace\n #\n # module level getattr is only supported in 3.7 onwards\n # https://www.python.org/dev/peps/pep-0562/\n def __getattr__(attr):\n if attr == 'testing':\n import numpy.testing as testing\n return testing\n elif attr == 'Tester':\n from .testing import Tester\n return Tester\n else:\n raise AttributeError(\"module {!r} has no attribute \"\n \"{!r}\".format(__name__, attr))\n\n def __dir__():\n return list(globals().keys() | {'Tester', 'testing'})\n\n else:\n # We don't actually use this ourselves anymore, but I'm not 100% sure that\n # no-one else in the world is using it (though I hope not)\n from .testing import Tester\n\n # Pytest testing\n from numpy._pytesttester import PytestTester\n test = PytestTester(__name__)\n del PytestTester\n\n\n def _sanity_check():\n \"\"\"\n Quick sanity checks for common bugs caused by environment.\n There are some cases e.g. with wrong BLAS ABI that cause wrong\n results under specific runtime conditions that are not necessarily\n achieved during test suite runs, and it is useful to catch those early.\n\n See https://github.com/numpy/numpy/issues/8577 and other\n similar bug reports.\n\n \"\"\"\n try:\n x = ones(2, dtype=float32)\n if not abs(x.dot(x) - 2.0) < 1e-5:\n raise AssertionError()\n except AssertionError:\n msg = (\"The current Numpy installation ({!r}) fails to \"\n \"pass simple sanity checks. This can be caused for example \"\n \"by incorrect BLAS library being linked in, or by mixing \"\n \"package managers (pip, conda, apt, ...). Search closed \"\n \"numpy issues for similar problems.\")\n raise RuntimeError(msg.format(__file__))\n\n _sanity_check()\n del _sanity_check\n\n def _mac_os_check():\n \"\"\"\n Quick Sanity check for Mac OS look for accelerate build bugs.\n Testing numpy polyfit calls init_dgelsd(LAPACK)\n \"\"\"\n try:\n c = array([3., 2., 1.])\n x = linspace(0, 2, 5)\n y = polyval(c, x)\n _ = polyfit(x, y, 2, cov=True)\n except ValueError:\n pass\n\n import sys\n if sys.platform == \"darwin\":\n with warnings.catch_warnings(record=True) as w:\n _mac_os_check()\n # Throw runtime error, if the test failed Check for warning and error_message\n error_message = \"\"\n if len(w) > 0:\n error_message = \"{}: {}\".format(w[-1].category.__name__, str(w[-1].message))\n msg = (\n \"Polyfit sanity test emitted a warning, most likely due \"\n \"to using a buggy Accelerate backend. 
If you compiled \"\n \"yourself, more information is available at \"\n \"https://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries \"\n \"Otherwise report this to the vendor \"\n \"that provided NumPy.\\n{}\\n\".format(error_message))\n raise RuntimeError(msg)\n del _mac_os_check\n\n # We usually use madvise hugepages support, but on some old kernels it\n # is slow and thus better avoided.\n # Specifically kernel version 4.6 had a bug fix which probably fixed this:\n # https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff\n import os\n use_hugepage = os.environ.get(\"NUMPY_MADVISE_HUGEPAGE\", None)\n if sys.platform == \"linux\" and use_hugepage is None:\n use_hugepage = 1\n kernel_version = os.uname().release.split(\".\")[:2]\n kernel_version = tuple(int(v) for v in kernel_version)\n if kernel_version < (4, 6):\n use_hugepage = 0\n elif use_hugepage is None:\n # This is not Linux, so it should not matter, just enable anyway\n use_hugepage = 1\n else:\n use_hugepage = int(use_hugepage)\n\n # Note that this will currently only make a difference on Linux\n core.multiarray._set_madvise_hugepage(use_hugepage)\n" ]
[ [ "numpy._pytesttester.PytestTester" ] ]
Sukriti1312/DSCI-522_City_of_A-Stars_310
[ "3cbfd1c238a86bcc4c3ddeb4d4cf83b90310e4ad" ]
[ "scripts/eda_script.py" ]
[ "# author: A. Muhammad\n# date: 2020-02-01\n\n'''This script performs EDA on the students performance datasets\nfor portuguese and math students and outputs necessary tables and\nfigures to path provided.\n\nUsage: eda_script.py --file_path=<file_path> --results_path=<results_path>\n\nExample: \n python scripts/eda_script.py --file_path=data/ --results_path=results/\n\nOptions:\n--file_path=<file_path> Path (excluding filenames) to the csv file.\n--results_path=<results_path> Path for saving plots.\n'''\n\nimport pandas as pd\nimport numpy as np\nfrom docopt import docopt\nimport altair as alt\nimport re\nimport os\n\n\nopt = docopt(__doc__)\n\ndef test_function():\n \"\"\"\n Tests the input and output\n file paths.\n \"\"\"\n file_path_check = re.match(\"([A-Za-z]+[.]{1}[A-Za-z]+)\", opt[\"--file_path\"]) \n out_path_check = re.match(\"([A-Za-z]+[.]{1}[A-Za-z]+)\", opt[\"--results_path\"])\n assert file_path_check == None, \"you can not have extensions in path, only directories.\"\n assert out_path_check == None, \"you can not have extensions in path, only directories.\"\n try:\n os.listdir(opt[\"--file_path\"])\n os.listdir(opt[\"--results_path\"])\n except Exception as e:\n print(e)\n\n# test function runs here\ntest_function()\n\n\nopt = docopt(__doc__)\n\ndef main(file_path, results_path):\n # read in data\n df_mat = pd.read_csv(file_path + \"student-mat_clean.csv\")\n df_por = pd.read_csv(file_path + \"student-por_clean.csv\")\n\n # register the custom theme under a chosen name\n alt.themes.register('mds_special', mds_special)\n\n # enable the newly registered theme\n alt.themes.enable('mds_special')\n \n ## tables\n # agg table math\n df_math_agg = df_mat[[\"romantic\", \"total_grade\"]].groupby(\"romantic\").agg(['count', 'mean', 'std'])\n df_math_agg['total_grade'].reset_index().round(4).to_csv(results_path + \"math_table.csv\", index=False)\n\n # agg table por\n df_por_agg = df_por[[\"romantic\", \"total_grade\"]].groupby(\"romantic\").agg(['count', 'mean', 'std'])\n df_por_agg['total_grade'].reset_index().round(4).to_csv(results_path + \"por_table.csv\", index=False)\n\n ## print certain findings\n print(\"{} math students were in relationships and {} were not.\".format(\n df_mat['romantic'].value_counts()['yes'], \n df_mat['romantic'].value_counts()['no']))\n print(\"{} portuguese language students were in relationships and {} were not.\".format(\n df_por['romantic'].value_counts()['yes'], \n df_por['romantic'].value_counts()['no']))\n print(\"The average total grade for math students in relationships was: {:.2f}/60\".format(\n df_mat[df_mat['romantic'] == 'yes']['total_grade'].mean()))\n print(\"The average total grade for math students not in relationships was: {:.2f}/60\".format(\n df_mat[df_mat['romantic'] == 'no']['total_grade'].mean()))\n print(\"The average total grade for portuguese students in relationships was: {:.2f}/60\".format(\n df_por[df_por['romantic'] == 'yes']['total_grade'].mean()))\n print(\"The average total grade for portuguese students not in relationships was: {:.2f}/60\".format(\n df_por[df_por['romantic'] == 'no']['total_grade'].mean()))\n\n ## make plots\n p_1_1 = alt.Chart(df_mat[df_mat['romantic']==\"yes\"]).transform_density(\n 'total_grade',\n as_=['total_grade', 'density'],\n ).mark_bar().encode(\n x=alt.X(\"total_grade:Q\", title=\"Total grade\", bin = alt.Bin(extent=[0, 60], step=5)),\n y='density:Q',\n ).properties(\n width = 300,\n height = 400,\n title = \"In relationship\"\n )\n p_1_2 = 
alt.Chart(df_mat[df_mat['romantic']==\"no\"]).transform_density(\n 'total_grade',\n as_=['total_grade', 'density'],\n ).mark_bar(color='orange').encode(\n x=alt.X(\"total_grade:Q\", title=\"Total grade\", bin = alt.Bin(extent=[0, 60], step=5)),\n y='density:Q',\n ).properties(\n width = 300,\n height = 400,\n title = \"Not in relationship\"\n )\n P_math = p_1_1 | p_1_2\n \n P_math.configure_title(\n fontSize=14,\n )\n\n p_2_1 = alt.Chart(df_por[df_por['romantic']==\"yes\"]).transform_density(\n 'total_grade',\n as_=['total_grade', 'density'],\n ).mark_bar().encode(\n x=alt.X(\"total_grade:Q\", title=\"Total grade\", bin = alt.Bin(extent=[0, 60], step=5)),\n y='density:Q',\n ).properties(\n width = 300,\n height = 400,\n title = \"In relationship\"\n )\n p_2_2 = alt.Chart(df_por[df_por['romantic']==\"no\"]).transform_density(\n 'total_grade',\n as_=['total_grade', 'density'],\n ).mark_bar(color='orange').encode(\n x=alt.X(\"total_grade:Q\", title=\"Total grade\", bin = alt.Bin(extent=[0, 60], step=5)),\n y='density:Q',\n ).properties(\n width = 300,\n height = 400,\n title = \"Not in relationship\"\n )\n P_por = p_2_1 | p_2_2\n P_por.configure_title(\n fontSize=14,\n )\n\n ## save plots\n P_math.save(results_path + \"figures/math_plot.png\", webdriver='chrome')\n P_por.save(results_path + \"figures/por_plot.png\", webdriver='chrome')\n\n\ndef mds_special():\n \"\"\"\n Applies mds_special theme to plots \n created by\n Firas Moosvi, instructor at UBC \n Master of Data Science program.\n \"\"\"\n font = \"Arial\"\n axisColor = \"#000000\"\n gridColor = \"#DEDDDD\"\n return {\n \n \"config\": {\n \"title\": {\n \"fontSize\": 24,\n \"font\": font,\n \"anchor\": \"start\", # equivalent of left-aligned.\n \"fontColor\": \"#000000\"\n },\n \"background\": \"white\",\n \"axisX\": {\n \"domain\": True,\n #\"domainColor\": axisColor,\n \"gridColor\": gridColor,\n \"domainWidth\": 1,\n \"grid\": False,\n \"labelFont\": font,\n \"labelFontSize\": 12,\n \"labelAngle\": 0, \n #\"tickColor\": axisColor,\n \"tickSize\": 5, # default, including it just to show you can change it\n #\"titleFont\": font,\n \"titleFontSize\": 18,\n \"titlePadding\": 10, # guessing, not specified in styleguide\n \"title\": \"X Axis Title (units)\", \n },\n \"axisY\": {\n \"domain\": False,\n \"grid\": True,\n \"gridColor\": gridColor,\n \"gridWidth\": 1,\n \"labelFont\": font,\n \"labelFontSize\": 12,\n \"labelAngle\": 0, \n #\"ticks\": False, # even if you don't have a \"domain\" you need to turn these off.\n \"titleFont\": font,\n \"titleFontSize\": 18,\n \"titlePadding\": 10, # guessing, not specified in styleguide\n \"title\": \"Y Axis Title (units)\", \n # titles are by default vertical left of axis so we need to hack this \n #\"titleAngle\": 0, # horizontal\n #\"titleY\": -10, # move it up\n #\"titleX\": 18, # move it to the right so it aligns with the labels \n },\n }\n }\n \n\nif __name__ == \"__main__\":\n main(opt[\"--file_path\"], opt[\"--results_path\"])\n" ]
[ [ "pandas.read_csv" ] ]
indigoLovee/DQN
[ "21a30484014331b21047ecddac4fa584828ee80a" ]
[ "DQN.py" ]
[ "import torch as T\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nfrom buffer import ReplayBuffer\n\ndevice = T.device(\"cuda:0\" if T.cuda.is_available() else \"cpu\")\n\n\nclass DeepQNetwork(nn.Module):\n def __init__(self, alpha, state_dim, action_dim, fc1_dim, fc2_dim):\n super(DeepQNetwork, self).__init__()\n\n self.fc1 = nn.Linear(state_dim, fc1_dim)\n self.fc2 = nn.Linear(fc1_dim, fc2_dim)\n self.q = nn.Linear(fc2_dim, action_dim)\n\n self.optimizer = optim.Adam(self.parameters(), lr=alpha)\n self.to(device)\n\n def forward(self, state):\n x = T.relu(self.fc1(state))\n x = T.relu(self.fc2(x))\n\n q = self.q(x)\n\n return q\n\n def save_checkpoint(self, checkpoint_file):\n T.save(self.state_dict(), checkpoint_file, _use_new_zipfile_serialization=False)\n\n def load_checkpoint(self, checkpoint_file):\n self.load_state_dict(T.load(checkpoint_file))\n\n\nclass DQN:\n def __init__(self, alpha, state_dim, action_dim, fc1_dim, fc2_dim, ckpt_dir,\n gamma=0.99, tau=0.005, epsilon=1.0, eps_end=0.01, eps_dec=5e-4,\n max_size=1000000, batch_size=256):\n self.tau = tau\n self.gamma = gamma\n self.epsilon = epsilon\n self.eps_min = eps_end\n self.eps_dec = eps_dec\n self.batch_size = batch_size\n self.action_space = [i for i in range(action_dim)]\n self.checkpoint_dir = ckpt_dir\n\n self.q_eval = DeepQNetwork(alpha=alpha, state_dim=state_dim, action_dim=action_dim,\n fc1_dim=fc1_dim, fc2_dim=fc2_dim)\n self.q_target = DeepQNetwork(alpha=alpha, state_dim=state_dim, action_dim=action_dim,\n fc1_dim=fc1_dim, fc2_dim=fc2_dim)\n\n self.memory = ReplayBuffer(state_dim=state_dim, action_dim=action_dim,\n max_size=max_size, batch_size=batch_size)\n\n self.update_network_parameters(tau=1.0)\n\n def update_network_parameters(self, tau=None):\n if tau is None:\n tau = self.tau\n\n for q_target_params, q_eval_params in zip(self.q_target.parameters(), self.q_eval.parameters()):\n q_target_params.data.copy_(tau * q_eval_params + (1 - tau) * q_target_params)\n\n def remember(self, state, action, reward, state_, done):\n self.memory.store_transition(state, action, reward, state_, done)\n\n def choose_action(self, observation, isTrain=True):\n state = T.tensor([observation], dtype=T.float).to(device)\n actions = self.q_eval.forward(state)\n action = T.argmax(actions).item()\n\n if (np.random.random() < self.epsilon) and isTrain:\n action = np.random.choice(self.action_space)\n\n return action\n\n def learn(self):\n if not self.memory.ready():\n return\n\n states, actions, rewards, next_states, terminals = self.memory.sample_buffer()\n batch_idx = np.arange(self.batch_size)\n\n states_tensor = T.tensor(states, dtype=T.float).to(device)\n rewards_tensor = T.tensor(rewards, dtype=T.float).to(device)\n next_states_tensor = T.tensor(next_states, dtype=T.float).to(device)\n terminals_tensor = T.tensor(terminals).to(device)\n\n with T.no_grad():\n q_ = self.q_target.forward(next_states_tensor)\n q_[terminals_tensor] = 0.0\n target = rewards_tensor + self.gamma * T.max(q_, dim=-1)[0]\n q = self.q_eval.forward(states_tensor)[batch_idx, actions]\n\n loss = F.mse_loss(q, target.detach())\n self.q_eval.optimizer.zero_grad()\n loss.backward()\n self.q_eval.optimizer.step()\n\n self.update_network_parameters()\n self.epsilon = self.epsilon - self.eps_dec if self.epsilon > self.eps_min else self.eps_min\n\n def save_models(self, episode):\n self.q_eval.save_checkpoint(self.checkpoint_dir + 'Q_eval/DQN_q_eval_{}.pth'.format(episode))\n print('Saving Q_eval network 
successfully!')\n self.q_target.save_checkpoint(self.checkpoint_dir + 'Q_target/DQN_Q_target_{}.pth'.format(episode))\n print('Saving Q_target network successfully!')\n\n def load_models(self, episode):\n self.q_eval.load_checkpoint(self.checkpoint_dir + 'Q_eval/DQN_q_eval_{}.pth'.format(episode))\n print('Loading Q_eval network successfully!')\n self.q_target.load_checkpoint(self.checkpoint_dir + 'Q_target/DQN_Q_target_{}.pth'.format(episode))\n print('Loading Q_target network successfully!')\n\n\n\n" ]
[ [ "numpy.random.random", "torch.max", "numpy.random.choice", "torch.load", "numpy.arange", "torch.tensor", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available", "torch.argmax" ] ]
lapaniku/GAS
[ "e49ce302689af683da744cd172e0359c0ba0af86" ]
[ "examples/6a586378-063a-427c-92b2-87d6236615c6.py" ]
[ "# This program was generated by \"Generative Art Synthesizer\" \n# Generation date: 2021-11-28 02:06:28 UTC \n# GAS change date: 2021-11-28 01:31:12 UTC \n# GAS md5 hash: c291ffb9de6ad6dea37797c00163f591 \n# Python version: 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] \n# For more information visit: https://github.com/volotat/GAS\n\n#import python libraries \nimport os #OS version: default \nimport numpy as np #Numpy version: 1.19.5 \nfrom PIL import Image #PIL version: 8.1.2 \n\n#set initial params\nSIZE = 768 \nGRID_CHANNELS = 30 \n\ndef test_values(arr):\n if np.isnan(arr).any():\n raise Exception('Array has None elements!') \n \n if np.amin(arr) < -1 or np.amax(arr) > 1:\n raise Exception('Values went to far! [ %.2f : %.2f ]'%(np.amin(arr), np.amax(arr)) ) \n \n return arr\n\n#define grid transformation methods\n\ndef transit(x, t_indx, s_indx, alphas):\n res = x.copy()\n res[:,:,t_indx] = np.sum(x[:,:,s_indx] * alphas, axis = -1)\n return test_values(res.clip(-1,1)) \n\ndef sin(x, t_indx, s_indx, scale = 1, shift = 0): \n res = x.copy()\n res[:,:,t_indx] = np.sin(x[:,:,s_indx] * 0.5 * np.pi * scale + shift)\n return test_values(res) \n\ndef power(x, t_indx, s_indx, p = 1): \n res = x.copy()\n res[:,:,t_indx] = np.sign(x[:,:,s_indx]) * np.abs(x[:,:,s_indx]) ** p \n return test_values(res) \n\ndef magnitude(x, t_indx, s_indx, ord = 2): \n res = x.copy()\n res[:,:,t_indx] = np.linalg.norm(x[:,:,s_indx], axis = -1, ord = ord) / np.sqrt(len(s_indx))\n return test_values(res) \n\n#set initial grid\ngrid = np.zeros((SIZE, SIZE, GRID_CHANNELS))\n\nx = ((np.arange(SIZE)/(SIZE-1) - 0.5) * 2).reshape((1, SIZE)).repeat(SIZE, 0)\ny = ((np.arange(SIZE)/(SIZE-1) - 0.5) * 2).reshape((SIZE, 1)).repeat(SIZE, 1)\n\nphi = np.pi / 4\nrx = (x * np.cos(phi) - y * np.sin(phi)) / 1.5\nry = (x * np.cos(phi) + y * np.sin(phi)) / 1.5\n\n\nfor i in range(15):\n grid[:,:,i * 2 + 0] = x\n grid[:,:,i * 2 + 1] = y\n'''\n\ngrid[:,:,0 ] = x\ngrid[:,:,1 ] = y\ngrid[:,:,2 ] = rx\ngrid[:,:,3 ] = ry\n\ngrid[:,:, 4] = np.sin(x * 0.5 * np.pi)\ngrid[:,:, 5] = np.sin(y * 0.5 * np.pi)\ngrid[:,:, 6: 8] = magnitude(grid[:,:,4:6], [0, 1])\n\ngrid[:,:, 8] = np.sin(-x * 0.5 * np.pi)\ngrid[:,:, 9] = np.sin(-y * 0.5 * np.pi)\ngrid[:,:,10] = np.sin(rx * 0.5 * np.pi)\ngrid[:,:,11] = np.sin(ry * 0.5 * np.pi)\ngrid[:,:,12] = np.sin(-rx * 0.5 * np.pi)\ngrid[:,:,13] = np.sin(-ry * 0.5 * np.pi)\n\ngrid[:,:,14] = np.sin(x * 0.5 * np.pi* 2)\ngrid[:,:,15] = np.sin(y * 0.5 * np.pi* 2)\ngrid[:,:,16:18] = magnitude(grid[:,:,14:16], [0, 1])\n\ngrid[:,:,18] = np.cos(x * 0.5 * np.pi* 2)\ngrid[:,:,19] = np.cos(y * 0.5 * np.pi* 2)\ngrid[:,:,20:22] = magnitude(grid[:,:,18:20], [0, 1])\n\ngrid[:,:,22] = np.sin(rx * 0.5 * np.pi* 2)\ngrid[:,:,23] = np.sin(ry * 0.5 * np.pi* 2)\ngrid[:,:,24:26] = magnitude(grid[:,:,22:24], [0, 1])\n\ngrid[:,:,26] = np.cos(rx * 0.5 * np.pi* 2)\ngrid[:,:,27] = np.cos(ry * 0.5 * np.pi* 2)\ngrid[:,:,28:30] = magnitude(grid[:,:,26:28], [0, 1])\n'''\n'''\ngrid[:,:,30:32] = np.sin(x * 0.5 * np.pi * 3), np.sin(y * 0.5 * np.pi * 3)\ngrid[:,:,32:34] = magnitude(grid, [30, 31])\ngrid[:,:,34:36] = np.cos(x * 0.5 * np.pi * 3), np.cos(y * 0.5 * np.pi * 3)\ngrid[:,:,36:38] = magnitude(grid, [34, 35])\n\ngrid[:,:,38:40] = np.sin(rx * 0.5 * np.pi * 3), np.sin(ry * 0.5 * np.pi * 3)\ngrid[:,:,40:42] = magnitude(grid, [40, 41])\ngrid[:,:,42:44] = np.cos(rx * 0.5 * np.pi * 3), np.cos(ry * 0.5 * np.pi * 3)\ngrid[:,:,44:46] = magnitude(grid, [44, 45])\n'''\n\n#apply transformations to the grid\ngrid = 
transit(grid, 29, [19, 22], [0.162570065071097, 0.837429934928903])\ngrid = magnitude(grid, 12, [27, 0, 11, 8, 21, 25, 22], 2)\ngrid = power(grid, 16, 19, 0.6832593210243594)\ngrid = magnitude(grid, 5, [12, 29, 19, 16, 25, 5, 22, 20, 13, 18, 2, 17, 1, 7], 2)\ngrid = transit(grid, 19, [29, 17, 18, 24, 6, 12, 13, 11, 8, 20, 0], [0.11956496211881872, 0.05013356366149157, 0.13305054541369926, 0.15246500360275328, 0.05802002562963354, 0.04238582486905315, 0.19554926469073888, 0.08033490218765624, 0.04525393101315875, 0.03341790476201002, 0.08982407205098653])\ngrid = sin(grid, 29, 4, -2.803277175027569, 43.39456443062289)\ngrid = sin(grid, 23, 24, 2.0425954175290886, 11.358624030534827)\ngrid = magnitude(grid, 24, [0, 5, 18, 17, 29, 1, 10, 11, 14], 2)\ngrid = sin(grid, 3, 16, 0.08116001882496733, 29.607535899235273)\ngrid = transit(grid, 9, [24, 19], [0.14255129423463317, 0.8574487057653668])\ngrid = sin(grid, 3, 12, -2.4201168205485177, -42.76220889484386)\ngrid = transit(grid, 23, [0, 17, 16, 10, 1, 29, 12, 24, 11, 6, 23, 14, 2, 3], [0.04185852737464411, 0.0718958562209562, 0.04144628951866288, 0.06440992347220259, 0.05504490607061211, 0.06002707501087633, 0.14086245922498628, 0.05953422331122396, 0.13174771512588848, 0.09554676788043852, 0.054710845949554616, 0.07072254937205642, 0.046008773998673064, 0.06618408746922441])\ngrid = sin(grid, 28, 4, -1.1167240340353377, -33.51597872481756)\ngrid = sin(grid, 6, 16, -5.309685991010303, -73.84592367786468)\ngrid = sin(grid, 9, 23, -2.6796169840930895, -40.5412027819841)\ngrid = sin(grid, 15, 6, 1.6192227145410403, 18.29042695814111)\ngrid = sin(grid, 27, 3, 3.67033472109074, -92.21663905335483)\ngrid = transit(grid, 3, [4, 7, 29], [0.3386535705350276, 0.5538253330679384, 0.10752109639703403])\ngrid = sin(grid, 12, 28, 3.3754171464010443, 72.59735794386486)\ngrid = sin(grid, 28, 23, 3.9592736322225717, -28.872965332555125)\ngrid = magnitude(grid, 23, [16, 3, 27, 0, 21], 2)\ngrid = transit(grid, 14, [12, 6, 0, 10], [0.030935598884591263, 0.5285731990934327, 0.3402810463602381, 0.10021015566173799])\ngrid = transit(grid, 22, [19, 7, 5, 29, 6, 26], [0.14337063648845627, 0.24142817057917984, 0.11685279951452533, 0.023075679344775824, 0.2754286905791286, 0.19984402349393413])\ngrid = sin(grid, 26, 6, -2.4091211781495296, -82.64154311894532)\ngrid = magnitude(grid, 14, [3, 10, 14, 23, 26, 21, 20, 13, 17], 2)\ngrid = magnitude(grid, 7, [23, 6, 2], 2)\ngrid = sin(grid, 10, 22, 2.050315504251981, 89.5744631928493)\ngrid = transit(grid, 20, [25, 29, 1, 6, 12, 8, 14, 17, 23], [0.004899579789304808, 0.09314319527557183, 0.03998202780338693, 0.19272244068492897, 0.34501737224324885, 0.17740036381342622, 0.06353176938513716, 0.03203715570193849, 0.05126609530305695])\ngrid = sin(grid, 11, 20, 4.390960200726324, -71.44216611954899)\ngrid = magnitude(grid, 7, [20, 16, 1, 25, 13], 2)\ngrid = magnitude(grid, 22, [20, 27, 16, 11, 23, 15, 29, 25, 0, 17, 2, 5, 1, 28, 12], 2)\ngrid = transit(grid, 19, [20, 4, 3, 16], [0.1526830825248949, 0.7081439352898777, 0.06775906310079587, 0.07141391908443141])\ngrid = sin(grid, 26, 26, -3.0877394923548906, 44.03402898112404)\ngrid = transit(grid, 23, [15, 24, 12, 22, 19, 23], [0.2974634270268527, 0.18571837778194167, 0.22122718321511456, 0.05894778995186554, 0.19810172312674557, 0.03854149889747992])\ngrid = sin(grid, 11, 9, 1.0239782977935226, 65.26260230502231)\ngrid = sin(grid, 9, 25, -7.566790267054586, -75.63082272426975)\ngrid = transit(grid, 15, [9, 15, 26, 2, 24, 28, 19, 12, 5, 13, 1, 4, 16, 23, 20, 6, 10, 18], 
[0.06616516411967799, 0.09777590819145016, 0.0002712313941477737, 0.09746330541437898, 0.005397802149808619, 0.07718643014961299, 0.13148160411157372, 0.09102197762563803, 0.03185803109227711, 0.0241181740051075, 0.034195430141271195, 0.03951611394918786, 0.0787746792428292, 0.03692743163524459, 0.002994098366014297, 0.062803596094192, 0.08020173317182712, 0.041847289145760795])\ngrid = transit(grid, 4, [25, 29, 22, 1, 6, 9, 19, 21, 18, 16, 17, 5, 20, 0, 12], [0.08432611754036021, 0.009140664239356671, 0.08060088757913146, 0.06166245074953199, 0.1638729657005139, 0.034479801216239156, 0.014854982191717304, 0.08772065521432443, 0.043708056308515354, 0.11043876578842901, 0.12519722186516116, 0.023175558417975587, 0.01018347967163066, 0.1176477109575453, 0.032990682559567955])\ngrid = sin(grid, 12, 7, -1.7660206046047084, -85.28896741511835)\ngrid = transit(grid, 14, [18, 28, 1, 25, 26, 15, 16, 19, 12, 14, 5, 8], [0.061399029075051736, 0.16128653484720623, 0.04238018700257984, 0.07363074210463408, 0.09276563659827074, 0.044181324827153534, 0.1458446676143112, 0.002814862772849515, 0.12681141102429905, 0.09165683421119886, 0.05453631469851343, 0.10269245522393174])\ngrid = power(grid, 28, 29, 0.3779782940331584)\ngrid = sin(grid, 19, 27, -2.5705555933884487, 80.63298070706631)\ngrid = transit(grid, 25, [0, 5, 12, 29, 27, 7, 2, 25, 18, 8], [0.13649589534415188, 0.014678989258920187, 0.07847237192131681, 0.11124864055343385, 0.08219416634833716, 0.04507998114443801, 0.1618879569706191, 0.2700382467691338, 0.05609092564881936, 0.043812826040829804])\ngrid = sin(grid, 27, 5, 2.9386845297236146, -8.401158648822786)\ngrid = transit(grid, 9, [29, 10, 7, 18, 25, 11, 16, 21], [0.029167418654736797, 0.003595910474137072, 0.29287241571346795, 0.07022515471929672, 0.10158497813413986, 0.059246305191391915, 0.32077383459013076, 0.12253398252269886])\ngrid = power(grid, 4, 27, 2.403332922052619)\ngrid = sin(grid, 12, 27, -6.706458142837571, -67.86915645909208)\ngrid = sin(grid, 12, 16, -2.708811323493574, 61.963825635946876)\ngrid = sin(grid, 27, 21, 2.979990355413568, 55.608556726719144)\ngrid = transit(grid, 10, [25, 12], [0.917610969357756, 0.08238903064224394])\ngrid = sin(grid, 7, 7, 1.3211939890122422, 72.76020788877838)\ngrid = sin(grid, 14, 4, 0.5306507527772861, 80.17684312260022)\ngrid = sin(grid, 3, 10, -0.5503493938868814, -5.962185131409427)\ngrid = transit(grid, 15, [25, 28, 1, 5], [0.321600534827853, 0.10542445081098709, 0.3304333599084767, 0.2425416544526832])\ngrid = magnitude(grid, 20, [6, 9], 2)\ngrid = sin(grid, 18, 25, 1.3894004680099752, 89.25662087299591)\ngrid = transit(grid, 20, [4, 17, 22, 9, 2, 3], [0.28470977015474064, 0.11783602510587528, 0.14947881924125034, 0.07641898119264072, 0.299966212618196, 0.07159019168729713])\ngrid = sin(grid, 2, 12, -0.5867291269053801, 28.167611255741008)\ngrid = transit(grid, 16, [17, 27, 6, 23, 19, 25, 16, 9, 14, 4, 0], [0.10447719623269758, 0.048407918591629864, 0.007763166119990237, 0.0022140470040574907, 0.24849459533294363, 0.12023286519064905, 0.17871974770364935, 0.05735132208762337, 0.036326220968249515, 0.10867143758787537, 0.08734148318063453])\ngrid = transit(grid, 1, [7, 23, 26, 14, 20, 19, 28, 8, 9, 17, 22, 0, 5], [0.013121733291342785, 0.11301130736686454, 0.01352586294053668, 0.15913305810858402, 0.11915145281306491, 0.0006517312319511964, 0.008548387417477, 0.0904394241347426, 0.01789058436883307, 0.07037753745683703, 0.07076335166856433, 0.07800578985114522, 0.24537977935005661])\ngrid = sin(grid, 18, 28, -1.9841443767920823, 
-69.03014320058003)\ngrid = transit(grid, 5, [22, 19, 25, 27, 20, 21, 18, 3], [0.13269119455929568, 0.13927419514581135, 0.2353360212273103, 0.05358973946883631, 0.11709248299854554, 0.05695975943841826, 0.03345046365270227, 0.2316061435090803])\ngrid = sin(grid, 1, 9, -3.8143949239238193, -90.24439830058608)\ngrid = magnitude(grid, 22, [13, 26, 6, 9, 7, 15, 8, 21, 12, 25, 29, 5, 23, 19], 2)\ngrid = magnitude(grid, 7, [15, 10, 23], 2)\ngrid = transit(grid, 18, [29, 8, 1, 11, 13, 9, 27, 6, 21, 4, 10, 15, 19, 5, 0], [0.1772994331856414, 0.068125216107354, 0.18999349792890638, 0.019818681800181075, 0.04415831002215933, 0.03035314916143176, 0.019871427752056706, 0.17461556101263392, 0.043587710771764196, 0.037194038159689476, 0.05795222455290955, 0.022431635860234538, 0.07352074703380847, 0.020324565267279607, 0.020753801383949444])\ngrid = magnitude(grid, 28, [27, 11, 1, 5, 28], 2)\ngrid = sin(grid, 10, 29, 0.9754517253039042, 19.167473301905645)\ngrid = sin(grid, 12, 27, -2.8452733457996318, 79.15482610320453)\ngrid = transit(grid, 23, [23, 19, 27, 29, 5, 2, 20, 0, 6, 14, 28], [0.08798444677572927, 0.12029606201225304, 0.10947313847863878, 0.12859008053676993, 0.045403922186473065, 0.12432237963799758, 0.14016659493536382, 0.04300443803477972, 0.01734694652028419, 0.0981174832644981, 0.0852945076172126])\ngrid = magnitude(grid, 23, [12, 14, 19, 21, 9], 2)\ngrid = power(grid, 29, 5, 1.817083710798804)\ngrid = magnitude(grid, 2, [13, 6, 29, 24, 27, 21, 16, 14, 22, 4], 2)\ngrid = transit(grid, 8, [3], [1.0])\ngrid = sin(grid, 18, 29, 0.6389415083446274, 80.8749712491909)\ngrid = transit(grid, 14, [25, 2, 18, 8, 3, 15, 1, 16, 14, 5, 6, 13, 20, 0, 26, 9, 22, 28, 10, 17, 4, 29, 24, 11, 7, 23], [0.005871309937873514, 0.018003505494664063, 0.004367270790055393, 0.02131452056580914, 0.04892744693923117, 0.058471392811199306, 0.07048395159287357, 0.017318473991941724, 0.07994321420066938, 0.05394716631490228, 0.03519745217167912, 0.02235005759125536, 0.04715412242850838, 0.02747045659126205, 0.07155917229027325, 0.060822977475415284, 0.04361415578730645, 0.0354280883191885, 0.005739110717730895, 0.04491271555615977, 0.04349743113332699, 0.026095715559849145, 0.026899299768024996, 0.037327346757871395, 0.012878407330765023, 0.080405237882164])\ngrid = sin(grid, 10, 6, 1.615810670294585, 6.059030950147061)\ngrid = transit(grid, 4, [21], [1.0])\ngrid = transit(grid, 0, [9, 6, 21, 23, 10, 25, 5, 29], [0.008407947275370496, 0.22332672029591105, 0.17669528269181908, 0.061943871236512044, 0.00631614100316715, 0.04896761458648364, 0.219980694309923, 0.25436172860081346])\ngrid = transit(grid, 7, [28, 3, 8, 24, 5, 26, 16, 25, 20], [0.008076733658800364, 0.13788018636125093, 0.047745229040943256, 0.08590191794838145, 0.3359021344945731, 0.13627021446299625, 0.07199274602464636, 0.05807097072801425, 0.11815986728039397])\ngrid = magnitude(grid, 5, [13, 16], 2)\ngrid = sin(grid, 23, 19, -0.6582224493825697, 87.34462867779135)\ngrid = sin(grid, 28, 10, 2.201883125073067, 80.07621747819877)\ngrid = magnitude(grid, 10, [12, 0, 4, 21, 8, 20, 3, 27, 7, 13, 6], 2)\ngrid = magnitude(grid, 26, [12, 16, 6, 22, 27, 20, 14, 26, 19, 18, 8], 2)\ngrid = transit(grid, 21, [10, 1, 21, 23, 6, 22, 7, 24, 15], [0.14098959190193464, 0.16885649738040365, 0.21508280162861007, 0.08756191883026329, 0.06797141331659777, 0.040586793475855774, 0.07556426350656567, 0.13577049344872752, 0.06761622651104154])\ngrid = sin(grid, 19, 21, -1.7650391823704892, 89.53123311944441)\ngrid = sin(grid, 27, 20, 4.102907734657698, 
-18.506040345024942)\ngrid = sin(grid, 8, 16, -3.2047717871756047, 50.031074019769875)\ngrid = sin(grid, 1, 3, -1.2298315940257807, 67.92621901520556)\ngrid = transit(grid, 10, [5, 12, 6, 7, 21], [0.04017259629004803, 0.035935993838594436, 0.38852998766486463, 0.39464597353448644, 0.14071544867200647])\ngrid = transit(grid, 17, [9, 22, 0, 8], [0.07232347283831883, 0.23909484436189507, 0.16476406248235922, 0.523817620317427])\ngrid = sin(grid, 11, 23, 3.4291605572557367, 77.65408388973503)\ngrid = sin(grid, 20, 27, 1.9438852600878178, -69.26160333661483)\ngrid = transit(grid, 19, [13, 7, 14, 12, 29], [0.15078244000703844, 0.30329951250855647, 0.052826921314074654, 0.014009457594495888, 0.47908166857583445])\ngrid = magnitude(grid, 4, [13, 5, 9, 16, 11, 6, 24, 14, 12, 1, 3, 22, 20, 8, 7, 15], 2)\ngrid = power(grid, 5, 12, 0.32534340921919336)\ngrid = sin(grid, 10, 18, -1.5546372261064723, 79.40807200485779)\ngrid = transit(grid, 0, [2, 17, 23, 0, 9, 10, 6, 20, 26, 22, 25, 11, 27, 21, 13, 12, 1, 16], [0.04666741358409691, 0.10041235530271467, 0.13656890415482237, 0.0505888093029676, 0.07654195022307062, 0.18419004471071113, 0.05046245679380782, 0.007552503418946401, 0.0004481713476168337, 0.011885682921671083, 0.032088980266198504, 0.0023404498982659153, 0.03348183036453658, 0.045952614669238355, 0.10372072735870042, 0.01789271596791753, 0.04799484234941445, 0.05120954736530277])\ngrid = transit(grid, 5, [25, 27], [0.5725941398341273, 0.4274058601658726])\ngrid = transit(grid, 19, [15, 29, 23, 21, 11, 10, 20, 4, 17, 2, 0, 13, 3, 1, 5, 8], [0.031537796589242675, 0.02689608568216389, 0.006352970412025167, 0.02945197836138472, 0.004733254055032123, 0.031841731262449186, 0.09701814809592517, 0.021578470501320998, 0.0071680768933244385, 0.11952705984922679, 0.05536518282979036, 0.06581861347697791, 0.2343306944232139, 0.10779797912646302, 0.09300120880000046, 0.06758074964145924])\ngrid = sin(grid, 11, 0, 2.563909435379265, 0.4450018649816627)\ngrid = magnitude(grid, 27, [16, 11, 8, 0, 28, 12, 17, 15, 18, 20, 26], 2)\ngrid = sin(grid, 19, 21, -3.4132246278785883, 84.16701903091374)\ngrid = sin(grid, 28, 13, -2.3675718627702755, 41.175741022703875)\ngrid = transit(grid, 7, [10, 9, 27, 23, 19, 5, 1, 22, 7], [0.14363062171830737, 0.01687188812140151, 0.13196113638415463, 0.17866921525288296, 0.07172903294369104, 0.1268434984434265, 0.09262190525507281, 0.13843366504033602, 0.09923903684072709])\ngrid = sin(grid, 1, 17, 0.4190217510777763, -96.31752118334663)\ngrid = sin(grid, 4, 25, -3.0130782974573114, 0.045638670109738655)\ngrid = sin(grid, 23, 16, -6.362468253360612, 24.730444687537883)\ngrid = sin(grid, 17, 14, -2.3747658845203916, -57.23440657206675)\ngrid = sin(grid, 19, 19, 2.4592230816940326, -51.76853764043066)\ngrid = magnitude(grid, 26, [4], 2)\ngrid = sin(grid, 0, 26, 2.879410066683457, 5.223173332129804)\ngrid = sin(grid, 10, 6, -3.2555765761277127, -17.443575197843472)\ngrid = transit(grid, 25, [24, 2, 25, 0, 12, 4], [0.03568795619526225, 0.08500737200701228, 0.05240229364632595, 0.603980978240824, 0.17712678127987705, 0.04579461863069845])\ngrid = sin(grid, 6, 29, -0.12051802831906497, 89.64443842624468)\ngrid = transit(grid, 5, [27, 16, 23, 9, 17], [0.22131895230663756, 0.07144315447485797, 0.49736096611646524, 0.13402807138531572, 0.07584885571672335])\ngrid = transit(grid, 12, [22, 8, 29, 16], [0.25792429467789524, 0.25168913008212207, 0.38751847922685195, 0.10286809601313074])\ngrid = magnitude(grid, 24, [12], 2)\ngrid = sin(grid, 21, 18, 6.904882453110925, 43.76686597000625)\ngrid 
= sin(grid, 27, 12, -2.3149706703321784, 91.4634229451533)\ngrid = transit(grid, 11, [18, 8, 23], [0.9456048289219839, 0.02282944678495521, 0.031565724293060864])\ngrid = sin(grid, 3, 22, -5.427035197241231, 70.63770520279803)\ngrid = sin(grid, 27, 15, -0.8306409707765449, 16.388610614890496)\ngrid = magnitude(grid, 22, [19, 20, 0, 14, 29, 16, 13, 11, 12, 7], 2)\ngrid = sin(grid, 22, 2, -0.2131858223375026, 23.110302271816437)\ngrid = transit(grid, 15, [26, 17, 18, 3, 20, 6], [0.26298407289730785, 0.036929318879447975, 0.21956318893577373, 0.12140448131206344, 0.2932362214654605, 0.06588271650994641])\ngrid = transit(grid, 6, [10, 0, 14, 4, 16, 26, 29], [0.3266526570756889, 0.010367316493219989, 0.06038405155138366, 0.18542143850276785, 0.15350917236048142, 0.1066459060285463, 0.1570194579879119])\ngrid = transit(grid, 3, [29, 9, 12, 18, 10, 17], [0.30897372209328766, 0.10630103874152365, 0.15658027364196273, 0.29474023685015555, 0.1326016707345515, 0.0008030579385190207])\ngrid = magnitude(grid, 11, [16, 7, 25, 22, 3, 17, 13, 0, 12, 27], 2)\ngrid = sin(grid, 13, 1, 2.9658129882147084, -41.317540719432344)\ngrid = magnitude(grid, 2, [19, 13, 16, 27, 26], 2)\ngrid = sin(grid, 1, 8, 6.699130217836646, 5.293135687331116)\ngrid = sin(grid, 0, 27, -6.580745881619362, 70.25836976864827)\ngrid = transit(grid, 24, [5], [1.0])\ngrid = sin(grid, 10, 7, -3.7620909835549288, -17.85297224969564)\ngrid = sin(grid, 18, 17, 6.374775580070441, -82.34320143877852)\ngrid = transit(grid, 27, [21, 3, 19, 27, 10, 17, 13], [0.03817904844946292, 0.12948414697169902, 0.02053094019023183, 0.17470975944365325, 0.2705479342577574, 0.164395301382941, 0.20215286930425458])\n\n#create color space \ncolors = np.zeros((6, 3)) \ncolors[0] = [51, 169, 182] \ncolors[1] = [8, 23, 138] \ncolors[2] = [93, 97, 239] \ncolors[3] = [98, 25, 66] \ncolors[4] = [60, 71, 233] \ncolors[5] = [191, 187, 173] \n\nres = np.zeros((SIZE, SIZE, 3)) \nres += (grid[:,:,0:0+1].repeat(3, -1) + 1) / 2 * colors[0] \nres += (grid[:,:,1:1+1].repeat(3, -1) + 1) / 2 * colors[1] \nres += (grid[:,:,2:2+1].repeat(3, -1) + 1) / 2 * colors[2] \nres += (grid[:,:,3:3+1].repeat(3, -1) + 1) / 2 * colors[3] \nres += (grid[:,:,4:4+1].repeat(3, -1) + 1) / 2 * colors[4] \nres += (grid[:,:,5:5+1].repeat(3, -1) + 1) / 2 * colors[5] \n\nres = res / colors.sum(0) * 255 \n\n#save results \nim = Image.fromarray(np.uint8(res))\nim.save(os.path.basename(__file__) + '.png')\n'''\n#save layers\nimg = np.zeros((SIZE * 6, SIZE * 6))\nfor j in range(GRID_CHANNELS):\n x = j % 6\n y = j // 6\n img[x*SIZE:(x + 1)*SIZE, y*SIZE:(y+1)*SIZE] = grid[:,:,j]\n\nimg = (img + 1) * 127.5 \nim = Image.fromarray(np.uint8(img))\nim.save(os.path.basename(__file__) + '_layers.png')\n'''\n" ]
[ [ "numpy.amax", "numpy.abs", "numpy.isnan", "numpy.uint8", "numpy.amin", "numpy.arange", "numpy.linalg.norm", "numpy.cos", "numpy.sin", "numpy.sign", "numpy.zeros", "numpy.sum" ] ]
PankajPatil1/SageMaker-Deployment
[ "be608dd09e82098fc87f2522a380472773dd9a37" ]
[ "Project/serve/predict.py" ]
[ "import argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\nfrom utils import review_to_words, convert_and_pad\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the store model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\ndef input_fn(serialized_input_data, content_type):\n print('Deserializing the input data.')\n if content_type == 'text/plain':\n data = serialized_input_data.decode('utf-8')\n return data\n raise Exception('Requested unsupported ContentType in content_type: ' + content_type)\n\ndef output_fn(prediction_output, accept):\n print('Serializing the generated output.')\n return str(prediction_output)\n\ndef predict_fn(input_data, model):\n print('Inferring sentiment of input data.')\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n if model.word_dict is None:\n raise Exception('Model has not been loaded properly, no word_dict.')\n \n # TODO: Process input_data so that it is ready to be sent to our model.\n # You should produce two variables:\n # data_X - A sequence of length 500 which represents the converted review\n # data_len - The length of the review\n processed_review = review_to_words(input_data)\n data_X, data_len = convert_and_pad(model.word_dict,processed_review) \n \n # Using data_X and data_len we construct an appropriate input tensor. Remember\n # that our model expects input data of the form 'len, review[500]'.\n data_pack = np.hstack((data_len, data_X))\n data_pack = data_pack.reshape(1, -1)\n \n data = torch.from_numpy(data_pack)\n data = data.to(device)\n\n # Make sure to put the model into evaluation mode\n model.eval()\n\n # TODO: Compute the result of applying the model to the input data. The variable `result` should\n # be a numpy array which contains a single integer which is either 1 or 0\n\n with torch.no_grad():\n output = model(data)\n result = np.round(output.numpy())\n \n return result\n\n return result\n" ]
[ [ "numpy.hstack", "torch.load", "torch.from_numpy", "torch.no_grad", "torch.cuda.is_available" ] ]
NightmareNyx/pygcn
[ "3972f167ce7fcc41cb21284d75816dfd9a15f7ef" ]
[ "pygcn/layers.py" ]
[ "import math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.module import Module\nfrom torch.nn.parameter import Parameter\n\n\nclass GraphConvolution(Module):\n \"\"\"\n Simple GCN layer, similar to https://arxiv.org/abs/1609.02907\n \"\"\"\n\n def __init__(self, in_features, out_features, bias=True, init_method='xavier'):\n super(GraphConvolution, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.FloatTensor(in_features, out_features))\n if bias:\n self.bias = Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters(method=init_method)\n\n def reset_parameters(self, method='xavier'):\n if method == 'uniform':\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n elif method == 'kaiming':\n nn.init.kaiming_normal_(self.weight.data, a=0, mode='fan_in')\n if self.bias is not None:\n nn.init.constant_(self.bias.data, 0.0)\n elif method == 'xavier':\n nn.init.xavier_normal_(self.weight.data, gain=0.02) # Implement Xavier Uniform\n if self.bias is not None:\n nn.init.constant_(self.bias.data, 0.0)\n else:\n raise NotImplementedError\n\n def forward(self, input, adj):\n support = torch.mm(input, self.weight)\n output = torch.spmm(adj, support)\n if self.bias is not None:\n return output + self.bias\n else:\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ')'\n\n\nclass GraphAttention(nn.Module):\n \"\"\"\n Simple GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(GraphAttention, self).__init__()\n self.dropout = dropout\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n if torch.cuda.is_available():\n param_type = torch.cuda.FloatTensor\n else:\n param_type = torch.FloatTensor\n\n self.W = nn.Parameter(nn.init.xavier_normal_(torch.Tensor(in_features, out_features).type(param_type),\n gain=np.sqrt(2.0)), requires_grad=True)\n self.a1 = nn.Parameter(nn.init.xavier_normal_(torch.Tensor(out_features, 1).type(param_type),\n gain=np.sqrt(2.0)), requires_grad=True)\n self.a2 = nn.Parameter(nn.init.xavier_normal_(torch.Tensor(out_features, 1).type(param_type),\n gain=np.sqrt(2.0)), requires_grad=True)\n\n self.leaky_relu = nn.LeakyReLU(self.alpha)\n\n def forward(self, input, adj):\n h = torch.mm(input, self.W)\n N = h.size()[0]\n\n f_1 = torch.mm(h, self.a1)\n f_2 = torch.mm(h, self.a2)\n e = self.leaky_relu(f_1 + f_2.transpose(0, 1))\n\n zero_vec = -9e15 * torch.ones_like(e)\n attention = torch.where(adj > 0, e, zero_vec)\n attention = F.softmax(attention, dim=1)\n attention = F.dropout(attention, self.dropout, training=self.training)\n h_prime = torch.matmul(attention, h)\n\n if self.concat:\n return F.elu(h_prime)\n else:\n return h_prime\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ')'\n\n\nclass SpecialSpmmFunction(torch.autograd.Function):\n \"\"\"Special function for only sparse region backpropataion layer.\"\"\"\n\n @staticmethod\n def forward(ctx, indices, values, shape, b):\n assert indices.requires_grad == False\n a = torch.sparse_coo_tensor(indices, 
values, shape)\n ctx.save_for_backward(a, b)\n ctx.N = shape[0]\n return torch.matmul(a, b)\n\n @staticmethod\n def backward(ctx, grad_output):\n a, b = ctx.saved_tensors\n grad_values = grad_b = None\n if ctx.needs_input_grad[1]:\n grad_a_dense = grad_output.matmul(b.t())\n edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]\n grad_values = grad_a_dense.view(-1)[edge_idx]\n if ctx.needs_input_grad[3]:\n grad_b = a.t().matmul(grad_output)\n return None, grad_values, None, grad_b\n\n\nclass SpecialSpmm(nn.Module):\n def forward(self, indices, values, shape, b):\n return SpecialSpmmFunction.apply(indices, values, shape, b)\n\n\nclass SpGraphAttentionLayer(nn.Module):\n \"\"\"\n Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(SpGraphAttentionLayer, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))\n nn.init.xavier_normal_(self.W.data, gain=1.414)\n\n self.a = nn.Parameter(torch.zeros(size=(1, 2 * out_features)))\n nn.init.xavier_normal_(self.a.data, gain=1.414)\n\n self.dropout = nn.Dropout(dropout)\n self.leaky_relu = nn.LeakyReLU(self.alpha)\n self.special_spmm = SpecialSpmm()\n\n def forward(self, input, adj):\n N = input.size()[0]\n edge = adj.nonzero().t()\n\n h = torch.mm(input, self.W)\n # h: N x out\n assert not torch.isnan(h).any()\n\n # Self-attention on the nodes - Shared attention mechanism\n edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()\n # edge: 2*D x E\n\n edge_e = torch.exp(-self.leaky_relu(self.a.mm(edge_h).squeeze()))\n assert not torch.isnan(edge_e).any()\n # edge_e: E\n\n e_rowsum = self.special_spmm(edge, edge_e, torch.Size([N, N]), torch.ones(size=(N, 1)).cuda())\n # e_rowsum: N x 1\n\n edge_e = self.dropout(edge_e)\n # edge_e: E\n\n h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)\n assert not torch.isnan(h_prime).any()\n # h_prime: N x out\n\n h_prime = h_prime.div(e_rowsum)\n # h_prime: N x out\n assert not torch.isnan(h_prime).any()\n\n if self.concat:\n # if this layer is not last layer,\n return F.elu(h_prime)\n else:\n # if this layer is last layer,\n return h_prime\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n" ]
[ [ "torch.nn.functional.softmax", "numpy.sqrt", "torch.nn.functional.dropout", "torch.zeros", "torch.cat", "torch.sparse_coo_tensor", "torch.FloatTensor", "torch.cuda.is_available", "torch.where", "torch.nn.Dropout", "torch.mm", "torch.Size", "torch.ones", "torch.nn.functional.elu", "torch.ones_like", "torch.nn.init.constant_", "torch.nn.init.xavier_normal_", "torch.nn.LeakyReLU", "torch.isnan", "torch.Tensor", "torch.matmul", "torch.spmm", "torch.nn.init.kaiming_normal_" ] ]
jtrunnels91/ModularEstimator
[ "1088f91440abd5a82d094311f51d0250ecca52e1" ]
[ "modest/substates/correlationvector.py" ]
[ "## @file CorrelationVector\n# This package contains the #CorrelationVector class\n\nimport numpy as np\n#from numpy import sin, cos, arcsin, arccos, arctan2, square, sqrt, abs, power\nimport matplotlib.pyplot as plt\nfrom . import substate\nfrom .. modularfilter import ModularFilter\nfrom . oneDimensionalPositionVelocity import oneDPositionVelocity\nfrom .. signals.oneDimensionalObject import oneDObjectMeasurement\nfrom .. utils import covarianceContainer\nfrom scipy.linalg import block_diag\nfrom scipy.special import factorial\nfrom math import isnan\n\n## @class CorrelationVector\n# @brief CorrelationVector estimates the correlation vector and delay between\n# a signal and a time-delayed measurement of that signal\n#\n# @details\n# This class contains an estimator which estimates the correlation vector\n# between a signal (the #__trueSignal__) and measurements of that signal. This\n# correlation vector is then used to estimate the delay between the\n# #__trueSignal__ and the measurements of that signal.\n#\n# The estimator in this class currently assumes that the signal source is\n# \"distant,\" or infinitely far away. This implies that the unit vector to the\n# signal source is perfectly known, and not changing. A later implementation\n# could include the option of having a non-distant signal source, in which the\n# unit vector is changing as a function of position and therefore uncertain.\n#\n# @note This class is essentially an implementation of the estimator presented in\n# <a href=\"https://doi.org/10.2514/1.G002650\">\n# Recursive Range Estimation Using Astrophysical Signals of Opportunity</a>,\n# J. Runnels, D. Gebre, Journal of Guidance, Control and Dynamics, 2017. Some\n# equations from the paper are included in the class documentation for\n# reference. A more detailed discussion and derivation of the estimator can\n# be found in the journal article..\n\nclass CorrelationVector(substate.SubState):\n \n ## @fun #__init__ is responsible for initializing a correlation vector\n # estimator\n #\n # @details The primary function of the #__init__ method is to initialize\n # the correlation vector estimator, and store the relevant user inputs. A\n # few key user inputs are required in order to initialize the filter.\n # Additionally, because the algorithm is relatively complicated, there are\n # a number of optional tuning parameters which may be inputed at\n # initialization.\n #\n # In general, the parameters which are required inputs are the ones that\n # are critical for initialization of the filter, and should not be changed\n # during the course of the filter's lifetime. These inputs are stored as\n # \"private\" variables; indicating that the should not be changed during\n # the object's lifetime.\n #\n # The optional inputs, on the other hand, are inputs which are used in the\n # estimation functions (#timeUpdate, #getMeasurementMatrices, etc.).\n # These parameters could conceivably be changed during the lifetime of the\n # filter without causing problems, and the user may want to change them\n # depending on external factors. These parameters are initalized with\n # default values, and stored as public variables that the user can in\n # theory change.\n #\n # There are also a set of class variables which are publicly accessable\n # and which hold the most recent state estimate. These exist primarily\n # for convenience, and are never actually used within the class.\n # Modifying them will have no affect on the state estimates. 
The only way\n # to modify a state estimate is through the #storeStateVector method.\n #\n # The #__init__ method also checks the user inputs to ensure that they are\n # consistent with how they will be used in the class (where applicable).\n #\n # The trueSignal input is checked to see whether it has the following\n # methods:\n # - flux()\n # - signalID()\n # - unitVec()\n #\n # @param trueSignal An object that describes the signal for which\n # correlation should be estimated.\n # @param filterOrder The number of bins or \"taps\" in the correlation vector\n # @param dT The sampling period, or time-step between bins in the\n # correlation vector\n #\n # @param #t (optional) The initial starting time. If no value is passed,\n # initialized to zero by default.\n # @param #correlationVector (optional) The initial value of the\n # correlation vector. If not supplied, the correlation vector will be\n # initialized based on the filter #__dT__ and maximum flux of\n # the #__trueSignal__.\n # @param #correlationVectorCovariance (optional) The initial value of the\n # correlation vector estimate covariance. If not supplied, the covariance\n # matrix will be initialized based on the filter #__dT__ and maximum flux\n # of #__trueSignal__.\n # @param #signalDelay (optional) The initial estimate of delay between\n # the #__trueSignal__ and the signal measurements. If not supplied,\n # #signalDelay is initialized to zero.\n # @param #delayVar (optional) The variance of the estimate of delay\n # @param #aPriori (optional) Indicates whether initial estimates are a\n # priori or a posteriori. Default=True\n #\n # @param #centerPeak (optional) Boolean indicating whether the correlation\n # vector should be \"shifted\" after each update to keep the peak centered\n # at the zero index. Default is True.\n # @param #peakFitPoints (optional) Number of points used on either side of\n # max for quadratic fit in #computeSignalDelay. Minimum is 1, default is 1.\n # @param #processNoise (optional) Scalar term of additional process noise\n # added to covariance matrix in time update. Default is 1e-12\n # @param #measurementNoiseScaleFactor (optional) Scale factor to inflate\n # the measurement noise. 
Default is 1.\n def __init__(\n self,\n trueSignal,\n filterOrder,\n dT,\n t=0,\n correlationVector=None,\n correlationVectorCovariance=None,\n signalTDOA=0, \n TDOAVar=0,\n aPriori=True,\n centerPeak=True,\n peakFitPoints=1,\n processNoise=1e-12,\n measurementNoiseScaleFactor=1,\n peakLockThreshold=1,\n covarianceStorage='covariance',\n internalNavFilter=None,\n navProcessNoise=1,\n tdoaStdDevThreshold=None,\n velStdDevThreshold=None,\n tdoaNoiseScaleFactor=None,\n velocityNoiseScaleFactor=None,\n storeLastStateVectors=0,\n vInitial=None,\n aInitial=None,\n gradInitial=None,\n peakEstimator='EK'\n ):\n print('updated correlation filter')\n self.peakLockThreshold = peakLockThreshold\n self.peakCenteringDT = 0\n \n self.peakOffsetFromCenter = 0\n\n self.navProcessNoise = navProcessNoise\n \"\"\"\n This is the default process noise that is injected into the navigation states as noise in the derivative of the highest order state.\n \"\"\"\n \n ## @brief #__unitVecToSignal__ is a unit vector which points to the signal\n # source\n self.__unitVecToSignal__ = trueSignal.unitVec()\n \n ## @brief #__trueSignal__ is a signal object that contains the \"true\"\n # signal for which the correlation vector is being estimated\n self.__trueSignal__ = trueSignal\n\n ## @brief #__filterOrder__ is the number of \"taps\" in the estimated\n # correlation vector, #correlationVector.\n self.__filterOrder__ = filterOrder\n\n ## @brief #__dT__ is the \"sample period\" or \"bin size\" of the estimated\n # correlation vector\n self.__dT__ = dT\n\n ## @brief #t The current time\n self.t = t\n\n ## @brief #aPriori Indicates whether the current state vector is the\n # result of a time update (#aPriori = True) or a measurement update\n # (#aPriori = False)\n self.aPriori = aPriori\n\n if correlationVector is None:\n correlationVector = (\n np.ones(self.__filterOrder__) *\n self.__trueSignal__.peakAmplitude * self.__dT__\n )\n ## @brief #correlationVector is the current estimate of the\n # correlation vector between the incoming signal measurements and the\n # #__trueSignal__\n self.correlationVector = correlationVector\n\n if correlationVectorCovariance is None:\n if covarianceStorage == 'covariance':\n correlationVectorCovariance = (\n np.eye(self.__filterOrder__) *\n np.square(self.__trueSignal__.peakAmplitude * self.__dT__)\n )\n elif covarianceStorage == 'cholesky':\n correlationVectorCovariance = (\n np.eye(self.__filterOrder__) *\n self.__trueSignal__.peakAmplitude * self.__dT__\n )\n # Store the correlation vector covariance in a container\n ## @brief #correlationVectorCovariance is the covariance matrix of the\n # correlation vector estimate, #correlationVector\n self.correlationVectorCovariance = correlationVectorCovariance\n\n ## @brief #signalDelay is the current estimate of the delay between\n # the incoming signal measurements and the #__trueSignal__\n self.signalTDOA = signalTDOA\n \n ## @brief #delayVar is the variance of the signal delay estimate\n # #signalDelay\n self.TDOAVar = TDOAVar\n \n ## @brief #centerPeak indicates whether the correlation vector is\n # shifted to maintain the peak at the middle tap\n self.centerPeak = centerPeak\n\n ## @brief #peakLock indicates whether the current estimate of\n # correlation vector and peak location is accurate enough to \"know\"\n # that we've locked on to the correct peak.\n self.peakLock = False\n\n ## @brief #peakFitPoints is a variable which controls the number of\n # points used for quadratically estimating the location of the\n # correlation vector 
peak\n self.peakFitPoints = peakFitPoints\n\n ## @brief #processNoise is the scalar value used to generate an\n # additional process noise term in #timeUpdate.\n self.processNoise = processNoise\n\n ## @brief #measurementNoiseScaleFactor is a factor used to scale the\n # measurement noise matrix. The default value is 1 (no scaling).\n self.measurementNoiseScaleFactor = measurementNoiseScaleFactor\n\n \n self.peakEstimator = peakEstimator\n \"\"\"\n String that determines which algorithm is used to estimate peak. Use either EK (extended Kalman Filter) or UK (Unscented)\n \"\"\"\n \n self.__halfLength__ = int(np.ceil(self.__filterOrder__ / 2))\n self.__halfLengthSeconds__ = self.__halfLength__ * self.__dT__\n\n xAxis = np.linspace(0, self.__filterOrder__-1, self.__filterOrder__)\n self.xAxis = xAxis * self.__dT__\n \n self.__xVec__ = np.linspace(\n 0, \n self.peakFitPoints * 2, \n (self.peakFitPoints * 2) + 1\n )\n\n self.internalNavFilter = internalNavFilter\n print(internalNavFilter)\n if self.internalNavFilter == 'none':\n self.internalNavFilter = None\n self.INF_type = 'none'\n elif self.internalNavFilter == 'deep':\n self.INF_type = 'deep'\n elif self.internalNavFilter:\n self.INF_type = 'external'\n else:\n self.internalNavFilter = None\n self.INF_type = 'none'\n\n stateVector = correlationVector\n svCovariance = correlationVectorCovariance\n \n if self.INF_type == 'deep':\n if not vInitial:\n raise ValueError('In order to use the deep internal navigation filter, you must initialize. Filter expects to receive at least vInitial, but received None')\n\n self.velocity = vInitial['value']\n self.velocityStdDev = np.sqrt(vInitial['var'])\n if not aInitial:\n self.navVectorLength = 1\n navVector = np.zeros(1)\n navVector[0] = vInitial['value']\n\n navVar = np.zeros([1,1])\n navVar[0,0] = vInitial['var']\n \n elif not gradInitial:\n self.acceleration = aInitial['value']\n self.accelerationStdDev = np.sqrt(aInitial['var'])\n \n self.navVectorLength = 2\n navVector = np.zeros(2)\n navVector[0] = vInitial['value']\n navVector[1] = aInitial['value']\n\n navVar = np.zeros([2,2])\n navVar[0,0] = vInitial['var']\n navVar[1,1] = aInitial['var']\n else:\n self.acceleration = aInitial['value']\n self.accelerationStdDev = np.sqrt(aInitial['var'])\n self.gradient = gradInitial['value']\n self.gradientStdDev = np.sqrt(gradInitial['var'])\n \n self.navVectorLength = 3\n navVector = np.zeros(3)\n navVector[0] = vInitial['value']\n navVector[1] = aInitial['value']\n navVector[2] = gradInitial['value']\n\n navVar = np.zeros([3,3])\n navVar[0,0] = vInitial['var']\n navVar[1,1] = aInitial['var']\n navVar[2,2] = gradInitial['var']\n \n stateVector = np.append(stateVector,navVector)\n svCovariance = block_diag(svCovariance, navVar)\n\n svCovariance = covarianceContainer(\n svCovariance,\n covarianceStorage\n )\n\n self.mostRecentF = np.eye(self.__filterOrder__)\n self.stateVector = stateVector\n super().__init__(\n stateDimension=len(stateVector),\n stateVectorHistory={\n 't': t,\n 'stateVector': stateVector,\n 'covariance': svCovariance,\n 'aPriori': aPriori,\n 'signalTDOA': signalTDOA,\n 'TDOAVar': TDOAVar,\n 'xAxis': self.xAxis,\n 'stateVectorID': -1\n },\n storeLastStateVectors=storeLastStateVectors\n )\n\n self.tdoaStdDevThreshold = tdoaStdDevThreshold\n self.velStdDevThreshold = velStdDevThreshold\n\n self.tdoaNoiseScaleFactor = tdoaNoiseScaleFactor\n self.velocityNoiseScaleFactor = velocityNoiseScaleFactor\n\n if self.INF_type == 'external':\n self.navState = 
self.internalNavFilter.subStates['oneDPositionVelocity']['stateObject']\n return\n\n\n ##\n # @name Mandatory SubState Functions\n # The following functions are required in order for this class to be used\n # as a substate in ModularFilter. The inside of the functions may be\n # changed or updated, but their \"black box\" behavior must remain the\n # same; i.e. they must still perform the same essential functions and\n # return the same things.\n # @{\n\n ## @fun #storeStateVector stores an updated estimate of the state vector\n def storeStateVector(\n self,\n svDict\n ):\n # Unpack updated state vector values\n self.t = svDict['t']\n self.aPriori = svDict['aPriori']\n\n # Compute new estimate of delay based on new state vector, store in\n # svDict and local attributes\n if not svDict['aPriori']:\n self.correlationVector = svDict['stateVector'][0:self.__filterOrder__]\n self.correlationVectorCovariance = svDict['covariance']\n self.stateVector = svDict['stateVector']\n\n if self.peakEstimator == 'UK':\n tdoaDict = self.estimateSignalTDOA_UT(\n self.correlationVector,\n self.correlationVectorCovariance\n )\n\n elif self.peakEstimator == 'EK':\n tdoaDict = self.estimateSignalTDOA_EK(\n self.correlationVector,\n self.correlationVectorCovariance\n )\n else:\n raise ValueError('Unrecougnized peak finding algorithm %s' %self.peakEstimator)\n \n newTDOA = (\n (\n tdoaDict['meanTDOA'] \n ) *\n self.__dT__\n ) + self.peakCenteringDT\n \n newTDOAVar = tdoaDict['varTDOA'] * np.square(self.__dT__)\n if not isnan(newTDOA) and not isnan(newTDOAVar):\n self.signalTDOA = newTDOA\n self.TDOAVar = newTDOAVar\n\n svDict['signalTDOA'] = self.signalTDOA\n svDict['TDOAVar'] = self.TDOAVar\n\n if self.INF_type == 'external':\n if (\n (np.sqrt(self.TDOAVar) < (self.tdoaStdDevThreshold))\n or (self.tdoaStdDevThreshold == 0)\n ):\n self.internalNavFilter.measurementUpdateEKF(\n {'position': {\n 'value': self.signalTDOA,\n 'var': self.TDOAVar * self.tdoaNoiseScaleFactor\n }},\n 'oneDPositionVelocity'\n )\n else:\n self.internalNavFilter.measurementUpdateEKF(\n {}, ''\n )\n \n if self.peakLock is True and self.centerPeak is True:\n self.peakOffsetFromCenter = tdoaDict['meanTDOA'] - self.__halfLength__ + 1\n # self.peakOffsetFromCenter = np.mod(tdoaDict['meanTDOA'], self.__dT__)\n # print(self.peakOffsetFromCenter)\n else:\n self.peakOffsetFromCenter = 0\n\n else:\n \n# if self.peakLock is True and self.centerPeak is True:\n# svDict['stateVector'] = self.correlationVector\n# else:\n \n # self.correlationVector = svDict['stateVector']\n # if self.peakOffsetFromCenter != 0:\n # FLDict = self.buildFLMatrices(\n # -self.peakOffsetFromCenter*self.__dT__,\n # self.correlationVector\n # )\n # self.correlationVector = FLDict['F'].dot(self.correlationVector)\n # self.peakOffsetFromCenter = 0\n \n # self.correlationVector = svDict['stateVector'][0:self.__filterOrder__]\n self.correlationVector = self.mostRecentF.dot(self.correlationVector)\n svDict['stateVector'][0:self.__filterOrder__] = self.correlationVector\n self.stateVector = svDict['stateVector']\n self.correlationVectorCovariance = svDict['covariance']\n\n if self.peakEstimator == 'UK':\n tdoaDict = self.estimateSignalTDOA_UT(\n self.correlationVector,\n self.correlationVectorCovariance\n )\n\n elif self.peakEstimator == 'EK':\n tdoaDict = self.estimateSignalTDOA_EK(\n self.correlationVector,\n self.correlationVectorCovariance\n )\n else:\n raise ValueError('Unrecougnized peak finding algorithm %s' %self.peakEstimator)\n # newTDOA = (\n # (\n # tdoaDict['meanTDOA'] \n # 
) *\n # self.__dT__\n # ) + self.peakCenteringDT\n \n newTDOAVar = tdoaDict['varTDOA'] * np.square(self.__dT__)\n\n # self.signalTDOA = newTDOA\n self.TDOAVar = newTDOAVar\n \n svDict['signalTDOA'] = self.signalTDOA\n svDict['TDOAVar'] = self.TDOAVar\n self.peakOffsetFromCenter = 0\n\n \n svDict['xAxis'] = self.xAxis + self.peakCenteringDT\n # svDict['xAxis'] = self.xAxis - self.signalTDOA\n \n tdoaSTD = np.sqrt(self.TDOAVar)\n if tdoaSTD < (self.peakLockThreshold * self.__dT__):\n if not self.peakLock:\n print(\n 'Substate %s reached peak lock at time %s'\n %(self.__trueSignal__.name, self.t)\n )\n self.peakLock = True\n else:\n if self.peakLock and tdoaSTD > (self.peakLockThreshold * self.__dT__ * 1.1):\n print(\n 'Substate %s lost peak lock at time %s'\n %(self.__trueSignal__.name, self.t)\n )\n self.peakLock = False\n self.peakOffsetFromCenter = 0\n\n if self.INF_type == 'deep':\n fO = self.__filterOrder__\n currentV = self.stateVector[fO]\n currentVStdDev = np.sqrt(self.correlationVectorCovariance[fO,fO].value)\n self.velocity = currentV\n self.velocityStdDev = currentVStdDev\n svDict['velocity'] = {'value':currentV, 'stddev': currentVStdDev}\n if self.navVectorLength == 2:\n currentA = self.stateVector[fO+1]\n currentAStdDev = np.sqrt(self.correlationVectorCovariance[fO+1,fO+1].value)\n svDict['acceleration'] = {'value':currentA, 'stddev': currentAStdDev}\n\n self.acceleration = currentA\n self.accelerationStdDev = currentAStdDev\n elif self.navVectorLength == 3:\n currentA = self.stateVector[fO+1]\n currentAStdDev = np.sqrt(self.correlationVectorCovariance[fO+1,fO+1].value)\n svDict['acceleration'] = {'value':currentA, 'stddev': currentAStdDev}\n self.acceleration = currentA\n self.accelerationStdDev = currentAStdDev\n \n currentGrad = self.stateVector[fO+2]\n currentGradStdDev = np.sqrt(self.correlationVectorCovariance[fO+2,fO+2].value)\n svDict['aGradient'] = {'value':currentGrad, 'stddev': currentGradStdDev}\n self.gradient = currentGrad\n self.gradientStdDev = currentGradStdDev\n \n elif self.INF_type == 'external':\n self.velocity = self.navState.currentVelocity\n self.velocityStdDev = np.sqrt(self.navState.velocityVar)\n \n super().storeStateVector(svDict)\n return\n\n ## @fun #timeUpdate returns the matrices for performing the correlation\n # vector time update.\n #\n # @details This function calls the #buildTimeUpdateMatrices method to\n # generate the time-update matrices.\n #\n # @param self The object pointer\n # @param dT The amount of time ellapsed over which the time update is to\n # be performed\n # @param dynamics A dictionary containing the dynamics for the time update\n # (e.g. 
velocity)\n #\n # @sa SubStates.SubState.timeUpdate\n def timeUpdate(\n self,\n dT,\n dynamics=None\n ):\n\n if self.INF_type != 'deep':\n\n timeUpdateMatrices = self.buildTimeUpdateMatrices(\n dT, dynamics, self.correlationVector\n )\n\n L = timeUpdateMatrices['L']\n Q = timeUpdateMatrices['Q']\n\n Qmat = (\n np.outer(L, L) * Q +\n (\n np.eye(self.__filterOrder__) * \n self.processNoise * dT * \n np.square(self.__trueSignal__.peakAmplitude * self.__dT__)\n )\n )\n\n if dynamics is not None and 'acceleration' in dynamics:\n oneDAcceleration = (\n dynamics['acceleration']['value'].dot(self.__unitVecToSignal__) /\n self.speedOfLight()\n )\n\n oneDAccelerationVar = (\n self.__unitVecToSignal__.dot(\n dynamics['acceleration']['value'].dot(\n self.__unitVecToSignal__.transpose()\n )\n ) / np.square(self.speedOfLight())\n )\n else:\n oneDAcceleration = 0\n oneDAccelerationVar = self.navProcessNoise\n\n if self.INF_type == 'external':\n self.internalNavFilter.timeUpdateEKF(\n dT,\n dynamics = {\n 'oneDPositionVelocityacceleration': {\n 'value': oneDAcceleration,\n 'var': oneDAccelerationVar\n }\n }\n )\n\n else:\n timeUpdateMatrices = self.buildDeepTimeUpdateMatrices(dT, dynamics, self.correlationVector)\n # if dynamics is not None and 'accelerationGrad' in dynamics:\n # navProcessNoise = (\n # dynamics['accelerationGrad']['value'].dot(self.__unitVecToSignal__) /\n # self.speedOfLight()\n # )\n\n # oneDAccelerationGradVar = (\n # self.__unitVecToSignal__.dot(\n # dynamics['accelerationGrad']['value'].dot(\n # self.__unitVecToSignal__.transpose()\n # )\n # ) / np.square(self.speedOfLight())\n # )\n # else:\n \n L = timeUpdateMatrices['L']\n Qmat = (\n np.outer(L, L) * self.navProcessNoise + (\n (\n block_diag(np.eye(self.__filterOrder__),np.zeros([self.navVectorLength,self.navVectorLength])) * \n self.processNoise * dT * \n np.square(self.__trueSignal__.flux * self.__dT__)\n )\n )\n )\n self.mostRecentF = timeUpdateMatrices['F'][0:self.__filterOrder__, 0:self.__filterOrder__]\n return {'F': timeUpdateMatrices['F'], 'Q': Qmat}\n\n def buildDeepTimeUpdateMatrices(self,dT, dynamics, h):\n \n FMatrixShift = -self.peakOffsetFromCenter\n filterOrder = self.__filterOrder__\n \n # Initialize empty matricies\n F = np.zeros([filterOrder + self.navVectorLength, filterOrder+self.navVectorLength])\n \n halfLength = self.__halfLength__\n indexDiff = dT/self.__dT__\n \n peakShift = self.stateVector[self.__filterOrder__] * indexDiff\n\n # Velocity term\n self.peakCenteringDT = (\n self.peakCenteringDT + self.stateVector[self.__filterOrder__] * dT \n )\n \n if self.navVectorLength > 1:\n # Acceleration term (if acceleration is being estimated)\n self.peakCenteringDT = (\n self.peakCenteringDT +\n self.stateVector[self.__filterOrder__ + 1] * np.power(dT,2)/2\n )\n peakShift = (\n peakShift + self.stateVector[self.__filterOrder__ + 1]*np.power(indexDiff,2)/2\n )\n \n if self.navVectorLength > 2:\n # Acceleration gradient term\n self.peakCenteringDT = (\n self.peakCenteringDT +\n self.stateVector[self.__filterOrder__] *\n self.stateVector[self.__filterOrder__ + 2] *\n np.power(dT,3)/6\n )\n peakShift = (\n peakShift +\n self.stateVector[self.__filterOrder__] *\n self.stateVector[self.__filterOrder__ + 2] *\n np.power(indexDiff,3)/6\n )\n \n \n self.peakCenteringDT = self.peakCenteringDT + (self.peakOffsetFromCenter*self.__dT__)\n \n # Build arrays of indicies from which to form the sinc function\n\n if np.mod(filterOrder, 2) == 0:\n baseVec = (\n np.linspace(\n 1 - halfLength,\n halfLength,\n filterOrder\n )\n )\n\n 
else:\n baseVec = (\n np.linspace(\n 1 - halfLength,\n halfLength - 1,\n filterOrder\n )\n )\n\n # Compute the sinc function of the base vector\n sincBase = np.sinc(baseVec + FMatrixShift)\n diffBase = np.zeros_like(sincBase)\n\n for i in range(len(baseVec)):\n diffBase[i] = self.sincDiff(baseVec[i] + peakShift)\n\n sincBase = np.roll(sincBase, 1 - int(halfLength))\n diffBase = np.roll(diffBase, 1 - int(halfLength))\n\n for i in range(len(sincBase)):\n currentDiff = np.roll(diffBase, i).dot(h)\n F[i, 0:filterOrder] = np.roll(sincBase, i)\n F[i, filterOrder] = currentDiff * indexDiff\n \n if self.navVectorLength > 1:\n F[i, filterOrder+1] = currentDiff * np.power(indexDiff, 2)/2\n if self.navVectorLength > 2:\n F[i, filterOrder+2] = (\n currentDiff *\n self.stateVector[filterOrder] *\n np.power(indexDiff, 3)/6\n )\n\n L = np.zeros(filterOrder+self.navVectorLength)\n \n if self.navVectorLength == 1:\n F[filterOrder, filterOrder] = 1\n L[filterOrder] = dT\n \n elif self.navVectorLength == 2:\n F[filterOrder, filterOrder] = 1.0\n F[filterOrder, filterOrder+1] = dT\n F[filterOrder+1, filterOrder+1] = 1.0\n \n L[filterOrder] = np.power(dT, 2)/2\n L[filterOrder + 1] = dT\n \n elif self.navVectorLength == 3:\n vCurrent = self.stateVector[filterOrder]\n aCurrent = self.stateVector[filterOrder + 1]\n gradCurrent = self.stateVector[filterOrder + 2]\n \n F[filterOrder,filterOrder] = 1 + (gradCurrent * np.power(dT,2)/2)\n F[filterOrder,filterOrder+1] = dT\n F[filterOrder,filterOrder+2] = vCurrent * np.power(dT,2)/2\n \n F[filterOrder+1,filterOrder+1] = 1\n F[filterOrder+1,filterOrder+2] = vCurrent * dT\n \n F[filterOrder+2,filterOrder+2] = 1\n\n L[filterOrder] = vCurrent * np.power(dT,3)/6\n L[filterOrder + 1] = vCurrent * np.power(dT,2)/2\n L[filterOrder + 2] = dT\n \n \n diffBase = np.zeros_like(sincBase)\n\n for i in range(len(baseVec)):\n diffBase[i] = self.sincDiff(baseVec[i])\n\n diffBase = np.roll(diffBase, 1 - int(halfLength))\n \n for i in range(len(baseVec)):\n L[i] = (\n np.roll(diffBase, i).dot(h) *\n np.power(indexDiff,self.navVectorLength+1)/factorial(self.navVectorLength+1)\n )\n \n # # Setting L to zero for test purposes only\n # L = np.zeros(filterOrder+self.navVectorLength)\n\n return({'F':F, 'L':L})\n \n def getMeasurementMatrices(\n self,\n measurement,\n source=None\n ):\n if (\n (source.signalID() == self.__trueSignal__.signalID()) and\n ('t' in measurement)\n ):\n\n measurementMatrices = self.getTOAMeasurementMatrices(\n measurement,\n self.correlationVector\n )\n\n HDict = {'correlationVector': measurementMatrices['H']}\n RDict = {'correlationVector': measurementMatrices['R']}\n dyDict = {'correlationVector': measurementMatrices['dY']}\n else:\n HDict = {'': None}\n RDict = {'': None}\n dyDict = {'': None}\n\n measurementMatricesDict = {\n 'H': HDict,\n 'R': RDict,\n 'dY': dyDict\n }\n\n return measurementMatricesDict\n\n ## @}\n\n ## @fun #buildTimeUpdateMatrices constructs the correlation vector time\n # update matrices\n #\n # @details The #buildTimeUpdateMatrices method constructs the matrices required to perform the time update of the correlation vector sub-state.\n #\n # The time update matrices are a function of the estimated spacecraft velocity (\\f$\\mathbf{v}\\f$), velocity variance (\\f$\\mathbf{P}_{\\mathbf{v}}\\f$), and the elapsed time over which the time update occurs (\\f$\\Delta T\\f$). 
The matrices are constructed as follows:\n #\n # \\f[\n # \\mathbf{F}_{j \\to k} = \\begin{bmatrix}\n # \\textrm{sinc}(\\hat{\\delta}) & \\hdots & \\textrm{sinc}(\\hat{\\delta} + N - 1) \\\\\n # \\vdots & \\ddots & \\vdots \\\\\n # \\textrm{sinc}(\\hat{\\delta} - N + 1) & \\hdots & \\textrm{sinc}(\\hat{\\delta})\n # \\end{bmatrix}\n # \\f]\n # \n # \\f[\n # \\mathbf{L}_{j} = \\begin{bmatrix}\n # \\frac{\\textrm{cos}}{(\\hat{\\delta})} - \\frac{\\textrm{sin}}{(\\hat{\\delta}^2)} & \\hdots \\\\\n # \\vdots & \\ddots \\\\\n # \\end{bmatrix} \\sv[timeIndex = k]\n # \\f]\n #\n # \\f[\n # Q_{\\delta} = \\left(\\frac{(k-j)}{c}\\right)^2\n # {\\LOSVec[S]}^T \\mathbf{P}_{\\mathbf{v}} \\LOSVec[S]\n # \\f]\n #\n # where\n #\n # \\f[\n # \\hat{\\delta}_{j \\to k} = \\frac{\\mathbf{v} \\LOSVec[S] \\Delta T}{c T}\n # \\f]\n #\n # @param self The object pointer\n # @param deltaT The amount of time over which the time update is occuring\n # @param dynamics A dictionary containing the relevant dynamics for the\n # time update\n # @param h The current correlation vector\n #\n # @returns A dictionary containing the matrices \\f$\\mathbf{F}\\f$,\n # \\f$\\mathbf{L}\\f$, and the scalar \\f$Q\\f\n def buildTimeUpdateMatrices(\n self,\n deltaT,\n dynamics,\n h\n ):\n \n indexDiff = deltaT/self.__dT__\n \n if (\n (dynamics is not None and 'velocity' in dynamics) or\n (\n self.INF_type == 'external' and\n (\n (np.sqrt(self.navState.velocityVar) < self.velStdDevThreshold) or\n self.velStdDevThreshold == 0\n )\n )\n ):\n if 'velocity' in dynamics:\n\n velocity = dynamics['velocity']['value']\n vVar = dynamics['velocity']['var'] * self.velocityNoiseScaleFactor\n\n\n peakShift = (\n (velocity.dot(self.__unitVecToSignal__) * indexDiff) /\n self.speedOfLight()\n )\n\n # velocityTDOA = peakShift * self.__dT__\n velocityTDOA = (\n velocity.dot(self.__unitVecToSignal__) * deltaT /\n self.speedOfLight()\n )\n Q = (\n self.__unitVecToSignal__.dot(\n vVar\n ).dot(self.__unitVecToSignal__) *\n np.square(indexDiff / self.speedOfLight())\n )\n tdoaQ = (\n self.__unitVecToSignal__.dot(vVar\n ).dot(self.__unitVecToSignal__) *\n np.square(deltaT/self.speedOfLight()))\n elif self.INF_type=='external':\n\n peakShift = self.navState.currentVelocity * indexDiff\n velocityTDOA = self.navState.currentVelocity * deltaT\n Q = self.navState.velocityVar * np.square(indexDiff) * self.velocityNoiseScaleFactor\n tdoaQ = self.navState.velocityVar * np.square(deltaT) * self.velocityNoiseScaleFactor\n\n else:\n velocityTDOA = 0\n peakShift = 0\n Q = self.navProcessNoise * np.power(indexDiff,4)/4\n \n tdoaQ = self.navProcessNoise * np.power(deltaT,4)/4\n\n FMatrixShift = -self.peakOffsetFromCenter # - peakShift\n self.signalTDOA = (\n self.signalTDOA +\n velocityTDOA\n )\n self.TDOAVar = self.TDOAVar + tdoaQ\n\n self.peakCenteringDT = (\n self.peakCenteringDT + velocityTDOA +\n (self.peakOffsetFromCenter*self.__dT__)\n )\n\n # Initialize empty matricies\n F = np.zeros([self.__filterOrder__, self.__filterOrder__])\n L = np.zeros([self.__filterOrder__, self.__filterOrder__])\n\n # Build arrays of indicies from which to form the sinc function\n\n if np.mod(self.__filterOrder__, 2) == 0:\n baseVec = (\n np.linspace(\n 1 - self.__halfLength__,\n self.__halfLength__,\n self.__filterOrder__\n )\n )\n\n else:\n baseVec = (\n np.linspace(\n 1 - self.__halfLength__,\n self.__halfLength__ - 1,\n self.__filterOrder__\n )\n )\n\n # Compute the sinc function of the base vector\n sincBase = np.sinc(baseVec + FMatrixShift)\n diffBase = np.zeros_like(sincBase)\n \n 
for i in range(len(baseVec)):\n diffBase[i] = self.sincDiff(baseVec[i] + peakShift)\n \n sincBase = np.roll(sincBase, 1 - int(self.__halfLength__))\n diffBase = np.roll(diffBase, 1 - int(self.__halfLength__))\n\n for i in range(len(F)):\n F[i] = np.roll(sincBase, i)\n L[i] = np.roll(diffBase, i)\n L = L.dot(h)\n\n # else:\n # # If no velocity was included in dynamics, then do nothing during\n # # time update\n # F = np.eye(self.__filterOrder__)\n # L = np.zeros(self.__filterOrder__)\n # Q = 0\n \n timeUpdateDict = {\n 'F': F,\n 'L': L,\n 'Q': Q\n }\n \n return(timeUpdateDict)\n\n def buildFLMatrices(self, peakShift, h):\n # Initialize empty matricies\n F = np.zeros([self.__filterOrder__, self.__filterOrder__])\n L = np.zeros([self.__filterOrder__, self.__filterOrder__])\n\n # Build arrays of indicies from which to form the sinc function\n\n if np.mod(self.__filterOrder__, 2) == 0:\n baseVec = (\n np.linspace(\n 1 - self.__halfLength__,\n self.__halfLength__,\n self.__filterOrder__\n )\n )\n\n else:\n baseVec = (\n np.linspace(\n 1 - self.__halfLength__,\n self.__halfLength__ - 1,\n self.__filterOrder__\n )\n )\n\n # Compute the sinc function of the base vector\n sincBase = np.sinc(baseVec + peakShift)\n diffBase = np.zeros_like(sincBase)\n\n for i in range(len(baseVec)):\n diffBase[i] = self.sincDiff(baseVec[i] + peakShift)\n \n sincBase = np.roll(sincBase, 1 - int(self.__halfLength__))\n diffBase = np.roll(diffBase, 1 - int(self.__halfLength__))\n\n for i in range(len(F)):\n F[i] = np.roll(sincBase, i)\n L[i] = np.roll(diffBase, i)\n\n L = L.dot(h)\n\n return {'F':F, 'L':L}\n\n ## @}\n \n ## @{\n # @name Functions Specific to #CorrelationVector\n #\n # The following remaining functions are not required in order for this\n # class to be used as a SubState, and may be changed as needed,\n # including inputs and outputs.\n def getTOAMeasurementMatrices(\n self,\n measurement,\n corrVec\n ):\n photonTOA = measurement['t']['value']\n \n adjustedTOA = photonTOA + self.peakCenteringDT\n \n H = np.eye(self.__filterOrder__)\n\n if self.INF_type == 'deep':\n H = np.append(H, np.zeros([self.__filterOrder__, self.navVectorLength]), axis=1)\n timeVector = np.linspace(\n 0,\n (self.__filterOrder__ - 1),\n self.__filterOrder__\n )\n timeVector = timeVector * self.__dT__\n\n timeVector = (\n timeVector + adjustedTOA\n )\n\n # if self.peakLock is True:\n # timeVector = timeVector - self.signalDelay\n\n signalTimeHistory = np.zeros(self.__filterOrder__)\n halfDT = self.__dT__/2.0\n# for timeIndex in range(len(timeVector)):\n# signalTimeHistory[timeIndex] = (\n# self.__trueSignal__.getSignal(timeVector[timeIndex]) *\n# self.__dT__\n# )\n for timeIndex in range(len(timeVector)):\n signalTimeHistory[timeIndex] = (\n self.__trueSignal__.signalIntegral(\n timeVector[timeIndex]-halfDT,\n timeVector[timeIndex] + halfDT\n )\n )\n # plt.plot(signalTimeHistory)\n # plt.show(block=False)\n # 1/0\n # print(corrVec)\n # print(signalTimeHistory)\n dY = signalTimeHistory - corrVec\n\n R = (\n np.eye(self.__filterOrder__) *\n #self.__trueSignal__.flux *\n self.__trueSignal__.peakAmplitude *\n self.__dT__ *\n np.dot(corrVec, corrVec) *\n self.measurementNoiseScaleFactor\n )\n\n measMatDict = {\n 'H': H,\n 'dY': dY,\n 'R': R\n }\n \n return measMatDict\n\n ## @fun #computeSignalTDOA computes the delay between the #__trueSignal__ and\n # measurements based on a correlation vector\n #\n # @details The #computeSignalDelay function is a rudimentary function\n # which takes an estimate of the correlation vector and uses it to\n 
# estimate the location of the peak. It functions by finding the tap with\n # the maximum value, and then fitting a quadratic to the points\n # surrounding the maximum value tap. The number of points to which the\n # quadratic is fitted is determined by the value of #peakFitPoints; the\n # number of points is equal to \\f$2 * n + 1\\f$ where \\f$n = \\f$\n # #peakFitPoints.\n #\n # The delay estimate that is returned is in units of #__dT__. So, a returned\n # value of 2 would imply that the peak is located at 2, and therefore the\n # delay corresponding to the correlation vector is 2 #__dT__.\n #\n # The returned delay may not include previously accumulated #signalDelay\n # between the signal and the measurements. See the #storeStateVector\n # function for more information on how the #signalDelay is stored and\n # accumulated delay is accounted for.\n #\n # @param self The object pointer\n # @param c The correlation vector\n # @param P the correlation vector covariance matrix\n #\n # @return The estimate of the delay\n def computeSignalTDOA(\n self,\n c,\n P\n ):\n\n # First estimate of peak location is the location of the max value\n peakLocation = np.argmax(c)\n\n # Next, we \"roll\" the correlation vector so that the values being\n # fitted quadratically are the first 2 * peakFitPoints + 1 values\n\n lowerBound = peakLocation - self.peakFitPoints\n upperBound = lowerBound + (self.peakFitPoints * 2) + 1\n if (lowerBound < 0) or (upperBound > self.__filterOrder__):\n mySlice = range(lowerBound, upperBound)\n slicedC = c.take(mySlice, mode='wrap')\n slicedP = P.take(mySlice, axis=0, mode='wrap').take(mySlice, axis=1, mode='wrap')\n else:\n mySlice = slice(lowerBound, upperBound)\n slicedC = c[mySlice]\n slicedP = P[mySlice, mySlice]\n\n # xVec is the vector of \"x\" values corresponding the \"y\" values to\n # which the quadratic is being fit.\n xVec = self.__xVec__\n# xVec = xVec - rollFactor\n xVec = xVec + lowerBound\n\n # Get the quadratic function that fits the peak and surrounding values,\n # and use it to estimate the location of the max\n # print(slicedC)\n if len(xVec) == 3:\n TDOA = self.peakFinder(xVec, slicedC)\n else:\n quadraticVec = self.quadraticFit(xVec, slicedC)\n try:\n TDOA = (-quadraticVec[1] / (2 * quadraticVec[0]))\n except:\n TDOA = xVec[peakLocation]\n\n return TDOA\n\n ## @fun #estimateSignalTDOA_UT uses a unscented tranform to estimate the\n # delay corresponding to a correlation vector\n #\n # @details The #estimateSignalDelayUT method is responsible for computing\n # the estimated value of delay corresponding to a correlation vector, as\n # well as the variance of that estimate. These values are computed using\n # a unscented transform (i.e. sigma-point) approach.\n #\n # The method receives the an estimate of the correlation vector, as well\n # as the covariance matrix corresponding to that vector. 
From there it\n # computes a set of n sigma points (where n is the length of the\n # correlation vector), and for each of the generated sigma point vectors,\n # it computes the peak location using the #computeSignalDelay method.\n #\n # @param self The object pointer\n # @param h The correlation vector\n # @param P The correlation vector covariance matrix\n #\n # @returns A dict containing the estimate of the peak location\n # (\"meanDelay\") and the estimate variance (\"varDelay\")\n def estimateSignalTDOA_UT(\n self,\n h,\n P,\n useMean=True\n ):\n # Compute sigma points\n hDimension = len(h)\n\n maxHIndex = np.argmax(h)\n rollAmount = -maxHIndex + self.__halfLength__\n # rollAmount = 1\n # hRolled = np.roll(h, rollAmount)\n \n # PRolled = np.roll(np.roll(P.value, rollAmount, axis=0), rollAmount, axis=1)\n # Compute the square root of P.\n if P.form == 'covariance':\n sqrtP = np.linalg.cholesky(\n hDimension * P.value[0:self.__filterOrder__, 0:self.__filterOrder__]\n )\n elif P.form == 'cholesky':\n # PVal = P.convertCovariance('covariance').value\n # sqrtP = np.linalg.cholesky(hDimension * PVal)\n \n sqrtP = P.value[0:self.__filterOrder__, 0:self.__filterOrder__] * np.sqrt(hDimension)\n \n sigmaPoints = h + np.append(sqrtP, -sqrtP, axis=0)\n\n # Append one more row of sigma points containing the unmodified estimate\n sigmaPoints = np.append(np.array([h]), sigmaPoints, axis=0)\n\n # Initiate vector to store the resulting peaks from each sigma point\n sigmaPointResults = np.zeros(len(sigmaPoints))\n\n # Compute the peak corresponding to each sigma point vector\n for i in range(len(sigmaPoints)):\n sigmaPointResults[i] = (\n self.computeSignalTDOA(sigmaPoints[i], P.convertCovariance('covariance').value)\n )\n \n\n \n #meanTDOA = np.mean(sigmaPointResults)\n meanTDOA = sigmaPointResults[0]\n for i in range(len(sigmaPoints)):\n if (meanTDOA - sigmaPointResults[i]) > self.__halfLength__:\n sigmaPointResults[i] += self.__dimension__\n elif (sigmaPointResults[i] - meanTDOA) > self.__halfLength__:\n sigmaPointResults[i] -= self.__dimension__\n \n # meanTDOA = self.computeSignalTDOA(h, P)\n varTDOA = np.var(sigmaPointResults)\n\n return {'meanTDOA': meanTDOA, 'varTDOA': varTDOA, 'sigmaPoints': sigmaPointResults}\n\n def estimateSignalTDOA_EK(self, h, P):\n\n if P.form == 'covariance':\n P = P.value[0:self.__filterOrder__, 0:self.__filterOrder__]\n\n elif P.form == 'cholesky':\n P = P.convertCovariance('covariance').value[0:self.__filterOrder__, 0:self.__filterOrder__]\n \n # First estimate of peak location is the location of the max value\n peakLocation = np.argmax(h)\n\n # Next, we \"roll\" the correlation vector so that the values being\n # fitted quadratically are the first 3 values\n\n lowerBound = peakLocation - 1\n upperBound = lowerBound + (1 * 2) + 1\n if (lowerBound < 0) or (upperBound > self.__filterOrder__):\n mySlice = range(lowerBound, upperBound)\n slicedC = h.take(mySlice, mode='wrap')\n slicedP = P.take(mySlice, axis=0, mode='wrap').take(mySlice, axis=1, mode='wrap')\n else:\n mySlice = slice(lowerBound, upperBound)\n slicedC = h[mySlice]\n slicedP = P[mySlice, mySlice]\n\n # xVec is the vector of \"x\" values corresponding the \"y\" values to\n # which the quadratic is being fit.\n xVec = self.__xVec__\n xVec = xVec + lowerBound\n\n # Get the quadratic function that fits the peak and surrounding values,\n # and use it to estimate the location of the max\n TDOA = self.peakFinder(xVec, slicedC)\n jacobian = self.peakFinderJacobian(xVec, slicedC)\n\n variance = 
jacobian.dot(slicedP).dot(jacobian.transpose())\n \n return {'meanTDOA': TDOA, 'varTDOA': variance}\n\n \n def speedOfLight(\n self\n ):\n return (299792)\n\n @staticmethod\n def sincDiff(x):\n if np.abs(x) < 1e-100:\n myDiff = 0.0\n\n else:\n piX = np.pi*x\n # myDiff = np.pi * (\n # (((np.pi * x) * np.cos(x * np.pi)) - np.sin(x * np.pi))\n # /\n # np.square(x * np.pi)\n # )\n myDiff = (piX*np.cos(piX) - np.sin(piX))/(np.pi * np.power(x,2))\n # myDiff\n return myDiff\n\n\n @staticmethod\n def quadraticFit(x, y):\n X_T = np.array([np.power(x, 2), x, np.ones(len(x))])\n X = X_T.transpose()\n if len(x) < 3:\n raise ValueError(\n \"Cannot fit a quadratic to less than three data points.\"\n )\n elif len(x) == 3:\n # Note: Suprisingly, it is faster to directly invert the X matrix\n # than it is to do a linear solve. Strange.\n \n #coef = np.linalg.solve(X, y)\n coef = np.linalg.inv(X).dot(y)\n else:\n #coef = np.linalg.solve(X_T.dot(X).dot(X_T), y)\n coef = np.linalg.inv(X_T.dot(X)).dot(X_T).dot(y)\n \n return coef\n\n def initializeRealTimePlot(\n self,\n plotHandle=None,\n axisHandle=None \n ):\n super().initializeRealTimePlot(plotHandle, axisHandle)\n self.RTPlotTDOA = self.RTPaxisHandle.scatter(\n self.signalTDOA,\n 1\n )\n \n self.RTPlotTDOA_error, = self.RTPaxisHandle.plot(\n [\n self.signalTDOA - np.sqrt(self.TDOAVar),\n self.signalTDOA + np.sqrt(self.TDOAVar)\n ],\n [1,1]\n )\n \n return\n \n def realTimePlot(\n self,\n normalized=True\n ):\n if self.RTPlotHandle is None:\n self.initializeRealTimePlot()\n \n self.RTPlotTDOA.set_offsets([self.signalTDOA, 1])\n self.RTPlotTDOA_error.set_data(\n [\n self.signalTDOA - np.sqrt(self.TDOAVar),\n self.signalTDOA + np.sqrt(self.TDOAVar)\n ],\n [1,1]\n )\n super().realTimePlot(normalized, substateRange = slice(0,self.__filterOrder__))\n return\n\n @staticmethod\n def peakFinder(x,y):\n x1 = x[0]\n x2 = x[1]\n x3 = x[2]\n \n y1 = y[0]\n y2 = y[1]\n y3 = y[2]\n \n x0 = (\n -(y1*(np.square(x3) - np.square(x2)) + y2*(np.square(x1) - np.square(x3)) + y3*(np.square(x2) - np.square(x1)))\n /\n (2*(y1*(x2-x3) + y2*(x3-x1) + y3*(x1-x2)))\n )\n x0 = (\n (\n y1*(np.square(x2)-np.square(x3)) +\n y2*(np.square(x3)-np.square(x1)) +\n y3*(np.square(x1)-np.square(x2))\n )\n /\n (2*(y1*(x2-x3) + y2*(x3-x1) + y3*(x1-x2)))\n )\n return(x0)\n\n @staticmethod\n def peakFinderJacobian(x,y):\n x1 = x[0]\n x2 = x[1]\n x3 = x[2]\n \n y1 = y[0]\n y2 = y[1]\n y3 = y[2]\n\n A = np.square(x2) - np.square(x3)\n B = np.square(x3) - np.square(x1)\n C = np.square(x1) - np.square(x2)\n\n D = x2-x3\n E = x3-x1\n # E = x1-x2\n F = x1-x2\n\n AE = A*E\n AF = A*F\n \n BD = B*D\n BF = B*F\n\n CD = C*D\n CE = C*E\n denom = 2*np.power(((D*y1) + (E*y2) + (F*y3)),2)\n\n dT_dy1 = (\n ((AE - BD)*y2 + (AF - CD)*y3)\n /\n denom\n )\n \n dT_dy2 = (\n ((BD - AE)*y1 + (BF - CE)*y3)\n /\n denom\n )\n \n dT_dy3 = (\n ((CD - AF)*y1 + (CE - BF)*y2)\n /\n denom\n )\n\n return np.array([dT_dy1, dT_dy2, dT_dy3])\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.linspace", "numpy.zeros_like", "numpy.var", "numpy.roll", "numpy.square", "numpy.sinc", "numpy.eye", "numpy.sin", "numpy.ceil", "numpy.argmax", "scipy.special.factorial", "numpy.outer", "numpy.zeros", "numpy.power", "numpy.linalg.inv", "numpy.append", "numpy.linalg.cholesky", "numpy.array", "numpy.abs", "scipy.linalg.block_diag", "numpy.cos", "numpy.ones", "numpy.mod" ] ]
rsaurabh799/ga-learner-dsmp-repo
[ "024f054e0385fd5faa24804004e25d9f849363aa" ]
[ "the-lego-collector-s-dilemma-(linear-regression)/code.py" ]
[ "# --------------\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\n# code starts here\ndf = pd.read_csv(path)\nprint(df.head(5))\nX = df.drop('list_price',axis=1)\ny = df['list_price']\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 6)\n\n\n\n# code ends here\n\n\n\n# --------------\nimport matplotlib.pyplot as plt\n\n# code starts here \ncols = np.array(['ages','num_reviews','piece_count','play_star_rating','review_difficulty','star_rating','theme_name','val_star_rating','country'])\n\nfig,axes = plt.subplots(nrows = 3 , ncols = 3,figsize=(20,10))\nfor i in range(3):\n for j in range(3):\n col = cols[i*3+j]\n print(i*3+j)\n axes[i,j].scatter(X_train[col],y_train)\n\n\n\n# code ends here\n\n\n\n# --------------\n# Code starts here\n\ncorr = X_train.corr()\nprint(corr)\nX_train.drop(['play_star_rating','val_star_rating'],axis=1,inplace = True)\nX_test.drop(['play_star_rating','val_star_rating'],axis=1,inplace = True)\n\n# Code ends here\n\n\n# --------------\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# Code starts here\n\nregressor = LinearRegression()\nregressor.fit(X_train,y_train)\ny_pred = regressor.predict(X_test)\nmse = mean_squared_error(y_test,y_pred)\nr2 = r2_score(y_test,y_pred)\nprint(mse)\nprint(r2)\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\nresidual = y_test - y_pred\nplt.hist(residual)\n\n\n# Code ends here\n\n\n" ]
[ [ "sklearn.cross_validation.train_test_split", "pandas.read_csv", "sklearn.metrics.r2_score", "matplotlib.pyplot.subplots", "sklearn.metrics.mean_squared_error", "sklearn.linear_model.LinearRegression", "numpy.array", "matplotlib.pyplot.hist" ] ]
atul04/Grammar-Correction
[ "89ee3338f901735cbad2144e5e41a54ee11213f9" ]
[ "utils.py" ]
[ "import torch\nimport spacy\nfrom torchtext.data.metrics import bleu_score\nimport sys\n\n\ndef translate_sentence(model, sentence, german, english, device, max_length=50):\n # Load german tokenizer\n spacy_ger = spacy.load(\"en\")\n\n # Create tokens using spacy and everything in lower case (which is what our vocab is)\n if type(sentence) == str:\n tokens = [token.text.lower() for token in spacy_ger(sentence)]\n else:\n tokens = [token.lower() for token in sentence]\n\n # Add <SOS> and <EOS> in beginning and end respectively\n tokens.insert(0, german.init_token)\n tokens.append(german.eos_token)\n\n # Go through each german token and convert to an index\n text_to_indices = [german.vocab.stoi[token] for token in tokens]\n\n # Convert to Tensor\n sentence_tensor = torch.LongTensor(text_to_indices).unsqueeze(1).to(device)\n\n # Build encoder hidden, cell state\n with torch.no_grad():\n outputs_encoder, hiddens, cells = model.encoder(sentence_tensor)\n\n outputs = [english.vocab.stoi[\"<sos>\"]]\n\n for _ in range(max_length):\n previous_word = torch.LongTensor([outputs[-1]]).to(device)\n\n with torch.no_grad():\n output, hiddens, cells = model.decoder(\n previous_word, outputs_encoder, hiddens, cells\n )\n best_guess = output.argmax(1).item()\n\n outputs.append(best_guess)\n\n # Model predicts it's the end of the sentence\n if output.argmax(1).item() == english.vocab.stoi[\"<eos>\"]:\n break\n\n translated_sentence = [english.vocab.itos[idx] for idx in outputs]\n\n # remove start token\n return translated_sentence[1:]\n\n\ndef bleu(data, model, german, english, device):\n targets = []\n outputs = []\n\n for example in data:\n src = vars(example)[\"source\"]\n trg = vars(example)[\"target\"]\n\n prediction = translate_sentence(model, src, german, english, device)\n prediction = prediction[:-1] # remove <eos> token\n\n targets.append([trg])\n outputs.append(prediction)\n\n return bleu_score(outputs, targets)\n\n\ndef save_checkpoint(state, filename=\"my_checkpoint.pth.tar\"):\n print(\"=> Saving checkpoint\")\n torch.save(state, filename)\n\n\ndef load_checkpoint(checkpoint, model, optimizer):\n print(\"=> Loading checkpoint\")\n model.load_state_dict(checkpoint[\"state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])" ]
[ [ "torch.LongTensor", "torch.no_grad", "torch.save" ] ]
LukasBeiske/ctapipe
[ "8325700ca01cbae62733c2f41de4113013f18939" ]
[ "ctapipe/visualization/mpl_array.py" ]
[ "from itertools import cycle\n\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import Angle\nfrom matplotlib import pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Circle\n\nfrom ctapipe.coordinates import GroundFrame\nfrom ctapipe.visualization.mpl_camera import polar_to_cart\n\n\nclass ArrayDisplay:\n \"\"\"\n Display a top-town view of a telescope array.\n\n This can be used in two ways: by default, you get a display of all\n telescopes in the subarray, colored by telescope type, however you can\n also color the telescopes by a value (like trigger pattern, or some other\n scalar per-telescope parameter). To set the color value, simply set the\n ``value`` attribute, and the fill color will be updated with the value. You\n might want to set the border color to zero to avoid confusion between the\n telescope type color and the value color (\n ``array_disp.telescope.set_linewidth(0)``)\n\n To display a vector field over the telescope positions, e.g. for\n reconstruction, call `set_vector_uv()` to set cartesian vectors,\n or `set_vector_rho_phi()` to set polar coordinate vectors.\n These both take an array of length N_tels, or a single value.\n\n\n Parameters\n ----------\n subarray: ctapipe.instrument.SubarrayDescription\n the array layout to display\n axes: matplotlib.axes.Axes\n matplotlib axes to plot on, or None to use current one\n title: str\n title of array plot\n tel_scale: float\n scaling between telescope mirror radius in m to displayed size\n autoupdate: bool\n redraw when the input changes\n radius: Union[float, list, None]\n set telescope radius to value, list/array of values. If None, radius\n is taken from the telescope's mirror size.\n \"\"\"\n\n def __init__(\n self,\n subarray,\n axes=None,\n autoupdate=True,\n tel_scale=2.0,\n alpha=0.7,\n title=None,\n radius=None,\n frame=GroundFrame(),\n ):\n\n self.frame = frame\n self.subarray = subarray\n self.axes = axes or plt.gca()\n\n # get the telescope positions. 
If a new frame is set, this will\n # transform to the new frame.\n self.tel_coords = subarray.tel_coords.transform_to(frame).cartesian\n self.unit = self.tel_coords.x.unit\n\n # set up colors per telescope type\n tel_types = [str(tel) for tel in subarray.tels.values()]\n if radius is None:\n # set radius to the mirror radius (so big tels appear big)\n radius = [\n np.sqrt(tel.optics.mirror_area.to(\"m2\").value) * tel_scale\n for tel in subarray.tel.values()\n ]\n\n self.radii = radius\n else:\n self.radii = np.ones(len(tel_types)) * radius\n\n if title is None:\n title = subarray.name\n\n # get default matplotlib color cycle (depends on the current style)\n color_cycle = cycle(plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"])\n\n # map a color to each telescope type:\n tel_type_to_color = {}\n for tel_type in list(set(tel_types)):\n tel_type_to_color[tel_type] = next(color_cycle)\n\n tel_color = [tel_type_to_color[ttype] for ttype in tel_types]\n\n patches = []\n for x, y, r, c in zip(\n list(self.tel_coords.x.to_value(\"m\")),\n list(self.tel_coords.y.to_value(\"m\")),\n list(radius),\n tel_color,\n ):\n patches.append(Circle(xy=(x, y), radius=r, fill=True, color=c, alpha=alpha))\n\n # build the legend:\n legend_elements = []\n for ttype in list(set(tel_types)):\n color = tel_type_to_color[ttype]\n legend_elements.append(\n Line2D(\n [0],\n [0],\n marker=\"o\",\n color=color,\n label=ttype,\n markersize=10,\n alpha=alpha,\n linewidth=0,\n )\n )\n plt.legend(handles=legend_elements)\n\n self.add_radial_grid()\n\n # create the plot\n self.tel_colors = tel_color\n self.autoupdate = autoupdate\n self.telescopes = PatchCollection(patches, match_original=True)\n self.telescopes.set_linewidth(2.0)\n\n self.axes.add_collection(self.telescopes)\n self.axes.set_aspect(1.0)\n self.axes.set_title(title)\n xunit = self.tel_coords.x.unit.to_string(\"latex\")\n yunit = self.tel_coords.y.unit.to_string(\"latex\")\n xname, yname, _ = frame.get_representation_component_names().keys()\n self.axes.set_xlabel(f\"{xname} [{xunit}] $\\\\rightarrow$\")\n self.axes.set_ylabel(f\"{yname} [{yunit}] $\\\\rightarrow$\")\n self._labels = []\n self._quiver = None\n self.axes.autoscale_view()\n\n @property\n def values(self):\n \"\"\"An array containing a value per telescope\"\"\"\n return self.telescopes.get_array()\n\n @values.setter\n def values(self, values):\n \"\"\"set the telescope colors to display\"\"\"\n self.telescopes.set_array(np.ma.masked_invalid(values))\n self._update()\n\n def add_radial_grid(self, spacing=100 * u.m):\n \"\"\"add some dotted rings for distance estimation. 
The number of rings\n is estimated automatically from the spacing and the array footprint.\n\n Parameters\n ----------\n spacing: Quantity\n spacing between rings\n\n \"\"\"\n\n n_circles = np.round(\n (np.sqrt(self.subarray.footprint / np.pi) / spacing).to_value(\"\"),\n 0,\n )\n circle_radii = np.arange(1, n_circles + 2, 1) * spacing.to_value(self.unit)\n circle_patches = PatchCollection(\n [\n Circle(\n xy=(0, 0),\n radius=r,\n fill=False,\n fc=\"none\",\n linestyle=\"dotted\",\n color=\"gray\",\n alpha=0.1,\n lw=1,\n )\n for r in circle_radii\n ],\n color=\"#eeeeee\",\n ls=\"dotted\",\n fc=\"none\",\n lw=3,\n )\n\n self.axes.add_collection(circle_patches)\n\n def set_vector_uv(self, uu, vv, c=None, **kwargs):\n \"\"\"sets the vector field U,V and color for all telescopes\n\n Parameters\n ----------\n uu: array[num_tels]\n x-component of direction vector\n vv: array[num_tels]\n y-component of direction vector\n c: color or list of colors\n vector color for each telescope (or one for all)\n kwargs:\n extra args passed to plt.quiver(), ignored on subsequent updates\n \"\"\"\n coords = self.tel_coords\n uu = u.Quantity(uu).to_value(\"m\")\n vv = u.Quantity(vv).to_value(\"m\")\n N = len(coords.x)\n\n # matplotlib since 3.2 does not allow scalars anymore\n # if quiver was already created with a certain number of arrows\n if np.isscalar(uu):\n uu = np.full(N, uu)\n if np.isscalar(vv):\n vv = np.full(N, vv)\n\n # passing in None for C does not work, we need to provide\n # a variadic number of arguments\n args = [coords.x.to_value(\"m\"), coords.y.to_value(\"m\"), uu, vv]\n\n if c is None:\n # use colors by telescope type if the user did not provide any\n kwargs[\"color\"] = kwargs.get(\"color\", self.tel_colors)\n else:\n # same as above, enable use of scalar to set all values at once\n if np.isscalar(c):\n c = np.full(N, c)\n args.append(c)\n\n if self._quiver is None:\n self._quiver = self.axes.quiver(\n *args, scale_units=\"xy\", angles=\"xy\", scale=1, **kwargs\n )\n else:\n self._quiver.set_UVC(uu, vv, c)\n\n def set_vector_rho_phi(self, rho, phi, c=None, **kwargs):\n \"\"\"sets the vector field using R, Phi for each telescope\n\n Parameters\n ----------\n rho: float or array[float]\n vector magnitude for each telescope\n phi: array[Angle]\n vector angle for each telescope\n c: color or list of colors\n vector color for each telescope (or one for all)\n \"\"\"\n phi = Angle(phi).rad\n uu, vv = polar_to_cart(rho, phi)\n self.set_vector_uv(uu, vv, c=c, **kwargs)\n\n def set_vector_hillas(\n self, hillas_dict, core_dict, length, time_gradient, angle_offset\n ):\n \"\"\"\n Function to set the vector angle and length from a set of Hillas parameters.\n\n In order to proper use the arrow on the ground, also a dictionary with the time\n gradients for the different telescopes is needed. 
If the gradient is 0 the arrow\n is not plotted on the ground, whereas if the value of the gradient is negative,\n the arrow is rotated by 180 degrees (Angle(angle_offset) not added).\n\n This plotting behaviour has been tested with the timing_parameters function\n in ctapipe/image.\n\n Parameters\n ----------\n hillas_dict: Dict[int, HillasParametersContainer]\n mapping of tel_id to Hillas parameters\n core_dict : Dict[int, CoreParameters]\n mapping of tel_id to CoreParametersContainer\n length: Float\n length of the arrow (in meters)\n time_gradient: Dict[int, value of time gradient (no units)]\n dictionary for value of the time gradient for each telescope\n angle_offset: Float\n This should be the ``event.pointing.array_azimuth`` parameter\n\n \"\"\"\n\n # rot_angle_ellipse is psi parameter in HillasParametersContainer\n rho = np.zeros(self.subarray.num_tels) * u.m\n rot_angle_ellipse = np.zeros(self.subarray.num_tels) * u.deg\n\n for tel_id, params in hillas_dict.items():\n\n idx = self.subarray.tel_indices[tel_id]\n rho[idx] = u.Quantity(length, u.m)\n\n psi = core_dict[tel_id]\n\n if time_gradient[tel_id] > 0.01:\n angle_offset = Angle(angle_offset)\n rot_angle_ellipse[idx] = psi + angle_offset + 180 * u.deg\n elif time_gradient[tel_id] < -0.01:\n rot_angle_ellipse[idx] = psi + angle_offset\n else:\n rho[idx] = 0 * u.m\n\n self.set_vector_rho_phi(rho=rho, phi=rot_angle_ellipse)\n\n def set_line_hillas(self, hillas_dict, core_dict, range, **kwargs):\n \"\"\"\n Plot the telescope-wise direction of the shower as a segment.\n\n Each segment will be centered with a point on the telescope position\n and will be 2*range long.\n\n Parameters\n ----------\n hillas_dict: Dict[int, HillasParametersContainer]\n mapping of tel_id to Hillas parameters\n core_dict : Dict[int, CoreParameters]\n mapping of tel_id to CoreParametersContainer\n range: float\n half of the length of the segments to be plotted (in meters)\n \"\"\"\n\n coords = self.tel_coords\n c = self.tel_colors\n\n r = np.array([-range, range])\n\n for tel_id, params in hillas_dict.items():\n idx = self.subarray.tel_indices[tel_id]\n x_0 = coords[idx].x.to_value(u.m)\n y_0 = coords[idx].y.to_value(u.m)\n\n psi = core_dict[tel_id]\n\n x = x_0 + np.cos(psi).value * r\n y = y_0 + np.sin(psi).value * r\n self.axes.plot(x, y, color=c[idx], **kwargs)\n self.axes.scatter(x_0, y_0, color=c[idx])\n\n def add_labels(self):\n px = self.tel_coords.x.to_value(\"m\")\n py = self.tel_coords.y.to_value(\"m\")\n for tel, x, y, r in zip(self.subarray.tels, px, py, self.radii):\n name = str(tel)\n lab = self.axes.text(\n x,\n y - r * 1.8,\n name,\n fontsize=8,\n clip_on=True,\n horizontalalignment=\"center\",\n verticalalignment=\"top\",\n )\n self._labels.append(lab)\n\n def remove_labels(self):\n for lab in self._labels:\n lab.remove()\n self._labels = []\n\n def _update(self):\n \"\"\"signal a redraw if necessary\"\"\"\n if self.autoupdate:\n plt.draw()\n\n def background_contour(self, x, y, background, **kwargs):\n \"\"\"\n Draw image contours in background of the display, useful when likelihood fitting\n\n Parameters\n ----------\n x: ndarray\n array of image X coordinates\n y: ndarray\n array of image Y coordinates\n background: ndarray\n Array of image to use in background\n kwargs: key=value\n any style keywords to pass to matplotlib\n \"\"\"\n\n # use zorder to ensure the contours appear under the telescopes.\n self.axes.contour(x, y, background, zorder=0, **kwargs)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "matplotlib.collections.PatchCollection", "numpy.sqrt", "numpy.arange", "matplotlib.lines.Line2D", "matplotlib.patches.Circle", "numpy.cos", "numpy.full", "matplotlib.pyplot.draw", "numpy.sin", "numpy.ma.masked_invalid", "numpy.isscalar", "numpy.array", "numpy.zeros" ] ]
VladLujerdeanu/Image-to-Coe-File
[ "faab54003982ce5b53f89298a9057680a5b63e1c" ]
[ "img2coe.py" ]
[ "import numpy as np\nimport sys\nimport os\nfrom PIL import Image\n\ndef img2coe(path, index):\n img = Image.open(path)\n arr = np.array(img)\n\n output_file = \"img\" + str(index) + \".coe\"\n\n f = open(output_file, \"w\")\n\n f.write(\"memory_initialization_radix=2;\\nmemory_initialization_vector=\")\n\n for line in arr:\n for r, g, b in line:\n r = int((r * 16) / 256)\n g = int((g * 16) / 256)\n b = int((b * 16) / 256)\n f.write(str('\\n{:04b}'.format(r)) + str('{:04b}'.format(g)) + str('{:04b}'.format(b)) + \",\")\n f.seek(f.tell() - 1, os.SEEK_SET)\n f.truncate()\n f.write(\";\")\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n for i in range(1, len(sys.argv)):\n img2coe(str(sys.argv[i]), i)\n else:\n print(\"Insert at least one image path\\nFormat: python img2coe.py <path>\")" ]
[ [ "numpy.array" ] ]
taoddiao/dr.b
[ "87f9ae4a5001e1a9248b0e19ad90aa252e426fe9" ]
[ "DSB3Tutorial/LUNA_train_unet.py" ]
[ "from __future__ import print_function\n\nimport numpy as np\nfrom keras.models import Model\nfrom keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D\nfrom keras.optimizers import Adam\nfrom keras.optimizers import SGD\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras import backend as K\n\nworking_path = \"/home/qwerty/data/luna16/output/\"\n\nK.set_image_dim_ordering('th') # Theano dimension ordering in this code\n\nimg_rows = 512\nimg_cols = 512\n\nsmooth = 1.\n\n\ndef dice_coef(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\ndef dice_coef_np(y_true,y_pred):\n y_true_f = y_true.flatten()\n y_pred_f = y_pred.flatten()\n intersection = np.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)\n\ndef dice_coef_loss(y_true, y_pred):\n return -dice_coef(y_true, y_pred)\n\n\ndef get_unet():\n inputs = Input((1,img_rows, img_cols))\n conv1 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(inputs)\n conv1 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(pool1)\n conv2 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(pool2)\n conv3 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(pool3)\n conv4 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Convolution2D(512, (3, 3), activation='relu', border_mode='same')(pool4)\n conv5 = Convolution2D(512, (3, 3), activation='relu', border_mode='same')(conv5)\n\n up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)\n conv6 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(up6)\n conv6 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(conv6)\n\n up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)\n conv7 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(up7)\n conv7 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(conv7)\n\n up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)\n conv8 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(up8)\n conv8 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(conv8)\n\n up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)\n conv9 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(up9)\n conv9 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(conv9)\n\n conv10 = Convolution2D(1, (1, 1), activation='sigmoid')(conv9)\n\n model = Model(input=inputs, output=conv10)\n\n model.compile(optimizer=Adam(lr=1.0e-5), loss=dice_coef_loss, metrics=[dice_coef])\n\n return model\n\n\ndef train_and_predict(use_existing):\n print('-'*30)\n print('Loading and preprocessing train data...')\n print('-'*30)\n imgs_train = 
np.load(working_path+\"trainImages.npy\").astype(np.float32)\n imgs_mask_train = np.load(working_path+\"trainMasks.npy\").astype(np.float32)\n\n imgs_test = np.load(working_path+\"testImages.npy\").astype(np.float32)\n imgs_mask_test_true = np.load(working_path+\"testMasks.npy\").astype(np.float32)\n\n mean = np.mean(imgs_train) # mean for data centering\n std = np.std(imgs_train) # std for data normalization\n\n imgs_train -= mean # images should already be standardized, but just in case\n imgs_train /= std\n\n print('-'*30)\n print('Creating and compiling model...')\n print('-'*30)\n model = get_unet()\n # Saving weights to unet.hdf5 at checkpoints\n model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', save_best_only=True)\n #\n # Should we load existing weights?\n # Set argument for call to train_and_predict to true at end of script\n if use_existing:\n model.load_weights('./unet.hdf5')\n\n #\n # The final results for this tutorial were produced using a multi-GPU\n # machine using TitanX's.\n # For a home GPU computation benchmark, on my home set up with a GTX970\n # I was able to run 20 epochs with a training set size of 320 and\n # batch size of 2 in about an hour. I started getting reseasonable masks\n # after about 3 hours of training.\n #\n print('-'*30)\n print('Fitting model...')\n print('-'*30)\n model.fit(imgs_train, imgs_mask_train, batch_size=2, epochs=20, verbose=1, shuffle=True,\n callbacks=[model_checkpoint])\n\n # loading best weights from training session\n print('-'*30)\n print('Loading saved weights...')\n print('-'*30)\n model.load_weights('./unet.hdf5')\n\n print('-'*30)\n print('Predicting masks on test data...')\n print('-'*30)\n num_test = len(imgs_test)\n imgs_mask_test = np.ndarray([num_test,1,512,512],dtype=np.float32)\n for i in range(num_test):\n imgs_mask_test[i] = model.predict([imgs_test[i:i+1]], verbose=0)[0]\n np.save('masksTestPredicted.npy', imgs_mask_test)\n mean = 0.0\n for i in range(num_test):\n mean+=dice_coef_np(imgs_mask_test_true[i,0], imgs_mask_test[i,0])\n mean/=num_test\n print(\"Mean Dice Coeff : \",mean)\n\nif __name__ == '__main__':\n train_and_predict(True)\n" ]
[ [ "numpy.ndarray", "numpy.save", "numpy.std", "numpy.mean", "numpy.load", "numpy.sum" ] ]
Yuta1004/procon30-battle-simulator-py
[ "dcd0bb34efab3201705ff2188c2fc62f6ac7bc09" ]
[ "simulator/game.py" ]
[ "# Copylight(c) 2019 NakagamiYuta\n# LICENCE : MIT\n\nimport numpy as np\nimport json\nfrom simulator.common import flatten_2d, gen_2d_list\n\nclass Game:\n \"\"\"\n Gameクラス\n\n Brief:\n  シミュレーター\n \"\"\"\n\n def __init__(self, board, agents):\n \"\"\"\n コンストラクタ\n\n Params\n ----------\n board : Board\n 盤面情報\n agents : Agent + List\n エージェント情報\n \"\"\"\n self.board = board\n self.agents = agents\n self.turn = 0\n\n\n def set_action(self, team_id, agent_id, dx, dy, remove_panel=False):\n \"\"\"\n エージェントに行動をセット\n\n Params\n ----------\n team_id : int\n チームID\n agent_id : int\n エージェントID\n dx : int\n dy : int\n \"\"\"\n if abs(dx) > 1 or abs(dy) > 1:\n return False\n\n for agent in self.agents:\n if (agent.team == team_id) and (agent.id == agent_id):\n agent.dx = dx\n agent.dy = dy\n agent.remove_panel = remove_panel\n return True\n\n\n def step(self):\n \"\"\"\n 1ターンゲームを進める\n\n Params\n ----------\n None\n\n Returns\n ----------\n safety_agents : list\n 正常に行動できたエージェントのID\n affected_agents : list\n 競合を起こしたエージェントのID\n \"\"\"\n\n # 相手陣地への移動を停留扱いに\n for agent in filter(lambda n: n.dx >= -1, self.agents):\n mx, my = self.__cal_mx_my(agent)\n if (self.board.tiled[my][mx] != agent.team) and (self.board.tiled[my][mx] != 0)\\\n and (not agent.remove_panel):\n agent.remove_panel = False\n agent.dx = 0\n agent.dy = 0\n\n # エージェントの行動が影響する範囲をリストアップ\n affected_positions = []\n for agent in filter(lambda n: n.dx >= -1, self.agents):\n mx, my = self.__cal_mx_my(agent)\n affected_positions.append((mx, my))\n if self.__can_action(agent) and agent.remove_panel:\n affected_positions.append((agent.x, agent.y))\n\n # 競合リストアップ\n for agent in filter(lambda n: n.dx >= -1, self.agents):\n mx, my = self.__cal_mx_my(agent)\n if not self.__can_action(agent) or not affected_positions.count((mx, my)) == 1:\n affected_positions.append((agent.x, agent.y))\n\n # 影響がないエージェントを行動させる\n safety_agents = []\n affected_agents = []\n for agent in filter(lambda n: n.dx >= -1, self.agents):\n mx, my = self.__cal_mx_my(agent)\n if self.__can_action(agent) and (affected_positions.count((mx, my)) <= 1): # 競合確認\n agent.move() # 行動\n safety_agents.append(agent.id)\n if agent.remove_panel:\n self.board.tiled[my][mx] = 0\n else:\n self.board.tiled[my][mx] = agent.team\n else:\n affected_agents.append(agent.id)\n\n # エージェントリセット\n list(map(lambda agent: agent.reset(), self.agents))\n\n self.turn += 1\n return safety_agents, affected_agents\n\n\n def cal_score(self, team_id_list):\n \"\"\"\n スコアを計算する\n\n Params\n ----------\n team_id_list : int + List\n スコアを計算するチームIDのリスト\n\n Returns\n ----------\n map<int, int>\n チームIDがキー, スコアが値\n \"\"\"\n score_list = {}\n\n for (idx, team_id) in enumerate(team_id_list):\n score_list[team_id] = {}\n\n # タイルポイント\n tiled_tmp = flatten_2d(self.board.tiled)\n points_flat = flatten_2d(self.board.points)\n score_list[team_id][\"tilePoint\"] = sum(map(lambda x, y: (x == team_id) * y, tiled_tmp, points_flat))\n\n # 全ての座標について、囲みが有効か探索\n self.rec_tiled = gen_2d_list(self.board.height, self.board.width)\n for y in range(self.board.height):\n for x in range(self.board.width):\n if self.rec_tiled[y][x] == 0:\n search_result = self.__recursive_child(x, y, team_id)\n self.rec_tiled = self.__search_result_process(self.rec_tiled, search_result)\n # ↑探索成功ならrec_tiledに結果を反映、そうでない場合は結果を破棄する\n\n # 領域ポイント : 囲みが有効である座標のスコアを合計する\n self.rec_tiled = flatten_2d(self.rec_tiled)\n score_list[team_id][\"areaPoint\"] = sum(map(lambda x, y: abs(x * y), self.rec_tiled, points_flat))\n\n self.rec_tiled = None\n return score_list\n\n\n def 
__recursive_child(self, x, y, target):\n # 盤面の外周に来た = 囲み無効\n if self.board.tiled[y][x] == target:\n return True\n elif (x == 0) or (x == self.board.width - 1) or (y == 0) or (y == self.board.height - 1):\n return False\n\n self.rec_tiled[y][x] = 2\n\n # 4方向を調べる\n dx_list = [-1, 1, 0, 0]\n dy_list = [0, 0, -1, 1]\n for (dx, dy) in zip(dx_list, dy_list):\n mx = x + dx\n my = y + dy\n if self.__is_safe_pos(mx, my) and (self.rec_tiled[my][mx] == 0):\n if not self.__recursive_child(mx, my, target):\n return False\n return True\n\n\n def __cal_mx_my(self, agent):\n mx = agent.x + agent.dx\n my = agent.y + agent.dy\n return mx, my\n\n\n def __can_action(self, agent):\n mx, my = self.__cal_mx_my(agent)\n return self.__is_safe_pos(mx, my)\n\n\n def __is_safe_pos(self, x, y):\n return (0 <= x) and (x < self.board.width) and\\\n (0 <= y) and (y < self.board.height)\n\n\n def __search_result_process(self, tiled, result):\n tiled_np = np.array(tiled)\n if result:\n tiled_np = tiled_np / 2.0\n tiled_np = np.ceil(tiled_np)\n else:\n tiled_np -= 2\n tiled_np = np.abs(tiled_np)\n tiled_np = tiled_np == 1\n\n tiled_np = tiled_np.astype(np.int)\n return tiled_np.tolist()\n" ]
[ [ "numpy.ceil", "numpy.array", "numpy.abs" ] ]
acse-jl8920/IRP-Johnson
[ "2a70ab9b286726847cc5d5bb65232b2b241f4d5a" ]
[ ".ipynb_checkpoints/Model-checkpoint.py" ]
[ "#coding=utf-8\nimport tensorflow as tf\nimport keras \nfrom keras.models import *\nfrom keras.layers import *\nimport numpy as np\nfrom metrics import metrics\nfrom losses import LOSS_FACTORY\nfrom keras.callbacks import History\nfrom keras.callbacks import ModelCheckpoint\ndef conv_block(input, filters):\n out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(input)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n return out\n\ndef up_conv(input, filters):\n out = UpSampling2D()(input)\n out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n return out\n\nclass UNet():\n def __init__(self):\n \n self.model_weights_path = ''\n self.model = self.__build_UNet()\n self.height = 416\n self.width = 416\n \n def __build_UNet(self,nClasses = 2, input_height=416, input_width=416):\n \"\"\"\n UNet - Basic Implementation\n Paper : https://arxiv.org/abs/1505.04597\n \"\"\"\n inputs = Input(shape=(input_height, input_width, 1))\n \n n1 = 32\n filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]\n conv1 = conv_block(inputs, n1)\n \n conv2 = MaxPooling2D(strides=2)(conv1)\n conv2 = conv_block(conv2, filters[1])\n \n conv3 = MaxPooling2D(strides=2)(conv2)\n conv3 = conv_block(conv3, filters[2])\n \n conv4 = MaxPooling2D(strides=2)(conv3)\n conv4 = conv_block(conv4, filters[3])\n \n conv5 = MaxPooling2D(strides=2)(conv4)\n conv5 = conv_block(conv5, filters[4])\n \n d5 = up_conv(conv5, filters[3])\n d5 = Add()([conv4, d5])\n \n d4 = up_conv(d5, filters[2])\n d4 = Add()([conv3, d4])\n d4 = conv_block(d4, filters[2])\n \n d3 = up_conv(d4, filters[1])\n d3 = Add()([conv2, d3])\n d3 = conv_block(d3, filters[1])\n \n d2 = up_conv(d3, filters[0])\n d2 = Add()([conv1, d2])\n d2 = conv_block(d2, filters[0])\n \n o = Conv2D(nClasses, (3, 3), padding='same')(d2)\n \n outputHeight = Model(inputs, o).output_shape[1]\n outputWidth = Model(inputs, o).output_shape[2]\n \n out = (Reshape((outputHeight * outputWidth, nClasses)))(o)\n out = Activation('softmax')(out)\n \n model = Model(inputs=inputs, outputs=out)\n model.outputHeight = outputHeight\n model.outputWidth = outputWidth\n \n return model\n def load_weights(self, weights_path):\n self.model.load_weights(weights_path)\n \n def complie_model(self, optimizer=None, version = '0', loss = 'ce'):\n '''\n \n\n Parameters\n ----------\n optimizer : object, optional\n The default is None. It require a optimizer such as Adam or SGD.\n \n version : str, optional\n The version of your model test. 
The default is '0'.\n loss : Str, optional\n 'ce'\tCross Entropy\n 'weighted_ce'\tWeighted Categorical loss\n 'b_focal'\tBinary Focal loss\n 'c_focal'\tCategorical Focal loss\n 'dice'\tDice loss\tYes\n 'bce_dice'\tBCE + Dice loss\n 'ce_dice'\tCE + Dice loss\n 'g_dice'\tGeneralized Dice loss\n 'jaccard'\tJaccard loss\n 'bce_jaccard'\tBCE + Jaccard loss\n 'ce_jaccard'\tCE + Jaccard loss\n 'tversky\tTversky' loss\n 'f_tversky'\tFocal Tversky loss\n The default is 'ce'.\n\n Returns\n -------\n None.\n\n ''' \n \n csv_logger = CSVLogger(log_file_path, append=False)\n # early_stop = EarlyStopping('loss', min_delta=0.1, patience=patience, verbose=1)\n\n history = History()\n #set the log save dir, it will save the network value by every epochs in tensorboards.\n tb_cb = keras.callbacks.TensorBoard(log_dir='weights/exp1/'+version+'/log/' , write_images=1, histogram_freq=0)\n reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')\n self.call_backs = [csv_logger, tb_cb, reduce_lr]\n self.version = version\n if(optimizer == None):\n opt = optimizers.Adam()\n else:\n opt = optimizer\n loss = LOSS_FACTORY[loss]\n self.model.compile(opt, loss =loss, metrics=['accuracy', 'iou_score','f1_score']) \n \n def train(self, X_train, y_train, X_val, y_val,epochs=20, \n batch_sizes = 6, weight_pth='weights/exp1/'):\n hist = self.model.fit(X_train,y_train,batch_size = batch_sizes,\n callbacks = self.call_backs,epochs=epochs,\n validation_data=(X_val,y_val), shuffle=True)\n self.model.save_weights(weight_pth+self.version+'.h5')\n def test(self, img, ground_turth):\n '''\n ground_turth: array of mask(shape[num_imgs, height * width, channel(2)] \n \n '''\n loss = LOSS_FACTORY['ce']\n adam = optimizers.Adam()\n self.model.compile(adam, loss =loss, metrics=['accuracy', 'iou_score','f1_score'])\n if(len(ground_turth.shape)>4):\n shape = ground_turth.shape\n ground_turth.reshape(shape[0], self.width*self.height,2)\n self.model.evaluate(img, ground_turth)\n \n \n \n def detect_mult_img(self, imgs):\n '''\n \n\n Parameters\n ----------\n imgs : array\n Batch of image with shape [num_img, width, weight]\n for the model in this project is (n,416,416)\n Returns\n -------\n r1 : arrays\n mask of each images, with shape (n, 416, 416)\n\n '''\n imgs = np.asarray(imgs)\n result = self.model.predict(imgs)\n result = result.reshape(imgs.shape[0],imgs.shape[1],imgs.shape[2],2)\n r1 = np.zeros((imgs.shape[0],imgs.shape[1],imgs.shape[2]))\n r1[result[:,:,:,0]<result[:,:,:,1]] = 1\n return r1\n \n def detect_single_img(self,img, model):\n '''\n detect single image\n\n Parameters\n ----------\n imgs : array\n Batch of image with shape [num_img, width, weight]\n for the model in this project is (n,416,416)\n Returns\n -------\n r1 : arrays\n mask of each images, with shape (n, 416, 416)\n\n ''' \n img = np.asarray(img)\n result = self.model.predict(img)\n result = result.reshape(img.shape[0],img.shape[1],2)\n r1 = np.zeros((img.shape[0],img.shape[1]))\n r1[result[:,:,0]<result[:,:,1]] = 1\n return r1" ]
[ [ "numpy.asarray", "numpy.zeros" ] ]
shvetsiya/carvana
[ "acc594cba53c44d577c9e3e326e0163eea8b4862" ]
[ "model/unet.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass Conv3BN(nn.Module):\n \"\"\"A module which applies the following actions:\n - convolution with 3x3 kernel;\n - batch normalization (if enabled);\n - ELU.\n Attributes:\n in_ch: Number of input channels.\n out_ch: Number of output channels.\n bn: A boolean indicating if Batch Normalization is enabled or not.\n \"\"\"\n\n def __init__(self, in_ch: int, out_ch: int, bn=True):\n super(Conv3BN, self).__init__()\n self.conv = nn.Conv2d(in_ch, out_ch, 3, padding=1)\n self.bn = nn.BatchNorm2d(out_ch) if bn else None\n self.activation = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n x = self.activation(x)\n return x\n\n\nclass UNetEncoder(nn.Module):\n \"\"\"UNetEncoder module. Applies\n - MaxPool2d to reduce the input sice twice\n - twice Conv3BN, first with different size of channels and then with the same numbers of channels \n Attributes:\n in_ch: Number of input channels.\n out_ch: Number of output channels.\n \"\"\" \n def __init__(self, in_ch: int, out_ch: int):\n super(UNetEncoder, self).__init__()\n self.encode = nn.Sequential(nn.MaxPool2d(2, 2),\n Conv3BN(in_ch, out_ch),\n Conv3BN(out_ch, out_ch), \n )\n def forward(self, x):\n x = self.encode(x)\n return x\n\n\n\nclass UNetDecoder(nn.Module):\n \"\"\"UNetDecoder module. Applies\n - Upsample with scale_factor = 2\n - concatanation of miror slice with upsampled image along rows as a result the number of chanal increases\n - twice Conv3BN \n Attributes:\n in_ch: Number of input channels.\n out_ch: Number of output channels.\n \"\"\"\n def __init__(self, in_ch: int, out_ch: int):\n super(UNetDecoder, self).__init__()\n\n self.decode = nn.Sequential(Conv3BN(in_ch, out_ch),\n Conv3BN(out_ch, out_ch),\n Conv3BN(out_ch, out_ch),\n )\n\n self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')\n def forward(self, x_copy, x_down):\n #N, C, H, W = x_copy.size()\n x_up = self.upsample(x_down) #F.upsample(x_down, size=(H, W), mode='bilinear')\n x_up = torch.cat([x_copy, x_up], 1)\n x_new = self.decode(x_up)\n return x_new\n \n\nclass UNet(nn.Module):\n \"\"\"A UNet module. 
Applies\n - once input_layer\n - depth times of\n - UNetEncoder\n - UNetDecoder\n - activation (sigmoid)\n The number of output channels of each UNetEncoder/UNetDecoder is twice larger/less than the previous\n number of input channels;\n Attributes:\n num_classes: Number of output channels.\n input_channels: Number of input image channels.\n filter_base: Number of out channels of the first UNet layer and base size for the each next.\n depth: number of UNet layers UNetEncoder/UNetDecoder on the way down/up.\n filter_base and depthe are connected as filter_base*2**depth = 1024 - the number of channels on the bottom layer\n \"\"\"\n\n def __init__(self,\n num_classes: int=1,\n input_channels: int=3,\n filters_base: int=8,\n depth: int=7):\n super(UNet, self).__init__()\n\n #filter sizes for down, center and up\n down_filter_sizes = [filters_base * 2**i for i in range(depth+1)] # 32, 64, 128, 256, 512, 1024\n up_filter_sizes = list(reversed(down_filter_sizes))\n\n # input layer\n self.input_layer = nn.Sequential(Conv3BN(input_channels, filters_base),\n Conv3BN(filters_base, filters_base),\n )\n # Going down: \n\n self.down, self.up = nn.ModuleList(), nn.ModuleList()\n # depth filters to go down\n for i in range(1, depth+1):\n self.down.append(UNetEncoder(down_filter_sizes[i-1], down_filter_sizes[i])) \n \n #depth filters to go up\n for i in range(1, depth+1): # the number of channel increseas after concatenation\n self.up.append(UNetDecoder(up_filter_sizes[i-1]+up_filter_sizes[i], up_filter_sizes[i]))\n\n # Final layer and activation:\n self.output = nn.Conv2d(up_filter_sizes[-1], out_channels=num_classes, kernel_size=1)\n \n self.activation = F.sigmoid\n \n def forward(self, x):\n \n x = self.input_layer(x) \n xs = [x] # collect slices from down side to copy them to up side\n #go down \n for module in self.down:\n x = module(x)\n xs.append(x)\n\n xs.reverse() \n\n #go up\n x = xs[0]\n for xc, module in zip(xs[1:], self.up):\n x = module(xc, x)\n\n x = self.output(x) \n x = self.activation(x)\n return x\n\n" ]
[ [ "torch.cat", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Upsample", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
gerkamspiano/QuantMacro
[ "f7e6e4ff7ae075d556f73cb1434c45652b4180cb" ]
[ "ps5_II.2_II.3.py" ]
[ "# Problem Set 5 - Germán Sánchez Arce\r\n\r\n# In collaboration with María González\r\n\r\n# Import packages\r\n\r\nimport numpy as np\r\nfrom numpy import vectorize\r\nfrom itertools import product\r\nimport matplotlib.pyplot as plt\r\nimport scipy as sp\r\nfrom scipy.interpolate import BSpline\r\nfrom scipy.interpolate import interp1d\r\n\r\n# Parametrization of the model:\r\n\r\nro = 0.06\r\nbeta = 1/(1+ro)\r\nw = 1\r\nr = 0.04\r\ngamma = 0.5\r\nsigmay = 0.2\r\n\r\n# Transition matrix for the Markov Process\r\n\r\npi = np.array([((1+gamma)/2, (1-gamma)/2),((1-gamma)/2, (1+gamma)/2)])\r\n\r\n#%% II.2 - The infinitely-lived households economy (Discrete method)\r\n\r\n########################### Quadratic Utility #################################\r\n\r\nY = (1-sigmay, 1+sigmay)\r\n\r\ncbar = 100*Y[1] # parameter for avoiding saturation of any consumer\r\n\r\nA = np.linspace(((-(1+r)/r)*Y[0]), 30, 80) # grid over assets tomorrow\r\n\r\nay = list(product(Y, A, A))\r\nay = np.array(ay)\r\n\r\ny = ay[:, 0]\r\nai = ay[:, 1]\r\naj = ay[:, 2]\r\n\r\nc = y+(1+r)*ai-aj\r\n\r\n@vectorize\r\n \r\ndef M(c):\r\n \r\n return -0.5*(c-cbar)**2\r\n \r\nM = M(c)\r\nM = np.reshape(M, (1, 12800))\r\nM = np.reshape(M, (160, 80))\r\n\r\n# Initial guess for the value function is a vector of zeros:\r\n\r\nVs = np.zeros(160)\r\n\r\n# Compute the matrix W:\r\n\r\ndef W1(A): \r\n \r\n return pi[0, 0]*(-0.5*(Y[0] + (1+r)*A - A - cbar)**2)/(1-beta) + pi[0, 1]*(-0.5*(Y[1] + (1+r)*A - A - cbar)**2)/(1-beta)\r\n\r\ndef W2(A):\r\n \r\n return pi[1, 0]*(-0.5*(Y[0] + (1+r)*A - A - cbar)**2)/(1-beta) + pi[1, 1]*(-0.5*(Y[1] + (1+r)*A - A - cbar)**2)/(1-beta)\r\n\r\n \r\nW1 = W1(A)\r\nW1 = np.reshape(W1, (80,1))\r\nW1 = np.tile(W1, 80)\r\nW1 = np.transpose(W1)\r\n\r\nW2 = W2(A)\r\nW2 = np.reshape(W2, (80,1))\r\nW2 = np.tile(W2, 80)\r\nW2 = np.transpose(W2)\r\n\r\nW = [W1, W2]\r\nW = np.reshape(W, (160,80))\r\n\r\n# Compute the matrix X:\r\n\r\nX = M + beta*W\r\n\r\nVs1 = np.amax(X, axis = 1)\r\n\r\ndiffVs = Vs - Vs1\r\n\r\ncount = 0\r\n\r\n# If differences are larger than 1, we iterate taking as new value functions \r\n# Vs1 up to obtain convergence:\r\n\r\nfor diffVs in range(1, 8000):\r\n \r\n Vss = Vs1\r\n Vs = [Vss[0:80], Vss[80:]]\r\n Vs = np.array(Vs)\r\n \r\n def W1(Vs):\r\n \r\n return pi[0, 0]*Vs[0, :] + pi[0, 1]*Vs[1, :]\r\n \r\n def W2(Vs):\r\n \r\n return pi[1, 0]*Vs[0, :] + pi[1, 1]*Vs[1, :]\r\n\r\n W1 = W1(Vs)\r\n W1 = np.reshape(W1, (1,80))\r\n W1 = np.tile(W1, 80)\r\n W1 = np.reshape(W1, (80,80))\r\n\r\n W2 = W2(Vs)\r\n W2 = np.reshape(W2, (1,80))\r\n W2 = np.tile(W2, 80)\r\n W2 = np.reshape(W2, (80,80))\r\n \r\n W = [W1, W2]\r\n W = np.reshape(W, (160, 80))\r\n \r\n X = M + beta*W\r\n \r\n Vs1 = np.amax(X, axis = 1)\r\n \r\n diffVs = Vss - Vs1\r\n \r\n count += 1\r\n \r\n\r\n# Once we obtain convergence, redefine the matrix X:\r\n \r\nX = M + beta*W\r\n\r\n# The value function given different realizations of y:\r\n\r\nV_y1 = Vs1[0:80]\r\nV_y2 = Vs1[80:]\r\n\r\n# Now we can obtain the decision rule, which give us column number that\r\n# maximizes row i of the X matrix:\r\n\r\ng = np.argmax(X, axis = 1)\r\n\r\n# For the first 45 periods:\r\n\r\naopt_y1 = A[g[0:80]] # optimal decision of assets given y1\r\naopt_y2 = A[g[80:]] # optimal decision of assets given y2\r\n\r\nc_y1 = Y[0]*np.ones(80) + (1+r)*A - aopt_y1\r\n\r\nc_y2 = Y[1]*np.ones(80) + (1+r)*A - aopt_y2\r\n\r\nfor i in range(0, 80):\r\n \r\n if c_y1[i] < 0:\r\n \r\n c_y1[i] = 0\r\n \r\n if c_y2[i] < 0:\r\n \r\n c_y2[i] = 0\r\n \r\n# Plot the value 
function and the optimal policy:\r\n \r\nplt.figure()\r\nplt.plot(A, V_y1, label = 'Value function for negative shock')\r\nplt.plot(A, V_y2, label = 'Value function for positive shock')\r\nplt.title('Value Function Iteration')\r\nplt.legend()\r\nplt.ylabel('Value Function')\r\nplt.xlabel('Assets')\r\nplt.show()\r\n \r\nplt.figure()\r\nplt.plot(A, aopt_y1, label = 'Optimal assets for negative shock')\r\nplt.plot(A, aopt_y2, label = 'Optimal assets for positive shock')\r\nplt.title('Policy rule for assets')\r\nplt.legend()\r\nplt.ylabel('Assets tomorrow')\r\nplt.xlabel('Assets today')\r\nplt.show()\r\n\r\nplt.figure()\r\nplt.plot(A, c_y1, label = 'Optimal consumption for negative shock')\r\nplt.plot(A, c_y2, label = 'Optimal consumption for positive shock')\r\nplt.title('Policy rule for consumption')\r\nplt.legend()\r\nplt.ylabel('Consumption')\r\nplt.xlabel('Assets')\r\nplt.show()\r\n\r\n#%% II.3 - The life-cycle economy (Backwards)\r\n\r\n########################### Quadratic Utility #################################\r\n\r\n\r\nW = np.zeros((160, 80))\r\n\r\ncount = 0\r\n\r\nwhile count < 45:\r\n \r\n W = np.amax((M + beta*W), axis = 1)\r\n W = np.reshape(W,(160, 1))\r\n W = W*np.ones((160, 80))\r\n \r\n count += 1\r\n\r\nplt.plot(A, W[0:80, 0], label = 'Value function for negative shock')\r\nplt.plot(A, W[80:, 0], label = 'Value function for positive shock')\r\nplt.legend()\r\nplt.title('Value function for finite horizon')\r\nplt.ylabel('Value function')\r\nplt.xlabel('Assets')\r\nplt.show()\r\n\r\nX = M + beta*W\r\ng = np.argmax(X, axis = 1)\r\n\r\naopt_y1 = A[g[0:80]] # optimal decision of assets given y1\r\naopt_y2 = A[g[80:]] # optimal decision of assets given y2\r\n\r\nc_y1 = Y[0]*np.ones(80) + (1+r)*A - aopt_y1\r\n\r\nc_y2 = Y[1]*np.ones(80) + (1+r)*A - aopt_y2\r\n\r\nfor i in range(0, 80):\r\n \r\n if c_y1[i] < 0:\r\n \r\n c_y1[i] = 0\r\n \r\n if c_y2[i] < 0:\r\n \r\n c_y2[i] = 0\r\n \r\nplt.figure()\r\nplt.plot(A, aopt_y1, label = 'Optimal assets for negative shock')\r\nplt.plot(A, aopt_y2, label = 'Optimal assets for positive shock')\r\nplt.legend()\r\nplt.title('Policy rule for assets')\r\nplt.ylabel('Assets tomorrow')\r\nplt.xlabel('Assets today')\r\nplt.show()\r\n\r\nplt.figure()\r\nplt.plot(A, c_y1, label = 'Optimal consumption for negative shock')\r\nplt.plot(A, c_y2, label = 'Optimal consumption for positive shock')\r\nplt.title('Policy rule for consumption')\r\nplt.legend()\r\nplt.ylabel('Consumption')\r\nplt.xlabel('Assets')\r\nplt.show()\r\n\r\n#%% II.2 - The infinitely-lived households economy (Discrete method)\r\n\r\n########################### CRRA Utility #####################################\r\n\r\nsigma = 2\r\n\r\nA = np.linspace(((-(1+r)/r)*Y[0]), 30, 80) # grid over assets tomorrow\r\n\r\nay = list(product(Y, A, A))\r\nay = np.array(ay)\r\n\r\ny = ay[:, 0]\r\nai = ay[:, 1]\r\naj = ay[:, 2]\r\n\r\nc = y + (1+r)*ai - aj\r\n \r\nM = np.zeros(12800)\r\n\r\nfor i in range(0, 12800):\r\n \r\n if c[i] >= 0:\r\n \r\n M[i] = ((c[i]**(1-sigma))-1)/(1-sigma)\r\n \r\n if c[i] < 0:\r\n \r\n M[i] = -100000\r\n\r\nM = np.reshape(M, (1, 12800)) \r\nM = np.reshape(M, (160, 80))\r\n\r\n# Initial guess for the value function is a vector of zeros:\r\n\r\nVs = np.zeros(160)\r\n\r\n# Compute the matrix W:\r\n\r\ndef W1(A): \r\n \r\n return pi[0, 0]*(((Y[0] + (1+r)*A - A)**(1-sigma))-1)/((1-sigma)*(1-beta)) + pi[0, 1]*(((Y[1] + (1+r)*A - A)**(1-sigma))-1)/((1-sigma)*(1-beta))\r\n\r\ndef W2(A):\r\n \r\n return pi[1, 0]*(((Y[0] + (1+r)*A - A)**(1-sigma))-1)/((1-sigma)*(1-beta)) + pi[1, 
1]*(((Y[1] + (1+r)*A - A)**(1-sigma))-1)/((1-sigma)*(1-beta))\r\n\r\n \r\nW1 = W1(A)\r\nW1 = np.reshape(W1, (80,1))\r\nW1 = np.tile(W1, 80)\r\nW1 = np.transpose(W1)\r\n\r\nW2 = W2(A)\r\nW2 = np.reshape(W2, (80,1))\r\nW2 = np.tile(W2, 80)\r\nW2 = np.transpose(W2)\r\n\r\nW = [W1, W2]\r\nW = np.reshape(W, (160,80))\r\n\r\n# Compute the matrix X:\r\n\r\nX = M + beta*W\r\n\r\nVs1 = np.amax(X, axis = 1)\r\n\r\ndiffVs = Vs - Vs1\r\n\r\ncount = 0\r\n\r\n# If differences are larger than 1, we iterate taking as new value functions \r\n# Vs1 up to obtain convergence:\r\n\r\nfor diffVs in range(1, 8000):\r\n \r\n Vss = Vs1\r\n Vs = [Vss[0:80], Vss[80:]]\r\n Vs = np.array(Vs)\r\n \r\n def W1(Vs):\r\n \r\n return pi[0, 0]*Vs[0, :] + pi[0, 1]*Vs[1, :]\r\n \r\n def W2(Vs):\r\n \r\n return pi[1, 0]*Vs[0, :] + pi[1, 1]*Vs[1, :]\r\n\r\n W1 = W1(Vs)\r\n W1 = np.reshape(W1, (1,80))\r\n W1 = np.tile(W1, 80)\r\n W1 = np.reshape(W1, (80,80))\r\n\r\n W2 = W2(Vs)\r\n W2 = np.reshape(W2, (1,80))\r\n W2 = np.tile(W2, 80)\r\n W2 = np.reshape(W2, (80,80))\r\n \r\n W = [W1, W2]\r\n W = np.reshape(W, (160, 80))\r\n \r\n X = M + beta*W\r\n \r\n Vs1 = np.amax(X, axis = 1)\r\n \r\n diffVs = Vss - Vs1\r\n \r\n count += 1\r\n \r\n\r\n# Once we obtain convergence, redefine the matrix X:\r\n \r\nX = M + beta*W\r\n\r\n# The value function given different realizations of y:\r\n\r\nV_y1 = Vs1[0:80]\r\nV_y2 = Vs1[80:]\r\n\r\n# Now we can obtain the decision rule, which give us column number that\r\n# maximizes row i of the X matrix:\r\n\r\ng = np.argmax(X, axis = 1)\r\n\r\n# For the first 45 periods:\r\n\r\naopt_y1 = A[g[0:80]] # optimal decision of assets given y1\r\naopt_y2 = A[g[80:]] # optimal decision of assets given y2\r\n\r\nfor i in range(0, 2):\r\n \r\n aopt_y1[i] = 0\r\n aopt_y2[i] = 0\r\n \r\n\r\nc_y1 = Y[0]*np.ones(80) + (1+r)*A - aopt_y1\r\n\r\nc_y2 = Y[1]*np.ones(80) + (1+r)*A - aopt_y2\r\n\r\nfor i in range(0, 80):\r\n \r\n if c_y1[i] < 0:\r\n \r\n c_y1[i] = 0\r\n \r\n if c_y2[i] < 0:\r\n \r\n c_y2[i] = 0\r\n \r\n# Plot the value function and the optimal policy:\r\n \r\nplt.figure()\r\nplt.plot(A[3:], V_y1[3:], label = 'Value function for negative shock')\r\nplt.plot(A[3:], V_y2[3:], label = 'Value function for positive shock')\r\nplt.title('Value Function Iteration')\r\nplt.legend()\r\nplt.ylabel('Value Function')\r\nplt.xlabel('Assets')\r\nplt.show()\r\n \r\nplt.figure()\r\nplt.plot(A[3:], aopt_y1[3:], label = 'Optimal assets for negative shock')\r\nplt.plot(A[3:], aopt_y2[3:], label = 'Optimal assets for positive shock')\r\nplt.title('Policy rule for assets')\r\nplt.legend()\r\nplt.ylabel('Assets tomorrow')\r\nplt.xlabel('Assets today')\r\nplt.show()\r\n\r\nplt.figure()\r\nplt.plot(A, c_y1, label = 'Optimal consumption for negative shock')\r\nplt.plot(A, c_y2, label = 'Optimal consumption for positive shock')\r\nplt.title('Policy rule for consumption')\r\nplt.legend()\r\nplt.ylabel('Consumption')\r\nplt.xlabel('Assets')\r\nplt.show()\r\n\r\n#%% II.3 - The life-cycle economy (Backwards)\r\n\r\n########################### CRRA Utility #####################################\r\n\r\nW = np.zeros((160, 80))\r\n\r\ncount = 0\r\n\r\nwhile count < 45:\r\n \r\n W = np.amax((M + beta*W), axis = 1)\r\n W = np.reshape(W,(160, 1))\r\n W = W*np.ones((160, 80))\r\n \r\n count += 1\r\n\r\nplt.plot(A[1:], W[1:80, 1], label = 'Value function for negative shock')\r\nplt.plot(A[1:], W[81:, 1], label = 'Value function for positive shock')\r\nplt.title('Value function for finite horizon')\r\nplt.legend()\r\nplt.ylabel('Value 
function')\r\nplt.xlabel('Assets')\r\nplt.show()\r\n\r\nX = M + beta*W\r\ng = np.argmax(X, axis = 1)\r\n\r\naopt_y1 = A[g[0:80]] # optimal decision of assets given y1\r\naopt_y2 = A[g[80:]] # optimal decision of assets given y2\r\n\r\nc_y1 = Y[0]*np.ones(80) + (1+r)*A - aopt_y1\r\n\r\nc_y2 = Y[1]*np.ones(80) + (1+r)*A - aopt_y2\r\n\r\nfor i in range(0, 80):\r\n \r\n if c_y1[i] < 0:\r\n \r\n c_y1[i] = 0\r\n \r\n if c_y2[i] < 0:\r\n \r\n c_y2[i] = 0\r\n \r\nplt.figure()\r\nplt.plot(A, aopt_y1, label = 'Optimal assets for negative shock')\r\nplt.plot(A, aopt_y2, label = 'Optimal assets for positive shock')\r\nplt.title('Policy rule for assets')\r\nplt.legend()\r\nplt.ylabel('Assets tomorrow')\r\nplt.xlabel('Assets today')\r\nplt.show()\r\n\r\nplt.figure()\r\nplt.plot(A, c_y1, label = 'Optimal consumption for negative shock')\r\nplt.plot(A, c_y2, label = 'Optimal consumption for positive shock')\r\nplt.title('Policy rule for consumption')\r\nplt.legend()\r\nplt.ylabel('Consumption')\r\nplt.xlabel('Assets')\r\nplt.show()\r\n\r\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.amax", "numpy.linspace", "matplotlib.pyplot.title", "numpy.reshape", "numpy.tile", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "numpy.argmax", "numpy.transpose", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
yangapku/OFA
[ "6bf21b0f2483d53b2750db1ea3fd103ec7d331d1" ]
[ "evaluate.py" ]
[ "#!/usr/bin/env python3 -u\n# Copyright 2022 The OFA-Sys Team. \n# All rights reserved.\n# This source code is licensed under the Apache 2.0 license \n# found in the LICENSE file in the root directory.\n\nimport logging\nimport os\nimport sys\n\nimport numpy as np\nimport torch\nfrom fairseq import distributed_utils, options, tasks, utils\nfrom fairseq.dataclass.utils import convert_namespace_to_omegaconf\nfrom fairseq.logging import progress_bar\nfrom fairseq.utils import reset_logging\nfrom omegaconf import DictConfig\n\nfrom utils import checkpoint_utils\nfrom utils.eval_utils import eval_step, merge_results\n\nlogging.basicConfig(\n format=\"%(asctime)s | %(levelname)s | %(name)s | %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n level=os.environ.get(\"LOGLEVEL\", \"INFO\").upper(),\n stream=sys.stdout,\n)\nlogger = logging.getLogger(\"ofa.evaluate\")\n\n\ndef apply_half(t):\n if t.dtype is torch.float32:\n return t.to(dtype=torch.half)\n return t\n\n\ndef main(cfg: DictConfig, **kwargs):\n utils.import_user_module(cfg.common)\n\n reset_logging()\n logger.info(cfg)\n\n assert (\n cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None\n ), \"Must specify batch size either with --max-tokens or --batch-size\"\n\n # Fix seed for stochastic decoding\n if cfg.common.seed is not None and not cfg.generation.no_seed_provided:\n np.random.seed(cfg.common.seed)\n utils.set_torch_seed(cfg.common.seed)\n\n use_fp16 = cfg.common.fp16\n use_cuda = torch.cuda.is_available() and not cfg.common.cpu\n\n if use_cuda:\n torch.cuda.set_device(cfg.distributed_training.device_id)\n\n # Load ensemble\n overrides = eval(cfg.common_eval.model_overrides)\n logger.info(\"loading model(s) from {}\".format(cfg.common_eval.path))\n models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(\n utils.split_paths(cfg.common_eval.path),\n arg_overrides=overrides,\n suffix=cfg.checkpoint.checkpoint_suffix,\n strict=(cfg.checkpoint.checkpoint_shard_count == 1),\n num_shards=cfg.checkpoint.checkpoint_shard_count,\n )\n\n # loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config\n task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)\n\n # Move models to GPU\n for model, ckpt_path in zip(models, utils.split_paths(cfg.common_eval.path)):\n if kwargs['ema_eval']:\n logger.info(\"loading EMA weights from {}\".format(ckpt_path))\n model.load_state_dict(checkpoint_utils.load_ema_from_checkpoint(ckpt_path)['model'])\n model.eval()\n if use_fp16:\n model.half()\n if use_cuda and not cfg.distributed_training.pipeline_model_parallel:\n model.cuda()\n model.prepare_for_inference_(cfg)\n\n # Load dataset (possibly sharded)\n itr = task.get_batch_iterator(\n dataset=task.dataset(cfg.dataset.gen_subset),\n max_tokens=cfg.dataset.max_tokens,\n max_sentences=cfg.dataset.batch_size,\n max_positions=utils.resolve_max_positions(\n task.max_positions(), *[m.max_positions() for m in models]\n ),\n ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,\n seed=cfg.common.seed,\n num_shards=cfg.distributed_training.distributed_world_size,\n shard_id=cfg.distributed_training.distributed_rank,\n num_workers=cfg.dataset.num_workers,\n data_buffer_size=cfg.dataset.data_buffer_size,\n ).next_epoch_itr(shuffle=False)\n progress = progress_bar.progress_bar(\n itr,\n log_format=cfg.common.log_format,\n log_interval=cfg.common.log_interval,\n 
default_log_format=(\"tqdm\" if not cfg.common.no_progress_bar else \"simple\"),\n )\n\n # Initialize generator\n generator = task.build_generator(models, cfg.generation)\n\n results = []\n score_sum = torch.FloatTensor([0]).cuda()\n score_cnt = torch.FloatTensor([0]).cuda()\n for sample in progress:\n if \"net_input\" not in sample:\n continue\n sample = utils.move_to_cuda(sample) if use_cuda else sample\n sample = utils.apply_to_sample(apply_half, sample) if cfg.common.fp16 else sample\n with torch.no_grad():\n result, scores = eval_step(task, generator, models, sample, **kwargs)\n results += result\n score_sum += sum(scores) if scores is not None else 0\n score_cnt += len(scores) if scores is not None else 0\n progress.log({\"sentences\": sample[\"nsentences\"]})\n\n merge_results(task, cfg, logger, score_cnt, score_sum, results)\n\n\ndef cli_main():\n parser = options.get_generation_parser()\n parser.add_argument(\"--ema-eval\", action='store_true', help=\"Use EMA weights to make evaluation.\")\n parser.add_argument(\"--beam-search-vqa-eval\", action='store_true', help=\"Use beam search for vqa evaluation (faster inference speed but sub-optimal result), if not specified, we compute scores for each answer in the candidate set, which is slower but can obtain best result.\")\n args = options.parse_args_and_arch(parser)\n cfg = convert_namespace_to_omegaconf(args)\n distributed_utils.call_main(cfg, main, ema_eval=args.ema_eval, beam_search_vqa_eval=args.beam_search_vqa_eval)\n\n\nif __name__ == \"__main__\":\n cli_main()\n" ]
[ [ "torch.cuda.set_device", "numpy.random.seed", "torch.no_grad", "torch.FloatTensor", "torch.cuda.is_available" ] ]
tiwalayo/flexible-bnn
[ "424572de879d64ee0b2f004d9649e823d2004430" ]
[ "src/models/stochastic/bbb/utils.py" ]
[ "import torch \r\nimport numpy as np\r\nimport torch.nn.functional as F\r\n\r\ndef kl_divergence(mu, sigma, mu_prior, sigma_prior):\r\n kl = 0.5 * (2 * torch.log(sigma_prior / sigma) - 1 + (sigma / sigma_prior).pow(2) + ((mu_prior - mu) / sigma_prior).pow(2)).sum() \r\n return kl\r\n\r\ndef normpdf(x, mu=0.0, sigma=0.3):\r\n m = torch.distributions.Normal(torch.tensor([mu]).to(x.device), torch.tensor([sigma]).to(x.device))\r\n return torch.exp(m.log_prob(x))\r\n\r\ndef KumaraswamyKL(A, B, prior=None, n_samples=100):\r\n GAMMA = 0.57721566490153286060651209008240243104215933593992\r\n return -((1-1/B) + (1-1/A) * (GAMMA + torch.log(B)) - torch.log(A*B)).sum()\r\n \r\n if not prior:\r\n raise ValueError(\"You need to supply a prior.\")\r\n eps = 1e-20\r\n T_ = lambda x, a, b: 2*(torch.pow(1 - torch.pow(1-x,1/b), 1/a))-1\r\n Kpdf = lambda x, a, b: a * b * torch.pow((x+1)/2,a-1) * torch.pow((1-torch.pow((x+1)/2,a)), b-1)\r\n \r\n def logratio(x):\r\n noise = torch.FloatTensor(n_samples).uniform_(0, 1).to(x.device)\r\n samples = T_(noise, x[0], x[1])\r\n return torch.log(eps+Kpdf(samples, x[0], x[1])) - torch.log(eps + prior(samples))\r\n \r\n params = torch.unbind(torch.cat((A.unsqueeze(0),B.unsqueeze(0)),dim=0).view(2,-1),dim=1)\r\n s =torch.cat([logratio(p) for p in params]).sum()\r\n return s" ]
[ [ "torch.FloatTensor", "torch.log", "torch.pow", "torch.tensor" ] ]
ChristophRaab/DATL
[ "e1d44992e41060bb842525591181bfbbf7fd3c23" ]
[ "parameter_init_adjustments.py" ]
[ "import numpy as np\nimport torch\nfrom torch import nn\n\n\ndef init_weights(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:\n nn.init.kaiming_uniform_(m.weight)\n nn.init.zeros_(m.bias)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight, 1.0, 0.02)\n nn.init.zeros_(m.bias)\n elif classname.find('Linear') != -1:\n nn.init.xavier_normal_(m.weight)\n nn.init.zeros_(m.bias)\n\ndef cdann_lda_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0): # CDAM Lambda Adjustments progress based. \n return np.float(2.0 * (high - low) / (1.0 + np.exp(-alpha*iter_num / max_iter)) - (high - low) + low)\n\ndef inv_lr_scheduler(optimizer, iter_num, gamma, power, lr=0.001, weight_decay=0.0005):\n \"\"\"Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.\"\"\"\n lr = lr * (1 + gamma * iter_num) ** (-power)\n i=0\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr * param_group['lr_mult']\n param_group['weight_decay'] = weight_decay * param_group['decay_mult']\n i+=1\n\n return optimizer" ]
[ [ "torch.nn.init.xavier_normal_", "torch.nn.init.kaiming_uniform_", "torch.nn.init.normal_", "torch.nn.init.zeros_", "numpy.exp" ] ]
mzweilin/armory
[ "da3fedc02f6f4841a813c4af8aafcc3ff7501665" ]
[ "armory/utils/metrics.py" ]
[ "\"\"\"\nMetrics for scenarios\n\nOutputs are lists of python variables amenable to JSON serialization:\n e.g., bool, int, float\n numpy data types and tensors generally fail to serialize\n\"\"\"\n\nimport logging\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\ndef categorical_accuracy(y, y_pred):\n \"\"\"\n Return the categorical accuracy of the predictions\n \"\"\"\n y = np.asarray(y)\n y_pred = np.asarray(y_pred)\n if y.ndim == 0:\n y = np.array([y])\n y_pred = np.array([y_pred])\n\n if y.shape == y_pred.shape:\n return [int(x) for x in list(y == y_pred)]\n elif y.ndim + 1 == y_pred.ndim:\n if y.ndim == 0:\n return [int(y == np.argmax(y_pred, axis=-1))]\n return [int(x) for x in list(y == np.argmax(y_pred, axis=-1))]\n else:\n raise ValueError(f\"{y} and {y_pred} have mismatched dimensions\")\n\n\ndef top_5_categorical_accuracy(y, y_pred):\n \"\"\"\n Return the top 5 categorical accuracy of the predictions\n \"\"\"\n return top_n_categorical_accuracy(y, y_pred, 5)\n\n\ndef top_n_categorical_accuracy(y, y_pred, n):\n if n < 1:\n raise ValueError(f\"n must be a positive integer, not {n}\")\n n = int(n)\n if n == 1:\n return categorical_accuracy(y, y_pred)\n y = np.asarray(y)\n y_pred = np.asarray(y_pred)\n if y.ndim == 0:\n y = np.array([y])\n y_pred = np.array([y_pred])\n\n if len(y) != len(y_pred):\n raise ValueError(\"y and y_pred are of different length\")\n if y.shape == y_pred.shape:\n raise ValueError(\"Must supply multiple predictions for top 5 accuracy\")\n elif y.ndim + 1 == y_pred.ndim:\n y_pred_top5 = np.argsort(y_pred, axis=-1)[:, -n:]\n if y.ndim == 0:\n return [int(y in y_pred_top5)]\n return [int(y[i] in y_pred_top5[i]) for i in range(len(y))]\n else:\n raise ValueError(f\"{y} and {y_pred} have mismatched dimensions\")\n\n\ndef norm(x, x_adv, ord):\n \"\"\"\n Return the given norm over a batch, outputting a list of floats\n \"\"\"\n x = np.asarray(x)\n x_adv = np.asarray(x_adv)\n # cast to float first to prevent overflow errors\n diff = (x.astype(float) - x_adv.astype(float)).reshape(x.shape[0], -1)\n values = np.linalg.norm(diff, ord=ord, axis=1)\n return list(float(x) for x in values)\n\n\ndef linf(x, x_adv):\n \"\"\"\n Return the L-infinity norm over a batch of inputs as a float\n \"\"\"\n return norm(x, x_adv, np.inf)\n\n\ndef l2(x, x_adv):\n \"\"\"\n Return the L2 norm over a batch of inputs as a float\n \"\"\"\n return norm(x, x_adv, 2)\n\n\ndef l1(x, x_adv):\n \"\"\"\n Return the L1 norm over a batch of inputs as a float\n \"\"\"\n return norm(x, x_adv, 1)\n\n\ndef lp(x, x_adv, p):\n \"\"\"\n Return the Lp norm over a batch of inputs as a float\n \"\"\"\n if p <= 0:\n raise ValueError(f\"p must be positive, not {p}\")\n return norm(x, x_adv, p)\n\n\ndef l0(x, x_adv):\n \"\"\"\n Return the L0 'norm' over a batch of inputs as a float\n \"\"\"\n return norm(x, x_adv, 0)\n\n\ndef _snr(x_i, x_adv_i):\n x_i = np.asarray(x_i, dtype=float)\n x_adv_i = np.asarray(x_adv_i, dtype=float)\n if x_i.shape != x_adv_i.shape:\n raise ValueError(f\"x_i.shape {x_i.shape} != x_adv_i.shape {x_adv_i.shape}\")\n elif x_i.ndim != 1:\n raise ValueError(\"_snr input must be single dimensional (not multichannel)\")\n signal_power = (x_i ** 2).mean()\n noise_power = ((x_i - x_adv_i) ** 2).mean()\n return signal_power / noise_power\n\n\ndef snr(x, x_adv):\n \"\"\"\n Return the SNR of a batch of samples with raw audio input\n \"\"\"\n if len(x) != len(x_adv):\n raise ValueError(f\"len(x) {len(x)} != len(x_adv) {len(x_adv)}\")\n return [float(_snr(x_i, x_adv_i)) for (x_i, 
x_adv_i) in zip(x, x_adv)]\n\n\ndef snr_db(x, x_adv):\n \"\"\"\n Return the SNR of a batch of samples with raw audio input in Decibels (DB)\n \"\"\"\n return [float(i) for i in 10 * np.log10(snr(x, x_adv))]\n\n\ndef _snr_spectrogram(x_i, x_adv_i):\n x_i = np.asarray(x_i, dtype=float)\n x_adv_i = np.asarray(x_adv_i, dtype=float)\n if x_i.shape != x_adv_i.shape:\n raise ValueError(f\"x_i.shape {x_i.shape} != x_adv_i.shape {x_adv_i.shape}\")\n signal_power = np.abs(x_i).mean()\n noise_power = np.abs(x_i - x_adv_i).mean()\n return signal_power / noise_power\n\n\ndef snr_spectrogram(x, x_adv):\n \"\"\"\n Return the SNR of a batch of samples with spectrogram input\n\n NOTE: Due to phase effects, this is only an estimate of the SNR.\n For instance, if x[0] = sin(t) and x_adv[0] = sin(t + 2*pi/3),\n Then the SNR will be calculated as infinity, when it should be 1.\n However, the spectrograms will look identical, so as long as the\n model uses spectrograms and not the underlying raw signal,\n this should not have a significant effect on the results.\n \"\"\"\n if x.shape != x_adv.shape:\n raise ValueError(f\"x.shape {x.shape} != x_adv.shape {x_adv.shape}\")\n return [float(_snr_spectrogram(x_i, x_adv_i)) for (x_i, x_adv_i) in zip(x, x_adv)]\n\n\ndef snr_spectrogram_db(x, x_adv):\n \"\"\"\n Return the SNR of a batch of samples with spectrogram input in Decibels (DB)\n \"\"\"\n return [float(i) for i in 10 * np.log10(snr_spectrogram(x, x_adv))]\n\n\nSUPPORTED_METRICS = {\n \"categorical_accuracy\": categorical_accuracy,\n \"top_n_categorical_accuracy\": top_n_categorical_accuracy,\n \"top_5_categorical_accuracy\": top_5_categorical_accuracy,\n \"norm\": norm,\n \"l0\": l0,\n \"l1\": l1,\n \"l2\": l2,\n \"lp\": lp,\n \"linf\": linf,\n \"snr\": snr,\n \"snr_db\": snr_db,\n \"snr_spectrogram\": snr_spectrogram,\n \"snr_spectrogram_db\": snr_spectrogram_db,\n}\n\n\nclass MetricList:\n \"\"\"\n Keeps track of all results from a single metric\n \"\"\"\n\n def __init__(self, name, function=None):\n if function is None:\n try:\n self.function = SUPPORTED_METRICS[name]\n except KeyError:\n raise KeyError(f\"{name} is not part of armory.utils.metrics\")\n elif callable(function):\n self.function = function\n else:\n raise ValueError(f\"function must be callable or None, not {function}\")\n self.name = name\n self._values = []\n\n def clear(self):\n self._values.clear()\n\n def append(self, *args, **kwargs):\n value = self.function(*args, **kwargs)\n self._values.extend(value)\n\n def __iter__(self):\n return self._values.__iter__()\n\n def __len__(self):\n return len(self._values)\n\n def values(self):\n return list(self._values)\n\n def mean(self):\n return sum(float(x) for x in self._values) / len(self._values)\n\n\nclass MetricsLogger:\n \"\"\"\n Uses the set of task and perturbation metrics given to it.\n \"\"\"\n\n def __init__(\n self, task=None, perturbation=None, means=True, record_metric_per_sample=False\n ):\n \"\"\"\n task - single metric or list of metrics\n perturbation - single metric or list of metrics\n means - whether to return the mean value for each metric\n record_metric_per_sample - whether to return metric values for each sample\n \"\"\"\n self.tasks = self._generate_counters(task)\n self.adversarial_tasks = self._generate_counters(task)\n self.perturbations = self._generate_counters(perturbation)\n self.means = bool(means)\n self.full = bool(record_metric_per_sample)\n if not self.means and not self.full:\n logger.warning(\n \"No metric results will be produced. 
\"\n \"To change this, set 'means' or 'record_metric_per_sample' to True.\"\n )\n if not self.tasks and not self.perturbations:\n logger.warning(\n \"No metric results will be produced. \"\n \"To change this, set one or more 'task' or 'perturbation' metrics\"\n )\n\n def _generate_counters(self, names):\n if names is None:\n names = []\n elif isinstance(names, str):\n names = [names]\n elif not isinstance(names, list):\n raise ValueError(\n f\"{names} must be one of (None, str, list), not {type(names)}\"\n )\n return [MetricList(x) for x in names]\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n\n def clear(self):\n for metric in self.tasks + self.adversarial_tasks + self.perturbations:\n metric.clear()\n\n def update_task(self, y, y_pred, adversarial=False):\n tasks = self.adversarial_tasks if adversarial else self.tasks\n for metric in tasks:\n metric.append(y, y_pred)\n\n def update_perturbation(self, x, x_adv):\n for metric in self.perturbations:\n metric.append(x, x_adv)\n\n def log_task(self, adversarial=False):\n if adversarial:\n metrics = self.adversarial_tasks\n task_type = \"adversarial\"\n else:\n metrics = self.tasks\n task_type = \"benign\"\n\n for metric in metrics:\n logger.info(\n f\"Average {metric.name} on {task_type} test examples: \"\n f\"{metric.mean():.2%}\"\n )\n\n def results(self):\n \"\"\"\n Return dict of results\n \"\"\"\n results = {}\n for metrics, prefix in [\n (self.tasks, \"benign\"),\n (self.adversarial_tasks, \"adversarial\"),\n (self.perturbations, \"perturbation\"),\n ]:\n for metric in metrics:\n if self.full:\n results[f\"{prefix}_{metric.name}\"] = metric.values()\n if self.means:\n try:\n results[f\"{prefix}_mean_{metric.name}\"] = metric.mean()\n except ZeroDivisionError:\n raise ZeroDivisionError(\n f\"No values to calculate mean in {prefix}_{metric.name}\"\n )\n\n return results\n" ]
[ [ "numpy.abs", "numpy.asarray", "numpy.linalg.norm", "numpy.argmax", "numpy.argsort", "numpy.array" ] ]
feihoo87/waveforms
[ "d986852019206f18269a702f4dfbd17a78dc135a" ]
[ "waveforms/quantum/circuit/qlisp/utils.py" ]
[ "from itertools import repeat\n\nimport numpy as np\n\n\ndef DD(qubit, t, gates, pos, f=0):\n seq = [('X/2', qubit)]\n i = 0\n for gate in gates:\n gap = t * (pos[i] - pos[i - 1]) if i > 0 else t * pos[0]\n seq.append((('Delay', gap), qubit))\n seq.append((gate, qubit))\n i += 1\n gap = t * (1 - pos[-1]) if len(pos) > 0 else t\n seq.append((('Delay', gap), qubit))\n if f != 0:\n seq.append((('P', 2 * np.pi * f * t), qubit))\n seq.append(('X/2', qubit))\n return seq\n\n\ndef XY4(qubit, t, f=0):\n pos = np.arange(1, 5) / 5\n return DD(qubit, t, ['X', 'Y', 'X', 'Y'], pos, f)\n\n\ndef XY8(qubit, t, f=0):\n pos = np.arange(1, 9) / 9\n return DD(qubit, t, ['X', 'Y', 'X', 'Y', 'Y', 'X', 'Y', 'X'], pos, f)\n\n\ndef XY16(qubit, t, f=0):\n pos = np.arange(1, 17) / 17\n return DD(qubit, t, [\n 'X', 'Y', 'X', 'Y', 'Y', 'X', 'Y', 'X', 'X', 'Y', 'X', 'Y', 'Y', 'X',\n 'Y', 'X'\n ], pos, f)\n\n\ndef UDD(qubit, n, t, f=0):\n j = np.arange(n) + 1\n return DD(qubit, t, repeat('Y', times=n),\n np.sin(np.pi * j / (2 * n + 2))**2, f)\n\n\ndef CPMG(qubit, n, t, f=0):\n j = np.arange(n) + 1\n return DD(qubit, t, repeat('Y', times=n), (j - 0.5) / n, f)\n\n\ndef CP(qubit, n, t, f=0):\n j = np.arange(n) + 1\n return DD(qubit, t, repeat('X', times=n), (j - 0.5) / n, f)\n\n\ndef Ramsey(qubit, t, f=0):\n return [('X/2', qubit), (('Delay', t), qubit),\n (('rfUnitary', np.pi / 2, 2 * np.pi * f * t), qubit)]\n\n\ndef SpinEcho(qubit, t, f=0):\n return [('X/2', qubit), (('Delay', t / 2), qubit),\n (('rfUnitary', np.pi, np.pi * f * t), qubit),\n (('Delay', t / 2), qubit), ('X/2', qubit)]\n\n\n_ALLXYSeq = [('I', 'I'), ('X', 'X'), ('Y', 'Y'), ('X', 'Y'), ('Y', 'X'),\n ('X/2', 'I'), ('Y/2', 'I'), ('X/2', 'Y/2'), ('Y/2', 'X/2'),\n ('X/2', 'Y'), ('Y/2', 'X'), ('X', 'Y/2'), ('Y', 'X/2'),\n ('X/2', 'X'), ('X', 'X/2'), ('Y/2', 'Y'), ('Y', 'Y/2'),\n ('X', 'I'), ('Y', 'I'), ('X/2', 'X/2'), ('Y/2', 'Y/2')]\n\n\ndef ALLXY(qubit, i):\n assert 0 <= i < len(\n _ALLXYSeq), f\"i={i} is out of range(0, {len(_ALLXYSeq)})\"\n return [(gate, qubit) for gate in _ALLXYSeq[i]]\n" ]
[ [ "numpy.arange", "numpy.sin" ] ]
gitter-lab/active-learning-drug-discovery
[ "b24004a359037b3a1175a61c181ec231b711c797" ]
[ "active_learning_dd/utils/generate_dissimilarity_matrix.py" ]
[ "\"\"\"\n Script for generating the dissimilarity matrix.\n csv_file_or_dir: specifies a single file or path with format of csv files to be loaded. e.g: /path/iter_{}.csv or /path/iter_*.csv.\n output_dir: where to save the memmap file of the dissimilarity matrix.\n feature_name: specifies the column name for features in the csv file.\n cutoff: instances within this cutoff distance belong to the same cluster.\n dist_function: distance function to use.\n process: not used; can be ignored.\n \n Usage:\n python generate_dissimilarity_matrix.py \\\n --csv_file_or_dir=../../datasets/lc_clusters_cv_96/unlabeled_{}.csv \\\n --output_dir=../../datasets/ \\\n --feature_name=\"Morgan FP_2_1024\" \\\n --cutoff=0.3 \\\n --dist_function=tanimoto_dissimilarity \\\n --process_count=4 \\\n --process_batch_size=2056\n\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport glob\nimport time\nimport pathlib\nfrom multiprocessing import Process\n\nfrom .data_utils import *\n\ndef get_features(csv_files_list, feature_name, index_name, tmp_dir, process_batch_size) :\n # first get n_instances\n instances_per_file = []\n for f in csv_files_list:\n for chunk in pd.read_csv(f, chunksize=process_batch_size):\n instances_per_file.append(chunk.shape[0])\n \n n_features = len(chunk[feature_name].iloc[0])\n n_instances = np.sum(instances_per_file)\n X = np.memmap(tmp_dir+'/X.dat', dtype='float16', mode='w+', shape=(n_instances, n_features))\n chunksize = process_batch_size\n for i, f in enumerate(csv_files_list):\n for chunk in pd.read_csv(f, chunksize=chunksize):\n for batch_i in range(instances_per_file[i]//chunksize + 1): \n row_start = batch_i*chunksize\n row_end = min(instances_per_file[i], (batch_i+1)*chunksize)\n if i > 0:\n row_start = np.sum(instances_per_file[:i]) + batch_i*chunksize\n row_end = min(np.sum(instances_per_file[:(i+1)]), np.sum(instances_per_file[:i]) + (batch_i+1)*chunksize)\n X[chunk[index_name].values.astype('int64'),:] = np.vstack([np.fromstring(x, 'u1') - ord('0') for x in chunk[feature_name]]).astype(float) # this is from: https://stackoverflow.com/a/29091970\n X.flush()\n return n_instances, n_features\n \n\"\"\"\n Function wrapper method for computing dissimilarity_matrix for a range of indices.\n Used with multiprocessing.\n\"\"\"\ndef compute_dissimilarity_matrix_wrapper(start_ind, end_ind,\n n_instances, n_features,\n tmp_dir, output_dir, dist_func,\n process_id, process_batch_size):\n X = np.memmap(tmp_dir+'/X.dat', dtype='float16', mode='r', shape=(n_instances, n_features))\n dissimilarity_matrix = np.memmap(output_dir+'/dissimilarity_matrix_{}_{}.dat'.format(n_instances, n_instances), \n dtype='float16', mode='r+', shape=(n_instances, n_instances))\n dissimilarity_process_matrix = np.load(tmp_dir+'/dissimilarity_process_matrix.npy')[start_ind:end_ind]\n \n for i in range(end_ind-start_ind):\n start_time = time.time()\n row_start, row_end, col_start, col_end = dissimilarity_process_matrix[i,:]\n X_cols = X[col_start:col_end] \n X_rows = X[row_start:row_end]\n dist_col_row = dist_func(X_cols, X_rows, X_batch_size=process_batch_size//2, Y_batch_size=process_batch_size//2)\n dist_col_row = dist_col_row.reshape(X_cols.shape[0], X_rows.shape[0])\n \n dissimilarity_matrix[row_start:row_end, col_start:col_end] = dist_col_row.T\n dissimilarity_matrix[col_start:col_end, row_start:row_end] = dist_col_row\n end_time = time.time()\n print('pid: {}, at {} of {}. 
time {} seconds.'.format(process_id, i, (end_ind-start_ind), (end_time-start_time)))\n del dissimilarity_matrix\n \ndef compute_dissimilarity_matrix(csv_file_or_dir, output_dir, feature_name='Morgan FP_2_1024', dist_function='tanimoto_dissimilarity', \n process_count=1, process_batch_size=2056, index_name='Index ID'):\n num_files = len(glob.glob(csv_file_or_dir.format('*')))\n csv_files_list = [csv_file_or_dir.format(i) for i in range(num_files)]\n df_list = [pd.read_csv(csv_file) for csv_file in csv_files_list]\n data_df = pd.concat(df_list)\n \n # create tmp directory to store memmap arrays\n tmp_dir = './tmp/'\n pathlib.Path(tmp_dir).mkdir(parents=True, exist_ok=True) \n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True) \n n_instances, n_features = get_features(csv_files_list, feature_name, index_name, tmp_dir, process_batch_size)\n dist_func = feature_dist_func_dict()[dist_function]\n \n # compute_dissimilarity_matrix\n print('Generating dissimilarity_matrix...')\n start_time = time.time()\n dissimilarity_matrix = np.memmap(output_dir+'/dissimilarity_matrix_{}_{}.dat'.format(n_instances, n_instances), \n dtype='float16', mode='w+', shape=(n_instances, n_instances))\n del dissimilarity_matrix\n \n # precompute indices of slices for dissimilarity_matrix\n examples_per_slice = n_instances//process_count\n dissimilarity_process_matrix = []\n row_batch_size = process_batch_size // 2\n col_batch_size = process_batch_size // 2\n num_slices = 0\n for process_id in range(process_count): \n start_ind = process_id*examples_per_slice\n end_ind = (process_id+1)*examples_per_slice\n if process_id == (process_count-1):\n end_ind = n_instances\n if start_ind >= n_instances:\n break\n num_cols = end_ind - start_ind\n for batch_col_i in range(num_cols//col_batch_size + 1):\n col_start = start_ind + batch_col_i*col_batch_size\n col_end = min(end_ind, start_ind + (batch_col_i+1)*col_batch_size)\n for batch_row_i in range(col_end//row_batch_size + 1):\n row_start = batch_row_i*row_batch_size\n row_end = min(col_end, (batch_row_i+1)*row_batch_size)\n dissimilarity_process_matrix.append([row_start, row_end, col_start, col_end])\n num_slices += 1\n dissimilarity_process_matrix = np.array(dissimilarity_process_matrix)\n np.save(tmp_dir+'/dissimilarity_process_matrix.npy', dissimilarity_process_matrix)\n del dissimilarity_process_matrix\n print(num_slices)\n \n # distribute slices among processes\n process_pool = []\n slices_per_process = num_slices//process_count\n for process_id in range(process_count): \n start_ind = process_id*slices_per_process\n end_ind = (process_id+1)*slices_per_process\n if process_id == (process_count-1):\n end_ind = num_slices\n \n if start_ind >= num_slices:\n break\n \n process_pool.append(Process(target=compute_dissimilarity_matrix_wrapper, args=(start_ind, end_ind,\n n_instances, n_features,\n tmp_dir, output_dir, dist_func,\n process_id, process_batch_size)))\n process_pool[process_id].start()\n for process in process_pool:\n process.join()\n process.terminate()\n \n end_time = time.time()\n total_time = (end_time-start_time)/3600.0\n print('Done generating dissimilarity_matrix. 
Took {} hours'.format(total_time))\n \n import shutil\n shutil.rmtree(tmp_dir)\n \nnp.random.seed(1103)\nif __name__ == '__main__':\n # read args\n parser = argparse.ArgumentParser()\n parser.add_argument('--csv_file_or_dir', action=\"store\", dest=\"csv_file_or_dir\", required=True)\n parser.add_argument('--output_dir', action=\"store\", dest=\"output_dir\", required=True)\n parser.add_argument('--feature_name', default='Morgan FP_2_1024', action=\"store\", \n dest=\"feature_name\", required=False)\n parser.add_argument('--dist_function', default='tanimoto_dissimilarity', action=\"store\", \n dest=\"dist_function\", required=False)\n parser.add_argument('--process_count', type=int, default=1, action=\"store\", dest=\"process_count\", required=False)\n parser.add_argument('--process_batch_size', type=int, default=2**17, action=\"store\", dest=\"process_batch_size\", required=False)\n parser.add_argument('--index_name', default='Index ID', action=\"store\", dest=\"index_name\", required=False)\n \n given_args = parser.parse_args()\n csv_file_or_dir = given_args.csv_file_or_dir\n output_dir = given_args.output_dir\n feature_name = given_args.feature_name\n dist_function = given_args.dist_function\n process_count = given_args.process_count\n process_batch_size = given_args.process_batch_size\n index_name = given_args.index_name\n \n compute_dissimilarity_matrix(csv_file_or_dir, output_dir, feature_name, dist_function, \n process_count, process_batch_size, index_name)" ]
[ [ "pandas.concat", "pandas.read_csv", "numpy.random.seed", "numpy.memmap", "numpy.save", "numpy.fromstring", "numpy.load", "numpy.array", "numpy.sum" ] ]
hwk42/pipelines
[ "c89ed71cf6339cdcdd957d4dca4b1f32c10db9c9" ]
[ "samples/contrib/pytorch-samples/bert/wrapper.py" ]
[ "# !/usr/bin/env/python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# pylint: disable=arguments-differ\n# pylint: disable=unused-argument\n# pylint: disable=abstract-method\n\"\"\"Bert Wrapper.\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass AGNewsmodelWrapper(nn.Module):\n \"\"\"Warapper Class.\"\"\"\n\n def __init__(self, model):\n super( # pylint: disable=super-with-arguments\n AGNewsmodelWrapper, self\n ).__init__()\n self.model = model\n\n def compute_bert_outputs( # pylint: disable=no-self-use\n self, model_bert, embedding_input, attention_mask=None, head_mask=None\n ):\n \"\"\"Computes Bert Outputs.\n\n Args:\n model_bert : the bert model\n embedding_input : input for bert embeddings.\n attention_mask : attention mask\n head_mask : head mask\n Returns:\n output : the bert output\n \"\"\"\n if attention_mask is None:\n attention_mask = torch.ones( # pylint: disable=no-member\n embedding_input.shape[0], embedding_input.shape[1]\n ).to(embedding_input)\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n extended_attention_mask = extended_attention_mask.to(\n dtype=next(model_bert.parameters()).dtype\n ) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = (\n head_mask.unsqueeze(0)\n .unsqueeze(0)\n .unsqueeze(-1)\n .unsqueeze(-1)\n )\n head_mask = head_mask.expand(\n model_bert.config.num_hidden_layers, -1, -1, -1, -1\n )\n elif head_mask.dim() == 2:\n head_mask = (\n head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n ) # We can specify head_mask for each layer\n head_mask = head_mask.to(\n dtype=next(model_bert.parameters()).dtype\n ) # switch to fload if need + fp16 compatibility\n else:\n head_mask = [None] * model_bert.config.num_hidden_layers\n\n encoder_outputs = model_bert.encoder(\n embedding_input, extended_attention_mask, head_mask=head_mask\n )\n sequence_output = encoder_outputs[0]\n pooled_output = model_bert.pooler(sequence_output)\n outputs = (\n sequence_output,\n pooled_output,\n ) + encoder_outputs[1:]\n return outputs\n\n def forward(self, embeddings):\n \"\"\"Forward function.\n\n Args:\n embeddings : bert embeddings.\n \"\"\"\n outputs = self.compute_bert_outputs(self.model.bert_model, embeddings)\n pooled_output = outputs[1]\n output = F.relu(self.model.fc1(pooled_output))\n output = self.model.drop(output)\n output = self.model.out(output)\n return output\n" ]
[ [ "torch.ones" ] ]
11wi/11wi.github.io
[ "c89f6999ece59cba3ba5bdfd378028adcbad5ee3" ]
[ "attachments/matrix_util.py" ]
[ "import numpy as _np\nfrom multiprocessing import RawArray as _RawArray\nfrom multiprocessing import Pool as _Pool\nfrom functools import partial as _partial\nfrom numba import njit\n\n\ndef nonzero(array):\n index_array = _np.nonzero(array)[0]\n return index_array\n\n\ndef inverse(mat):\n return _np.ascontiguousarray(_np.linalg.inv(mat))\n\n\ndef cholesky(mat):\n return _np.linalg.cholesky(mat)\n\n\ndef normal(mu=0, sd=1, size=1):\n if isinstance(size, tuple):\n size = [int(i) for i in size]\n else:\n size = int(size)\n return _np.random.normal(loc=mu, scale=sd, size=size)\n\n\ndef wishart(nu, scale):\n \"\"\"\n :param nu: df\n :param scale: scale matrix (must be positive definite)\n :return: covariance matrix (symmetric positive definite)\n referred from\n https://gist.github.com/jfrelinger/2638485\n http://thaines.com/content/misc/gaussian_conjugate_prior_cheat_sheet.pdf\n \"\"\"\n dim = scale.shape[1]\n chol = cholesky(scale)\n Lambda = _np.zeros((dim, dim))\n\n for i in range(dim):\n for j in range(i + 1):\n if i == j:\n Lambda[i, j] = _np.random.chisquare(nu - (i + 1) + 1) ** .5\n else:\n Lambda[i, j] = normal(0, 1, 1).item()\n return chol @ Lambda @ Lambda.T @ chol.T\n\n\ndef mean_latent(latent_u):\n u_bar = _np.sum(latent_u, axis=0).reshape(-1, 1) / latent_u.shape[0]\n return u_bar\n\n\ndef cov_latent(latent_u):\n s_bar = _np.cov(latent_u, rowvar=False, bias=True)\n return s_bar\n\n\ndef user_based_item_rating(n, rating_matrix):\n items = nonzero(rating_matrix[n, :])\n rating = rating_matrix[n, :][items].reshape(-1, 1)\n return items, rating\n\n\ndef item_based_user_rating(n, rating_matrix):\n users = nonzero(rating_matrix[:, n])\n rating = rating_matrix[:, n][users].reshape(-1, 1)\n return users, rating\n\n\ndef update_hyperparam(latent_u, mu0, w0, b0):\n n_sample = latent_u.shape[0]\n u_bar = mean_latent(latent_u)\n s_bar = cov_latent(latent_u)\n\n mu0_star = ((b0 * mu0) + (n_sample * u_bar)) / (b0 + n_sample)\n\n w0_u_inv = inverse(w0)\n w0_star = inverse(w0_u_inv + n_sample * s_bar + (b0 * n_sample) / (b0 + n_sample) * (mu0 - u_bar) @ (mu0 - u_bar).T)\n\n return mu0_star, w0_star\n\n\ndef sampling_params(n_latent, n_sample, mu0_star, w0_star, b0):\n _sigma_u = wishart(nu=n_latent + n_sample, scale=w0_star)\n sigma_u = (_sigma_u + _sigma_u.T) / 2\n lambda_u = inverse(b0 + n_sample * sigma_u)\n mu_u = mu0_star + cholesky(lambda_u) @ normal(size=(n_latent, 1))\n return mu_u, lambda_u, sigma_u\n\n\ndef _sampling_latent(latent_v_i, mu_u, lambda_u, sigma_u, target_ratings, n_latent, b0):\n lambda_star_u = inverse(sigma_u + b0 * latent_v_i.T @ latent_v_i)\n mean_star_u = lambda_star_u @ (b0 * latent_v_i.T @ target_ratings + lambda_u @ mu_u)\n posterior_sample_u = mean_star_u + cholesky(lambda_star_u) @ normal(size=(n_latent, 1))\n return posterior_sample_u.reshape(-1)\n\n\ndef sampling_latent_user(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0):\n find_user = user_based_item_rating(each, rating_matrix)\n target_items, target_ratings = find_user[0], find_user[1]\n latent_v_i = latent_v[target_items]\n each_user_latent = _sampling_latent(latent_v_i, mu_u, lambda_u, sigma_u, target_ratings, n_latent, b0)\n return each_user_latent\n\n\ndef sampling_latent_item(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0):\n find_item = item_based_user_rating(each, rating_matrix)\n target_user, target_ratings = find_item[0], find_item[1]\n latent_v_i = latent_v[target_user]\n each_item_latent = _sampling_latent(latent_v_i, mu_u, lambda_u, sigma_u, 
target_ratings, n_latent, b0)\n return each_item_latent\n\n\n_parallel_env = {}\n\n\ndef _init_parallel(shared_array, latent_shape):\n _parallel_env['latent'] = shared_array\n _parallel_env['shape'] = latent_shape\n\n\ndef _init_args(n_sample_u, n_latent):\n shape_latent = (n_sample_u, n_latent)\n shared_latent = _RawArray('d', int(n_sample_u * n_latent))\n return shape_latent, shared_latent\n\n\ndef _pool_map(n_core, parallel_function, n_sample_u, shape_latent, shared_latent):\n with _Pool(processes=n_core, initializer=_init_parallel, initargs=(shared_latent, shape_latent)) as pool:\n pool.map(parallel_function, iterable=_np.arange(n_sample_u))\n latent = _np.frombuffer(shared_latent, dtype=_np.float64).reshape(shape_latent)\n return latent\n\n\ndef parallel_sampling_latent_user(n_core, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_sample_u,\n n_latent, b0):\n \"\"\"\n https://research.wmz.ninja/articles/2018/03/on-sharing-large-arrays-when-using-pythons-multiprocessing.html\n \"\"\"\n shape_latent, shared_latent = _init_args(n_sample_u, n_latent)\n f = _partial(_parallel_sampling_latent_user, mu_u=mu_u, lambda_u=lambda_u, sigma_u=sigma_u, latent_v=latent_v,\n rating_matrix=rating_matrix, n_latent=n_latent, b0=b0)\n latent = _pool_map(n_core, f, n_sample_u, shape_latent, shared_latent)\n return latent\n\n\ndef parallel_sampling_latent_item(n_core, mu_v, lambda_v, sigma_v, latent_u, rating_matrix, n_sample_v,\n n_latent, b0):\n \"\"\"\n https://research.wmz.ninja/articles/2018/03/on-sharing-large-arrays-when-using-pythons-multiprocessing.html\n \"\"\"\n shape_latent, shared_latent = _init_args(n_sample_v, n_latent)\n f = _partial(_parallel_sampling_latent_item, mu_v=mu_v, lambda_v=lambda_v, sigma_v=sigma_v, latent_u=latent_u,\n rating_matrix=rating_matrix, n_latent=n_latent, b0=b0)\n latent = _pool_map(n_core, f, n_sample_v, shape_latent, shared_latent)\n return latent\n\n\ndef _parallel_sampling_latent_user(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0):\n updated = sampling_latent_user(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0)\n latent = _np.frombuffer(_parallel_env['latent']).reshape(_parallel_env['shape'])\n latent[each, :] = updated\n\n\ndef _parallel_sampling_latent_item(each, mu_v, lambda_v, sigma_v, latent_u, rating_matrix, n_latent, b0):\n updated = sampling_latent_item(each, mu_v, lambda_v, sigma_v, latent_u, rating_matrix, n_latent, b0)\n latent = _np.frombuffer(_parallel_env['latent']).reshape(_parallel_env['shape'])\n latent[each, :] = updated\n" ]
[ [ "numpy.random.chisquare", "numpy.nonzero", "numpy.linalg.inv", "numpy.arange", "numpy.frombuffer", "numpy.random.normal", "numpy.cov", "numpy.linalg.cholesky", "numpy.zeros", "numpy.sum" ] ]