repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---
mixib/brightway2-calc
|
[
"0fa409b6e7bccbef2a220dd6a811356798518ebe"
] |
[
"bw2calc/graph_traversal.py"
] |
[
"from . import spsolve\nfrom heapq import heappush, heappop\nimport numpy as np\nimport warnings\n\n\nclass GraphTraversal:\n \"\"\"\nTraverse a supply chain, following paths of greatest impact.\n\nThis implementation uses a queue of datasets to assess. As the supply chain is traversed, datasets inputs are added to a list sorted by LCA score. Each activity in the sorted list is assessed, and added to the supply chain graph, as long as its impact is above a certain threshold, and the maximum number of calculations has not been exceeded.\n\nBecause the next dataset assessed is chosen by its impact, not its position in the graph, this is neither a breadth-first nor a depth-first search, but rather \"importance-first\".\n\nThis class is written in a functional style - no variables are stored in *self*, only methods.\n\nShould be used by calling the ``calculate`` method.\n\n.. warning:: Graph traversal with multioutput processes only works when other inputs are substituted (see `Multioutput processes in LCA <http://chris.mutel.org/multioutput.html>`__ for a description of multiputput process math in LCA).\n\n \"\"\"\n\n def calculate(self, lca, cutoff=0.005, max_calc=1e5, skip_coproducts=False):\n \"\"\"\nTraverse the supply chain graph.\n\nArgs:\n * *lca* (dict): An instance of ``bw2calc.lca.LCA``.\n * *cutoff* (float, default=0.005): Cutoff criteria to stop LCA calculations. Relative score of total, i.e. 0.005 will cutoff if a dataset has a score less than 0.5 percent of the total.\n * *max_calc* (int, default=10000): Maximum number of LCA calculations to perform.\n\nReturns:\n Dictionary of nodes, edges, and number of LCA calculations.\n\n \"\"\"\n if not hasattr(lca, \"supply_array\"):\n lca.lci()\n if not hasattr(lca, \"characterized_inventory\"):\n lca.lcia()\n\n supply = lca.supply_array.copy()\n score = lca.score\n\n if score == 0:\n raise ValueError(\"Zero total LCA score makes traversal impossible\")\n\n # Create matrix of LCIA CFs times biosphere flows, as these don't\n # change. This is also the unit score of each activity.\n characterized_biosphere = np.array(\n (lca.characterization_matrix * lca.biosphere_matrix).sum(axis=0)\n ).ravel()\n\n heap, nodes, edges = self.initialize_heap(lca, supply, characterized_biosphere)\n nodes, edges, counter = self.traverse(\n heap,\n nodes,\n edges,\n 0,\n max_calc,\n cutoff,\n score,\n supply,\n characterized_biosphere,\n lca,\n skip_coproducts,\n )\n\n return {\n \"nodes\": nodes,\n \"edges\": edges,\n \"counter\": counter,\n }\n\n def initialize_heap(self, lca, supply, characterized_biosphere):\n \"\"\"\nCreate a `priority queue <http://docs.python.org/2/library/heapq.html>`_ or ``heap`` to store inventory datasets, sorted by LCA score.\n\nPopulates the heap with each activity in ``demand``. Initial nodes are the *functional unit*, i.e. the complete demand, and each activity in the *functional unit*. 
Initial edges are inputs from each activity into the *functional unit*.\n\nThe *functional unit* is an abstract dataset (as it doesn't exist in the matrix), and is assigned the index ``-1``.\n\n \"\"\"\n heap, edges = [], []\n nodes = {-1: {\"amount\": 1, \"cum\": lca.score, \"ind\": 1e-6 * lca.score}}\n for index, amount in enumerate(lca.demand_array):\n if amount == 0:\n continue\n cum_score = self.cumulative_score(\n index, supply, characterized_biosphere, lca\n )\n heappush(heap, (abs(1 / cum_score), index))\n nodes[index] = {\n \"amount\": float(supply[index]),\n \"cum\": cum_score,\n \"ind\": self.unit_score(index, supply, characterized_biosphere),\n }\n edges.append(\n {\n \"to\": -1,\n \"from\": index,\n \"amount\": amount,\n \"exc_amount\": amount,\n \"impact\": cum_score * amount / float(supply[index]),\n }\n )\n return heap, nodes, edges\n\n def cumulative_score(self, index, supply, characterized_biosphere, lca):\n \"\"\"Compute cumulative LCA score for a given activity\"\"\"\n demand = np.zeros((supply.shape[0],))\n demand[index] = (supply[index] *\n # Normalize by the production amount\n lca.technosphere_matrix[index, index])\n return float((characterized_biosphere * spsolve(lca.technosphere_matrix, demand)).sum())\n\n def unit_score(self, index, supply, characterized_biosphere):\n \"\"\"Compute the LCA impact caused by the direct emissions and resource consumption of a given activity\"\"\"\n return float(characterized_biosphere[index] * supply[index])\n\n def traverse(\n self,\n heap,\n nodes,\n edges,\n counter,\n max_calc,\n cutoff,\n total_score,\n supply,\n characterized_biosphere,\n lca,\n skip_coproducts,\n ):\n \"\"\"\nBuild a directed graph by traversing the supply chain.\n\nNode ids are actually technosphere row/col indices, which makes lookup easier.\n\nReturns:\n (nodes, edges, number of calculations)\n\n \"\"\"\n # static_databases = {name for name in databases if databases[name].get(\"static\")}\n # reverse = lca.dicts.activity.reversed\n\n while heap:\n if counter >= max_calc:\n warnings.warn(\"Stopping traversal due to calculation count.\")\n break\n parent_index = heappop(heap)[1]\n # Skip links from static databases\n # if static_databases and reverse[parent_index][0] in static_databases:\n # continue\n\n # Assume that this activity produces its reference product\n scale_value = lca.technosphere_matrix[parent_index, parent_index]\n if scale_value == 0:\n raise ValueError(\n \"Can't rescale activities that produce zero reference product\"\n )\n col = lca.technosphere_matrix[:, parent_index].tocoo()\n # Multiply by -1 because technosphere values are negative\n # (consumption of inputs) and rescale\n children = [\n (int(col.row[i]), float(-1 * col.data[i] / scale_value))\n for i in range(col.row.shape[0])\n ]\n for activity, amount in children:\n # Skip values on technosphere diagonal\n if activity == parent_index:\n continue\n # Skip negative coproducts\n if skip_coproducts and amount <= 0:\n continue\n counter += 1\n cumulative_score = self.cumulative_score(\n activity, supply, characterized_biosphere, lca\n )\n if abs(cumulative_score) < abs(total_score * cutoff):\n continue\n\n # flow between activity and parent (Multiply by -1 because technosphere values are negative)\n flow = (\n -1.0\n * lca.technosphere_matrix[activity, parent_index]\n * supply[parent_index]\n )\n total_activity_output = (\n lca.technosphere_matrix[activity, activity] * supply[activity]\n )\n\n # Edge format is (to, from, mass amount, cumulative impact)\n edges.append(\n {\n \"to\": 
parent_index,\n \"from\": activity,\n # Amount of this link * amount of parent demanding link\n \"amount\": flow,\n # Raw exchange value\n \"exc_amount\": amount,\n # Impact related to this flow\n \"impact\": flow / total_activity_output * cumulative_score,\n }\n )\n # Want multiple incoming edges, but don't add existing node\n if activity in nodes:\n continue\n nodes[activity] = {\n # Total amount of this flow supplied\n \"amount\": total_activity_output,\n # Cumulative score from all flows of this activity\n \"cum\": cumulative_score,\n # Individual score attributable to environmental flows\n # coming directory from or to this activity\n \"ind\": self.unit_score(activity, supply, characterized_biosphere),\n }\n heappush(heap, (abs(1 / cumulative_score), activity))\n\n return nodes, edges, counter\n"
] |
[
[
"numpy.zeros"
]
] |
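
The GraphTraversal code in the row above orders its work queue with Python's `heapq` by pushing `abs(1 / cum_score)` as the priority key, so the activity with the largest-magnitude cumulative score is assessed first ("importance-first"). A minimal standalone sketch of that ordering; the activity indices and scores below are invented for illustration only:

```python
import heapq

# Toy stand-ins for cumulative LCA scores of queued activities (made up).
scores = {4: 12.5, 7: -0.8, 2: 3.1}

heap = []
for index, score in scores.items():
    # Pushing abs(1 / score) means the activity with the largest |score|
    # gets the smallest key, so a min-heap pops it first.
    heapq.heappush(heap, (abs(1 / score), index))

while heap:
    _, index = heapq.heappop(heap)
    print(index, scores[index])  # prints 4 (12.5), then 2 (3.1), then 7 (-0.8)
```

Using the reciprocal keeps the heap keys positive while still turning heapq's min-heap behavior into a largest-|score|-first queue.
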
vishalbelsare/emmental-tutorials
|
[
"5920cb71de07bfdb717e46ddfbe76457e8868fa7"
] |
[
"data_augmentation/eda/image/modules/soft_cross_entropy_loss.py"
] |
[
"from typing import List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\n\nclass SoftCrossEntropyLoss(nn.Module):\n \"\"\"\n Calculate the CrossEntropyLoss with soft targets.\n\n :param weight: Weight to assign to each of the classes. Default: None\n :type weight: list of float\n :param reduction: The way to reduce the losses: 'none' | 'mean' | 'sum'.\n 'none': no reduction,\n 'mean': the mean of the losses,\n 'sum': the sum of the losses.\n :type reduction: str\n \"\"\"\n\n def __init__(self, weight: List[float] = None, reduction: str = \"mean\"):\n super().__init__()\n if weight is None:\n self.weight = None\n else:\n self.register_buffer(\"weight\", torch.Tensor(weight))\n\n self.reduction = reduction\n\n def forward(self, input: Tensor, target: Tensor) -> Tensor: # type:ignore\n \"\"\"\n Calculate the loss.\n\n :param input: prediction logits\n :param target: target probabilities\n :return: loss\n \"\"\"\n\n n, k = input.shape\n losses = input.new_zeros(n)\n\n for i in range(k):\n cls_idx = input.new_full((n,), i, dtype=torch.long)\n loss = F.cross_entropy(input, cls_idx, reduction=\"none\")\n if self.weight is not None:\n loss = loss * self.weight[i]\n losses += target[:, i].float() * loss\n\n if self.reduction == \"mean\":\n losses = losses.mean()\n elif self.reduction == \"sum\":\n losses = losses.sum()\n elif self.reduction != \"none\":\n raise ValueError(f\"Unrecognized reduction: {self.reduction}\")\n\n return losses\n"
] |
[
[
"torch.nn.functional.cross_entropy",
"torch.Tensor"
]
] |
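
For the SoftCrossEntropyLoss row above: when `weight` is None and `reduction="mean"`, the per-class loop is equivalent to the usual soft-target cross entropy `-(p * log_softmax(logits)).sum(dim=1).mean()`, because `F.cross_entropy(input, i, reduction="none")` equals `-log_softmax(input)[:, i]`. A small sketch checking that equivalence; the tensor shapes and random inputs are arbitrary:

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(4, 3)                       # batch of 4, 3 classes
targets = torch.softmax(torch.randn(4, 3), 1)    # soft targets, rows sum to 1

# Direct soft-target cross entropy.
direct = -(targets * F.log_softmax(logits, dim=1)).sum(dim=1).mean()

# Same quantity via the per-class loop used by SoftCrossEntropyLoss.
n, k = logits.shape
losses = logits.new_zeros(n)
for i in range(k):
    cls_idx = logits.new_full((n,), i, dtype=torch.long)
    losses += targets[:, i] * F.cross_entropy(logits, cls_idx, reduction="none")

print(torch.allclose(direct, losses.mean()))  # True, up to float tolerance
```
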
muthissar/homework
|
[
"9ee6361183da84f58e8b4842cc2c6047f7d743e1",
"9ee6361183da84f58e8b4842cc2c6047f7d743e1"
] |
[
"hw3/train_ac_f18.py",
"hw3/dqn.py"
] |
[
"\"\"\"\nOriginal code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017\nAdapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam\nAdapted for CS294-112 Fall 2018 by Soroush Nasiriany, Sid Reddy, and Greg Kahn\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport gym\nimport logz\nimport os\nimport time\nimport inspect\nfrom multiprocessing import Process\n\n#============================================================================================#\n# Utilities\n#============================================================================================#\n\ndef build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):\n \"\"\"\n Builds a feedforward neural network\n \n arguments:\n input_placeholder: placeholder variable for the state (batch_size, input_size)\n output_size: size of the output layer\n scope: variable scope of the network\n n_layers: number of hidden layers\n size: dimension of the hidden layer\n activation: activation of the hidden layers\n output_activation: activation of the ouput layers\n\n returns:\n output placeholder of the network (the result of a forward pass) \n\n Hint: use tf.layers.dense \n \"\"\"\n # YOUR CODE HERE\n with tf.variable_scope(scope):\n layer = input_placeholder\n for _ in range(n_layers):\n layer = tf.layers.dense(\n inputs=layer,\n units=size,\n activation=activation\n )\n output_placeholder = tf.layers.dense(\n inputs=layer,\n units=output_size,\n activation=output_activation\n )\n\n return output_placeholder\n\ndef pathlength(path):\n return len(path[\"reward\"])\n\ndef setup_logger(logdir, locals_):\n # Configure output directory for logging\n logz.configure_output_dir(logdir)\n # Log experimental parameters\n args = inspect.getargspec(train_AC)[0]\n params = {k: locals_[k] if k in locals_ else None for k in args}\n logz.save_params(params)\n\n#============================================================================================#\n# Actor Critic\n#============================================================================================#\n\nclass Agent(object):\n def __init__(self, computation_graph_args, sample_trajectory_args, estimate_advantage_args):\n super(Agent, self).__init__()\n self.ob_dim = computation_graph_args['ob_dim']\n self.ac_dim = computation_graph_args['ac_dim']\n self.discrete = computation_graph_args['discrete']\n self.size = computation_graph_args['size']\n self.n_layers = computation_graph_args['n_layers']\n self.learning_rate = computation_graph_args['learning_rate']\n self.num_target_updates = computation_graph_args['num_target_updates']\n self.num_grad_steps_per_target_update = computation_graph_args['num_grad_steps_per_target_update']\n\n self.animate = sample_trajectory_args['animate']\n self.max_path_length = sample_trajectory_args['max_path_length']\n self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']\n\n self.gamma = estimate_advantage_args['gamma']\n self.normalize_advantages = estimate_advantage_args['normalize_advantages']\n\n def init_tf_sess(self):\n tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)\n tf_config.gpu_options.allow_growth = True # may need if using GPU\n self.sess = tf.Session(config=tf_config)\n self.sess.__enter__() # equivalent to `with self.sess:`\n tf.global_variables_initializer().run() #pylint: disable=E1101\n\n def define_placeholders(self):\n \"\"\"\n Placeholders for batch 
batch observations / actions / advantages in actor critic\n loss function.\n See Agent.build_computation_graph for notation\n\n returns:\n sy_ob_no: placeholder for observations\n sy_ac_na: placeholder for actions\n sy_adv_n: placeholder for advantages\n \"\"\"\n sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name=\"ob\", dtype=tf.float32)\n if self.discrete:\n sy_ac_na = tf.placeholder(shape=[None], name=\"ac\", dtype=tf.int32) \n else:\n sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name=\"ac\", dtype=tf.float32) \n # YOUR CODE HERE\n #TODO: for now single value\n sy_adv_n = tf.placeholder(shape=[None], name=\"adv\", dtype=tf.float32)\n return sy_ob_no, sy_ac_na, sy_adv_n\n\n def policy_forward_pass(self, sy_ob_no):\n \"\"\" Constructs the symbolic operation for the policy network outputs,\n which are the parameters of the policy distribution p(a|s)\n\n arguments:\n sy_ob_no: (batch_size, self.ob_dim)\n\n returns:\n the parameters of the policy.\n\n if discrete, the parameters are the logits of a categorical distribution\n over the actions\n sy_logits_na: (batch_size, self.ac_dim)\n\n if continuous, the parameters are a tuple (mean, log_std) of a Gaussian\n distribution over actions. log_std should just be a trainable\n variable, not a network output.\n sy_mean: (batch_size, self.ac_dim)\n sy_logstd: (self.ac_dim,)\n\n Hint: use the 'build_mlp' function to output the logits (in the discrete case)\n and the mean (in the continuous case).\n Pass in self.n_layers for the 'n_layers' argument, and\n pass in self.size for the 'size' argument.\n \"\"\"\n #raise NotImplementedError\n if self.discrete:\n # YOUR_HW2 CODE_HERE\n network = build_mlp(input_placeholder = sy_ob_no,\n output_size = self.ac_dim,\n scope=\"nn_policy_discrete\",\n n_layers = self.n_layers,\n size=self.size\n )\n sy_logits_na = network\n return sy_logits_na\n else:\n # YOUR_HW2 CODE_HERE\n network = build_mlp(input_placeholder = sy_ob_no,\n output_size = self.ac_dim,\n scope=\"nn_policy_continous_mean\",\n n_layers = self.n_layers,\n size=self.size\n )\n sy_mean = network\n sy_logstd = tf.get_variable(\n \"policy_continous_logstd\", \n shape=[self.ac_dim],\n trainable=True,\n dtype=tf.float32,\n initializer=tf.constant_initializer(np.log(1))\n #initializer=tf.constant_initializer(np.log(0.01))\n #initializer=tf.constant_initializer(np.log(np.sqrt(10)))\n )\n\n return (sy_mean, sy_logstd)\n\n def sample_action(self, policy_parameters):\n \"\"\" Constructs a symbolic operation for stochastically sampling from the policy\n distribution\n\n arguments:\n policy_parameters\n if discrete: logits of a categorical distribution over actions \n sy_logits_na: (batch_size, self.ac_dim)\n if continuous: (mean, log_std) of a Gaussian distribution over actions\n sy_mean: (batch_size, self.ac_dim)\n sy_logstd: (self.ac_dim,)\n\n returns:\n sy_sampled_ac: \n if discrete: (batch_size)\n if continuous: (batch_size, self.ac_dim)\n\n Hint: for the continuous case, use the reparameterization trick:\n The output from a Gaussian distribution with mean 'mu' and std 'sigma' is\n \n mu + sigma * z, z ~ N(0, I)\n \n This reduces the problem to just sampling z. 
(Hint: use tf.random_normal!)\n \"\"\"\n #raise NotImplementedError\n if self.discrete:\n sy_logits_na = policy_parameters\n # YOUR_HW2 CODE_HERE\n action_probs = tf.nn.softmax(sy_logits_na,dim=1)\n self.action_probs = action_probs\n sy_sampled_ac = tf.map_fn(lambda probs: tf.cast(tf.distributions.Categorical(probs=probs).sample(),\n tf.float32),\n action_probs,\n parallel_iterations=False)\n sy_sampled_ac = tf.cast(sy_sampled_ac,tf.int32)\n \n \n else:\n sy_mean, sy_logstd = policy_parameters\n # YOUR_HW2 CODE_HERE\n stds = tf.exp(sy_logstd)\n sy_sampled_ac = tf.map_fn(lambda mean: mean + tf.random.normal([self.ac_dim])*stds,sy_mean)\n return sy_sampled_ac\n\n def get_log_prob(self, policy_parameters, sy_ac_na):\n \"\"\" Constructs a symbolic operation for computing the log probability of a set of actions\n that were actually taken according to the policy\n\n arguments:\n policy_parameters\n if discrete: logits of a categorical distribution over actions \n sy_logits_na: (batch_size, self.ac_dim)\n if continuous: (mean, log_std) of a Gaussian distribution over actions\n sy_mean: (batch_size, self.ac_dim)\n sy_logstd: (self.ac_dim,)\n\n sy_ac_na: (batch_size, self.ac_dim)\n\n returns:\n sy_logprob_n: (batch_size)\n\n Hint:\n For the discrete case, use the log probability under a categorical distribution.\n For the continuous case, use the log probability under a multivariate gaussian.\n \"\"\"\n #raise NotImplementedError\n if self.discrete:\n sy_logits_na = policy_parameters\n # YOUR_HW2 CODE_HERE\n sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)\n else:\n sy_mean, sy_logstd = policy_parameters\n # YOUR_HW2 CODE_HERE\n std = tf.exp(sy_logstd)\n variance = (std*std)\n inverse_variance = 1/variance\n diff = sy_mean -sy_ac_na\n sy_logprob_n = -(tf.reduce_sum(tf.log(variance)) + tf.reduce_sum(inverse_variance *(diff*diff),axis=1))\n return sy_logprob_n\n\n def build_computation_graph(self):\n \"\"\"\n Notes on notation:\n \n Symbolic variables have the prefix sy_, to distinguish them from the numerical values\n that are computed later in the function\n \n Prefixes and suffixes:\n ob - observation \n ac - action\n _no - this tensor should have shape (batch self.size /n/, observation dim)\n _na - this tensor should have shape (batch self.size /n/, action dim)\n _n - this tensor should have shape (batch self.size /n/)\n \n Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis\n is None\n\n ----------------------------------------------------------------------------------\n loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate\n to get the policy gradient.\n \"\"\"\n self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()\n\n # The policy takes in an observation and produces a distribution over the action space\n self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)\n\n # We can sample actions from this action distribution.\n # This will be called in Agent.sample_trajectory() where we generate a rollout.\n self.sy_sampled_ac = self.sample_action(self.policy_parameters)\n\n # We can also compute the logprob of the actions that were actually taken by the policy\n # This is used in the loss function.\n self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)\n\n self.actor_loss = tf.reduce_sum(-self.sy_logprob_n * self.sy_adv_n)\n self.actor_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.actor_loss)\n\n # define the 
critic\n self.critic_prediction = tf.squeeze(build_mlp(\n self.sy_ob_no,\n 1,\n \"nn_critic\",\n n_layers=self.n_layers,\n size=self.size))\n self.sy_target_n = tf.placeholder(shape=[None], name=\"critic_target\", dtype=tf.float32)\n self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)\n self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)\n\n def sample_trajectories(self, itr, env):\n # Collect paths until we have enough timesteps\n timesteps_this_batch = 0\n paths = []\n while True:\n animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)\n path = self.sample_trajectory(env, animate_this_episode)\n paths.append(path)\n timesteps_this_batch += pathlength(path)\n if timesteps_this_batch > self.min_timesteps_per_batch:\n break\n return paths, timesteps_this_batch\n\n def sample_trajectory(self, env, animate_this_episode):\n ob = env.reset()\n obs, acs, rewards, next_obs, terminals = [], [], [], [], []\n steps = 0\n while True:\n if animate_this_episode:\n env.render()\n time.sleep(0.1)\n obs.append(ob)\n \n ac = self.sess.run(self.sy_sampled_ac, {self.sy_ob_no: [ob]}) # YOUR HW2 CODE HERE\n\n ac = ac[0]\n acs.append(ac)\n ob, rew, done, _ = env.step(ac)\n # add the observation after taking a step to next_obs\n # YOUR CODE HERE\n next_obs.append(ob)\n rewards.append(rew)\n steps += 1\n # If the episode ended, the corresponding terminal value is 1\n # otherwise, it is 0\n # YOUR CODE HERE\n if done or steps > self.max_path_length:\n terminals.append(1)\n break\n else:\n terminals.append(0)\n path = {\"observation\" : np.array(obs, dtype=np.float32), \n \"reward\" : np.array(rewards, dtype=np.float32), \n \"action\" : np.array(acs, dtype=np.float32),\n \"next_observation\": np.array(next_obs, dtype=np.float32),\n \"terminal\": np.array(terminals, dtype=np.float32)}\n return path\n\n def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):\n \"\"\"\n Estimates the advantage function value for each timestep.\n\n let sum_of_path_lengths be the sum of the lengths of the paths sampled from \n Agent.sample_trajectories\n\n arguments:\n ob_no: shape: (sum_of_path_lengths, ob_dim)\n next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward\n re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing\n the reward for each timestep\n terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended\n at that timestep of 0 if the episode did not end\n\n returns:\n adv_n: shape: (sum_of_path_lengths). 
A single vector for the estimated \n advantages whose length is the sum of the lengths of the paths\n \"\"\"\n # First, estimate the Q value as Q(s, a) = r(s, a) + gamma*V(s')\n # To get the advantage, subtract the V(s) to get A(s, a) = Q(s, a) - V(s)\n # This requires calling the critic twice --- to obtain V(s') when calculating Q(s, a),\n # and V(s) when subtracting the baseline\n # Note: don't forget to use terminal_n to cut off the V(s') term when computing Q(s, a)\n # otherwise the values will grow without bound.\n # YOUR CODE HERE\n v_ob_no = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: ob_no})\n v_next_ob_no = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})\n v_next_ob_no = v_next_ob_no * (1 - terminal_n)\n \n y = re_n + self.gamma * v_next_ob_no\n \n adv_n = y - v_ob_no\n if self.normalize_advantages:\n #raise NotImplementedError\n adv_n = (adv_n - np.mean(adv_n))/np.std(adv_n+1e-8) # YOUR_HW2 CODE_HERE\n return adv_n\n\n def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):\n \"\"\"\n Update the parameters of the critic.\n\n let sum_of_path_lengths be the sum of the lengths of the paths sampled from\n Agent.sample_trajectories\n let num_paths be the number of paths sampled from Agent.sample_trajectories\n\n arguments:\n ob_no: shape: (sum_of_path_lengths, ob_dim)\n next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward\n re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing\n the reward for each timestep\n terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended\n at that timestep of 0 if the episode did not end\n\n returns:\n nothing\n \"\"\"\n # Use a bootstrapped target values to update the critic\n # Compute the target values r(s, a) + gamma*V(s') by calling the critic to compute V(s')\n # In total, take n=self.num_grad_steps_per_target_update*self.num_target_updates gradient update steps\n # Every self.num_grad_steps_per_target_update steps, recompute the target values\n # by evaluating V(s') on the updated critic\n # Note: don't forget to use terminal_n to cut off the V(s') term when computing the target\n\n # otherwise the values will grow without bound.\n # YOUR CODE HERE\n for _ in range(self.num_target_updates):\n v_next_ob_no = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})\n v_next_ob_no = v_next_ob_no * (1 - terminal_n)\n y = re_n + self.gamma * v_next_ob_no\n for _ in range(self.num_grad_steps_per_target_update):\n self.sess.run(self.critic_update_op, feed_dict={self.sy_target_n: y, self.sy_ob_no: ob_no})\n\n def update_actor(self, ob_no, ac_na, adv_n):\n \"\"\" \n Update the parameters of the policy.\n\n arguments:\n ob_no: shape: (sum_of_path_lengths, ob_dim)\n ac_na: shape: (sum_of_path_lengths).\n adv_n: shape: (sum_of_path_lengths). 
A single vector for the estimated\n advantages whose length is the sum of the lengths of the paths\n\n returns:\n nothing\n\n \"\"\"\n self.sess.run(self.actor_update_op,\n feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})\n\n\ndef train_AC(\n exp_name,\n env_name,\n n_iter, \n gamma, \n min_timesteps_per_batch, \n max_path_length,\n learning_rate,\n num_target_updates,\n num_grad_steps_per_target_update,\n animate, \n logdir, \n normalize_advantages,\n seed,\n n_layers,\n size):\n\n start = time.time()\n\n #========================================================================================#\n # Set Up Logger\n #========================================================================================#\n setup_logger(logdir, locals())\n\n #========================================================================================#\n # Set Up Env\n #========================================================================================#\n\n # Make the gym environment\n env = gym.make(env_name)\n\n # Set random seeds\n tf.set_random_seed(seed)\n np.random.seed(seed)\n env.seed(seed)\n\n # Maximum length for episodes\n max_path_length = max_path_length or env.spec.max_episode_steps\n\n # Is this env continuous, or self.discrete?\n discrete = isinstance(env.action_space, gym.spaces.Discrete)\n\n # Observation and action sizes\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.n if discrete else env.action_space.shape[0]\n\n #========================================================================================#\n # Initialize Agent\n #========================================================================================#\n computation_graph_args = {\n 'n_layers': n_layers,\n 'ob_dim': ob_dim,\n 'ac_dim': ac_dim,\n 'discrete': discrete,\n 'size': size,\n 'learning_rate': learning_rate,\n 'num_target_updates': num_target_updates,\n 'num_grad_steps_per_target_update': num_grad_steps_per_target_update,\n }\n\n sample_trajectory_args = {\n 'animate': animate,\n 'max_path_length': max_path_length,\n 'min_timesteps_per_batch': min_timesteps_per_batch,\n }\n\n estimate_advantage_args = {\n 'gamma': gamma,\n 'normalize_advantages': normalize_advantages,\n }\n\n agent = Agent(computation_graph_args, sample_trajectory_args, estimate_advantage_args) #estimate_return_args\n\n # build computation graph\n agent.build_computation_graph()\n\n # tensorflow: config, session, variable initialization\n agent.init_tf_sess()\n\n #========================================================================================#\n # Training Loop\n #========================================================================================#\n\n total_timesteps = 0\n for itr in range(n_iter):\n print(\"********** Iteration %i ************\"%itr)\n paths, timesteps_this_batch = agent.sample_trajectories(itr, env)\n total_timesteps += timesteps_this_batch\n\n # Build arrays for observation, action for the policy gradient update by concatenating \n # across paths\n ob_no = np.concatenate([path[\"observation\"] for path in paths])\n ac_na = np.concatenate([path[\"action\"] for path in paths])\n re_n = np.concatenate([path[\"reward\"] for path in paths])\n next_ob_no = np.concatenate([path[\"next_observation\"] for path in paths])\n terminal_n = np.concatenate([path[\"terminal\"] for path in paths])\n\n # Call tensorflow operations to:\n # (1) update the critic, by calling agent.update_critic\n # (2) use the updated critic to compute the advantage by, calling 
agent.estimate_advantage\n # (3) use the estimated advantage values to update the actor, by calling agent.update_actor\n # YOUR CODE HERE\n agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)\n adv_n = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)\n agent.update_actor(ob_no,ac_na,adv_n)\n #raise NotImplementedError\n\n # Log diagnostics\n returns = [path[\"reward\"].sum() for path in paths]\n ep_lengths = [pathlength(path) for path in paths]\n logz.log_tabular(\"Time\", time.time() - start)\n logz.log_tabular(\"Iteration\", itr)\n logz.log_tabular(\"AverageReturn\", np.mean(returns))\n logz.log_tabular(\"StdReturn\", np.std(returns))\n logz.log_tabular(\"MaxReturn\", np.max(returns))\n logz.log_tabular(\"MinReturn\", np.min(returns))\n logz.log_tabular(\"EpLenMean\", np.mean(ep_lengths))\n logz.log_tabular(\"EpLenStd\", np.std(ep_lengths))\n logz.log_tabular(\"TimestepsThisBatch\", timesteps_this_batch)\n logz.log_tabular(\"TimestepsSoFar\", total_timesteps)\n logz.dump_tabular()\n logz.pickle_tf_vars()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('env_name', type=str)\n parser.add_argument('--exp_name', type=str, default='vac')\n parser.add_argument('--render', action='store_true')\n parser.add_argument('--discount', type=float, default=1.0)\n parser.add_argument('--n_iter', '-n', type=int, default=100)\n parser.add_argument('--batch_size', '-b', type=int, default=1000)\n parser.add_argument('--ep_len', '-ep', type=float, default=-1.)\n parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)\n parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')\n parser.add_argument('--num_target_updates', '-ntu', type=int, default=10)\n parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--n_experiments', '-e', type=int, default=1)\n parser.add_argument('--n_layers', '-l', type=int, default=2)\n parser.add_argument('--size', '-s', type=int, default=64)\n args = parser.parse_args()\n\n data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')\n\n if not (os.path.exists(data_path)):\n os.makedirs(data_path)\n logdir = 'ac_' + args.exp_name + '_' + args.env_name + '_' + time.strftime(\"%d-%m-%Y_%H-%M-%S\")\n logdir = os.path.join(data_path, logdir)\n if not(os.path.exists(logdir)):\n os.makedirs(logdir)\n\n max_path_length = args.ep_len if args.ep_len > 0 else None\n\n processes = []\n\n for e in range(args.n_experiments):\n seed = args.seed + 10*e\n print('Running experiment with seed %d'%seed)\n\n def train_func():\n train_AC(\n exp_name=args.exp_name,\n env_name=args.env_name,\n n_iter=args.n_iter,\n gamma=args.discount,\n min_timesteps_per_batch=args.batch_size,\n max_path_length=max_path_length,\n learning_rate=args.learning_rate,\n num_target_updates=args.num_target_updates,\n num_grad_steps_per_target_update=args.num_grad_steps_per_target_update,\n animate=args.render,\n logdir=os.path.join(logdir,'%d'%seed),\n normalize_advantages=not(args.dont_normalize_advantages),\n seed=seed,\n n_layers=args.n_layers,\n size=args.size\n )\n # # Awkward hacky process runs, because Tensorflow does not like\n # # repeatedly calling train_AC in the same thread.\n p = Process(target=train_func, args=tuple())\n p.start()\n processes.append(p)\n # if you comment in the line below, then the loop will block \n # until this process finishes\n # p.join()\n\n for p in 
processes:\n p.join()\n \n\nif __name__ == \"__main__\":\n main()\n",
"import uuid\nimport time\nimport pickle\nimport sys\nimport gym.spaces\nimport itertools\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nfrom collections import namedtuple\nfrom dqn_utils import *\n\nOptimizerSpec = namedtuple(\"OptimizerSpec\", [\"constructor\", \"kwargs\", \"lr_schedule\"])\n\n\nclass QLearner(object):\n\n def __init__(\n self,\n env,\n q_func,\n optimizer_spec,\n session,\n exploration=LinearSchedule(1000000, 0.1),\n stopping_criterion=None,\n replay_buffer_size=1000000,\n batch_size=32,\n gamma=0.99,\n learning_starts=50000,\n learning_freq=4,\n frame_history_len=4,\n target_update_freq=10000,\n grad_norm_clipping=10,\n rew_file=None,\n double_q=True,\n lander=False):\n \"\"\"Run Deep Q-learning algorithm.\n\n You can specify your own convnet using q_func.\n\n All schedules are w.r.t. total number of steps taken in the environment.\n\n Parameters\n ----------\n env: gym.Env\n gym environment to train on.\n q_func: function\n Model to use for computing the q function. It should accept the\n following named arguments:\n img_in: tf.Tensor\n tensorflow tensor representing the input image\n num_actions: int\n number of actions\n scope: str\n scope in which all the model related variables\n should be created\n reuse: bool\n whether previously created variables should be reused.\n optimizer_spec: OptimizerSpec\n Specifying the constructor and kwargs, as well as learning rate schedule\n for the optimizer\n session: tf.Session\n tensorflow session to use.\n exploration: rl_algs.deepq.utils.schedules.Schedule\n schedule for probability of chosing random action.\n stopping_criterion: (env, t) -> bool\n should return true when it's ok for the RL algorithm to stop.\n takes in env and the number of steps executed so far.\n replay_buffer_size: int\n How many memories to store in the replay buffer.\n batch_size: int\n How many transitions to sample each time experience is replayed.\n gamma: float\n Discount Factor\n learning_starts: int\n After how many environment steps to start replaying experiences\n learning_freq: int\n How many steps of environment to take between every experience replay\n frame_history_len: int\n How many past frames to include as input to the model.\n target_update_freq: int\n How many experience replay rounds (not steps!) to perform between\n each update to the target Q network\n grad_norm_clipping: float or None\n If not None gradients' norms are clipped to this value.\n double_q: bool\n If True, then use double Q-learning to compute target values. Otherwise, use vanilla DQN.\n https://papers.nips.cc/paper/3964-double-q-learning.pdf\n \"\"\"\n assert type(env.observation_space) == gym.spaces.Box\n assert type(env.action_space) == gym.spaces.Discrete\n\n self.target_update_freq = target_update_freq\n self.optimizer_spec = optimizer_spec\n self.batch_size = batch_size\n self.learning_freq = learning_freq\n self.learning_starts = learning_starts\n self.stopping_criterion = stopping_criterion\n self.env = env\n self.session = session\n self.exploration = exploration\n self.rew_file = str(uuid.uuid4()) + '.pkl' if rew_file is None else rew_file\n self.mean_rew_file = 'mean_rew'+str(uuid.uuid4()) + '.pkl' if rew_file is None else rew_file\n\n ###############\n # BUILD MODEL #\n ###############\n\n if len(self.env.observation_space.shape) == 1:\n # This means we are running on low-dimensional observations (e.g. 
RAM)\n input_shape = self.env.observation_space.shape\n else:\n img_h, img_w, img_c = self.env.observation_space.shape\n input_shape = (img_h, img_w, frame_history_len * img_c)\n self.num_actions = self.env.action_space.n\n\n # set up placeholders\n # placeholder for current observation (or state)\n self.obs_t_ph = tf.placeholder(\n tf.float32 if lander else tf.uint8, [None] + list(input_shape))\n # placeholder for current action\n self.act_t_ph = tf.placeholder(tf.int32, [None])\n # placeholder for current reward\n self.rew_t_ph = tf.placeholder(tf.float32, [None])\n # placeholder for next observation (or state)\n self.obs_tp1_ph = tf.placeholder(\n tf.float32 if lander else tf.uint8, [None] + list(input_shape))\n # placeholder for end of episode mask\n # this value is 1 if the next state corresponds to the end of an episode,\n # in which case there is no Q-value at the next state; at the end of an\n # episode, only the current state reward contributes to the target, not the\n # next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)\n self.done_mask_ph = tf.placeholder(tf.float32, [None])\n\n # casting to float on GPU ensures lower data transfer times.\n if lander:\n obs_t_float = self.obs_t_ph\n obs_tp1_float = self.obs_tp1_ph\n else:\n obs_t_float = tf.cast(self.obs_t_ph, tf.float32) / 255.0\n obs_tp1_float = tf.cast(self.obs_tp1_ph, tf.float32) / 255.0\n\n # Here, you should fill in your own code to compute the Bellman error. This requires\n # evaluating the current and next Q-values and constructing the corresponding error.\n # TensorFlow will differentiate this error for you, you just need to pass it to the\n # optimizer. See assignment text for details.\n # Your code should produce one scalar-valued tensor: total_error\n # This will be passed to the optimizer in the provided code below.\n # Your code should also produce two collections of variables:\n # q_func_vars\n # target_q_func_vars\n # These should hold all of the variables of the Q-function network and target network,\n # respectively. 
A convenient way to get these is to make use of TF's \"scope\" feature.\n # For example, you can create your Q-function network with the scope \"q_func\" like this:\n # <something> = q_func(obs_t_float, num_actions, scope=\"q_func\", reuse=False)\n # And then you can obtain the variables like this:\n # q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')\n # Older versions of TensorFlow may require using \"VARIABLES\" instead of \"GLOBAL_VARIABLES\"\n # Tip: use huber_loss (from dqn_utils) instead of squared error when defining self.total_error\n ######\n\n # YOUR CODE HERE\n \n self.q = q_func(obs_t_float, self.num_actions, scope=\"q_func\", reuse=False)\n q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')\n self.max_action_for_qall = tf.argmax(self.q, axis=-1)\n self.target_q = q_func(obs_tp1_float, self.num_actions, scope=\"target_q_func\", reuse=False)\n self.double_q = double_q\n if double_q:\n self.max_action_index_for_dQ = tf.placeholder(tf.int32, [None])\n q = tf.squeeze(tf.batch_gather(self.target_q, tf.expand_dims(self.max_action_index_for_dQ,axis=1)))\n y = self.rew_t_ph + gamma * (1 - self.done_mask_ph) * q\n else:\n y = self.rew_t_ph + gamma * (1 - self.done_mask_ph) * tf.reduce_max(self.target_q, axis=-1)\n target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')\n\n self.total_error = tf.reduce_mean(\n huber_loss(tf.squeeze(tf.batch_gather(self.q, tf.expand_dims(self.act_t_ph, axis=1))) - y))\n ######\n\n # construct optimization op (with gradient clipping)\n self.learning_rate = tf.placeholder(tf.float32, (), name=\"learning_rate\")\n optimizer = self.optimizer_spec.constructor(learning_rate=self.learning_rate, **self.optimizer_spec.kwargs)\n self.train_fn = minimize_and_clip(optimizer, self.total_error,\n var_list=q_func_vars, clip_val=grad_norm_clipping)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_fn = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_fn.append(var_target.assign(var))\n self.update_target_fn = tf.group(*update_target_fn)\n\n # construct the replay buffer\n self.replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len, lander=lander)\n self.replay_buffer_idx = None\n\n ###############\n # RUN ENV #\n ###############\n self.model_initialized = False\n self.num_param_updates = 0\n self.mean_episode_reward = -float('nan')\n self.mean_episode_rewards = []\n self.best_mean_episode_reward = -float('inf')\n self.best_mean_episode_rewards = []\n self.mean_episode_t = []\n self.last_obs = self.env.reset()\n self.log_every_n_steps = 10000\n\n self.start_time = None\n self.t = 0\n\n def stopping_criterion_met(self):\n return self.stopping_criterion is not None and self.stopping_criterion(self.env, self.t)\n\n def step_env(self):\n ### 2. Step the env and store the transition\n # At this point, \"self.last_obs\" contains the latest observation that was\n # recorded from the simulator. Here, your code needs to store this\n # observation and its outcome (reward, next observation, etc.) 
into\n # the replay buffer while stepping the simulator forward one step.\n # At the end of this block of code, the simulator should have been\n # advanced one step, and the replay buffer should contain one more\n # transition.\n # Specifically, self.last_obs must point to the new latest observation.\n # Useful functions you'll need to call:\n # obs, reward, done, info = env.step(action)\n # this steps the environment forward one step\n # obs = env.reset()\n # this resets the environment if you reached an episode boundary.\n # Don't forget to call env.reset() to get a new observation if done\n # is true!!\n # Note that you cannot use \"self.last_obs\" directly as input\n # into your network, since it needs to be processed to include context\n # from previous frames. You should check out the replay buffer\n # implementation in dqn_utils.py to see what functionality the replay\n # buffer exposes. The replay buffer has a function called\n # encode_recent_observation that will take the latest observation\n # that you pushed into the buffer and compute the corresponding\n # input that should be given to a Q network by appending some\n # previous frames.\n # Don't forget to include epsilon greedy exploration!\n # And remember that the first time you enter this loop, the model\n # may not yet have been initialized (but of course, the first step\n # might as well be random, since you haven't trained your net...)\n\n #####\n\n # YOUR CODE HERE\n self.replay_buffer_idx = self.replay_buffer.next_idx\n self.replay_buffer.store_frame(self.last_obs)\n if not self.model_initialized:\n act = self.env.action_space.sample()\n else:\n if self.exploration.value(self.t) > np.random.sample():\n act = self.env.action_space.sample()\n # print(act)\n else:\n state = self.replay_buffer.encode_recent_observation()\n values = self.session.run(self.q, {\n self.obs_t_ph: state[np.newaxis, ...]})\n act = np.argmax(values)\n # print(values)\n obs, reward, done, info = self.env.step(action=act)\n self.replay_buffer.store_effect(\n idx=self.replay_buffer_idx, action=act, reward=reward, done=done)\n if done:\n # print('DONE')\n obs = self.env.reset()\n self.last_obs = obs\n\n def update_model(self):\n ### 3. Perform experience replay and train the network.\n # note that this is only done if the replay buffer contains enough samples\n # for us to learn something useful -- until then, the model will not be\n # initialized and random actions should be taken\n if (self.t > self.learning_starts and \\\n self.t % self.learning_freq == 0 and \\\n self.replay_buffer.can_sample(self.batch_size)):\n # Here, you should perform training. Training consists of four steps:\n # 3.a: use the replay buffer to sample a batch of transitions (see the\n # replay buffer code for function definition, each batch that you sample\n # should consist of current observations, current actions, rewards,\n # next observations, and done indicator).\n # 3.b: initialize the model if it has not been initialized yet; to do\n # that, call\n # initialize_interdependent_variables(self.session, tf.global_variables(), {\n # self.obs_t_ph: obs_t_batch,\n # self.obs_tp1_ph: obs_tp1_batch,\n # })\n # where obs_t_batch and obs_tp1_batch are the batches of observations at\n # the current and next time step. The boolean variable model_initialized\n # indicates whether or not the model has been initialized.\n # Remember that you have to update the target network too (see 3.d)!\n # 3.c: train the model. 
To do this, you'll need to use the self.train_fn and\n # self.total_error ops that were created earlier: self.total_error is what you\n # created to compute the total Bellman error in a batch, and self.train_fn\n # will actually perform a gradient step and update the network parameters\n # to reduce total_error. When calling self.session.run on these you'll need to\n # populate the following placeholders:\n # self.obs_t_ph\n # self.act_t_ph\n # self.rew_t_ph\n # self.obs_tp1_ph\n # self.done_mask_ph\n # (this is needed for computing self.total_error)\n # self.learning_rate -- you can get this from self.optimizer_spec.lr_schedule.value(t)\n # (this is needed by the optimizer to choose the learning rate)\n # 3.d: periodically update the target network by calling\n # self.session.run(self.update_target_fn)\n # you should update every target_update_freq steps, and you may find the\n # variable self.num_param_updates useful for this (it was initialized to 0)\n #####\n\n # YOUR CODE HERE\n obs_batch, act_batch, rew_batch, next_obs_batch, done_mask = self.replay_buffer.sample(self.batch_size)\n if not self.model_initialized:\n initialize_interdependent_variables(self.session, tf.global_variables(), {\n self.obs_t_ph: obs_batch,\n self.obs_tp1_ph: next_obs_batch,\n })\n self.session.run(self.update_target_fn)\n self.model_initialized = True\n # 3.c\n if self.double_q:\n # double q case, firstly calculate self.max_action_index_for_dQ's value\n max_action_index_for_dQ = self.session.run(self.max_action_for_qall, feed_dict={\n self.obs_t_ph: next_obs_batch\n })\n self.session.run([self.train_fn, self.total_error], feed_dict={\n self.obs_t_ph: obs_batch,\n self.act_t_ph: act_batch,\n self.rew_t_ph: rew_batch,\n self.obs_tp1_ph: next_obs_batch,\n self.max_action_index_for_dQ: max_action_index_for_dQ,\n self.done_mask_ph: done_mask,\n self.learning_rate: self.optimizer_spec.lr_schedule.value(self.t)\n })\n else:\n self.session.run([self.train_fn, self.total_error], {\n self.obs_t_ph: obs_batch,\n self.act_t_ph: act_batch,\n self.rew_t_ph: rew_batch,\n self.obs_tp1_ph: next_obs_batch,\n self.done_mask_ph: done_mask,\n self.learning_rate: self.optimizer_spec.lr_schedule.value(self.t)\n })\n if (self.num_param_updates % self.target_update_freq) == 0:\n self.session.run(self.update_target_fn)\n\n self.num_param_updates += 1\n\n self.t += 1\n\n def log_progress(self):\n episode_rewards = get_wrapper_by_name(self.env, \"Monitor\").get_episode_rewards()\n\n if len(episode_rewards) > 0:\n self.mean_episode_reward = np.mean(episode_rewards[-100:])\n\n if len(episode_rewards) > 100:\n self.best_mean_episode_reward = max(self.best_mean_episode_reward, self.mean_episode_reward)\n if self.t % self.log_every_n_steps == 0 and self.model_initialized:\n self.mean_episode_rewards.append(self.mean_episode_reward)\n self.best_mean_episode_rewards.append(self.best_mean_episode_reward)\n self.mean_episode_t.append(self.t)\n print(\"Timestep %d\" % (self.t,))\n print(\"mean reward (100 episodes) %f\" % self.mean_episode_reward)\n print(\"best mean reward %f\" % self.best_mean_episode_reward)\n print(\"episodes %d\" % len(episode_rewards))\n print(\"exploration %f\" % self.exploration.value(self.t))\n print(\"learning_rate %f\" % self.optimizer_spec.lr_schedule.value(self.t))\n if self.start_time is not None:\n print(\"running time %f\" % ((time.time() - self.start_time) / 60.))\n\n self.start_time = time.time()\n\n sys.stdout.flush()\n\n with open(self.mean_rew_file, 'wb') as f:\n pickle.dump({\n 'mean_rewards': 
self.mean_episode_rewards,\n 'best_mean_rewards': self.best_mean_episode_rewards,\n 't': self.mean_episode_t\n }, f, pickle.HIGHEST_PROTOCOL)\n\n with open(self.rew_file, 'wb') as f:\n pickle.dump(episode_rewards, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef learn(*args, **kwargs):\n alg = QLearner(*args, **kwargs)\n while not alg.stopping_criterion_met():\n alg.step_env()\n # at this point, the environment should have been advanced one step (and\n # reset if done was true), and self.last_obs should point to the new latest\n # observation\n alg.update_model()\n alg.log_progress()\n"
] |
[
[
"tensorflow.reduce_sum",
"tensorflow.cast",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"tensorflow.train.AdamOptimizer",
"tensorflow.distributions.Categorical",
"tensorflow.layers.dense",
"tensorflow.ConfigProto",
"numpy.std",
"tensorflow.Session",
"numpy.log",
"numpy.min",
"tensorflow.placeholder",
"tensorflow.exp",
"tensorflow.global_variables_initializer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.set_random_seed",
"numpy.array",
"tensorflow.losses.mean_squared_error",
"tensorflow.nn.softmax",
"numpy.random.seed",
"tensorflow.log",
"tensorflow.variable_scope",
"tensorflow.random.normal"
],
[
"tensorflow.reduce_max",
"tensorflow.get_collection",
"tensorflow.cast",
"tensorflow.global_variables",
"tensorflow.placeholder",
"tensorflow.expand_dims",
"numpy.argmax",
"numpy.mean",
"numpy.random.sample",
"tensorflow.argmax",
"tensorflow.group"
]
] |
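
The actor-critic code in the row above builds its critic targets and advantages as y = r + gamma * (1 - terminal) * V(s') and A = y - V(s), masking out the bootstrap term at episode boundaries. A tiny NumPy sketch of that computation, with made-up rewards and value estimates:

```python
import numpy as np

gamma = 0.99
re_n       = np.array([1.0, 0.0, 2.0])   # rewards (made up)
terminal_n = np.array([0.0, 0.0, 1.0])   # last transition ends its episode
v_s        = np.array([5.0, 4.5, 3.0])   # critic's V(s) (made up)
v_s_next   = np.array([4.5, 3.0, 9.9])   # critic's V(s'); ignored when terminal

# Cut off the bootstrap term at episode ends, as in estimate_advantage().
y     = re_n + gamma * (1.0 - terminal_n) * v_s_next
adv_n = y - v_s
print(y, adv_n)   # y[2] == 2.0 because that episode terminated
```
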
loganlebanoff/datasets
|
[
"44649ac4f8fefdbaae0a66918b03ae7dd8169f1e"
] |
[
"tensorflow_datasets/core/utils/gcs_utils.py"
] |
[
"# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Utilities for accessing TFDS GCS buckets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport concurrent.futures\nimport os\nimport posixpath\nfrom typing import List, Optional\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_datasets.core.utils import py_utils\nfrom tensorflow_datasets.core.utils import tqdm_utils\n\nGCS_ROOT_DIR = 'gs://tfds-data'\n\n# for dataset_info/\nGCS_DATASET_INFO_DIR = 'dataset_info'\nGCS_DATASETS_DIR = 'datasets'\n\n\n# TODO(tfds): On windows, gs:// isn't supported.\n# https://github.com/tensorflow/tensorflow/issues/38477\n_is_gcs_disabled = (os.name == 'nt')\n\n\ndef gcs_path(suffix: Optional[str] = None) -> str:\n \"\"\"Returns the GCS URI path.\n\n Args:\n suffix: Eventual relative path in the bucket. If `None`, returns the root\n GCS bucket uri.\n\n Returns:\n path: The GCS uri.\n \"\"\"\n path = GCS_ROOT_DIR\n if suffix:\n path = posixpath.join(path, suffix)\n return path\n\n\n@py_utils.memoize()\ndef gcs_listdir(dir_name: str) -> Optional[List[str]]:\n \"\"\"List all files in the given GCS dir (`['dataset/1.0.0/file0', ...]`).\"\"\"\n root_dir = gcs_path(dir_name)\n if _is_gcs_disabled or not tf.io.gfile.exists(root_dir):\n return None\n return [posixpath.join(dir_name, f) for f in tf.io.gfile.listdir(root_dir)]\n\n\ndef gcs_dataset_info_files(dataset_dir: str) -> Optional[List[str]]:\n \"\"\"Return paths to GCS files in the given dataset directory.\"\"\"\n return gcs_listdir(posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir))\n\n\ndef is_dataset_on_gcs(dataset_name: str) -> bool:\n \"\"\"If the dataset is available on the GCS bucket gs://tfds-data/datasets.\"\"\"\n dir_name = posixpath.join(GCS_DATASETS_DIR, dataset_name)\n return not _is_gcs_disabled and tf.io.gfile.exists(gcs_path(dir_name))\n\n\ndef download_gcs_dataset(\n dataset_name, local_dataset_dir, max_simultaneous_downloads=25\n):\n \"\"\"Downloads prepared GCS dataset to local dataset directory.\"\"\"\n if _is_gcs_disabled:\n raise AssertionError('Cannot download from GCS when _is_gcs_disabled')\n\n prefix = posixpath.join(GCS_DATASETS_DIR, dataset_name)\n gcs_paths_to_dl = gcs_listdir(prefix)\n\n # Filter out the diffs folder if present\n filter_prefix = posixpath.join(prefix, 'diffs')\n gcs_paths_to_dl = [\n p for p in gcs_paths_to_dl if not p.startswith(filter_prefix)\n ]\n\n with tqdm_utils.async_tqdm(\n total=len(gcs_paths_to_dl), desc='Dl Completed...', unit=' file') as pbar:\n\n def _copy_from_gcs(gcs_path_):\n # Copy 'gs://tfds-data/datasets/ds/1.0.0/file' -> `local_dir/file`\n tf.io.gfile.copy(\n gcs_path(gcs_path_),\n os.path.join(local_dataset_dir, posixpath.basename(gcs_path_)),\n )\n pbar.update(1)\n\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=max_simultaneous_downloads) as executor:\n futures = [\n executor.submit(_copy_from_gcs, path) for 
path in gcs_paths_to_dl\n ]\n for future in concurrent.futures.as_completed(futures):\n future.result()\n"
] |
[
[
"tensorflow.compat.v2.io.gfile.exists",
"tensorflow.compat.v2.io.gfile.listdir"
]
] |
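
`download_gcs_dataset` in the row above fans file copies out over a `ThreadPoolExecutor` and waits on them with `as_completed`. A self-contained sketch of the same pattern, with a dummy task standing in for the `tf.io.gfile.copy` call and placeholder paths instead of real GCS objects:

```python
import concurrent.futures
import posixpath

gcs_paths_to_dl = ["datasets/ds/1.0.0/file0", "datasets/ds/1.0.0/file1"]  # placeholders

def _copy_from_gcs(gcs_path_):
    # Stand-in for tf.io.gfile.copy(gcs_path(gcs_path_), <local target>).
    return posixpath.basename(gcs_path_)

with concurrent.futures.ThreadPoolExecutor(max_workers=25) as executor:
    futures = [executor.submit(_copy_from_gcs, p) for p in gcs_paths_to_dl]
    for future in concurrent.futures.as_completed(futures):
        print("downloaded", future.result())
```
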
YifanShenSZ/pytorch
|
[
"b4232f7cbe407909f9d95b91304c73fdc4c66a50",
"b4232f7cbe407909f9d95b91304c73fdc4c66a50",
"b4232f7cbe407909f9d95b91304c73fdc4c66a50",
"b4232f7cbe407909f9d95b91304c73fdc4c66a50",
"b4232f7cbe407909f9d95b91304c73fdc4c66a50",
"b4232f7cbe407909f9d95b91304c73fdc4c66a50"
] |
[
"test/ao/sparsity/test_sparsifier.py",
"torch/fx/passes/splitter_base.py",
"test/jit/test_schema_check.py",
"test/distributed/fsdp/test_fsdp_param_exec_order_wrap.py",
"test/onnx/test_operators.py",
"torch/_subclasses/fake_tensor.py"
] |
[
"# -*- coding: utf-8 -*-\n# Owner(s): [\"module: unknown\"]\n\nimport itertools\nimport logging\nimport re\n\nimport torch\nfrom torch import nn\nfrom torch.ao.sparsity import BaseSparsifier, WeightNormSparsifier, FakeSparsity, NearlyDiagonalSparsifier\nfrom torch.nn.utils.parametrize import is_parametrized\n\nfrom torch.testing._internal.common_utils import TestCase\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(16, 16)\n )\n self.linear = nn.Linear(16, 16)\n self.head = nn.Linear(16, 4)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n x = self.head(x)\n return x\n\n\nclass ImplementedSparsifier(BaseSparsifier):\n def __init__(self, **kwargs):\n super().__init__(defaults=kwargs)\n\n def update_mask(self, module, **kwargs):\n module.parametrizations.weight[0].mask[0] = 0\n linear_state = self.state['linear.weight']\n linear_state['step_count'] = linear_state.get('step_count', 0) + 1\n\n\nclass TestBaseSparsifier(TestCase):\n def test_constructor(self):\n # Cannot instantiate the abstract base\n self.assertRaises(TypeError, BaseSparsifier)\n # Can instantiate the model with no configs\n model = Model()\n sparsifier = ImplementedSparsifier(test=3)\n sparsifier.prepare(model, config=None)\n assert len(sparsifier.groups) == 3\n sparsifier.step()\n # Can instantiate the model with configs\n sparsifier = ImplementedSparsifier(test=3)\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}])\n assert len(sparsifier.groups) == 1\n assert sparsifier.groups[0]['tensor_fqn'] == 'linear.weight'\n assert 'test' in sparsifier.groups[0]\n assert sparsifier.groups[0]['test'] == 3\n\n def test_prepare_config(self):\n model = Model()\n sparsifier = ImplementedSparsifier(test=3)\n # Make sure there are no parametrizations before `prepare`\n assert not hasattr(model.seq[0], 'parametrizations')\n assert not hasattr(model.linear, 'parametrizations')\n assert not hasattr(model.head, 'parametrizations')\n sparsifier.prepare(model, config=[\n {'tensor_fqn': 'seq.0.weight', 'test': 42},\n # No 'linear' to make sure it will be skipped in the sparsification\n {'tensor_fqn': 'head.weight'}\n ])\n assert len(sparsifier.groups) == 2\n # Check if default argument is not assigned if explicit\n assert sparsifier.groups[0]['tensor_fqn'] == 'seq.0.weight'\n assert sparsifier.groups[0]['test'] == 42\n # Check if FQN and module are pointing to the same location\n assert sparsifier.groups[1]['tensor_fqn'] == 'head.weight'\n assert sparsifier.groups[1]['module'] == model.head\n # Check if parameterizations are attached\n assert hasattr(model.seq[0], 'parametrizations')\n assert not hasattr(model.linear, 'parametrizations')\n assert hasattr(model.head, 'parametrizations')\n\n def test_step(self):\n model = Model()\n sparsifier = ImplementedSparsifier(test=3)\n sparsifier.enable_mask_update = True\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}])\n sparsifier.step()\n assert torch.all(model.linear.parametrizations.weight[0].mask[0] == 0)\n\n def test_state_dict(self):\n step_count = 3\n model0 = Model()\n sparsifier0 = ImplementedSparsifier(test=3)\n sparsifier0.prepare(model0, [{'tensor_fqn': 'linear.weight'}])\n mask = model0.linear.parametrizations['weight'][0].mask\n mask.data = torch.arange(mask.shape[0] * mask.shape[1]).reshape(mask.shape)\n for step in range(step_count):\n sparsifier0.step()\n state_dict = 
sparsifier0.state_dict()\n\n # Check the expected keys in the state_dict\n assert 'state' in state_dict\n assert 'step_count' in state_dict['state']['linear.weight']\n assert state_dict['state']['linear.weight']['step_count'] == 3\n assert 'groups' in state_dict\n assert 'test' in state_dict['groups'][0]\n assert 'tensor_fqn' in state_dict['groups'][0]\n assert state_dict['groups'][0]['tensor_fqn'] == 'linear.weight'\n\n # Check loading static_dict creates an equivalent model\n model1 = Model()\n sparsifier1 = ImplementedSparsifier()\n sparsifier1.prepare(model1, None)\n\n assert sparsifier0.state != sparsifier1.state\n\n # Make sure the masks are different in the beginning\n for mg in sparsifier0.groups:\n if mg['tensor_fqn'] == 'linear.weight':\n mask0 = mg['module'].parametrizations.weight[0].mask\n for mg in sparsifier1.groups:\n if mg['tensor_fqn'] == 'linear.weight':\n mask1 = mg['module'].parametrizations.weight[0].mask\n self.assertNotEqual(mask0, mask1)\n\n sparsifier1.load_state_dict(state_dict)\n\n # Make sure the states are loaded, and are correct\n assert sparsifier0.state == sparsifier1.state\n\n # Make sure the masks (and all dicts) are the same after loading\n assert len(sparsifier0.groups) == len(sparsifier1.groups)\n for idx in range(len(sparsifier0.groups)):\n mg0 = sparsifier0.groups[idx]\n mg1 = sparsifier1.groups[idx]\n for key in mg0.keys():\n assert key in mg1\n if key == 'module':\n # We cannot compare modules as they are different\n param0 = mg0[key].parametrizations.weight[0]\n param1 = mg1[key].parametrizations.weight[0]\n assert hasattr(param0, 'mask')\n assert hasattr(param1, 'mask')\n self.assertEqual(param0.__dict__, param1.__dict__)\n else:\n assert mg0[key] == mg1[key]\n\n def test_mask_squash(self):\n model = Model()\n sparsifier = ImplementedSparsifier(test=3)\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}])\n assert hasattr(model.linear.parametrizations.weight[0], 'mask')\n assert is_parametrized(model.linear, 'weight')\n assert not is_parametrized(model.seq[0], 'weight')\n\n sparsifier.squash_mask()\n assert not is_parametrized(model.seq[0], 'weight')\n assert not is_parametrized(model.linear, 'weight')\n\n def test_mask_squash_with_params1(self):\n model = Model()\n sparsifier = ImplementedSparsifier(foo=3, bar=2, baz=1)\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}, {'tensor_fqn': 'seq.0.weight'}])\n sparsifier.squash_mask(\n params_to_keep_per_layer={\n 'linear': ('foo', 'bar'),\n 'seq.0': ('baz',)\n })\n assert not is_parametrized(model.seq[0], 'weight')\n assert not is_parametrized(model.linear, 'weight')\n assert hasattr(model.seq[0], 'sparse_params')\n assert hasattr(model.linear, 'sparse_params')\n assert model.seq[0].sparse_params.get('foo', None) is None\n assert model.seq[0].sparse_params.get('bar', None) is None\n assert model.seq[0].sparse_params.get('baz', None) == 1\n assert model.linear.sparse_params.get('foo', None) == 3\n assert model.linear.sparse_params.get('bar', None) == 2\n assert model.linear.sparse_params.get('baz', None) is None\n\n def test_mask_squash_with_params2(self):\n model = Model()\n sparsifier = ImplementedSparsifier(foo=3, bar=2, baz=1)\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}, {'tensor_fqn': 'seq.0.weight'}])\n sparsifier.squash_mask(params_to_keep=('foo', 'bar'))\n assert not is_parametrized(model.seq[0], 'weight')\n assert not is_parametrized(model.linear, 'weight')\n assert hasattr(model.seq[0], 'sparse_params')\n assert hasattr(model.linear, 'sparse_params')\n 
assert model.seq[0].sparse_params.get('foo', None) == 3\n assert model.seq[0].sparse_params.get('bar', None) == 2\n assert model.seq[0].sparse_params.get('baz', None) is None\n assert model.linear.sparse_params.get('foo', None) == 3\n assert model.linear.sparse_params.get('bar', None) == 2\n assert model.linear.sparse_params.get('baz', None) is None\n\n def test_mask_squash_with_params3(self):\n model = Model()\n sparsifier = ImplementedSparsifier(foo=3, bar=2, baz=1)\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}, {'tensor_fqn': 'seq.0.weight'}])\n sparsifier.squash_mask(\n params_to_keep=('foo', 'bar'),\n params_to_keep_per_layer={'seq.0': ('baz',)})\n assert not is_parametrized(model.seq[0], 'weight')\n assert not is_parametrized(model.linear, 'weight')\n assert hasattr(model.seq[0], 'sparse_params')\n assert hasattr(model.linear, 'sparse_params')\n assert model.seq[0].sparse_params.get('foo', None) == 3\n assert model.seq[0].sparse_params.get('bar', None) == 2\n assert model.seq[0].sparse_params.get('baz', None) == 1\n assert model.linear.sparse_params.get('foo', None) == 3\n assert model.linear.sparse_params.get('bar', None) == 2\n assert model.linear.sparse_params.get('baz', None) is None\n\n\nclass TestWeightNormSparsifier(TestCase):\n def test_constructor(self):\n model = Model()\n sparsifier = WeightNormSparsifier()\n sparsifier.prepare(model, config=None)\n for g in sparsifier.groups:\n assert isinstance(g['module'], nn.Linear)\n # The groups are unordered\n assert g['module_fqn'] in ('seq.0', 'linear', 'head')\n\n def test_step(self):\n model = Model()\n sparsifier = WeightNormSparsifier(sparsity_level=0.5)\n sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])\n for g in sparsifier.groups:\n # Before step\n module = g['module']\n assert (1.0 - module.parametrizations['weight'][0].mask.mean()) == 0 # checking sparsity level is 0\n sparsifier.enable_mask_update = True\n sparsifier.step()\n self.assertAlmostEqual(model.linear.parametrizations['weight'][0].mask.mean().item(), 0.5, places=2)\n for g in sparsifier.groups:\n # After step\n module = g['module']\n assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level has increased\n # Test if the mask collapses to all zeros if the weights are randomized\n iters_before_collapse = 1000\n for _ in range(iters_before_collapse):\n model.linear.weight.data = torch.randn(model.linear.weight.shape)\n sparsifier.step()\n for g in sparsifier.groups:\n # After step\n module = g['module']\n assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level did not collapse\n\n def test_step_2_of_4(self):\n model = Model()\n sparsifier = WeightNormSparsifier(sparsity_level=1.0,\n sparse_block_shape=(1, 4),\n zeros_per_block=2)\n sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])\n sparsifier.step()\n # make sure the sparsity level is approximately 50%\n self.assertAlmostEqual(model.linear.parametrizations['weight'][0].mask.mean().item(), 0.5, places=2)\n # Make sure each block has exactly 50% zeros\n module = sparsifier.groups[0]['module']\n mask = module.parametrizations['weight'][0].mask\n for row in mask:\n for idx in range(0, len(row), 4):\n block = row[idx:idx + 4]\n block, _ = block.sort()\n assert (block[:2] == 0).all()\n assert (block[2:] != 0).all()\n\n def test_prepare(self):\n model = Model()\n sparsifier = WeightNormSparsifier()\n sparsifier.prepare(model, config=None)\n for g in sparsifier.groups:\n module = g['module']\n # 
Check mask exists\n assert hasattr(module.parametrizations['weight'][0], 'mask')\n # Check parametrization exists and is correct\n assert is_parametrized(module, 'weight')\n assert type(module.parametrizations.weight[0]) == FakeSparsity\n\n def test_mask_squash(self):\n model = Model()\n sparsifier = WeightNormSparsifier()\n sparsifier.prepare(model, config=None)\n sparsifier.squash_mask()\n for g in sparsifier.groups:\n module = g['module']\n assert not is_parametrized(module, 'weight')\n assert not hasattr(module, 'mask')\n\n def test_sparsity_levels(self):\n sparsity_levels = [-1.0, 0.0, 0.5, 1.0, 2.0]\n sparse_block_shapes = [(1, 1), (1, 4), (2, 2), (4, 1)]\n zeros_per_blocks = [0, 1, 2, 3, 4]\n\n testcases = itertools.tee(itertools.product(sparsity_levels,\n sparse_block_shapes,\n zeros_per_blocks))\n # Create a config and model with all the testcases\n model = nn.Sequential()\n sparsifier = WeightNormSparsifier()\n\n sparsity_per_layer_config = []\n p = re.compile(r'[-\\.\\s]')\n for sl, sbs, zpb in testcases[0]:\n # Make sure the number of zeros is not > values in a block\n if zpb > sbs[0] * sbs[1]:\n continue\n layer_name = f'{sl}_{sbs}_{zpb}'\n layer_name = p.sub('_', layer_name)\n\n layer = nn.Linear(12, 12, bias=False)\n layer.weight = nn.Parameter(torch.ones(12, 12))\n model.add_module(layer_name, layer)\n config = {\n 'tensor_fqn': layer_name + \".weight\",\n 'sparsity_level': sl,\n 'sparse_block_shape': sbs,\n 'zeros_per_block': zpb\n }\n sparsity_per_layer_config.append(config)\n\n sparsifier.prepare(model, sparsity_per_layer_config)\n sparsifier.step()\n sparsifier.squash_mask()\n model.eval()\n\n for sl, sbs, zpb in testcases[1]:\n if zpb > sbs[0] * sbs[1]:\n continue\n layer_name = f'{sl}_{sbs}_{zpb}'\n layer_name = p.sub('_', layer_name)\n layer = getattr(model, layer_name)\n\n # Level of sparsity is achieved\n sparse_mask = (layer.weight == 0).float()\n if zpb == 0:\n assert sparse_mask.mean() == 0\n else:\n # Ratio of individual zeros in the tensor\n true_sl = min(max(sl, 0.0), 1.0)\n true_sl = true_sl * zpb / sbs[0] / sbs[1]\n assert sparse_mask.mean() == true_sl\n\n\nclass TestNearlyDiagonalSparsifier(TestCase):\n def test_constructor(self):\n model = Model()\n sparsifier = NearlyDiagonalSparsifier(nearliness=1)\n sparsifier.prepare(model, config=None)\n for g in sparsifier.groups:\n assert isinstance(g['module'], nn.Linear)\n # The groups are unordered\n assert g['module_fqn'] in ('seq.0', 'linear', 'head')\n\n def test_step(self):\n model = Model()\n sparsifier = NearlyDiagonalSparsifier(nearliness=1)\n sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])\n\n for g in sparsifier.groups:\n # Before step\n module = g['module']\n assert (1.0 - module.parametrizations['weight'][0].mask.mean()) == 0 # checking sparsity level is 0\n\n sparsifier.enable_mask_update = True\n sparsifier.step()\n mask = module.parametrizations['weight'][0].mask\n height, width = mask.shape\n assert torch.all(mask == torch.eye(height, width))\n\n for g in sparsifier.groups:\n # After step\n module = g['module']\n assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level has increased\n\n # Test if the mask collapses to all zeros if the weights are randomized\n iters_before_collapse = 1000\n for _ in range(iters_before_collapse):\n model.linear.weight.data = torch.randn(model.linear.weight.shape)\n sparsifier.step()\n for g in sparsifier.groups:\n # After step\n module = g['module']\n assert (1.0 - 
module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level did not collapse\n\n def test_prepare(self):\n model = Model()\n sparsifier = NearlyDiagonalSparsifier(nearliness=1)\n sparsifier.prepare(model, config=None)\n for g in sparsifier.groups:\n module = g['module']\n # Check mask exists\n assert hasattr(module.parametrizations['weight'][0], 'mask')\n # Check parametrization exists and is correct\n assert is_parametrized(module, 'weight')\n assert type(module.parametrizations.weight[0]) == FakeSparsity\n\n def test_mask_squash(self):\n model = Model()\n sparsifier = NearlyDiagonalSparsifier(nearliness=1)\n sparsifier.prepare(model, config=None)\n sparsifier.step()\n sparsifier.squash_mask()\n for g in sparsifier.groups:\n module = g['module']\n assert not is_parametrized(module, 'weight')\n assert not hasattr(module, 'mask')\n weights = module.weight\n height, width = weights.shape\n assert torch.all(weights == torch.eye(height, width) * weights) # only diagonal to be present\n\n def test_sparsity_levels(self):\n nearliness_levels = list(nearliness for nearliness in range(-1, 100))\n model = nn.Sequential()\n\n p = re.compile(r'[-\\.\\s]')\n for nearliness in nearliness_levels:\n sparsifier = NearlyDiagonalSparsifier(nearliness=1)\n layer_name = f'{nearliness}'\n layer_name = p.sub('_', layer_name)\n\n layer = nn.Linear(32, 32, bias=False)\n layer.weight = nn.Parameter(torch.ones(32, 32))\n width, height = layer.weight.shape\n model.add_module(layer_name, layer)\n config = {\n 'tensor_fqn': layer_name + \".weight\",\n 'nearliness': nearliness\n }\n\n sparsifier.prepare(model, [config])\n # should raise a ValueError when nearliness arg is illegal\n if (nearliness > 0 and nearliness % 2 == 0) or (nearliness // 2 >= min(width, height)):\n with self.assertRaises(ValueError):\n sparsifier.step()\n else:\n sparsifier.step()\n sparsifier.squash_mask()\n model.eval()\n\n layer = getattr(model, layer_name)\n # verify that mask created corresponds to the nearliness\n self._verify_nearliness(layer.weight, nearliness)\n\n # helper function to verify nearliness of a mask\n def _verify_nearliness(self, mask: torch.Tensor, nearliness: int):\n if nearliness <= 0:\n assert torch.all(mask == torch.zeros(mask.shape[0], mask.shape[1]))\n else:\n height, width = mask.shape\n dist_to_diagonal = nearliness // 2\n for row in range(0, height):\n for col in range(0, width):\n if abs(row - col) <= dist_to_diagonal:\n assert mask[row, col] == 1\n else:\n assert mask[row, col] == 0\n",
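The test file above exercises the `torch.ao.sparsity` flow end to end: `prepare` attaches a `FakeSparsity` parametrization and a mask to each configured weight, `step` recomputes the mask, and `squash_mask` folds the mask into the weight and removes the parametrization. A minimal stand-alone sketch of that flow, mirroring the 2-of-4 hyperparameters used in `test_step_2_of_4`; the toy model and the `"0.weight"` tensor FQN are illustrative choices, not taken from the tests.

import torch
from torch import nn
from torch.ao.sparsity import WeightNormSparsifier

# Toy model; only the first Linear layer is sparsified.
model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))

# 2-of-4 structured sparsity: zero 2 values in every 1x4 block of the weight.
sparsifier = WeightNormSparsifier(
    sparsity_level=1.0, sparse_block_shape=(1, 4), zeros_per_block=2
)
sparsifier.prepare(model, config=[{"tensor_fqn": "0.weight"}])

sparsifier.enable_mask_update = True
sparsifier.step()         # recompute the mask from the current weight norms
sparsifier.squash_mask()  # bake the mask into the weight, drop the parametrization

print((model[0].weight == 0).float().mean())  # ~0.5: half of each block is zeroed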
"import argparse\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom typing import NamedTuple, Sequence, Iterable, Any, List, Dict, Optional, Tuple\nimport logging\n\nimport torch\nfrom torch.fx.passes.graph_manipulation import get_size_of_node\nfrom torch.fx.node import map_arg\nfrom torch.fx._compatibility import compatibility\n\nfrom .operator_support import (\n get_node_target,\n OperatorSupportBase,\n)\nfrom .graph_drawer import FxGraphDrawer\nfrom .shape_prop import ShapeProp\nfrom .split_utils import split_by_tags\nfrom .tools_common import (\n FxNetAccFusionsFinder,\n CALLABLE_NODE_OPS,\n Tensors,\n NodeList,\n NodeSet,\n is_node_output_tensor,\n)\nimport warnings\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass _SplitterSettingBase:\n def __init__(self):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--min_acc_module_size\",\n default=1,\n type=int,\n help=\"Minimum size limit of an accelerator subgraph.\",\n )\n parser.add_argument(\n \"--skip_fusion\",\n default=False,\n action=\"store_true\",\n help=\"If true then no fusion groups. Fusion group is used to \"\n \"enforce no non-tensor data flow between submodules. If we don't \"\n \"have this constrain, setting this to false is recommended as it \"\n \"can reduce overhead.\",\n )\n parser.add_argument(\n \"--allow_non_tensor\",\n default=False,\n action=\"store_true\",\n help=\"For some backends non-tensor data flow between cpu and them \"\n \"are not allowed. Therefore, if a node supported by accelerator but \"\n \"it has non-tensor inputs or outputs to a cpu node we would want to \"\n \"consider it as a cpu node during splitting. However, for some backends \"\n \"we might not care about non-tensor data flow and we can set this option \"\n \"to true to disable the functionality that prevent non-tensor data flow.\",\n )\n args, unknown = parser.parse_known_args()\n\n self.min_acc_module_size: int = args.min_acc_module_size\n self.skip_fusion: bool = args.skip_fusion\n self.allow_non_tensor: bool = args.allow_non_tensor\n\n\n@compatibility(is_backward_compatible=False)\nclass FxNetAccNodesFinder:\n \"\"\"\n Finds a set of nodes that can be supported on ACC, excluding nodes that have non-tensor\n input/output to cpu nodes to prevent non-tensor data flow between backends and cpu.\n\n I.e. 
if we have a chain:\n\n ACC_NODE_1 -> ACC_NODE_2 -> ACC_NODE_3 -> CPU_NODE_1\n\n where every ACC node produces non-tensor output, then they all should be treated as CPU nodes.\n\n This behavior can be turned off by passing allow_non_tensor=True.\n \"\"\"\n\n def __init__(\n self,\n module: torch.fx.GraphModule,\n operator_support: OperatorSupportBase,\n allow_non_tensor: bool,\n ):\n self.module = module\n self.operator_support = operator_support\n self.allow_non_tensor = allow_non_tensor\n\n def reduce_acc_nodes_non_tensor_input_helper(\n self, cpu_worklist: NodeList\n ):\n \"\"\"\n Transitively excludes nodes from ACC supported set.\n For every node in the worklist:\n - removes its downstream ACC nodes from ACC supported set,\n - if any downstream ACC node produces non-tensor output,\n then it gets added into the worklist.\n \"\"\"\n while cpu_worklist:\n node = cpu_worklist.pop(0)\n\n for user in node.users:\n if user in self.acc_nodes:\n self.acc_nodes.remove(user)\n if not is_node_output_tensor(user):\n cpu_worklist.append(user)\n\n def reduce_acc_nodes_non_tensor_input(self):\n \"\"\"\n Excludes nodes from ACC supported set that have direct\n upstream CPU nodes that produce non-tensor outputs.\n \"\"\"\n non_tensor_cpu_nodes: NodeList = []\n\n for node in self.module.graph.nodes:\n if node.op not in CALLABLE_NODE_OPS:\n continue\n if node in self.acc_nodes:\n continue\n if is_node_output_tensor(node):\n continue\n non_tensor_cpu_nodes.append(node)\n\n self.reduce_acc_nodes_non_tensor_input_helper(non_tensor_cpu_nodes)\n\n def reduce_acc_nodes_non_tensor_output(self):\n \"\"\"\n Excludes nodes from ACC supported set that produce non-tensor\n outputs and have downstream CPU nodes.\n \"\"\"\n while True:\n new_cpu_nodes: NodeList = []\n\n for acc_node in self.acc_nodes:\n if is_node_output_tensor(acc_node):\n continue\n for user in acc_node.users:\n if user not in self.acc_nodes:\n new_cpu_nodes.append(acc_node)\n break\n\n if not new_cpu_nodes:\n break\n\n for new_cpu_node in new_cpu_nodes:\n self.acc_nodes.remove(new_cpu_node)\n\n self.reduce_acc_nodes_non_tensor_input_helper(new_cpu_nodes)\n\n def __call__(self) -> NodeSet:\n submodules = dict(self.module.named_modules())\n self.acc_nodes = {\n n\n for n in self.module.graph.nodes\n if n.op in CALLABLE_NODE_OPS\n and self.operator_support.is_node_supported(submodules, n)\n }\n\n if not self.allow_non_tensor:\n self.reduce_acc_nodes_non_tensor_input()\n self.reduce_acc_nodes_non_tensor_output()\n\n return self.acc_nodes\n\n@compatibility(is_backward_compatible=False)\nclass FxNetSplitterInternalError(Exception):\n pass\n\n@compatibility(is_backward_compatible=False)\n@dataclass\nclass Subgraph:\n is_acc: bool\n nodes: NodeList\n\n\n@compatibility(is_backward_compatible=False)\nclass SplitResult(NamedTuple):\n \"\"\"\n Stores the results of the splitter.\n\n Attributes:\n split_module: root module after splitting.\n submodule_inputs: a dict that maps submodule name to its inputs.\n non_acc_submodule_prefix: the prefix for non acc submodules. For\n acc submodule the prefix is alwasy \"_run_on_acc_\".\n \"\"\"\n\n split_module: torch.fx.GraphModule\n submodule_inputs: Dict[str, Any]\n non_acc_submodule_prefix: str\n\n\n@compatibility(is_backward_compatible=False)\ndef generate_inputs_for_submodules(\n model: torch.nn.Module,\n inputs: Sequence[Any],\n target_submodules: Iterable[str]\n) -> Dict[str, Any]:\n \"\"\"\n Generate inputs for targeting submdoules in the given model. 
Note that if two submodules refer to the same obj, this\n function doesn't work.\n\n Args:\n model: root model.\n inputs: inputs to the root model.\n target_submodules: submodules that we want to generate inputs for.\n\n Returns:\n A dict that maps from submodule name to its inputs.\n \"\"\"\n\n handles = []\n results = {}\n submodule_to_names = dict((mod, name) for name, mod in model.named_modules())\n\n def pre_forward(module, module_inputs):\n results[submodule_to_names[module]] = module_inputs\n try:\n for name, mod in model.named_modules():\n if name in target_submodules:\n handles.append(mod.register_forward_pre_hook(pre_forward))\n model(*inputs)\n except Exception as e:\n warnings.warn(f\"Failed to generate submodule inputs because of the following error:\\n{e}\")\n finally:\n for h in handles:\n h.remove()\n return results\n\n\nclass _SplitterBase:\n \"\"\"\n Splits a GraphModule into sub-GraphModules for execution on CPU or the accelerator.\n Output is a GraphModule with supported and unsupported operators grouped into as few sub-GraphModules as possible.\n Assumes that only \"call_module\", \"call_function\" and \"call_method\" from FX IR can potentially be executed on the accelerator.\n\n Given the following graph:\n ==> b ==>\n // \\\\\n a d\n \\\\ //\n ==> c ==>\n\n class SimpleModule(torch.nn.Module):\n def forward(self, a):\n b = torch.sin(a)\n c = torch.cos(a)\n d = b + c\n return d\n\n and providing \"operator_support\" that indicates that 'b' and 'c' can be executed on the accelerator,\n we will get the following split result:\n\n main:\n def forward(self, a):\n run_on_acc_0_0 = self._run_on_acc_0_0(a)\n getitem = run_on_acc_0_0[0]\n getitem_1 = run_on_acc_0_0[1]\n run_on_cpu_1_1 = self._run_on_cpu_1_1(getitem, getitem_1)\n return run_on_cpu_1_1\n\n _run_on_acc_0_0:\n def forward(self, a):\n sin_1 = torch.sin(a)\n cos_1 = torch.cos(a)\n return (sin_1, cos_1)\n\n _run_on_cpu_1_1:\n def forward(self, sin_1, cos_1):\n add_1 = sin_1 + cos_1\n return add_1\n \"\"\"\n\n # PCIe bandwidth for the backend, default to 100 GB/s\n PCIe_BW = 100 * 2 ** 30\n\n def __init__(\n self,\n module: torch.fx.GraphModule,\n sample_input: Sequence[Any],\n operator_support: OperatorSupportBase,\n settings: _SplitterSettingBase,\n non_acc_submodule_name: str = \"_run_on_cpu_\",\n ):\n \"\"\"\n Preprocesses graph before splitting:\n - finds nodes supported by ACC,\n - finds fusion groups for ACC nodes having non-tensor IO,\n - builds a graph of direct dependencies,\n - builds a map of fused nodes to their fusions.\n As a result we get self.acc_nodes, self.deps and self.fusions.\n \"\"\"\n assert isinstance(module, torch.fx.GraphModule)\n\n self.module = module\n ShapeProp(self.module).propagate(*sample_input)\n\n self.settings = settings\n self.operator_support = operator_support\n self.sample_input = sample_input\n self.acc_nodes = FxNetAccNodesFinder(self.module, self.operator_support, self.settings.allow_non_tensor)()\n\n if self.settings.skip_fusion:\n self.fusions = {}\n else:\n self.fusions = FxNetAccFusionsFinder(module, self.acc_nodes)()\n\n # Modify deps to add more deps for fused nodes\n self.deps = self.find_deps()\n self.update_deps_for_fusions()\n\n self.non_acc_submodule_name = non_acc_submodule_name\n\n # ===============================================================\n # Helpers for ctor and initial state\n # ===============================================================\n\n def find_deps(self) -> Dict[torch.fx.Node, NodeSet]:\n \"\"\"\n Builds a graph of node dependencies. 
Leaf nodes don't have any\n dependencies and the \"output\" node doesn't have nodes depending on it.\n\n Resulting graph has only direct dependencies, i.e. there are no\n transitive dependencies.\n \"\"\"\n deps: Dict[torch.fx.Node, NodeSet] = defaultdict(set)\n for node in self.module.graph.nodes:\n if node.op not in CALLABLE_NODE_OPS:\n continue\n\n for user in node.users:\n if user.op != \"output\":\n deps[user].add(node)\n return deps\n\n def update_deps_for_fusions(self):\n \"\"\"\n Updates graph of dependencies so that:\n - nodes from the same fusion depend on the same set of outer nodes,\n - outer nodes depending on a fusion depend on all nodes in that fusion.\n \"\"\"\n for node in self.fusions:\n fusion = self.fusions[node]\n for fused_neighbor in fusion:\n self.deps[node].update(self.deps[fused_neighbor] - fusion)\n\n for user in fused_neighbor.users:\n if user not in fusion:\n self.deps[user].add(node)\n\n # ===============================================================\n # Helpers for preview\n # ===============================================================\n\n def _lower_model_to_backend(\n self, mod: torch.fx.GraphModule, inputs: Tensors\n ) -> torch.nn.Module:\n \"\"\"\n Lower the model to a backend.\n \"\"\"\n\n return mod\n\n def _find_culprit(\n self, mod: torch.fx.GraphModule, inputs: Tensors\n ) -> str:\n \"\"\"\n When an error occurs during lowering or running the lowered mod, we use this\n function to find culprits in the `mod` that causes the error.\n \"\"\"\n\n return \"Unable to find a culprit because _find_culprit() function is not implemented.\"\n\n def _draw_graph_based_on_node_support(\n self, mod: torch.fx.GraphModule, supported_nodes: NodeList\n ):\n color_map = {\n \"default\": \"AliceBlue\",\n \"supported\": \"chartreuse1\",\n \"unsupported\": \"crimson\",\n }\n\n class CustomDrawer(FxGraphDrawer):\n def _get_node_style(self, node):\n template = super()._get_node_style(node)\n if node in supported_nodes:\n template[\"fillcolor\"] = color_map[\"supported\"]\n elif node.op in CALLABLE_NODE_OPS:\n template[\"fillcolor\"] = color_map[\"unsupported\"]\n else:\n template[\"fillcolor\"] = color_map[\"default\"]\n\n return template\n\n drawer = CustomDrawer(mod, \"node_support\", ignore_getattr=True)\n dot_graph = drawer.get_main_dot_graph()\n dot_graph.write_raw(\"node_support.dot\")\n\n def node_support_preview(self, dump_graph: bool = False):\n submodules = dict(self.module.named_modules())\n\n supported_nodes: NodeList = []\n supported_node_types = defaultdict(set)\n unsupported_node_types = defaultdict(set)\n\n def get_dtype(arg):\n tensor_meta = arg.meta.get(\"tensor_meta\")\n return getattr(tensor_meta, \"dtype\", None)\n\n for node in self.module.graph.nodes:\n if node.op not in CALLABLE_NODE_OPS:\n continue\n\n target = get_node_target(submodules, node)\n\n # Store dtype of arg in node.args. If arg doesn't have dtype, i.e. not a tensor, we'll store None.\n arg_dtypes = [\n get_dtype(arg) if isinstance(arg, torch.fx.Node) else None\n for arg in node.args\n ]\n\n # Find last non-None element. 
If all elements are None, return max_len.\n last_index = len(arg_dtypes) - next(\n (\n i\n for i, dtype in enumerate(reversed(arg_dtypes))\n if dtype is not None\n ),\n len(arg_dtypes),\n )\n\n # Strip None elements at the end.\n arg_dtypes_tuple = tuple(arg_dtypes[:last_index])\n kwarg_dtypes_tuple = tuple(\n (k, get_dtype(arg))\n for k, arg in node.kwargs.items()\n if isinstance(arg, torch.fx.Node)\n )\n\n if self.operator_support.is_node_supported(submodules, node):\n supported_nodes.append(node)\n supported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))\n else:\n unsupported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))\n\n if dump_graph:\n self._draw_graph_based_on_node_support(self.module, supported_nodes)\n\n reports = \"\\nSupported node types in the model:\\n\"\n for t, dtypes in supported_node_types.items():\n for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:\n reports += f\"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\\n\"\n\n reports += \"\\nUnsupported node types in the model:\\n\"\n for t, dtypes in unsupported_node_types.items():\n for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:\n reports += f\"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\\n\"\n\n print(reports)\n\n # Return reports for testing purpose\n return reports\n\n def split_preview(self, dump_graph: bool = False):\n reports = \"\"\n subgraphs = self.put_nodes_into_subgraphs()\n acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])\n cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num\n reports += f\"Before removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:\"\n reports += f\" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\\n\"\n\n subgraphs = self.remove_small_acc_subgraphs(subgraphs)\n acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])\n cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num\n reports += f\"After removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:\"\n reports += f\" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\\n\"\n\n for i, subgraph in enumerate(subgraphs):\n reports += f\"_run_on_acc_{i}: \" if subgraph.is_acc else f\"{self.non_acc_submodule_name}{i}: \"\n reports += f\"{len(subgraph.nodes)} node(s)\\n\"\n\n self.tag(subgraphs)\n split_mod = self.split(remove_tag=True)\n split_mod.eval()\n\n if dump_graph:\n drawer = FxGraphDrawer(\n split_mod, \"preview\", ignore_getattr=True\n )\n dot_graphs = drawer.get_all_dot_graphs()\n for name, dot_graph in dot_graphs.items():\n dot_graph.write_raw(f\"{name}.dot\")\n\n max_qps: float = self.PCIe_BW\n bottleneck_module = \"\"\n\n for node in split_mod.graph.nodes:\n if node.op == \"call_module\" and \"acc\" in node.target:\n reports += f\"\\nProcessing acc submodule {node.target}\\n\"\n\n submod = getattr(split_mod, node.target)\n\n def get_submod_inputs(main_mod, submod, example_inputs):\n sub_inputs = None\n\n def get_inputs(self, inputs):\n nonlocal sub_inputs\n sub_inputs = inputs\n\n handle = submod.register_forward_pre_hook(get_inputs)\n main_mod(*example_inputs)\n handle.remove()\n return sub_inputs\n\n submod_inputs = get_submod_inputs(\n split_mod, submod, self.sample_input\n )\n ShapeProp(submod).propagate(*submod_inputs)\n\n total_input_bytes = 0\n total_output_bytes = 0\n\n reports += \"Checking inputs...\\n\"\n for n in submod.graph.nodes:\n if n.op == \"placeholder\":\n if not is_node_output_tensor(n):\n reports += f\"Input {n.name} is not a tensor, this might cause 
problems during lowering!\\n\"\n else:\n total_input_bytes += get_size_of_node(submod, n)[0]\n if n.op == \"output\":\n output_node = n\n\n reports += \"Checking outputs...\\n\"\n\n def get_bytes(node: torch.fx.Node):\n nonlocal total_output_bytes\n nonlocal reports\n if not is_node_output_tensor(node):\n reports += f\"Output {node.name} is not a tensor, this might cause problems during lowering!\\n\"\n else:\n total_output_bytes += get_size_of_node(submod, node)[0]\n\n map_arg(output_node.args, get_bytes)\n qps = self.PCIe_BW / max(total_input_bytes, total_output_bytes)\n reports += f\"Total input size in bytes is {total_input_bytes}, total output size in bytes is {total_output_bytes},\"\n reports += f\" theoretical max qps (bounds by PCIe bandwidth) for this submodule is {qps}.\\n\"\n\n if qps < max_qps:\n max_qps = qps\n bottleneck_module = node.target\n\n try:\n lowered_submod = self._lower_model_to_backend(submod, submod_inputs)\n except RuntimeError:\n reports += \"Run into an error during lowering!\\n\"\n reports += self._find_culprit(submod, submod_inputs)\n continue\n\n try:\n lowered_submod(*submod_inputs)\n except RuntimeError:\n reports += \"Run into an error during inference!\\n\"\n reports += self._find_culprit(submod, submod_inputs)\n else:\n reports += \"Lowering and running succeed!\\n\"\n\n reports += f\"\\nTheoretical max qps (bounds by PCIe bandwidth) for this model is {max_qps},\"\n reports += f\" bottleneck is submodule {bottleneck_module}.\"\n print(reports)\n\n # return the reports for testing purposes\n return reports\n\n # ===============================================================\n # Helpers for extend_acc_subgraph() method\n # ===============================================================\n\n def find_reverse_deps(\n self, tag_id: Optional[int] = None\n ) -> Dict[torch.fx.Node, NodeSet]:\n \"\"\"\n Builds reversed topological node dependencies, if tag_id is specified,\n we ignore nodes that are in later subgraph i.e. 
nodes have greater tag_id.\n \"\"\"\n result: Dict[torch.fx.Node, NodeSet] = defaultdict(set)\n\n for node in self.module.graph.nodes:\n if node.op not in CALLABLE_NODE_OPS:\n continue\n\n for user in node.users:\n if user.op not in CALLABLE_NODE_OPS:\n continue\n\n if tag_id is None or (int(user.tag.split(\"_\")[-1]) < tag_id):\n result[node].add(user)\n\n return result\n\n def update_reverse_deps_for_fusions(\n self, deps: Dict[torch.fx.Node, NodeSet]\n ):\n processed_node = set()\n\n for node, fusion in self.fusions.items():\n if node in processed_node:\n continue\n\n new_dep = set()\n\n # Create a new dependency set which include all the\n # dependencies of the nodes in the fusion group\n for n in fusion:\n new_dep.update(deps[n])\n\n # Exclude nodes in the fusion\n new_dep.difference_update(fusion)\n\n # Update dependency\n for n in fusion:\n deps[n] = new_dep\n\n for arg in n.all_input_nodes:\n if arg not in fusion:\n deps[arg].update(fusion)\n\n processed_node.add(n)\n\n def find_parent_nodes_of_subgraph(self, tag: str) -> NodeSet:\n \"\"\"\n Finds parent nodes of the `tag` subgraph.\n\n Traverse the inputs of nodes in the subgraph, if input doesn't belong to the subgraph\n and is not a placeholder, we consider it as the parent node of the subgraph.\n \"\"\"\n parent_nodes = set()\n\n for node in self.module.graph.nodes:\n if node.op in CALLABLE_NODE_OPS and node.tag == tag:\n for arg in node.all_input_nodes:\n if arg.op in CALLABLE_NODE_OPS and arg.tag != tag:\n parent_nodes.add(arg)\n\n return parent_nodes\n\n def extend_acc_subgraph(self, tag: str):\n \"\"\"\n Extend the acc subgraph with `tag` going the reversed topological direction.\n \"\"\"\n # Dict that maps node to its users and ignore users that\n # are in the subgraph that has greater tag\n deps = self.find_reverse_deps(tag_id=int(tag.split(\"_\")[-1]))\n self.update_reverse_deps_for_fusions(deps)\n\n # Parent nodes of the subgraph\n parent_nodes = self.find_parent_nodes_of_subgraph(tag)\n\n visited_nodes: NodeSet = set()\n\n while parent_nodes:\n node = None\n\n # Find a acc node that depends on visited nodes only\n for n in parent_nodes:\n if deps[n] <= visited_nodes and n in self.acc_nodes:\n node = n\n break\n\n if node is None:\n break\n\n # Put the node into `tag` subgraph\n node.tag = tag # type: ignore[attr-defined]\n parent_nodes.remove(node)\n visited_nodes.add(node)\n\n # If node is in a fusion group, add all fusion buddies to parent nodes\n if node in self.fusions:\n for fusion_node in self.fusions[node]:\n if fusion_node not in visited_nodes:\n parent_nodes.add(fusion_node)\n\n # Add inputs of the node to parent nodes\n for arg in node.all_input_nodes:\n if arg.op in CALLABLE_NODE_OPS and arg not in visited_nodes:\n parent_nodes.add(arg)\n\n # ===============================================================\n # Helpers for split() method\n # ===============================================================\n\n def starter_nodes(self) -> Tuple[NodeSet, NodeSet]:\n \"\"\"\n Finds nodes that consume module inputs or get_attr nodes.\n \"\"\"\n starter_cpu_nodes: NodeSet = set()\n starter_acc_nodes: NodeSet = set()\n for node in self.module.graph.nodes:\n if node.op not in {\"placeholder\", \"get_attr\"}:\n continue\n for user in node.users:\n if user in self.acc_nodes:\n starter_acc_nodes.add(user)\n else:\n starter_cpu_nodes.add(user)\n return starter_cpu_nodes, starter_acc_nodes\n\n def put_nodes_into_subgraphs(self) -> List[Subgraph]:\n # We start graph traversal from leaf nodes\n current_cpu_nodes, 
current_acc_nodes = self.starter_nodes()\n visited_nodes: NodeSet = set()\n\n # Determine which subgraph to start from based on node dependency\n acc_subgraph: bool = True\n for n in current_cpu_nodes:\n if self.deps[n] <= visited_nodes:\n acc_subgraph = False\n break\n\n current_subgraph_nodes: NodeList = []\n\n # Result accumulator\n subgraphs: List[Subgraph] = []\n while current_cpu_nodes or current_acc_nodes:\n # Find the first node that should belong to the current subgraph and has all dependencies resolved\n current_nodes = current_acc_nodes if acc_subgraph else current_cpu_nodes\n node = next(\n (n for n in current_nodes if self.deps[n] <= visited_nodes),\n None,\n )\n\n # If nothing was found, then it's time to flip the mode and start a new subgraph\n if node is None:\n if not current_subgraph_nodes:\n raise FxNetSplitterInternalError(\"Subgraph can't be empty\")\n\n subgraphs.append(\n Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)\n )\n acc_subgraph = not acc_subgraph\n current_subgraph_nodes = []\n continue\n\n current_nodes.remove(node)\n visited_nodes.add(node)\n current_subgraph_nodes.append(node)\n\n # Add fusion buddies\n if node in self.fusions:\n if node in self.acc_nodes:\n current_acc_nodes.update(self.fusions[node] - visited_nodes)\n else:\n current_cpu_nodes.update(self.fusions[node] - visited_nodes)\n\n # Put depending nodes into the queue\n for user in node.users:\n if user.op not in CALLABLE_NODE_OPS:\n continue\n\n # Add downstream nodes\n if user in self.acc_nodes:\n current_acc_nodes.add(user)\n else:\n current_cpu_nodes.add(user)\n\n # Check if the last subgraph was not created\n if current_subgraph_nodes:\n subgraphs.append(\n Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)\n )\n\n if not subgraphs:\n raise FxNetSplitterInternalError(\"Couldn't create subgraphs\")\n\n return subgraphs\n\n def remove_small_acc_subgraphs(self, subgraphs: List[Subgraph]) -> List[Subgraph]:\n \"\"\"\n This pass finds ACC submodules with less than specified size and merges\n them with adjacent CPU submodules.\n \"\"\"\n result: List[Subgraph] = []\n for subgraph in subgraphs:\n if subgraph.is_acc:\n if len(subgraph.nodes) >= self.settings.min_acc_module_size:\n result.append(subgraph)\n else:\n print(\n \"Eliminating acc subgraph because it's smaller than the threshold: \"\n f\"{len(subgraph.nodes)} < {self.settings.min_acc_module_size}\"\n )\n if result:\n result[-1].nodes.extend(subgraph.nodes)\n else:\n subgraph.is_acc = False\n result.append(subgraph)\n else:\n if result and not result[-1].is_acc:\n result[-1].nodes.extend(subgraph.nodes)\n else:\n result.append(subgraph)\n return result\n\n def tag(self, subgraphs: List[Subgraph]):\n self.tags: List[str] = []\n for subgraph in subgraphs:\n subgraph_name = self.non_acc_submodule_name\n\n tag = f\"_run_on_acc_{len(self.tags)}\" if subgraph.is_acc else f\"{self.non_acc_submodule_name}{len(self.tags)}\"\n self.tags.append(tag)\n for node in subgraph.nodes:\n if hasattr(node, \"tag\"):\n raise FxNetSplitterInternalError(f\"Node {node} was already tagged\")\n node.tag = tag # type: ignore[attr-defined]\n\n def split(self, remove_tag: bool = False) -> torch.fx.GraphModule:\n split_module = split_by_tags(self.module, self.tags)\n if remove_tag:\n for node in self.module.graph.nodes:\n if hasattr(node, \"tag\"):\n del node.tag\n return split_module\n\n def __call__(self) -> torch.fx.GraphModule:\n subgraphs = self.put_nodes_into_subgraphs()\n subgraphs = self.remove_small_acc_subgraphs(subgraphs)\n 
acc_subgraphs_count = len([s for s in subgraphs if s.is_acc])\n non_acc_subgraphs_count = len(subgraphs) - acc_subgraphs_count\n print(f\"Got {acc_subgraphs_count} acc subgraphs and {non_acc_subgraphs_count} non-acc subgraphs\")\n self.tag(subgraphs)\n return self.split()\n\n def generate_split_results(self) -> SplitResult:\n split_module = self()\n submodule_names = []\n for name, mod in split_module.named_children():\n submodule_names.append(name)\n submodule_inputs = generate_inputs_for_submodules(split_module, self.sample_input, submodule_names)\n return SplitResult(split_module, submodule_inputs, self.non_acc_submodule_name)\n",
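The `_SplitterBase` pipeline above is: find ACC-supported nodes, group them (and their fusion buddies) into alternating acc/cpu subgraphs, tag the nodes, and call `split_by_tags`. A minimal sketch of driving it on the `sin`/`cos` example from its own docstring; it assumes this file is importable as `torch.fx.passes.splitter_base` and that `OperatorSupportBase` comes from the sibling `torch.fx.passes.operator_support` module (both import paths are assumptions), and `SinCosSupport` is a hypothetical support checker.

import torch
from torch.fx import symbolic_trace
from torch.fx.passes.operator_support import OperatorSupportBase
from torch.fx.passes.splitter_base import _SplitterBase, _SplitterSettingBase

class SimpleModule(torch.nn.Module):
    def forward(self, a):
        return torch.sin(a) + torch.cos(a)

class SinCosSupport(OperatorSupportBase):
    # Pretend only sin and cos can run on the accelerator.
    def is_node_supported(self, submodules, node):
        return node.op == "call_function" and node.target in (torch.sin, torch.cos)

traced = symbolic_trace(SimpleModule())
splitter = _SplitterBase(
    traced, (torch.randn(2, 3),), SinCosSupport(), _SplitterSettingBase()
)
split_mod = splitter()  # prints the acc / non-acc subgraph counts
# Expect one accelerator submodule (sin, cos) and one cpu submodule (the add).
print([name for name, _ in split_mod.named_children()])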
"# Owner(s): [\"oncall: jit\"]\n\nimport os\nimport sys\nimport torch\nfrom torch.utils._pytree import tree_map\n\n\nfrom torch.testing._internal.schema_check_mode import SchemaCheckMode\nfrom torch.utils._python_dispatch import enable_torch_dispatch_mode\nfrom torch.testing._internal.jit_utils import JitTestCase\n\npytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(pytorch_test_dir)\n\nif __name__ == '__main__':\n raise RuntimeError(\"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_jit.py TESTNAME\\n\\n\"\n \"instead.\")\n\n# This TorchDispatchTensor Subclass is used to simulate an incorrect schema\n# which is then used to test that SchemaCheckMode behaves as expected\n\nclass IncorrectAliasTensor(torch.Tensor):\n INCORRECT_OPS = {\"aten::add\", \"aten::sub\"}\n\n elem: torch.Tensor\n\n __slots__ = ['elem']\n\n __torch_function__ = torch._C._disabled_torch_function_impl\n\n @staticmethod\n def __new__(cls, elem, *args, **kwargs):\n # The wrapping tensor (IncorrectAliasTensor) shouldn't hold any\n # memory for the class in question, but it should still\n # advertise the same device as before\n r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]\n cls, elem.size(),\n strides=elem.stride(), storage_offset=elem.storage_offset(),\n # TODO: clone storage aliasing\n dtype=elem.dtype, layout=elem.layout,\n device=elem.device, requires_grad=kwargs.get(\"requires_grad\", False)\n )\n # ...the real tensor is held as an element on the tensor.\n r.elem = elem.detach() if r.requires_grad else elem\n return r\n\n def __repr__(self):\n return super().__repr__(tensor_contents=f\"{self.elem}\")\n\n @classmethod\n def __torch_dispatch__(cls, func, types, args=(), kwargs=None):\n def unwrap(e):\n return e.elem if isinstance(e, cls) else e\n\n def wrap(e):\n return cls(e) if isinstance(e, torch.Tensor) else e\n unwrapped_args = tree_map(unwrap, args)\n out = func(*unwrapped_args, **tree_map(unwrap, kwargs))\n if func._schema.name in IncorrectAliasTensor.INCORRECT_OPS:\n args[0].elem = out\n\n return tree_map(wrap, out)\n\n# Tests various schema checking functionalities.\nclass TestSchemaCheck(JitTestCase):\n # Tests that SchemaCheckMode records operator order with grad\n def test_schema_check_mode_operator_order(self):\n schema_check = SchemaCheckMode()\n with enable_torch_dispatch_mode(schema_check):\n x = torch.rand((3, 3), requires_grad=True)\n x.relu().sin()\n self.assertEqual([\"aten::rand\", \"aten::relu\", \"aten::sin\"], schema_check.ops)\n\n # Tests that SchemaCheckMode records operator order without grad\n def test_schema_check_tensor_operator_order_without_grad(self):\n schema_check = SchemaCheckMode()\n with enable_torch_dispatch_mode(schema_check):\n x = torch.rand((3, 3), requires_grad=False)\n x.relu().sin()\n self.assertEqual([\"aten::rand\", \"aten::relu\", \"aten::sin\"], schema_check.ops)\n\n # Tests that SchemaCheckMode wraps torch.Tensor\n def test_schema_check_tensor_functionality(self):\n x = torch.rand((3, 3), requires_grad=True)\n expected = x.relu().sin()\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual = x.relu().sin()\n self.assertEqual(expected, actual)\n\n # Tests that SchemaCheckMode wraps torch.Tensor when an argument's default is overriden\n def test_schema_check_tensor_functionality_default_replaced(self):\n x = torch.rand((3, 3), requires_grad=True)\n expected = x.add(x, alpha=2)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual = x.add(x, 
alpha=2)\n self.assertEqual(expected, actual)\n\n # Tests that SchemaCheckMode wraps torch.Tensor when there is a Tensor[] argument\n def test_schema_check_tensor_functionality_list_input(self):\n a = torch.rand((3, 3))\n b = torch.rand((3, 3))\n c = torch.rand((3, 3))\n expected = torch.linalg.multi_dot([a, b, c])\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual = torch.linalg.multi_dot([a, b, c])\n self.assertEqual(expected, actual)\n\n # Tests that SchemaCheckMode wraps torch.Tensor when there is a kwarg tensor input\n def test_schema_check_tensor_functionality_kwarg_tensor(self):\n x = torch.rand((3, 5))\n w = torch.rand((4))\n expected = torch.stft(x, 4, win_length=4, window=w, return_complex=True)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual = torch.stft(x, 4, win_length=4, window=w, return_complex=True)\n self.assertEqual(expected, actual)\n\n # Tests that SchemaCheckMode wraps torch.Tensor with a mutable op\n def test_schema_check_tensor_functionality_mutable_inputs(self):\n expected = torch.rand((3, 3), requires_grad=False)\n actual = torch.clone(expected)\n expected.sinh_()\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual.sinh_()\n self.assertEqual(expected, actual)\n\n # Tests that an exception is raised for a mismatching mutation\n def test_mutation_check_fail(self):\n with self.assertRaisesRegex(RuntimeError, \"Argument running_mean is not defined as mutable but was mutated\"):\n x = torch.rand((3, 3), requires_grad=True)\n batch = torch.nn.BatchNorm1d(3, track_running_stats=True)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n batch(x)\n\n # Tests that an exception is raised for a mismatching mutation over multiple ops\n def test_mutation_check_fail_multiple_operators(self):\n with self.assertRaisesRegex(RuntimeError, \"Argument running_mean is not defined as mutable but was mutated\"):\n x = torch.rand((3, 3), requires_grad=True)\n batch = torch.nn.BatchNorm1d(3, track_running_stats=True)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n x = x.sinh()\n x = x.tanh()\n x = x.relu()\n batch(x)\n\n # Tests that an exception is raised for a mismatching alias\n def test_alias_check_fail(self):\n with self.assertRaisesRegex(RuntimeError, \"Argument input is not defined to alias output but was aliasing\"):\n x = torch.rand((3, 3), requires_grad=True)\n y = torch.zeros((3, 3))\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n IncorrectAliasTensor(x).add(IncorrectAliasTensor(y), alpha=2)\n\n # Tests that an exception is raised for a mismatching alias over multiple ops\n def test_alias_check_fail_multiple_operators(self):\n with self.assertRaisesRegex(RuntimeError, \"Argument input is not defined to alias output but was aliasing\"):\n x = torch.rand((3, 3), requires_grad=True)\n y = torch.zeros((3, 3), requires_grad=True)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n IncorrectAliasTensor(x).sin().relu().add(IncorrectAliasTensor(y), alpha=2)\n\n # Tests that an exception is raised for a centered mismatching alias over multiple ops\n def test_alias_check_fail_multiple_operators_centered(self):\n with self.assertRaisesRegex(RuntimeError, \"Argument input is not defined to alias output but was aliasing\"):\n x = torch.rand((3, 3), requires_grad=True)\n y = torch.zeros((3, 3), requires_grad=True)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n IncorrectAliasTensor(x).sin().add(IncorrectAliasTensor(y), alpha=2).relu()\n\n # Tests that SchemaCheckMode wraps Torch.tensor when inputs alias\n def 
test_alias_check_with_aliasing_inputs(self):\n expected = torch.rand((3, 3))\n x = expected\n actual = torch.clone(expected)\n y = actual\n expected.add_(x)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual.add_(y)\n self.assertEqual(expected, actual)\n\n # Tests that isAliasOf returns as expected\n def test_is_alias_of(self):\n x = torch.rand((3, 3), requires_grad=True)\n y = torch.rand((3, 3), requires_grad=True)\n y = x.add(x, alpha=2)\n self.assertTrue(torch._C._is_alias_of(x, x))\n self.assertFalse(torch._C._is_alias_of(x, y))\n",
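`IncorrectAliasTensor` above follows the standard `__torch_dispatch__` wrapper-subclass recipe: keep the real tensor in `elem`, unwrap arguments before calling the op, and re-wrap tensor outputs. A minimal sketch of the same recipe that only logs each intercepted ATen op; the `LoggingTensor` name is illustrative, and it relies on the same `_make_wrapper_subclass` and pytree helpers used by the test above.

import torch
from torch.utils._pytree import tree_map

class LoggingTensor(torch.Tensor):
    elem: torch.Tensor
    __slots__ = ["elem"]
    __torch_function__ = torch._C._disabled_torch_function_impl

    @staticmethod
    def __new__(cls, elem):
        # The wrapper holds no storage of its own; it only mirrors elem's metadata.
        r = torch.Tensor._make_wrapper_subclass(
            cls, elem.size(), strides=elem.stride(), dtype=elem.dtype,
            layout=elem.layout, device=elem.device, requires_grad=elem.requires_grad,
        )
        r.elem = elem
        return r

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        print(func._schema.name)  # e.g. "aten::relu"

        def unwrap(e):
            return e.elem if isinstance(e, cls) else e

        def wrap(e):
            return cls(e) if isinstance(e, torch.Tensor) else e

        out = func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {}))
        return tree_map(wrap, out)

out = LoggingTensor(torch.rand(3, 3)).relu().sin()  # prints aten::relu, aten::sin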
"# Owner(s): [\"oncall: distributed\"]\n\nimport torch\nfrom torch.testing._internal.common_fsdp import FSDPTest\nfrom torch.testing._internal.common_distributed import skip_if_lt_x_gpu\nfrom torch.distributed.fsdp.wrap import ParamExecOrderWrapPolicy, always_wrap_policy\nfrom torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy\nfrom torch.distributed.fsdp import FullyShardedDataParallel as FSDP\nfrom torch.testing._internal.common_utils import (\n instantiate_parametrized_tests,\n parametrize,\n run_tests,\n)\n\n\nclass Model(torch.nn.Module):\n def __init__(self) -> None:\n super().__init__()\n self.layer0 = torch.nn.Linear(6, 6)\n self.layer1 = torch.nn.Linear(6, 6, bias=False)\n self.layer2 = torch.nn.Sequential(\n torch.nn.Linear(6, 3, bias=False),\n torch.nn.ReLU(),\n torch.nn.Linear(3, 6, bias=False),\n )\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n # `layer0` -> `layer2` -> `layer1`\n # the forward execution order is NOT consistent with the model definition order.\n z = self.relu(self.layer0(x))\n z = self.relu(self.layer2(z))\n z = self.relu(self.layer1(z))\n return z\n\n def get_input(self, device: torch.device):\n return (torch.randn((8, 6)).to(device), )\n\n def get_loss(self, input, output):\n return (output - input[0]).sum()\n\n @staticmethod\n def wrap(sharding_strategy: ShardingStrategy, device: torch.device, init_policy=always_wrap_policy):\n model = Model()\n wrap_policy = ParamExecOrderWrapPolicy(init_policy=init_policy)\n fsdp_model = FSDP(model, auto_wrap_policy=wrap_policy, sharding_strategy=sharding_strategy)\n return fsdp_model.to(device)\n\n\nclass TestFSDPExecOrder(FSDPTest):\n @property\n def device(self):\n return torch.device(\"cuda\")\n\n @skip_if_lt_x_gpu(2)\n @parametrize(\n \"sharding_strategy\",\n [ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP],\n )\n @parametrize(\"iters\", [1, 3])\n def test_fsdp_flatten_params_exec_order(self, sharding_strategy: ShardingStrategy, iters: int):\n \"\"\"Tests the basic APIs of FSDP with ParamExecOrderWrapPolicy\"\"\"\n fsdp_model = Model.wrap(sharding_strategy, self.device)\n self.assertTrue(fsdp_model._is_param_exec_order_prep_stage())\n for _ in range(iters):\n input = fsdp_model.module.get_input(self.device)\n output = fsdp_model(*input)\n loss = fsdp_model.module.get_loss(input, output).to(self.device)\n loss.backward()\n params_list = list(fsdp_model.parameters())\n # Since the forward execution order is NOT consistent with the model definition order,\n # the ordering in flatten_named_params_exec_order should be different from named_parameters\n self.assertEqual(\n fsdp_model._fsdp_params_exec_order,\n [\n params_list[0],\n params_list[2],\n params_list[3],\n params_list[1]\n ]\n )\n self.assertTrue(fsdp_model._use_param_exec_order_policy())\n self.assertTrue(not fsdp_model._is_param_exec_order_prep_stage())\n\n\ninstantiate_parametrized_tests(TestFSDPExecOrder)\n\nif __name__ == \"__main__\":\n run_tests()\n",
"# Owner(s): [\"module: onnx\"]\n\nimport glob\nimport inspect\nimport io\nimport itertools\nimport os\nimport shutil\nimport tempfile\n\nfrom test_pytorch_common import (\n BATCH_SIZE,\n RNN_HIDDEN_SIZE,\n RNN_INPUT_SIZE,\n RNN_SEQUENCE_LENGTH,\n TestCase,\n flatten,\n run_tests,\n skipIfCaffe2,\n skipIfNoLapack,\n)\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.onnx\nimport torch.testing._internal.common_utils as common\nfrom torch.autograd import Function, Variable\nfrom torch.nn import Module, functional\nfrom torch.onnx import (\n register_custom_op_symbolic,\n unregister_custom_op_symbolic,\n)\nfrom torch.onnx.symbolic_helper import (\n _get_tensor_dim_size,\n _get_tensor_sizes,\n parse_args,\n)\n\n\"\"\"Usage: python test/onnx/test_operators.py [--no-onnx] [--produce-onnx-test-data]\n --no-onnx: no onnx python dependence\n --produce-onnx-test-data: generate onnx test data\n --accept: accept onnx updates and overwrite models\n\"\"\"\n\n# Full diff for expect files\nimport unittest\n\nunittest.TestCase.maxDiff = None\n\n_onnx_test = False # flag to produce onnx test cases.\n_onnx_dep = True # flag to import onnx package.\n\n\ndef export_to_pbtxt(model, inputs, *args, **kwargs):\n return torch.onnx.export_to_pretty_string(\n model, inputs, google_printer=True, *args, **kwargs\n )\n\n\ndef export_to_pb(model, inputs, *args, **kwargs):\n f = io.BytesIO()\n with torch.no_grad():\n torch.onnx.export(model, inputs, f, *args, **kwargs)\n return f.getvalue()\n\n\nclass FuncModule(Module):\n def __init__(self, f, params=None):\n if params is None:\n params = ()\n super().__init__()\n self.f = f\n self.params = nn.ParameterList(list(params))\n\n def forward(self, *args):\n return self.f(*itertools.chain(args, self.params))\n\n\nclass TestOperators(TestCase):\n def assertONNX(self, f, args, params=None, **kwargs):\n if params is None:\n params = ()\n if isinstance(f, nn.Module):\n m = f\n else:\n m = FuncModule(f, params)\n m.eval()\n onnx_model_pbtxt = export_to_pbtxt(m, args, **kwargs)\n subname = kwargs.pop(\"subname\", None)\n self.assertExpected(onnx_model_pbtxt, subname)\n if _onnx_dep:\n onnx_model_pb = export_to_pb(m, args, **kwargs)\n import onnx\n import onnx.checker\n import onnx.numpy_helper\n import test_onnx_common\n\n model_def = onnx.ModelProto.FromString(onnx_model_pb)\n onnx.checker.check_model(model_def)\n if _onnx_test:\n test_function = inspect.stack()[1][0].f_code.co_name\n test_name = test_function[0:4] + \"_operator\" + test_function[4:]\n output_dir = os.path.join(\n test_onnx_common.pytorch_operator_dir, test_name\n )\n # Assume:\n # 1) the old test should be delete before the test.\n # 2) only one assertONNX in each test, otherwise will override the data.\n assert not os.path.exists(output_dir), \"{} should not exist!\".format(\n output_dir\n )\n os.makedirs(output_dir)\n with open(os.path.join(output_dir, \"model.onnx\"), \"wb\") as file:\n file.write(model_def.SerializeToString())\n data_dir = os.path.join(output_dir, \"test_data_set_0\")\n os.makedirs(data_dir)\n if isinstance(args, Variable):\n args = (args,)\n for index, var in enumerate(flatten(args)):\n tensor = onnx.numpy_helper.from_array(var.data.numpy())\n with open(\n os.path.join(data_dir, f\"input_{index}.pb\"), \"wb\"\n ) as file:\n file.write(tensor.SerializeToString())\n outputs = m(*args)\n if isinstance(outputs, Variable):\n outputs = (outputs,)\n for index, var in enumerate(flatten(outputs)):\n tensor = onnx.numpy_helper.from_array(var.data.numpy())\n with 
open(\n os.path.join(data_dir, f\"output_{index}.pb\"), \"wb\"\n ) as file:\n file.write(tensor.SerializeToString())\n\n def assertONNXRaises(self, err, f, args, params=None, **kwargs):\n if params is None:\n params = ()\n if isinstance(f, nn.Module):\n m = f\n else:\n m = FuncModule(f, params)\n self.assertExpectedRaises(err, lambda: export_to_pbtxt(m, args, **kwargs))\n\n def assertONNXRaisesRegex(self, err, reg, f, args, params=None, **kwargs):\n if params is None:\n params = ()\n if isinstance(f, nn.Module):\n m = f\n else:\n m = FuncModule(f, params)\n with self.assertRaisesRegex(err, reg):\n export_to_pbtxt(m, args, **kwargs)\n\n def test_basic(self):\n x = torch.tensor([0.4], requires_grad=True)\n y = torch.tensor([0.7], requires_grad=True)\n self.assertONNX(lambda x, y: -torch.sigmoid(torch.tanh(x * (x + y))), (x, y))\n\n def test_view(self):\n x = torch.tensor([0.0], requires_grad=True)\n self.assertONNX(lambda x: x.view(1, 1), x)\n\n def test_index(self):\n x = torch.tensor([[0.0]], requires_grad=True)\n self.assertONNX(lambda x: x[0], x)\n\n def test_type_as(self):\n x = torch.tensor([0.0], requires_grad=True)\n self.assertONNX(lambda x: x.type_as(x), x)\n\n def test_addconstant(self):\n x = torch.randn(2, 3, requires_grad=True).double()\n self.assertONNX(lambda x: x + 1, x)\n\n def test_add_broadcast(self):\n x = torch.randn(2, 3, requires_grad=True).double()\n y = torch.randn(3, requires_grad=True).double()\n self.assertONNX(lambda x, y: x + y, (x, y))\n\n def test_add_left_broadcast(self):\n x = torch.randn(3, requires_grad=True).double()\n y = torch.randn(2, 3, requires_grad=True).double()\n self.assertONNX(lambda x, y: x + y, (x, y))\n\n def test_add_size1_broadcast(self):\n x = torch.randn(2, 3, requires_grad=True).double()\n y = torch.randn(2, 1, requires_grad=True).double()\n self.assertONNX(lambda x, y: x + y, (x, y))\n\n def test_add_size1_right_broadcast(self):\n x = torch.randn(2, 3, requires_grad=True).double()\n y = torch.randn(3, requires_grad=True).double()\n self.assertONNX(lambda x, y: x + y, (x, y))\n\n def test_add_size1_singleton_broadcast(self):\n x = torch.randn(2, 3, requires_grad=True).double()\n y = torch.randn(1, 3, requires_grad=True).double()\n self.assertONNX(lambda x, y: x + y, (x, y))\n\n def test_rsub(self):\n x = torch.randn(2, 3, requires_grad=True).double()\n self.assertONNX(lambda x: 1 - x, (x,))\n\n def test_transpose(self):\n x = torch.tensor([[0.0, 1.0], [2.0, 3.0]], requires_grad=True)\n self.assertONNX(lambda x: x.transpose(0, 1).transpose(1, 0), x)\n\n def test_chunk(self):\n x = torch.tensor([0.0, 1.0, 2.0], requires_grad=True)\n self.assertONNX(lambda x: x.chunk(2), x)\n\n def test_split(self):\n x = torch.tensor(\n [[0.0, 1.0, 1.0, 0.0, 2.0, 2.0], [2.0, 3.0, 3.0, 2.0, 1.0, 1.0]]\n )\n self.assertONNX(lambda x: torch.split(x, 2, 1), x)\n\n def test_split_with_sizes(self):\n x = torch.tensor(\n [[0.0, 1.0, 1.0, 0.0, 2.0, 2.0], [2.0, 3.0, 3.0, 2.0, 1.0, 1.0]]\n )\n self.assertONNX(lambda x: torch.split(x, [2, 1, 3], 1), x)\n\n def test_concat2(self):\n x = torch.randn(2, 3)\n y = torch.randn(2, 3)\n self.assertONNX(lambda inputs: torch.cat(inputs, 1), ((x, y),))\n\n def test_mm(self):\n m1 = torch.randn(2, 3, requires_grad=True)\n m2 = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(torch.mm, (m1, m2))\n\n def test_addmm(self):\n m1 = torch.randn(2, 3, requires_grad=True)\n m2 = torch.randn(3, 4, requires_grad=True)\n m3 = torch.randn(4, requires_grad=True)\n self.assertONNX(\n lambda x, y, z: torch.addmm(torch.addmm(z, x, y), 
x, y), (m1, m2, m3)\n )\n\n def test_permute2(self):\n x = torch.tensor([[[[[[0.0]]]]]], requires_grad=True)\n self.assertONNX(lambda x: x.permute(0, 1, 4, 2, 5, 3), x)\n\n def test_pad(self):\n x = torch.tensor(\n [[[[0.0, 1.0, 1.0, 1.0], [2.0, 3.0, 7.0, 7.0]]]], requires_grad=True\n )\n self.assertONNX(nn.ReflectionPad2d((2, 3, 0, 1)), x)\n\n def test_params(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)\n y = nn.Parameter(torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True))\n self.assertONNX(\n lambda x, y: -torch.sigmoid(torch.tanh(x * (x + y))),\n x,\n params=(y,),\n keep_initializers_as_inputs=True,\n )\n\n def test_params_onnx_irv4(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)\n y = nn.Parameter(torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True))\n self.assertONNX(\n lambda x, y: -torch.sigmoid(torch.tanh(x * (x + y))),\n x,\n params=(y,),\n keep_initializers_as_inputs=False,\n )\n\n def test_symbolic_mismatch(self):\n class MyFun(Function):\n @staticmethod\n def symbolic(g, x):\n # The inside of this function should never be invoked, because\n # we will fail due to an argument mismatch first.\n raise AssertionError()\n\n @staticmethod\n def forward(ctx, x, y):\n return x + y\n\n x = torch.ones(2, 2)\n y = torch.ones(2, 2)\n # NB: Don't use expect test here, the type error wobbles depending\n # on Python version\n with self.assertRaisesRegex(TypeError, \"occurred when translating MyFun\"):\n export_to_pbtxt(FuncModule(MyFun().apply), (x, y))\n\n # TODO: Do an nn style test for these\n def test_batchnorm(self):\n x = torch.ones(2, 2, 2, 2, requires_grad=True)\n self.assertONNX(nn.BatchNorm2d(2), x, keep_initializers_as_inputs=True)\n\n def test_batchnorm_onnx_irv4(self):\n x = torch.ones(2, 2, 2, 2, requires_grad=True)\n self.assertONNX(nn.BatchNorm2d(2), x)\n\n def test_batchnorm_1d(self):\n x = torch.ones(2, 2, requires_grad=True)\n self.assertONNX(nn.BatchNorm1d(2), x, keep_initializers_as_inputs=True)\n\n def test_batchnorm_training(self):\n x = torch.ones(2, 2, 2, 2, requires_grad=True)\n self.assertONNX(\n nn.BatchNorm2d(2),\n x,\n training=torch.onnx.TrainingMode.TRAINING,\n keep_initializers_as_inputs=True,\n )\n\n def test_conv(self):\n x = torch.ones(20, 16, 50, 40, requires_grad=True)\n self.assertONNX(\n nn.Conv2d(16, 13, 3, bias=False), x, keep_initializers_as_inputs=True\n )\n\n def test_conv_onnx_irv4(self):\n x = torch.ones(20, 16, 50, 40, requires_grad=True)\n self.assertONNX(nn.Conv2d(16, 13, 3, bias=False), x)\n\n def test_conv_onnx_irv4_opset8(self):\n # This test point checks that for opset 8 (or lower), even if\n # keep_initializers_as_inputs is set to False, it is ignored,\n # and initializers are listed as ONNX graph input, in accordance\n # with ONNX IR v3 semantics (which apply to opset version <= 8).\n x = torch.ones(1, 2, 5, 7, requires_grad=True)\n conv_node = nn.Conv2d(2, 4, 3, bias=False)\n conv_node.weight.data.fill_(1.0)\n self.assertONNX(\n conv_node, x, opset_version=8, keep_initializers_as_inputs=False\n )\n\n def test_conv_variable_length(self):\n x = torch.ones(5, 3, 6, 6, requires_grad=True)\n model = torch.nn.Conv2d(3, 2, 3)\n\n dynamic_axes = {\n \"input_1\": [0, 2, 3],\n \"output_1\": {0: \"output_1_variable_dim_0\", 1: \"output_1_variable_dim_1\"},\n }\n model_proto_file = tempfile.NamedTemporaryFile()\n torch.onnx.export(\n model,\n x,\n model_proto_file.name,\n verbose=True,\n input_names=[\"input_1\"],\n output_names=[\"output_1\"],\n dynamic_axes=dynamic_axes,\n )\n\n 
import onnx\n\n onnx_model = onnx.load(model_proto_file.name)\n onnx.checker.check_model(onnx_model)\n\n # Asserting the default dynamic axes names are generated when custom names are not provided\n assert (\n onnx_model.graph.input[0].type.tensor_type.shape.dim[0].dim_param\n == \"input_1_dynamic_axes_1\"\n )\n assert (\n onnx_model.graph.input[0].type.tensor_type.shape.dim[2].dim_param\n == \"input_1_dynamic_axes_2\"\n )\n assert (\n onnx_model.graph.input[0].type.tensor_type.shape.dim[3].dim_param\n == \"input_1_dynamic_axes_3\"\n )\n\n # Asserting the custom names are applied when provided\n assert (\n onnx_model.graph.output[0].type.tensor_type.shape.dim[0].dim_param\n == \"output_1_variable_dim_0\"\n )\n assert (\n onnx_model.graph.output[0].type.tensor_type.shape.dim[1].dim_param\n == \"output_1_variable_dim_1\"\n )\n\n def test_convtranspose(self):\n x = torch.ones(2, 3, 4, 5, requires_grad=True)\n self.assertONNX(\n nn.ConvTranspose2d(\n 3, 3, 3, stride=3, bias=False, padding=1, output_padding=2\n ),\n x,\n keep_initializers_as_inputs=True,\n )\n\n def test_maxpool(self):\n x = torch.randn(20, 16, 50)\n self.assertONNX(nn.MaxPool1d(3, stride=2), x)\n\n def test_maxpool_dilations(self):\n x = torch.randn(20, 16, 50)\n self.assertONNX(nn.MaxPool1d(2, stride=1, dilation=2), x, opset_version=10)\n\n def test_avg_pool2d(self):\n x = torch.randn(20, 16, 50, 32)\n self.assertONNX(nn.AvgPool2d(3, stride=2), x)\n\n def test_maxpool_indices(self):\n x = torch.randn(20, 16, 50)\n self.assertONNX(nn.MaxPool1d(3, stride=2, return_indices=True), x)\n\n @skipIfCaffe2\n def test_at_op(self):\n x = torch.randn(3, 4)\n\n class MyFun(Function):\n @staticmethod\n def symbolic(g, x):\n return g.at(\"add\", x, x)\n\n @staticmethod\n def forward(ctx, x):\n return x + x\n\n class MyModule(Module):\n def forward(self, x):\n return MyFun.apply(x)\n\n self.assertONNX(\n MyModule(),\n x,\n operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,\n )\n\n def test_clip(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.clamp(x, min=-0.5, max=0.5), x)\n\n def test_clip_min(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.clamp(min=-0.1), x)\n\n def test_clip_max(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.clamp(max=0.1), x)\n\n def test_hardtanh(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.nn.Hardtanh(-0.5, 0.5)(x), x)\n\n def test_full(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.full(x.shape, 2.0), x)\n\n def test_full_like(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.full_like(x, 2), x)\n\n def test_max(self):\n x = torch.randn(3, 4, requires_grad=True)\n y = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x, y: torch.max(x, y), (x, y))\n\n def test_min(self):\n x = torch.randn(3, 4, requires_grad=True)\n y = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x, y: torch.min(x, y), (x, y))\n\n def test_mean(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.mean(x), x)\n\n def test_reduced_mean(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.mean(x, dim=2), x)\n\n def test_reduced_mean_keepdim(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.mean(x, dim=(2, 3), keepdim=True), x)\n\n def test_mean_dtype(self):\n x 
= torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.mean(x, dtype=torch.double), x)\n\n def test_reduced_mean_dtype(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.mean(x, dim=0, dtype=torch.double), x)\n\n def test_sum(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.sum(x), x)\n\n def test_sum_dtype(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.sum(x, dtype=torch.double), x)\n\n def test_reduced_sum_dtype(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.sum(x, dim=0, dtype=torch.double), x)\n\n def test_reduced_sum(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.sum(x, dim=(1, 2)), x)\n\n def test_reduced_sum_keepdim(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.sum(x, dim=2, keepdim=True), x)\n\n def test_prod(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.prod(x), x)\n\n def test_reduced_prod(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.prod(x, dim=2), x)\n\n def test_reduced_prod_keepdim(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.prod(x, dim=2, keepdim=True), x)\n\n def test_prod_dtype(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.prod(x, dtype=torch.double), x)\n\n def test_reduced_prod_dtype(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.prod(x, dim=0, dtype=torch.double), x)\n\n def test_sqrt(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.sqrt(x), x)\n\n def test_rsqrt(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.rsqrt(x), x)\n\n def test_equal(self):\n x = torch.randn(1, 2, 3, 1, requires_grad=False).int()\n y = torch.randn(1, 4, requires_grad=False).int()\n self.assertONNX(lambda x, y: x == y, (x, y))\n\n def test_lt(self):\n x = torch.randn(1, 2, 3, 1, requires_grad=False).int()\n y = torch.randn(1, 4, requires_grad=False).int()\n self.assertONNX(lambda x, y: x < y, (x, y))\n\n def test_gt(self):\n x = torch.randn(1, 2, 3, 1, requires_grad=False).int()\n y = torch.randn(1, 4, requires_grad=False).int()\n self.assertONNX(lambda x, y: x > y, (x, y))\n\n def test_le(self):\n x = torch.randn(3, 4, requires_grad=False).int()\n y = torch.randn(3, 4, requires_grad=False).int()\n self.assertONNX(lambda x, y: x <= y, (x, y))\n\n def test_ge(self):\n x = torch.randn(3, 4, requires_grad=False).int()\n y = torch.randn(3, 4, requires_grad=False).int()\n self.assertONNX(lambda x, y: x >= y, (x, y))\n\n def test_exp(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.exp(), x)\n\n def test_sin(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.sin(), x)\n\n def test_cos(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.cos(), x)\n\n def test_tan(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.tan(), x)\n\n def test_asin(self):\n x = torch.rand(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.asin(), x)\n\n def test_acos(self):\n x = torch.rand(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.acos(), x)\n\n def test_slice(self):\n x = torch.rand(3, 4, requires_grad=True)\n 
self.assertONNX(lambda x: x[:, 1:2], x)\n\n def test_slice_dynamic(self):\n x = torch.rand(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x[x.size(0) :, x.size(1) - 3], x, opset_version=10)\n\n def test_sign(self):\n x = torch.rand(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.sign(), x)\n\n def test_narrow(self):\n x = torch.randn(3, 3, requires_grad=True)\n self.assertONNX(lambda x: torch.narrow(x, 0, 0, 2), x)\n\n def test_atan(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.atan(), x)\n\n def test_view_flatten(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.view(x.size()[0], x.numel() // x.size()[0]), x)\n\n def test_flatten(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.flatten(x), x)\n\n def test_flatten2D(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.flatten(x, 1), x)\n\n def test_isnan(self):\n x = torch.tensor([1, float(\"nan\"), 2])\n self.assertONNX(lambda x: torch.isnan(x), x)\n\n def test_argmax(self):\n x = torch.randn(4, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.argmax(x, dim=1), x)\n\n def test_logsoftmax(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(nn.LogSoftmax(dim=3), x)\n\n def test_pow(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n y = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x, y: x.pow(y), (x, y))\n\n def test_elu(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(nn.ELU(), x)\n\n def test_selu(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(nn.SELU(), x)\n\n def test_repeat(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.repeat(1, 2, 3, 4), x)\n\n def test_repeat_dim_overflow(self):\n x = torch.randn(1, 2, requires_grad=True)\n self.assertONNX(lambda x: x.repeat(1, 2, 3, 4), x)\n\n def test_norm_p1(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.norm(p=1, dim=2), (x))\n\n def test_norm_p2(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.norm(p=2, dim=2), (x))\n\n def test_upsample_nearest_scale(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(\n lambda x: nn.functional.interpolate(\n x, scale_factor=2.0, mode=\"nearest\", recompute_scale_factor=False\n ),\n x,\n )\n\n def test_upsample_nearest_scale_default_scale_factor(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(\n lambda x: nn.functional.interpolate(x, scale_factor=2.0, mode=\"nearest\"), x\n )\n\n def test_upsample_nearest_size(self):\n x = torch.randn(1, 2, 3, 4, requires_grad=True)\n self.assertONNX(\n lambda x: nn.functional.interpolate(x, size=16, mode=\"nearest\"), x\n )\n\n def test_unsqueeze(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.unsqueeze(len(x.shape)), x)\n\n def test_batchnorm_noaffine(self):\n x = torch.randn(128, 128, 1, 1, requires_grad=True)\n self.assertONNX(\n nn.BatchNorm2d(128, affine=False, momentum=0.3),\n x,\n keep_initializers_as_inputs=True,\n )\n\n @skipIfCaffe2\n def test_embedding_bags(self):\n emb_bag = nn.EmbeddingBag(10, 8)\n input = torch.tensor([1, 2, 3, 4]).long()\n offset = torch.tensor([0]).long()\n self.assertONNX(\n emb_bag,\n (input, offset),\n keep_initializers_as_inputs=True,\n operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,\n )\n\n 
def test_implicit_expand(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x + 1, x)\n\n def test_reduce_sum_negative_indices(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.sum(-1), x)\n\n def test_randn(self):\n x = torch.randn(1, 2, 3, 4)\n self.assertONNX(lambda x: torch.randn(1, 2, 3, 4) + x, x)\n\n def test_rand(self):\n x = torch.rand(1, 2, 3, 4)\n self.assertONNX(lambda x: torch.rand(1, 2, 3, 4) + x, x)\n\n def test_rrelu(self):\n x = torch.randn(1, 2, 3, 4)\n self.assertONNX(torch.nn.RReLU(), x)\n\n def test_prelu(self):\n x = torch.randn(1, 2, 3, 4)\n self.assertONNX(torch.nn.PReLU(2), x, keep_initializers_as_inputs=True)\n\n def test_log_sigmoid(self):\n x = torch.randn(1, 2, 3, 4)\n self.assertONNX(torch.nn.LogSigmoid(), x)\n\n def test_linear(self):\n x = torch.randn(3, 4)\n self.assertONNX(\n torch.nn.Linear(4, 5, bias=True), x, keep_initializers_as_inputs=True\n )\n\n def test_empty_like(self):\n x = torch.randn(5, 8, requires_grad=True)\n self.assertONNX(lambda x: torch.empty_like(x), x)\n\n def test_empty_like_opset7(self):\n x = torch.randn(5, 8, requires_grad=True)\n self.assertONNX(lambda x: torch.empty_like(x), x, opset_version=7)\n\n def test_zeros_like(self):\n x = torch.randn(5, 8, requires_grad=True)\n self.assertONNX(lambda x: torch.zeros_like(x), x)\n\n def test_ones_like(self):\n x = torch.randn(6, 10, requires_grad=True)\n self.assertONNX(lambda x: torch.ones_like(x), x)\n\n def test_expand(self):\n x = torch.randn(6, 1, requires_grad=True)\n self.assertONNX(lambda x: x.expand(4, 6, 2), x)\n\n def test_ne(self):\n x = torch.randn(1, 2, 3, 1, requires_grad=False).int()\n y = torch.randn(1, 4, requires_grad=False).int()\n self.assertONNX(lambda x, y: torch.ne(x, y), (x, y))\n\n def test_reducemax(self):\n x = torch.randn(1, 2, 3, 4)\n self.assertONNX(lambda x: torch.max(x), x)\n\n def test_reducemin(self):\n x = torch.randn(1, 2, 3, 4)\n self.assertONNX(lambda x: torch.min(x), x)\n\n def test_erf(self):\n x = torch.randn(1, 2, 3, 4)\n self.assertONNX(lambda x: x.erf(), x)\n\n def test_dropout(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.max(functional.dropout(x, training=False)), x)\n\n def test_dropout_default(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(\n lambda x: torch.max(\n functional.dropout(\n x,\n )\n ),\n x,\n )\n\n def test_dropout_training(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(\n lambda x: torch.max(functional.dropout(x)),\n x,\n training=torch.onnx.TrainingMode.TRAINING,\n )\n\n def test_dropout_opset12(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(\n lambda x: torch.max(functional.dropout(x, training=False)),\n x,\n opset_version=12,\n )\n\n def test_dropout_training_opset12(self):\n x = torch.randn(3, 4, requires_grad=True)\n self.assertONNX(\n lambda x: torch.max(functional.dropout(x)),\n x,\n opset_version=12,\n training=torch.onnx.TrainingMode.TRAINING,\n )\n\n def test_nonzero(self):\n x = torch.tensor(\n [[[2.0, 2.0], [1.0, 0.0]], [[0.0, 0.0], [1.0, 1.0]]], requires_grad=True\n )\n self.assertONNX(lambda x: torch.nonzero(x), x)\n\n def test_gather(self):\n data = torch.randn(3, 4, 3, requires_grad=True)\n index = torch.tensor([2, 0]).view(1, 2, 1).expand(3, 2, 3)\n self.assertONNX(lambda data, index: data.gather(1, index), (data, index))\n\n def test_gather_opset11(self):\n data = torch.randn(3, 4, 3, requires_grad=True)\n index = torch.tensor([2, 0]).view(1, 2, 
1).expand(3, 2, 3)\n self.assertONNX(\n lambda data, index: data.gather(1, index), (data, index), opset_version=11\n )\n\n def test_scatter_add(self):\n data = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)\n values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])\n self.assertONNX(\n lambda data, index: data.scatter_add(1, indices, values),\n (data, (indices, values)),\n )\n\n def test_scatter_add_opset11(self):\n data = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)\n values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])\n self.assertONNX(\n lambda data, index: data.scatter_add(1, indices, values),\n (data, (indices, values)),\n opset_version=11,\n )\n\n def test_scatter_add_opset16(self):\n data = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n indices = torch.tensor([[0, 0], [1, 1], [0, 1]], dtype=torch.int64)\n values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])\n self.assertONNX(\n lambda data, index: data.scatter_add(1, indices, values),\n (data, (indices, values)),\n opset_version=16,\n )\n\n def test_master_opset(self):\n x = torch.randn(2, 3).float()\n y = torch.randn(2, 3).float()\n self.assertONNX(lambda x, y: x + y, (x, y), opset_version=10)\n\n def test_std(self):\n x = torch.randn(2, 3, 4).float()\n self.assertONNX(\n lambda x: torch.std(x, dim=(0, 1), unbiased=True, keepdim=True), x\n )\n\n def test_cumsum(self):\n x = torch.randn(2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: torch.cumsum(x, dim=1), x, opset_version=11)\n\n # Github Issue: https://github.com/pytorch/pytorch/issues/71095\n # def test_c2_op(self):\n # class MyModel(torch.nn.Module):\n # def __init__(self):\n # super(MyModel, self).__init__()\n #\n # def forward(self, scores, bbox_deltas, im_info, anchors):\n # a, b = torch.ops._caffe2.GenerateProposals(\n # (scores), (bbox_deltas), (im_info), (anchors),\n # 2.0, 6000, 300, 0.7, 16, True, -90, 90, 1.0, True,\n # )\n # return a, b\n #\n # model = MyModel()\n # A = 4\n # H = 10\n # W = 8\n # img_count = 3\n # scores = torch.ones(img_count, A, H, W, dtype=torch.float32)\n # bbox_deltas = torch.linspace(0, 10, steps=img_count * 4 * A * H * W,\n # dtype=torch.float32)\n # bbox_deltas = bbox_deltas.view(img_count, 4 * A, H, W)\n # im_info = torch.ones(img_count, 3, dtype=torch.float32)\n # anchors = torch.ones(A, 4, dtype=torch.float32)\n # inputs = (scores, bbox_deltas, im_info, anchors)\n # self.assertONNX(model, inputs, custom_opsets={\"org.pytorch._caffe2\": 0})\n\n def test_dict(self):\n class MyModel(torch.nn.Module):\n def forward(self, x_in):\n x_out = {}\n x_out[\"test_key_out\"] = torch.add(\n x_in[list(x_in.keys())[0]], list(x_in.keys())[0]\n )\n return x_out\n\n x = {torch.tensor(1.0): torch.randn(1, 2, 3)}\n self.assertONNX(MyModel(), (x, {}))\n\n def test_dict_str(self):\n class MyModel(torch.nn.Module):\n def forward(self, x_in):\n x_out = {}\n x_out[\"test_key_out\"] = torch.add(x_in[\"test_key_in\"], 2.0)\n return x_out\n\n x = {\"test_key_in\": torch.randn(1, 2, 3)}\n self.assertONNX(MyModel(), (x, {}))\n\n def test_arange_dynamic(self):\n class TestModel(torch.nn.Module):\n def forward(self, input):\n return torch.arange(input.shape[0], input.shape[0] + 5, 0.5)\n\n input = torch.randn(5, 3, 2)\n self.assertONNX(TestModel(), input, opset_version=11)\n\n def test_bitshift(self):\n class BitshiftModel(torch.nn.Module):\n def 
forward(self, input):\n return input >> 1, input >> 2\n\n input = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)\n self.assertONNX(BitshiftModel(), input, opset_version=11)\n\n @skipIfCaffe2\n def test_layer_norm_aten(self):\n model = torch.nn.LayerNorm([10, 10])\n x = torch.randn(20, 5, 10, 10)\n self.assertONNX(\n model,\n x,\n operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,\n )\n\n def test_pixel_shuffle(self):\n x = torch.randn(2, 8, 3, 4).float()\n self.assertONNX(\n lambda x: torch.pixel_shuffle(x, upscale_factor=2), x, opset_version=11\n )\n\n def test_frobenius_norm(self):\n x = torch.randn(2, 3, 4).float()\n self.assertONNX(lambda x: torch.norm(x, p=\"fro\", dim=(0, 1), keepdim=True), x)\n\n def test_unfold(self):\n x = torch.randn(2, 3, 4, requires_grad=True)\n self.assertONNX(lambda x: x.unfold(dimension=2, size=2, step=2), x)\n\n def test_remainder(self):\n x = torch.randn(2, 3, 4)\n y = torch.randn(2, 1, 4)\n self.assertONNX(lambda x, y: torch.remainder(x, y), (x, y))\n\n def test_fmod(self):\n x = torch.randn(2, 3, 4)\n y = torch.randn(2, 1, 4)\n self.assertONNX(lambda x, y: torch.fmod(x, y), (x, y), opset_version=10)\n\n def test_gelu(self):\n x = torch.randn(2, 3, 4, 5, requires_grad=True)\n self.assertONNX(lambda x: torch.nn.functional.gelu(x), x)\n\n def test_unique(self):\n x = torch.randint(3, (2, 3, 4, 5)).float()\n self.assertONNX(\n lambda x: torch.unique(\n x, dim=0, sorted=True, return_inverse=False, return_counts=True\n ),\n x,\n opset_version=11,\n )\n\n def test_meshgrid(self):\n x = torch.ones(3, requires_grad=True)\n y = torch.zeros(4, requires_grad=True)\n z = torch.ones(5, requires_grad=True)\n self.assertONNX(lambda x, y, z: torch.meshgrid(x, y, z), (x, y, z))\n\n def test_topk(self):\n x = torch.arange(1.0, 6.0, requires_grad=True)\n k = torch.tensor(3)\n self.assertONNX(lambda x, k: torch.topk(x, k), (x, k), opset_version=10)\n\n def test_topk_smallest_unsorted(self):\n x = torch.arange(1.0, 6.0, requires_grad=True)\n k = torch.tensor(3)\n self.assertONNX(\n lambda x, k: torch.topk(x, k, largest=False, sorted=False),\n (x, k),\n opset_version=11,\n )\n\n def test_baddbmm(self):\n x = torch.randn(10, 3, 5)\n b1 = torch.randn(10, 3, 4)\n b2 = torch.randn(10, 4, 5)\n self.assertONNX(lambda x, b1, b2: torch.baddbmm(x, b1, b2), (x, b1, b2))\n\n def test_round(self):\n x = torch.tensor([0.9920, -1.0362, -1.5000, 2.5000], requires_grad=True)\n self.assertONNX(lambda x: torch.round(x), x, opset_version=11)\n\n def test_dim(self):\n x = torch.ones((2, 2), requires_grad=True)\n self.assertONNX(lambda x: torch.scalar_tensor(x.dim()), x)\n\n @skipIfNoLapack\n def test_det(self):\n x = torch.randn(2, 3, 5, 5, device=torch.device(\"cpu\"))\n self.assertONNX(lambda x: torch.det(x), x, opset_version=11)\n self.assertONNX(lambda x: torch.linalg.det(x), x, opset_version=11)\n\n def test_softmaxcrossentropy(self):\n x = torch.randn(3, 5)\n y = torch.empty(3, dtype=torch.long).random_(5)\n self.assertONNX(torch.nn.CrossEntropyLoss(), (x, y), opset_version=12)\n\n def test_softmaxcrossentropy_ignore_index(self):\n x = torch.randn(3, 5)\n y = torch.empty(3, dtype=torch.long).random_(5)\n self.assertONNX(\n torch.nn.CrossEntropyLoss(ignore_index=1), (x, y), opset_version=12\n )\n\n def test_softmaxcrossentropy_weights(self):\n x = torch.randn(3, 5)\n y = torch.empty(3, dtype=torch.long).random_(5)\n self.assertONNX(\n torch.nn.CrossEntropyLoss(weight=torch.randn(5)), (x, y), opset_version=12\n )\n\n def test_softmaxcrossentropy_3d(self):\n x = 
torch.randn(3, 5, 2)\n y = torch.empty(3, 2, dtype=torch.long).random_(5)\n self.assertONNX(torch.nn.CrossEntropyLoss(), (x, y), opset_version=12)\n\n def test_softmaxcrossentropy_3d_none(self):\n x = torch.randn(3, 5, 2)\n y = torch.empty(3, 2, dtype=torch.long).random_(5)\n self.assertONNX(\n torch.nn.CrossEntropyLoss(reduction=\"none\"), (x, y), opset_version=12\n )\n\n def test_softmaxcrossentropy_4d(self):\n x = torch.randn(3, 5, 2, 1)\n y = torch.empty(3, 2, 1, dtype=torch.long).random_(5)\n self.assertONNX(torch.nn.CrossEntropyLoss(), (x, y), opset_version=12)\n\n def test_lstm_none_sequence_lens(self):\n \"\"\"Test symbolic shape inference for LSTM when the input sequence_lens = None.\"\"\"\n input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)\n h0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)\n c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)\n\n class LSTMModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.rnn = torch.nn.LSTM(\n RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False\n )\n\n def forward(self, x, h0, c0):\n a, b = self.rnn(x, (h0, c0))\n return torch.ones(b[0].shape)\n\n self.assertONNX(\n LSTMModel(),\n (input, h0, c0),\n input_names=[\"x\", \"y\"],\n dynamic_axes={\"x\": {0: \"batch\"}},\n opset_version=12,\n )\n\n def test_dynamic_axes_add(self):\n m1 = torch.randn(2, 3, requires_grad=True)\n m2 = torch.randn(2, 1, requires_grad=True)\n self.assertONNX(\n lambda x, y: torch.add(x, y),\n (m1, m2),\n input_names=[\"input_1\", \"input_2\"],\n dynamic_axes={\"input_1\": {1: \"dim_1\"}, \"input_2\": {1: \"dim_2\"}},\n opset_version=12,\n )\n\n def test_dynamic_axes_add_inputs_same_symbolic_shape(self):\n m1 = torch.randn(2, 3, requires_grad=True)\n self.assertONNX(\n lambda x: torch.add(x, x),\n (m1,),\n input_names=[\"input_1\"],\n dynamic_axes={\"input_1\": {1: \"dim_1\"}},\n opset_version=12,\n )\n\n def test_dynamic_axes_matmul(self):\n m1 = torch.randn(2, 2, 4, requires_grad=True)\n m2 = torch.randn(2, 4, 3, requires_grad=True)\n self.assertONNX(\n lambda x, y: torch.matmul(x, y),\n (m1, m2),\n input_names=[\"input_1\", \"input_2\"],\n dynamic_axes={\"input_1\": {1: \"dim_0\"}, \"input_2\": {2: \"dim_1\"}},\n opset_version=12,\n )\n\n def test_dynamic_axes_reduce_mean(self):\n m1 = torch.randn(2, 3, 4, requires_grad=True)\n self.assertONNX(\n lambda x: torch.mean(x, dim=1),\n (m1),\n input_names=[\"input\"],\n dynamic_axes={\"input\": {1: \"dim_1\", 2: \"dim_2\"}},\n opset_version=12,\n )\n\n def test_dynamic_axes_unchange(self):\n \"\"\"Test ProcessUnchangeNode in symbolic shape inference.\"\"\"\n m1 = torch.randn(2, 3, requires_grad=True)\n self.assertONNX(\n lambda x: torch.softmax(x, dim=0),\n (m1,),\n input_names=[\"input\"],\n dynamic_axes={\"input\": {1: \"dim_1\"}},\n opset_version=12,\n )\n\n def test_aten_embedding_1(self):\n _onnx_opset_version = 12\n\n @parse_args(\"v\", \"v\", \"i\", \"b\", \"b\")\n def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):\n custom_attributes_json = (\n \"{\"\n f'\"padding_idx\":{str(padding_idx)},'\n f'\"scale_grad_by_freq\":{str(scale_grad_by_freq).lower()},'\n f'\"sparse\":{str(sparse).lower()}'\n \"}\"\n )\n output = g.at(\n \"embedding\",\n weight,\n indices,\n custom_attributes_json_s=custom_attributes_json,\n )\n return output\n\n register_custom_op_symbolic(\"::embedding\", embedding, _onnx_opset_version)\n\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.emb = torch.nn.Embedding(4, 8)\n\n def forward(self, x, y):\n 
res = self.emb(x)\n res = res + y\n return torch.ones(res.shape[0])\n\n model = Model()\n x = torch.ones(32, dtype=torch.long)\n y = torch.randn(1, 8)\n self.assertONNX(model, (x, y), opset_version=_onnx_opset_version)\n\n unregister_custom_op_symbolic(\"::embedding\", _onnx_opset_version)\n\n # This is test_aten_embedding_1 with shape inference on custom symbolic aten::embedding.\n @skipIfCaffe2\n def test_aten_embedding_2(self):\n _onnx_opset_version = 12\n\n @parse_args(\"v\", \"v\", \"i\", \"b\", \"b\")\n def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):\n custom_attributes_json = (\n \"{\"\n f'\"padding_idx\":{str(padding_idx)},'\n f'\"scale_grad_by_freq\":{str(scale_grad_by_freq).lower()},'\n f'\"sparse\":{str(sparse).lower()}'\n \"}\"\n )\n output = g.at(\n \"embedding\",\n weight,\n indices,\n custom_attributes_json_s=custom_attributes_json,\n )\n\n # do shape inference and set it via setType\n indices_shape = _get_tensor_sizes(indices)\n if indices_shape is not None and hasattr(weight.type(), \"with_sizes\"):\n output_type = weight.type().with_sizes(\n indices_shape + [_get_tensor_dim_size(weight, 1)]\n )\n output.setType(output_type)\n return output\n\n register_custom_op_symbolic(\"::embedding\", embedding, _onnx_opset_version)\n\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.emb = torch.nn.Embedding(4, 8)\n\n def forward(self, x, y):\n res = self.emb(x)\n res = res + y\n return torch.ones(res.shape[0])\n\n model = Model()\n x = torch.ones(32, dtype=torch.long)\n y = torch.randn(1, 8)\n self.assertONNX(\n model,\n (x, y),\n opset_version=_onnx_opset_version,\n input_names=[\"input_1\", \"input_2\"],\n dynamic_axes={\"input_1\": {0: \"dim_0\"}, \"input_2\": {0: \"dim_1\", 1: \"dim_2\"}},\n keep_initializers_as_inputs=False,\n operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,\n )\n\n unregister_custom_op_symbolic(\"::embedding\", _onnx_opset_version)\n\n # Without shapeValueMap, the onnx graph looks like:\n # graph(%0 : Float(*, 1, 128, 1, strides=[128, 128, 1, 1], requires_grad=0, device=cpu)):\n # %2 : Long(4, strides=[1], device=cpu) = onnx::Shape(%0)\n # %4 : Long(device=cpu) = onnx::Constant[value={0}]()\n # %5 : Long(device=cpu) = onnx::Gather[axis=0](%2, %4)\n # %6 : Long(device=cpu) = onnx::Constant[value={1}]()\n # %7 : Long(device=cpu) = onnx::Constant[value={2}]()\n # %8 : Long(device=cpu) = onnx::Constant[value={-1}]()\n # %9 : int[] = prim::ListConstruct(%5, %6, %7, %8)\n # %10 : Float(*, *, *, *, strides=[128, 128, 64, 1], requires_grad=0, device=cpu) = onnx::Reshape(%0, %9)\n # ...\n # With shapeValueMap, it becomes:\n # ...\n # %10 : Float(*, 1, 2, 64, strides=[128, 128, 64, 1], requires_grad=0, device=cpu) = onnx::Reshape(%0, %9)\n # ...\n def test_shape_value_map(self):\n class RSoftMax(torch.nn.Module):\n def __init__(self, radix, cardinality):\n super().__init__()\n self.radix = radix\n self.cardinality = cardinality\n\n def forward(self, x):\n batch = x.size(0)\n x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)\n x = F.softmax(x, dim=1)\n x = x.reshape(batch, -1)\n return x\n\n radix = 2\n cardinality = 1\n x = torch.randn(10, 1, 128, 1)\n self.assertONNX(\n RSoftMax(radix, cardinality),\n (x,),\n input_names=[\"x\"],\n dynamic_axes={\"x\": {0: \"dim_0\"}},\n )\n\n\nif __name__ == \"__main__\":\n no_onnx_dep_flag = \"--no-onnx\"\n _onnx_dep = no_onnx_dep_flag not in common.UNITTEST_ARGS\n if no_onnx_dep_flag in common.UNITTEST_ARGS:\n 
common.UNITTEST_ARGS.remove(no_onnx_dep_flag)\n onnx_test_flag = \"--produce-onnx-test-data\"\n _onnx_test = onnx_test_flag in common.UNITTEST_ARGS\n if onnx_test_flag in common.UNITTEST_ARGS:\n common.UNITTEST_ARGS.remove(onnx_test_flag)\n if _onnx_test:\n _onnx_dep = True\n import test_onnx_common\n\n for d in glob.glob(\n os.path.join(test_onnx_common.pytorch_operator_dir, \"test_operator_*\")\n ):\n shutil.rmtree(d)\n run_tests()\n",
"import torch\n\nfrom torch.utils._pytree import tree_map, tree_flatten\nfrom functools import partial\nfrom torch.fx.operator_schemas import normalize_function\nfrom torch.utils._mode_utils import no_dispatch\nfrom torch._subclasses.meta_utils import MetaConverter\nfrom typing import Union, Callable\nfrom torch._ops import OpOverload\nfrom torch.overrides import TorchFunctionMode\nfrom torch.utils._python_dispatch import TorchDispatchMode, enable_torch_dispatch_mode\nimport weakref\nimport functools\nimport itertools\nfrom dataclasses import dataclass\n\n\naten = torch.ops.aten\n\n\n@dataclass\nclass UnsupportedFakeTensorException(RuntimeError):\n reason: str\n\n@dataclass\nclass DynamicOutputShapeException(RuntimeError):\n func: OpOverload\n\n\n_device_not_kwarg_ops = (\n aten._resize_output_.default,\n aten.nested_tensor.default,\n aten.pin_memory.default,\n aten.is_pinned.default,\n aten.to.device,\n aten.to.prim_Device,\n aten._pin_memory.default,\n aten._resize_output.functional,\n aten._resize_output.out,\n)\n\n# this op is never actually used\n_non_kwarg_device_constructors = (torch.ops.aten._list_to_tensor,)\n\n\ndef contains_tensor_types(type):\n tensor_type = torch._C.TensorType.get()\n return type.isSubtypeOf(tensor_type) or any(\n contains_tensor_types(e) for e in type.containedTypes()\n )\n\n\n_like_tensor_constructors = (\n aten.empty_like.default,\n aten.full_like.default,\n aten.ones_like.default,\n aten.rand_like.default,\n aten.randn_like.default,\n aten.randint_like.default,\n aten.randint_like.low_dtype,\n aten.randn_like.default,\n aten.zeros_like.default,\n aten.new_empty.default,\n aten.new_empty_strided.default,\n aten.new_full.default,\n aten.new_zeros.default,\n aten.new_ones.default,\n)\n\n\[email protected]_cache(None)\ndef _is_tensor_constructor(func: OpOverload):\n assert isinstance(func, OpOverload)\n schema = func._schema\n if any(contains_tensor_types(arg.type) for arg in schema.arguments):\n return False\n # TODO: no real reason to restrict multiple outputs\n return (\n len(schema.returns) == 1 and schema.returns[0].type is torch._C.TensorType.get()\n )\n\n\n# Similar to `MetaConverter`, this is a class for converting\n# multiple tensors into fake tensors which share the same view/storage\n# structure. 
Like `MetaConverter`, it will keep alive all\n# tensors that are converted to FakeTensors.\nclass FakeTensorConverter(object):\n tensor_memo: weakref.WeakValueDictionary\n meta_converter: MetaConverter\n\n def __init__(self):\n # FakeTensors store the FakeTensorMode which in turn stores a\n # FakeTensor, so we need to hold a weak reference to the FakeTensor\n # otherwise we would induce a circular reference\n self.tensor_memo = weakref.WeakValueDictionary()\n self.meta_converter = MetaConverter()\n\n def _get_memo(self, t):\n if t in self.tensor_memo:\n out = self.tensor_memo[t]\n out._fix_weakref()\n return out\n return None\n\n def from_real_tensor(self, fake_mode, t):\n maybe_memo = self._get_memo(t)\n if maybe_memo is not None:\n return maybe_memo\n existing_device = t.device\n # not yet supported in metatensors\n if t.is_complex():\n raise UnsupportedFakeTensorException(\"complex nyi in meta tensors\")\n if t.is_sparse:\n raise UnsupportedFakeTensorException(\"sparse nyi in meta tensors\")\n if t.is_quantized:\n raise UnsupportedFakeTensorException(\"quantized nyi in meta tensors\")\n with no_dispatch():\n out = FakeTensor(fake_mode, self.meta_converter(t), existing_device)\n if type(t) is torch.nn.Parameter:\n out = torch.nn.Parameter(out, requires_grad=out.requires_grad) # type: ignore[assignment]\n self.tensor_memo[t] = out\n return out\n\n def from_meta_and_device(self, fake_mode, t, device):\n maybe_memo = self._get_memo(t)\n if maybe_memo is not None:\n return maybe_memo\n out = FakeTensor(fake_mode, t, device)\n self.tensor_memo[t] = out\n return out\n\n def __call__(self, fake_mode, t, device=None):\n assert t.device.type != \"meta\" or device is not None\n if t.device.type != \"meta\":\n return self.from_real_tensor(fake_mode, t)\n else:\n return self.from_meta_and_device(fake_mode, t, device)\n\n\nop_implementations = []\n\n\ndef register_op_impl(run_impl_check: Union[Callable[[OpOverload], bool], OpOverload]):\n def impl_decorator(op_impl):\n global op_implementations\n if isinstance(run_impl_check, OpOverload):\n op_implementations.append((lambda func: func == run_impl_check, op_impl))\n else:\n op_implementations.append((run_impl_check, op_impl))\n\n return op_impl\n\n return impl_decorator\n\n@register_op_impl(lambda func: (_is_tensor_constructor(func) or func in _like_tensor_constructors))\ndef contructors(fake_mode, func, *args, **kwargs):\n assert func not in _non_kwarg_device_constructors\n _, new_kwargs = normalize_function(\n func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True\n )\n if func in _like_tensor_constructors:\n default_device = new_kwargs[\"input\"].device\n # TODO: file issue\n args = (new_kwargs.pop(\"input\"),)\n else:\n # cpu is default device if none is specified\n default_device = torch.device(\"cpu\")\n args = ()\n out_device = new_kwargs.pop(\"device\", None)\n out_device = out_device if out_device is not None else default_device\n new_kwargs[\"device\"] = torch.device(\"meta\")\n r = func(*args, **new_kwargs)\n return FakeTensor(fake_mode, r, out_device)\n\n@register_op_impl(lambda func: func in (aten.to.prim_Device, aten.to.device))\ndef non_kwarg_to(fake_mode, func, *args, **kwargs):\n _, new_kwargs = normalize_function(\n func, args, kwargs, normalize_to_only_use_kwargs=True\n )\n input_device = new_kwargs[\"device\"]\n out_device = input_device if input_device else new_kwargs[\"input\"].device\n new_kwargs[\"device\"] = torch.device(\"meta\")\n r = func(*args, **new_kwargs)\n return fake_mode.fake_tensor_converter(fake_mode, r, 
out_device)\n\n\n# Don't default to default device handling,\n# since the device of `the_template` is ignored\n@register_op_impl(aten.resize_as_.default)\ndef resize_as_(fake_mode, func, *args, **kwargs):\n return func(*args, **kwargs)\n\n\n# _to_copy fails when run with FakeTensors to cuda device\n# TODO: debug\n@register_op_impl(torch.ops.aten._to_copy.default)\ndef to_copy(fake_mode, func, *args, **kwargs):\n _, new_kwargs = normalize_function(\n func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True\n )\n\n input_device = new_kwargs.pop(\"device\", None)\n out_device = input_device if input_device else new_kwargs[\"input\"].device\n with no_dispatch():\n input = new_kwargs.pop(\"input\").to(\"meta\")\n return FakeTensor(\n fake_mode, torch.ops.aten._to_copy(input, **new_kwargs), out_device\n )\n\n@register_op_impl(torch.ops.aten.clone.default)\ndef clone(fake_mode, func, input, memory_format=None):\n out_device = input.device\n with no_dispatch():\n out = torch.ops.aten._to_copy(input.to(\"meta\"), memory_format=memory_format)\n return FakeTensor(fake_mode, out, out_device)\n\n# index.Tensor is data-dependent in only some conditions\n@register_op_impl(lambda func: torch.Tag.dynamic_output_shape in func.tags # type: ignore[attr-defined]\n and func != aten.index.Tensor)\ndef data_dep_op(fake_mode, func, *args, **kwargs):\n raise DynamicOutputShapeException(func)\n\n# Bool Indices get Expanded as Masks\n# See: IndexingUtils.h:expandTensors\ndef check_no_bool_index_tensors(func, self, indices):\n for index in indices:\n if index is not None and index.dtype in (torch.bool, torch.uint8):\n raise DynamicOutputShapeException(func)\n\n# Meta tensors give you the ability to run PyTorch code without having to\n# actually do computation through tensors allocated on a `meta` device.\n# Because the device is `meta`, meta tensors do not model device propagation.\n# FakeTensor extends MetaTensors to also carry an additional `fake_device`\n# which tracks devices that would have been used.\n\n\nclass FakeTensor(torch.Tensor):\n fake_device: torch.device\n fake_mode: \"FakeTensorMode\"\n\n @staticmethod\n def __new__(cls, fake_mode, elem, device):\n return torch.Tensor._make_subclass(\n cls, elem, elem.requires_grad, dispatch_device=True\n )\n\n def __init__(self, fake_mode, elem, device: Union[torch.device, str]):\n # elem does not need to be recorded, because FakeTensor *is a* elem\n assert elem.device.type == \"meta\"\n device = device if isinstance(device, torch.device) else torch.device(device)\n assert device.type != \"meta\"\n self.fake_device = device\n self.fake_mode = fake_mode\n\n @staticmethod\n def from_tensor(t, fake_mode):\n existing_device = t.device\n return FakeTensor(fake_mode, t.to(device=\"meta\"), existing_device)\n\n # TODO: resolve error in default __repr__\n def __repr__(self):\n return f\"FakeTensor({self.fake_device}, {self.size()}, {self.dtype})\"\n\n def new(self, *args, **kwargs):\n # torch.Tensor.new does not go through the normal dispatcher pattern\n # so in order to use the same pattern as normal invocation of\n # returning meta device within the kernel we need to intercept\n # the call here\n out_device = self.fake_device\n if \"device\" in kwargs:\n kwarg_device = kwargs.pop(\"device\")\n out_device = kwarg_device if kwarg_device else out_device\n kwargs[\"device\"] = \"meta\"\n self.in_kernel_invocation = True\n try:\n with no_dispatch():\n meta_out = super().new(*args, **kwargs)\n finally:\n self.in_kernel_invocation = False\n\n with no_dispatch():\n return 
FakeTensor(self.fake_mode, meta_out, out_device)\n\n @classmethod\n def __torch_dispatch__(cls, func, types, args=(), kwargs=None):\n # need to handle here to avoid infinite recursion\n # see [in_kernel_invocation]\n if func == torch.ops.prim.device.default:\n assert len(args) == 1 and isinstance(args[0], FakeTensor)\n if args[0].fake_mode.in_kernel_invocation:\n return torch.device(\"meta\")\n else:\n return args[0].fake_device\n\n fake_mode = None\n for arg in itertools.chain(tree_flatten(args)[0], tree_flatten(kwargs)[0]):\n if isinstance(arg, FakeTensor):\n if fake_mode is None:\n fake_mode = arg.fake_mode\n else:\n assert fake_mode is arg.fake_mode, \"Mixing modes NYI\"\n\n with enable_torch_dispatch_mode(fake_mode):\n return func(*args, **kwargs)\n\n @staticmethod\n def _find_common_device(func, args, kwargs):\n # cpu - zero-dim tensors can be called in cuda kernels,\n # so overwrite the common_device if the only existing\n # device comes from a cpu zero-dim tensor\n common_device = None\n is_cpu_zero_dim = None\n\n def cpu_zero_dim(t):\n return t.device.type == \"cpu\" and t.dim() == 0\n\n def merge_devices(t):\n nonlocal common_device\n nonlocal is_cpu_zero_dim\n if not isinstance(t, FakeTensor):\n return\n\n if common_device is None:\n common_device = t.device\n is_cpu_zero_dim = cpu_zero_dim(t)\n return\n\n t_is_cpu_zero_dim = cpu_zero_dim(t)\n if t.device == common_device:\n if is_cpu_zero_dim:\n is_cpu_zero_dim = t_is_cpu_zero_dim\n return\n\n # mismatching devices !\n # if current tensor is cpu 0 dim, defer to existing device\n if t_is_cpu_zero_dim:\n return\n\n # current device is from cpu 0 dim tensor, overwrite\n if is_cpu_zero_dim:\n common_device = t.device\n is_cpu_zero_dim = t_is_cpu_zero_dim\n return\n\n # mismatching devices of non-zero dim tensors, throw\n # This might be valid behavior and needs to be explicitly modeled, e.g. reshape_as\n raise Exception(\n f\"Unhandled FakeTensor Device Propagation for {func}, found two different devices {common_device}, {t.device}\"\n )\n\n tree_map(merge_devices, args)\n tree_map(merge_devices, kwargs)\n\n assert common_device is not None, f\"Could not find common device for {func}\"\n\n return common_device\n\n __torch_function__ = torch._C._disabled_torch_function_impl\n\n\n# We keep one instantiation of `fake_tensor_converter` active\n# for the duration of `with torch_enable_mode(FakeTensorMode)`.\n# This allows accurate storage aliasing across invocation of\n# different operators. While this will keep all freshly allocated\n# tensors alive during `FakeTensorMode`, there will be no\n# new allocations of Tensors which have non-meta storage so\n# memory should not significantly increase.\n\n\nclass FakeTensorMode(TorchDispatchMode):\n def __init__(self, allow_cpu_fallback=True):\n self.allow_cpu_fallback = allow_cpu_fallback\n self.fake_tensor_converter = FakeTensorConverter()\n\n # [in_kernel_invocation]\n # when FakeTensor is invoked in user code, .device should return\n # the fake_device of the tensor so that code such as `if x.is_cuda`\n # or torch.zeros([10, 10], device=x.device) continues to execute as if\n # the FakeTensor were real. However, within kernel execution, we return\n # the `Meta` device because all computation within the kernels should\n # behave as if the Tensors are on meta devices. 
Kernels should allocate\n # new tensors on meta devices, and checks like `is_meta` should return true.\n # within python refs, we always return the real device by defining\n # the device property\n self.in_kernel_invocation = False\n\n def __torch_dispatch__(self, func, types, args=(), kwargs=None):\n kwargs = kwargs if kwargs else {}\n\n if func == torch.ops.prim.device.default:\n assert len(args) == 1 and isinstance(args[0], FakeTensor)\n if args[0].fake_mode.in_kernel_invocation:\n return torch.device(\"meta\")\n else:\n return args[0].fake_device\n\n # prims already wrap FakeTensor inputs to FakeTensor outputs\n # and do device logic, we don't need to do anything but run them\n if \"prims::\" in func._schema.name:\n with no_dispatch():\n return func(*args, **kwargs)\n\n with no_dispatch():\n # TODO: apply as no_dispatch decorator\n converter = self.fake_tensor_converter\n\n # this is generated from torch.tensor(), which does not use the\n # dispatcher, to allow wrapper subclasses to wrap the new tensor;\n # we need to handle it before error checking\n if func == torch.ops.aten.lift.default:\n assert (\n len(kwargs) == 0\n and len(args) == 1\n and type(args[0]) is torch.Tensor\n )\n with no_dispatch():\n return converter(self, args[0])\n\n def wrap(e, device=None):\n if isinstance(e, torch.Tensor) and not isinstance(e, FakeTensor):\n return converter(self, e, device)\n else:\n return e\n\n # if we are in the dispatch mode, we will enter this function even if the inputs\n # are not FakeTensors. For now, throw if there are any non-Fake Tensor inputs\n # and just support constructors. TODO: extend more broadly\n conversion_made = False\n\n def check_non_fake_tensor(x):\n nonlocal conversion_made\n conversion_made = conversion_made or (\n isinstance(x, torch.Tensor) and not isinstance(x, FakeTensor)\n )\n\n tree_map(check_non_fake_tensor, args)\n tree_map(check_non_fake_tensor, kwargs)\n\n if conversion_made:\n raise Exception(\n \"Invoking operators with non-Fake Tensor inputs in FakeTensorMode is not yet supported. \"\n f\"Please convert all Tensors to FakeTensors first. 
Found in {func}\"\n )\n\n for run_impl_check, op_impl in op_implementations:\n if run_impl_check(func):\n return op_impl(self, func, *args, **kwargs)\n\n if func == aten.index.Tensor:\n check_no_bool_index_tensors(func, *args, **kwargs)\n\n self.in_kernel_invocation = True\n try:\n r = func(*args, **kwargs)\n except NotImplementedError as not_implemented_error:\n if not self.allow_cpu_fallback:\n raise not_implemented_error\n r = run_cpu_fallback(func, args, kwargs, not_implemented_error)\n finally:\n self.in_kernel_invocation = False\n\n # TODO: handle non-kwarg devices\n assert func not in _device_not_kwarg_ops, f\"NYI: {func}\"\n\n # if device is specified, use that\n if kwargs.get(\"device\", None):\n return tree_map(partial(wrap, device=kwargs[\"device\"]), r)\n\n common_device = FakeTensor._find_common_device(func, args, kwargs)\n\n return tree_map(partial(wrap, device=common_device), r)\n\n def from_tensor(self, tensor):\n return self.fake_tensor_converter(self, tensor)\n\ndef run_cpu_fallback(func, args, kwargs, orig_not_implemented_exception):\n with no_dispatch():\n def to_cpu(e):\n if isinstance(e, FakeTensor):\n return torch.zeros_like(e, device=\"cpu\")\n return e\n\n try:\n args = tree_map(to_cpu, args)\n kwargs = tree_map(to_cpu, kwargs)\n\n r = func(*args, **kwargs)\n except Exception as new_exception:\n raise orig_not_implemented_exception from new_exception\n\n tensor_impls = set()\n storages = set()\n\n for e in tree_flatten((args, kwargs))[0]:\n if isinstance(e, torch.Tensor):\n tensor_impls.add(e)\n storages.add(e.storage()._cdata)\n\n # TODO: also check metadata change on inputs\n # proper aliasing/metadata relationship between outputs and inputs will\n # not be set up, bc of conversion to cpu, error on reused impls\n for e in tree_flatten(r)[0]:\n if e in tensor_impls or (\n isinstance(e, torch.Tensor) and e.storage()._cdata in storages\n ):\n raise orig_not_implemented_exception\n\n # we're only converting these to MetaTensors now, not Fake Tensors,\n # and the cpu inputs should be temporary. just convert outputs to meta\n # and continue\n return tree_map(MetaConverter(), r)\n\n\n# Just for use to allow copying a module to fake tensors,\n# does not apply elsewhere\nclass FakeCopyMode(TorchFunctionMode):\n def __init__(self, fake_mode):\n self.fake_mode = fake_mode\n\n def __torch_function__(self, func, types, args=(), kwargs=None):\n kwargs = kwargs if kwargs else {}\n\n # clone will get called in Parameter deepcopy\n if func == torch._C._TensorBase.clone:\n return func(self.fake_mode.from_tensor(args[0]), **kwargs)\n elif func == torch.Tensor.__deepcopy__:\n assert len(args) == 2 and len(kwargs) == 0\n tensor, memo = args\n\n if id(tensor) in memo:\n return memo[id(tensor)]\n\n out = self.fake_mode.from_tensor(tensor)\n memo[id(tensor)] = out\n return out\n else:\n with torch._C.DisableTorchFunction():\n return func(*args, **kwargs)\n"
] |
[
[
"torch.all",
"torch.nn.Sequential",
"torch.nn.utils.parametrize.is_parametrized",
"torch.ones",
"torch.zeros",
"torch.ao.sparsity.WeightNormSparsifier",
"torch.randn",
"torch.eye",
"torch.nn.Linear",
"torch.ao.sparsity.NearlyDiagonalSparsifier",
"torch.arange"
],
[
"torch.fx.passes.graph_manipulation.get_size_of_node",
"torch.fx._compatibility.compatibility",
"torch.fx.node.map_arg"
],
[
"torch.nn.BatchNorm1d",
"torch.zeros",
"torch.clone",
"torch.utils._pytree.tree_map",
"torch.utils._python_dispatch.enable_torch_dispatch_mode",
"torch.testing._internal.schema_check_mode.SchemaCheckMode",
"torch._C._is_alias_of",
"torch.rand",
"torch.linalg.multi_dot",
"torch.stft"
],
[
"torch.distributed.fsdp.FullyShardedDataParallel",
"torch.randn",
"torch.nn.Linear",
"torch.testing._internal.common_distributed.skip_if_lt_x_gpu",
"torch.device",
"torch.nn.ReLU",
"torch.distributed.fsdp.wrap.ParamExecOrderWrapPolicy",
"torch.testing._internal.common_utils.run_tests",
"torch.testing._internal.common_utils.parametrize",
"torch.testing._internal.common_utils.instantiate_parametrized_tests"
],
[
"torch.fmod",
"torch.randint",
"torch.max",
"torch.zeros",
"torch.nn.functional.dropout",
"torch.narrow",
"torch.rsqrt",
"torch.topk",
"torch.device",
"torch.nn.EmbeddingBag",
"torch.sqrt",
"torch.randn",
"torch.nn.SELU",
"torch.linalg.det",
"torch.ones_like",
"torch.empty_like",
"torch.full",
"torch.onnx.register_custom_op_symbolic",
"torch.min",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReflectionPad2d",
"torch.det",
"torch.onnx.export_to_pretty_string",
"torch.onnx.symbolic_helper.parse_args",
"torch.sum",
"torch.split",
"torch.addmm",
"torch.norm",
"torch.tensor",
"torch.nn.LogSigmoid",
"torch.rand",
"torch.nonzero",
"torch.nn.LogSoftmax",
"torch.zeros_like",
"torch.nn.LSTM",
"torch.nn.RReLU",
"torch.matmul",
"torch.onnx.unregister_custom_op_symbolic",
"torch.meshgrid",
"torch.cat",
"torch.nn.ELU",
"torch.nn.Embedding",
"torch.unique",
"torch.nn.functional.interpolate",
"torch.full_like",
"torch.testing._internal.common_utils.UNITTEST_ARGS.remove",
"torch.softmax",
"torch.ones",
"torch.add",
"torch.round",
"torch.nn.MaxPool1d",
"torch.std",
"torch.arange",
"torch.nn.ConvTranspose2d",
"torch.pixel_shuffle",
"torch.ne",
"torch.cumsum",
"torch.mean",
"torch.nn.functional.softmax",
"torch.onnx.symbolic_helper._get_tensor_sizes",
"torch.remainder",
"torch.onnx.symbolic_helper._get_tensor_dim_size",
"torch.tanh",
"torch.no_grad",
"torch.flatten",
"torch.onnx.export",
"torch.nn.CrossEntropyLoss",
"torch.prod",
"torch.nn.BatchNorm1d",
"torch.empty",
"torch.nn.PReLU",
"torch.nn.functional.gelu",
"torch.baddbmm",
"torch.nn.Hardtanh",
"torch.isnan",
"torch.nn.LayerNorm",
"torch.clamp",
"torch.argmax"
],
[
"torch.utils._mode_utils.no_dispatch",
"torch.nn.Parameter",
"torch.utils._pytree.tree_flatten",
"torch._C.DisableTorchFunction",
"torch._subclasses.meta_utils.MetaConverter",
"torch.Tensor._make_subclass",
"torch.fx.operator_schemas.normalize_function",
"torch.utils._pytree.tree_map",
"torch.utils._python_dispatch.enable_torch_dispatch_mode",
"torch._C.TensorType.get",
"torch.zeros_like",
"torch.ops.aten._to_copy",
"torch.device"
]
] |
switchablenorms/SwitchNorm_Detection
|
[
"ab6848667bc8976367fdacb4b8ebbaeefdc79bd6"
] |
[
"lib/core/test_engine.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n\"\"\"Test a Detectron network on an imdb (image database).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\nimport cv2\nimport datetime\nimport logging\nimport numpy as np\nimport os\nimport yaml\n\nimport torch\nimport torch.nn as nn\n\nfrom core.config import cfg\n# from core.rpn_generator import generate_rpn_on_dataset #TODO: for rpn only case\n# from core.rpn_generator import generate_rpn_on_range\nfrom core.test import im_detect_all\nfrom core.test import im_conv_body_only\nfrom datasets import task_evaluation\nfrom datasets.json_dataset import JsonDataset\nfrom modeling import model_builder\nimport nn as mynn\nfrom utils.detectron_weight_helper import load_detectron_weight\nimport utils.env as envu\nimport utils.net as net_utils\nimport utils.subprocess as subprocess_utils\nimport utils.vis as vis_utils\nfrom utils.io import save_object\nfrom utils.timer import Timer\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_eval_functions():\n # Determine which parent or child function should handle inference\n if cfg.MODEL.RPN_ONLY:\n raise NotImplementedError\n # child_func = generate_rpn_on_range\n # parent_func = generate_rpn_on_dataset\n else:\n # Generic case that handles all network types other than RPN-only nets\n # and RetinaNet\n child_func = test_net\n parent_func = test_net_on_dataset\n\n return parent_func, child_func\n\n\ndef get_inference_dataset(index, datasets, is_parent=True):\n assert is_parent or len(cfg.TEST.DATASETS) == 1, \\\n 'The child inference process can only work on a single dataset'\n\n dataset_name = datasets\n\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n assert is_parent or len(cfg.TEST.PROPOSAL_FILES) == 1, \\\n 'The child inference process can only work on a single proposal file'\n assert len(cfg.TEST.PROPOSAL_FILES) == len(cfg.TEST.DATASETS), \\\n 'If proposals are used, one proposal file must be specified for ' \\\n 'each dataset'\n proposal_file = cfg.TEST.PROPOSAL_FILES[index]\n else:\n proposal_file = None\n\n return dataset_name, proposal_file\n\n\ndef run_inference(\n args, ind_range=None,\n multi_gpu_testing=False, gpu_id=0,\n check_expected_results=False,\n tb_logger=None,\n cur_iter=-1):\n global tblogger\n global curiter\n tblogger = tb_logger\n curiter = cur_iter\n\n parent_func, child_func = get_eval_functions()\n is_parent = ind_range is None\n\n def result_getter():\n if is_parent:\n # Parent case:\n # In this case we're either running inference on the entire dataset in a\n # single process or (if multi_gpu_testing is True) using this process to\n # launch subprocesses that each run inference on a range of the dataset\n all_results = {}\n for i in range(len(cfg.TEST.DATASETS)):\n dataset_name, proposal_file = 
get_inference_dataset(i, cfg.TEST.DATASETS[i])\n output_dir = args.output_dir\n results = parent_func(\n args,\n dataset_name,\n proposal_file,\n output_dir,\n multi_gpu=multi_gpu_testing\n )\n all_results.update(results)\n\n return all_results\n else:\n # Subprocess child case:\n # In this case test_net was called via subprocess.Popen to execute on a\n # range of inputs on a single dataset\n dataset_name, proposal_file = \\\n get_inference_dataset(0, cfg.TEST.DATASETS[0], is_parent=False)\n output_dir = args.output_dir\n return child_func(\n args,\n dataset_name,\n proposal_file,\n output_dir,\n ind_range=ind_range,\n gpu_id=gpu_id\n )\n\n all_results = result_getter()\n if check_expected_results and is_parent:\n task_evaluation.check_expected_results(\n all_results,\n atol=cfg.EXPECTED_RESULTS_ATOL,\n rtol=cfg.EXPECTED_RESULTS_RTOL\n )\n task_evaluation.log_copy_paste_friendly_results(all_results)\n\n return all_results\n\n\ndef test_net_on_dataset(\n args,\n dataset_name,\n proposal_file,\n output_dir,\n multi_gpu=False,\n gpu_id=0):\n \"\"\"Run inference on a dataset.\"\"\"\n dataset = JsonDataset(dataset_name)\n test_timer = Timer()\n test_timer.tic()\n if multi_gpu:\n num_images = len(dataset.get_roidb())\n all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(\n args, dataset_name, proposal_file, num_images, output_dir\n )\n else:\n all_boxes, all_segms, all_keyps = test_net(\n args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id\n )\n test_timer.toc()\n logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n results = task_evaluation.evaluate_all(\n dataset, all_boxes, all_segms, all_keyps, output_dir\n )\n return results\n\n\ndef multi_gpu_test_net_on_dataset(\n args, dataset_name, proposal_file, num_images, output_dir):\n \"\"\"Multi-gpu inference on a dataset.\"\"\"\n binary_dir = envu.get_runtime_dir()\n binary_ext = envu.get_py_bin_ext()\n binary = os.path.join(binary_dir, args.test_net_file + binary_ext)\n assert os.path.exists(binary), 'Binary \\'{}\\' not found'.format(binary)\n\n # Pass the target dataset and proposal file (if any) via the command line\n opts = ['TEST.DATASETS', '(\"{}\",)'.format(dataset_name)]\n if proposal_file:\n opts += ['TEST.PROPOSAL_FILES', '(\"{}\",)'.format(proposal_file)]\n\n # Run inference in parallel in subprocesses\n # Outputs will be a list of outputs from each subprocess, where the output\n # of each subprocess is the dictionary saved by test_net().\n outputs = subprocess_utils.process_in_parallel(\n 'detection', num_images, binary, output_dir,\n args.load_ckpt, args.load_detectron, opts\n )\n\n # Collate the results from each subprocess\n all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n for det_data in outputs:\n all_boxes_batch = det_data['all_boxes']\n all_segms_batch = det_data['all_segms']\n all_keyps_batch = det_data['all_keyps']\n for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):\n all_boxes[cls_idx] += all_boxes_batch[cls_idx]\n all_segms[cls_idx] += all_segms_batch[cls_idx]\n all_keyps[cls_idx] += all_keyps_batch[cls_idx]\n det_file = os.path.join(output_dir, 'detections.pkl')\n cfg_yaml = yaml.dump(cfg)\n save_object(\n dict(\n all_boxes=all_boxes,\n all_segms=all_segms,\n all_keyps=all_keyps,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n\n return all_boxes, all_segms, all_keyps\n\n\ndef test_net(\n args,\n 
dataset_name,\n proposal_file,\n output_dir,\n model=None,\n ind_range=None,\n gpu_id=0):\n \"\"\"Run inference on all images in a dataset or over an index range of images\n in a dataset using a single GPU.\n \"\"\"\n assert not cfg.MODEL.RPN_ONLY, \\\n 'Use rpn_generate to generate proposals from RPN-only models'\n\n model = initialize_model_from_cfg(args, gpu_id=gpu_id)\n\n if tblogger is not None and gpu_id == 0:\n for name, param in model.named_parameters():\n if 'mean_weight' in name:\n softmax = nn.Softmax(0)\n weight = softmax(param).cpu().detach().numpy()\n tblogger.add_scalar('mean_weight/'+name+'/in', weight[0], curiter)\n tblogger.add_scalar('mean_weight/'+name+'/ln', weight[1], curiter)\n if len(weight) > 2:\n tblogger.add_scalar('mean_weight/'+name+'/bn', weight[2], curiter)\n elif 'var_weight' in name:\n softmax = nn.Softmax(0)\n weight = softmax(param).cpu().detach().numpy()\n tblogger.add_scalar('var_weight/'+name+'/in', weight[0], curiter)\n tblogger.add_scalar('var_weight/'+name+'/ln', weight[1], curiter)\n if len(weight) > 2:\n tblogger.add_scalar('var_weight/'+name+'/bn', weight[2], curiter)\n\n\n roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(\n dataset_name, proposal_file, ind_range\n )\n model.eval()\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES\n all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)\n timers = defaultdict(Timer)\n for i, entry in enumerate(roidb):\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n # The roidb may contain ground-truth rois (for example, if the roidb\n # comes from the training or val split). We only want to evaluate\n # detection on the *non*-ground-truth rois. We select only the rois\n # that have the gt_classes field set to 0, which means there's no\n # ground truth.\n box_proposals = entry['boxes'][entry['gt_classes'] == 0]\n if len(box_proposals) == 0:\n continue\n else:\n # Faster R-CNN type models generate proposals on-the-fly with an\n # in-network RPN; 1-stage models don't require proposals.\n box_proposals = None\n\n im = cv2.imread(entry['image'])\n cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(model, im, box_proposals, timers)\n\n extend_results(i, all_boxes, cls_boxes_i)\n if cls_segms_i is not None:\n extend_results(i, all_segms, cls_segms_i)\n if cls_keyps_i is not None:\n extend_results(i, all_keyps, cls_keyps_i)\n\n if i % 10 == 0: # Reduce log file size\n ave_total_time = np.sum([t.average_time for t in timers.values()])\n eta_seconds = ave_total_time * (num_images - i - 1)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n det_time = (\n timers['im_detect_bbox'].average_time +\n timers['im_detect_mask'].average_time +\n timers['im_detect_keypoints'].average_time\n )\n misc_time = (\n timers['misc_bbox'].average_time +\n timers['misc_mask'].average_time +\n timers['misc_keypoints'].average_time\n )\n logger.info(\n (\n 'im_detect: range [{:d}, {:d}] of {:d}: '\n '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'\n ).format(\n start_ind + 1, end_ind, total_num_images, start_ind + i + 1,\n start_ind + num_images, det_time, misc_time, eta\n )\n )\n\n if cfg.VIS:\n im_name = os.path.splitext(os.path.basename(entry['image']))[0]\n vis_utils.vis_one_image(\n im[:, :, ::-1],\n '{:d}_{:s}'.format(i, im_name),\n os.path.join(output_dir, 'vis'),\n cls_boxes_i,\n segms=cls_segms_i,\n keypoints=cls_keyps_i,\n thresh=cfg.VIS_TH,\n box_alpha=0.8,\n dataset=dataset,\n show_class=True\n )\n\n cfg_yaml = yaml.dump(cfg)\n if ind_range is not None:\n det_name = 
'detection_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'detections.pkl'\n det_file = os.path.join(output_dir, det_name)\n save_object(\n dict(\n all_boxes=all_boxes,\n all_segms=all_segms,\n all_keyps=all_keyps,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n return all_boxes, all_segms, all_keyps\n\n\ndef initialize_model_from_cfg(args, gpu_id=0):\n \"\"\"Initialize a model from the global cfg. Loads test-time weights and\n set to evaluation mode.\n \"\"\"\n model = model_builder.Generalized_RCNN()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_detectron_weight(model, args.load_detectron)\n\n model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)\n\n return model\n\n\ndef get_roidb_and_dataset(dataset_name, proposal_file, ind_range):\n \"\"\"Get the roidb for the dataset specified in the global cfg. Optionally\n restrict it to a range of indices if ind_range is a pair of integers.\n \"\"\"\n dataset = JsonDataset(dataset_name)\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n assert proposal_file, 'No proposal file given'\n roidb = dataset.get_roidb(\n proposal_file=proposal_file,\n proposal_limit=cfg.TEST.PROPOSAL_LIMIT\n )\n else:\n roidb = dataset.get_roidb()\n\n if ind_range is not None:\n total_num_images = len(roidb)\n start, end = ind_range\n roidb = roidb[start:end]\n else:\n start = 0\n end = len(roidb)\n total_num_images = end\n\n return roidb, dataset, start, end, total_num_images\n\n\ndef empty_results(num_classes, num_images):\n \"\"\"Return empty results lists for boxes, masks, and keypoints.\n Box detections are collected into:\n all_boxes[cls][image] = N x 5 array with columns (x1, y1, x2, y2, score)\n Instance mask predictions are collected into:\n all_segms[cls][image] = [...] list of COCO RLE encoded masks that are in\n 1:1 correspondence with the boxes in all_boxes[cls][image]\n Keypoint predictions are collected into:\n all_keyps[cls][image] = [...] list of keypoints results, each encoded as\n a 3D array (#rois, 4, #keypoints) with the 4 rows corresponding to\n [x, y, logit, prob] (See: utils.keypoints.heatmaps_to_keypoints).\n Keypoints are recorded for person (cls = 1); they are in 1:1\n correspondence with the boxes in all_boxes[cls][image].\n \"\"\"\n # Note: do not be tempted to use [[] * N], which gives N references to the\n # *same* empty list.\n all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n all_segms = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n all_keyps = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n return all_boxes, all_segms, all_keyps\n\n\ndef extend_results(index, all_res, im_res):\n \"\"\"Add results for an image to the set of all results at the specified\n index.\n \"\"\"\n # Skip cls_idx 0 (__background__)\n for cls_idx in range(1, len(im_res)):\n all_res[cls_idx][index] = im_res[cls_idx]\n"
] |
[
[
"torch.nn.Softmax",
"torch.load"
]
] |
NeoBert/liudengfeng-zipline
|
[
"dd436fa066a1a9718f676fa161fda32bbbf0f5d9"
] |
[
"zipline/examples/pairtrade.py"
] |
[
"#!/usr/bin/env python\n#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logbook\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\nfrom datetime import datetime\nimport pytz\n\nfrom zipline.algorithm import TradingAlgorithm\nfrom zipline.transforms import batch_transform\nfrom zipline.utils.factory import load_from_yahoo\n\n\n@batch_transform\ndef ols_transform(data, sid1, sid2):\n \"\"\"Computes regression coefficient (slope and intercept)\n via Ordinary Least Squares between two SIDs.\n \"\"\"\n p0 = data.price[sid1]\n p1 = sm.add_constant(data.price[sid2], prepend=True)\n slope, intercept = sm.OLS(p0, p1).fit().params\n\n return slope, intercept\n\n\nclass Pairtrade(TradingAlgorithm):\n \"\"\"Pairtrading relies on cointegration of two stocks.\n\n The expectation is that once the two stocks drifted apart\n (i.e. there is spread), they will eventually revert again. Thus,\n if we short the upward drifting stock and long the downward\n drifting stock (in short, we buy the spread) once the spread\n widened we can sell the spread with profit once they converged\n again. A nice property of this algorithm is that we enter the\n market in a neutral position.\n\n This specific algorithm tries to exploit the cointegration of\n Pepsi and Coca Cola by estimating the correlation between the\n two. Divergence of the spread is evaluated by z-scoring.\n \"\"\"\n\n def initialize(self, window_length=100):\n self.spreads = []\n self.invested = 0\n self.window_length = window_length\n self.ols_transform = ols_transform(refresh_period=self.window_length,\n window_length=self.window_length)\n self.PEP = self.symbol('PEP')\n self.KO = self.symbol('KO')\n\n def handle_data(self, data):\n ######################################################\n # 1. Compute regression coefficients between PEP and KO\n params = self.ols_transform.handle_data(data, self.PEP, self.KO)\n if params is None:\n return\n intercept, slope = params\n\n ######################################################\n # 2. Compute spread and zscore\n zscore = self.compute_zscore(data, slope, intercept)\n self.record(zscores=zscore)\n\n ######################################################\n # 3. Place orders\n self.place_orders(data, zscore)\n\n def compute_zscore(self, data, slope, intercept):\n \"\"\"1. Compute the spread given slope and intercept.\n 2. 
zscore the spread.\n \"\"\"\n spread = (data[self.PEP].price -\n (slope * data[self.KO].price + intercept))\n self.spreads.append(spread)\n spread_wind = self.spreads[-self.window_length:]\n zscore = (spread - np.mean(spread_wind)) / np.std(spread_wind)\n return zscore\n\n def place_orders(self, data, zscore):\n \"\"\"Buy spread if zscore is > 2, sell if zscore < .5.\n \"\"\"\n if zscore >= 2.0 and not self.invested:\n self.order(self.PEP, int(100 / data[self.PEP].price))\n self.order(self.KO, -int(100 / data[self.KO].price))\n self.invested = True\n elif zscore <= -2.0 and not self.invested:\n self.order(self.PEP, -int(100 / data[self.PEP].price))\n self.order(self.KO, int(100 / data[self.KO].price))\n self.invested = True\n elif abs(zscore) < .5 and self.invested:\n self.sell_spread()\n self.invested = False\n\n def sell_spread(self):\n \"\"\"\n decrease exposure, regardless of position long/short.\n buy for a short position, sell for a long.\n \"\"\"\n ko_amount = self.portfolio.positions[self.KO].amount\n self.order(self.KO, -1 * ko_amount)\n pep_amount = self.portfolio.positions[self.PEP].amount\n self.order(self.PEP, -1 * pep_amount)\n\nif __name__ == '__main__':\n logbook.StderrHandler().push_application()\n start = datetime(2000, 1, 1, 0, 0, 0, 0, pytz.utc)\n end = datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)\n data = load_from_yahoo(stocks=['PEP', 'KO'], indexes={},\n start=start, end=end)\n\n pairtrade = Pairtrade()\n results = pairtrade.run(data)\n data['spreads'] = np.nan\n\n ax1 = plt.subplot(211)\n # TODO Bugged - indices are out of bounds\n # data[[pairtrade.PEPsid, pairtrade.KOsid]].plot(ax=ax1)\n plt.ylabel('price')\n plt.setp(ax1.get_xticklabels(), visible=False)\n\n ax2 = plt.subplot(212, sharex=ax1)\n results.zscores.plot(ax=ax2, color='r')\n plt.ylabel('zscored spread')\n\n plt.gcf().set_size_inches(18, 8)\n"
] |
[
[
"matplotlib.pyplot.gcf",
"numpy.std",
"matplotlib.pyplot.subplot",
"numpy.mean",
"matplotlib.pyplot.ylabel"
]
] |
HSE-DynGraph-Research-team/DynGraphModelling
|
[
"890326f4bd7991ef88a7a79cd2c8a77541621423"
] |
[
"models/CAW/utils.py"
] |
[
"import numpy as np\nimport torch\nimport os\nimport random\n\n\nclass EarlyStopMonitor(object):\n def __init__(self, max_round=3, higher_better=True, tolerance=1e-3):\n self.max_round = max_round\n self.num_round = 0\n\n self.epoch_count = 0\n self.best_epoch = 0\n\n self.last_best = None\n self.higher_better = higher_better\n self.tolerance = tolerance\n\n def early_stop_check(self, curr_val):\n if not self.higher_better:\n curr_val *= -1\n if self.last_best is None:\n self.last_best = curr_val\n elif (curr_val - self.last_best) / np.abs(self.last_best) > self.tolerance:\n self.last_best = curr_val\n self.num_round = 0\n self.best_epoch = self.epoch_count\n else:\n self.num_round += 1\n self.epoch_count += 1\n return self.num_round >= self.max_round\n\n\nclass RandEdgeSampler(object):\n def __init__(self, src_list, dst_list):\n src_list = np.concatenate(src_list)\n dst_list = np.concatenate(dst_list)\n self.src_list = np.unique(src_list)\n self.dst_list = np.unique(dst_list)\n\n def sample(self, size):\n src_index = np.random.randint(0, len(self.src_list), size)\n dst_index = np.random.randint(0, len(self.dst_list), size)\n return self.src_list[src_index], self.dst_list[dst_index]\n\n\ndef set_random_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n\n\ndef process_sampling_numbers(num_neighbors, num_layers):\n num_neighbors = [int(n) for n in num_neighbors]\n if len(num_neighbors) == 1:\n num_neighbors = num_neighbors * num_layers\n else:\n num_layers = len(num_neighbors)\n return num_neighbors, num_layers\n"
] |
[
[
"numpy.abs",
"numpy.random.seed",
"numpy.unique",
"torch.manual_seed",
"numpy.concatenate",
"torch.cuda.manual_seed_all"
]
] |
xopclabs/random-rotation-sklearn
|
[
"41f624066cfb1830bf067f77da9d284c6e46f1a1"
] |
[
"rrsklearn/boosting.py"
] |
[
"import numpy as np\nfrom sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor\nfrom sklearn.ensemble._gb import BaseGradientBoosting\nfrom .tree import RRDecisionTreeRegressor\n\n\nclass RRBaseGradientBoosting(BaseGradientBoosting):\n \"\"\"Abstract base class for Random Rotation Gradient Boosting.\"\"\"\n\n def _fit_stage(self, i, X, y, raw_predictions, sample_weight, sample_mask,\n random_state, X_csc=None, X_csr=None):\n \"\"\"Fit another stage of ``_n_classes`` trees to the boosting model.\"\"\"\n\n assert sample_mask.dtype == bool\n loss = self.loss_\n original_y = y\n\n # Need to pass a copy of raw_predictions to negative_gradient()\n # because raw_predictions is partially updated at the end of the loop\n # in update_terminal_regions(), and gradients need to be evaluated at\n # iteration i - 1.\n raw_predictions_copy = raw_predictions.copy()\n\n for k in range(loss.K):\n if loss.is_multi_class:\n y = np.array(original_y == k, dtype=np.float64)\n\n residual = loss.negative_gradient(y, raw_predictions_copy, k=k,\n sample_weight=sample_weight)\n\n rs = random_state.randint(0, 2**32 - 1)\n # induce regression tree on residuals\n tree = RRDecisionTreeRegressor(\n criterion=self.criterion,\n splitter='best',\n max_depth=self.max_depth,\n min_samples_split=self.min_samples_split,\n min_samples_leaf=self.min_samples_leaf,\n min_weight_fraction_leaf=self.min_weight_fraction_leaf,\n min_impurity_decrease=self.min_impurity_decrease,\n min_impurity_split=self.min_impurity_split,\n max_features=self.max_features,\n max_leaf_nodes=self.max_leaf_nodes,\n random_state=rs,\n ccp_alpha=self.ccp_alpha)\n\n if self.subsample < 1.0:\n # no inplace multiplication!\n sample_weight = sample_weight * sample_mask.astype(np.float64)\n\n X = X_csr if X_csr is not None else X\n tree.fit(X, residual, sample_weight=sample_weight,\n check_input=False)\n\n # update tree leaves\n loss.update_terminal_regions(\n tree.tree_, X, y, residual, raw_predictions, sample_weight,\n sample_mask, learning_rate=self.learning_rate, k=k)\n\n # add tree to ensemble\n self.estimators_[i, k] = tree\n\n return raw_predictions\n\n\nclass RRGradientBoostingClassifier(RRBaseGradientBoosting, GradientBoostingClassifier):\n '''Mixes Random Rotation BaseGradientBoosting with GradientBoostingClassifier'''\n pass\n\n\nclass RRGradientBoostingRegressor(RRBaseGradientBoosting, GradientBoostingRegressor):\n '''Mixes Random Rotation BaseGradientBoosting with GradientBoostingRegressor'''\n pass\n"
] |
[
[
"numpy.array"
]
] |
DimitryRakhlei/BTECH
|
[
"fefe469bd7d1f4adbc70bdc57670e793ad4c31f6"
] |
[
"c8005/a1/src/avg.py"
] |
[
"import glob\nimport numpy as np\n#import matplotlib.pyplot as plt\n\nmt_files = glob.glob(\"../logs/mt_*.log\")\nmp_files = glob.glob(\"../logs/mp_*.log\")\n\nprint(mt_files)\nprint(mp_files)\n\nvalues = {}\nfor fn in mt_files:\n with open(fn, \"r\") as file:\n values[fn] = np.array([float(x.rstrip()) for x in file.readlines()])\n\n\nfor fn in mp_files:\n with open(fn, \"r\") as file:\n values[fn] = np.array([float(x.rstrip()) for x in file.readlines()])\n\n\nprint(values)\n\n\naverages = {}\nfor fn in mt_files:\n averages[fn] = np.mean(values[fn])\n\nfor fn in mp_files:\n averages[fn] = np.mean(values[fn])\n\n\n\nprint(averages)\n\n#plt.plot([44444, averages[mt_files[0]]], [9999999, averages[mt_files[1]]], 'k', lw=2)\n#plt.plot([44444, averages[mp_files[0]]], [9999999, averages[mp_files[1]]], lw=2)\n#plt.xlim(1000, 0.001)\n#plt.show()\n"
] |
[
[
"numpy.mean"
]
] |
tk1012/ion-kit
|
[
"d42be09dfd78fe415058723c186a76a84c699d45"
] |
[
"python/tests/test_all.py"
] |
[
"# https://github.com/fixstars/ion-csharp/blob/master/test/Test.cs\nfrom ionpy import Node, Builder, Buffer, PortMap, Port, Param, Type, TypeCode\nimport numpy as np # TODO: rewrite with pure python\n\n\ndef test_all():\n t = Type(code_=TypeCode.Int, bits_=32, lanes_=1)\n input_port = Port(key='input', type=t, dim=2)\n value41 = Param(key='v', val='41')\n\n builder = Builder()\n builder.set_target(target='host')\n builder.with_bb_module(path='libion-bb-test.so')\n # builder.with_bb_module(path='ion-bb-test.dll') # for Windows\n\n node = builder.add('test_inc_i32x2').set_port(ports=[ input_port, ]).set_param(params=[ value41, ])\n\n port_map = PortMap()\n\n sizes = (4, 4)\n ibuf = Buffer(type=t, sizes=sizes)\n obuf = Buffer(type=t, sizes=sizes)\n\n idata = np.full((4*4, ), fill_value=1, dtype=np.int32)\n odata = np.full((4*4, ), fill_value=0, dtype=np.int32)\n\n idata_bytes = idata.tobytes(order='C')\n odata_bytes = odata.tobytes(order='C')\n\n ibuf.write(data=idata_bytes)\n obuf.write(data=odata_bytes)\n\n port_map.set_buffer(port=input_port, buffer=ibuf)\n port_map.set_buffer(port=node.get_port(key='output'), buffer=obuf)\n\n builder.run(port_map=port_map)\n\n obuf_bytes = obuf.read(num_data_bytes=len(odata_bytes))\n odata = np.frombuffer(obuf_bytes, dtype=np.int32)\n\n for i in range(4*4):\n assert odata[i] == 42\n"
] |
[
[
"numpy.frombuffer",
"numpy.full"
]
] |
CadQuery/PostMesh
|
[
"d68f44707166d6556042ed79b336c996d8ae52c5"
] |
[
"setup.py"
] |
[
"from setuptools import setup\nfrom setuptools import find_packages\nfrom distutils.command.clean import clean\nfrom distutils.extension import Extension\nfrom distutils.sysconfig import get_config_vars\nfrom Cython.Build import cythonize\nimport os, platform, sys, fnmatch\nimport numpy\n\n\ndef setup_package():\n\n # Get Platform/OS\n _os = sys.platform\n\n # Get the current directory\n _pwd_ = os.path.dirname(os.path.realpath('__file__'))\n _upwd_ = os.path.dirname(_pwd_)\n\n # Remove the \"-Wstrict-prototypes\" compiler option, which isn't valid for C++.\n cfg_vars = get_config_vars()\n for key, value in cfg_vars.items():\n if isinstance(value,str):\n cfg_vars[key] = value.replace(\"-Wstrict-prototypes\", \"\")\n\n # Suppress numpy deprecation warnings\n no_deprecated = (\"NPY_NO_DEPRECATED_API\",None)\n\n sourcefiles = [\n os.path.join(_pwd_,\"bindings\",\"PostMeshPy.pyx\"),\n os.path.join(_pwd_,\"src\",\"PostMeshBase.cpp\"),\n os.path.join(_pwd_,\"src\",\"PostMeshCurve.cpp\"),\n os.path.join(_pwd_,\"src\",\"PostMeshSurface.cpp\")\n ]\n\n\n # Set the compiler\n # Must be called as: \"python setup.py build_ext CXX=/usr/bin/g++\"\n args = sys.argv\n _cxx_specified = False\n if len(args) > 1:\n for counter, arg in enumerate(args):\n if \"CXX\" in arg:\n _cxx_specified = True\n _cxx_compiler = arg.split(\"=\")[-1]\n args.remove(arg)\n if _cxx_specified:\n os.environ[\"CC\"] = _cxx_compiler\n os.environ[\"CXX\"] = _cxx_compiler\n else:\n _cxx_compiler = get_config_vars()['CXX'].split(' ')[0]\n os.environ[\"CC\"] = _cxx_compiler\n os.environ[\"CXX\"] = _cxx_compiler\n\n\n # Compiler arguments\n if \"clang++\" in _cxx_compiler or (\"c++\" in _cxx_compiler and \"darwin\" in _os):\n compiler_args = [\"-O3\",\"-std=c++11\",\"-m64\",\"-march=native\",\"-mtune=native\",\"-ffp-contract=fast\",\n \"-ffast-math\",\"-flto\",\"-DNPY_NO_DEPRECATED_API\",\"-Wno-shorten-64-to-32\"]\n else:\n compiler_args = [\"-O3\",\"-std=c++11\",\"-m64\",\"-march=native\",\"-mtune=native\",\"-ffp-contract=fast\",\n \"-mfpmath=sse\",\"-ffast-math\",\"-ftree-vectorize\",\"-finline-functions\",\"-finline-limit=100000\",\n \"-funroll-loops\",\"-Wno-unused-function\",\"-flto\",\"-DNPY_NO_DEPRECATED_API\",\"-Wno-cpp\"]\n\n # if \"darwin\" in _os:\n # compiler_args.append(\"-stdlib=libstdc++\")\n\n\n eigen_include_path = \"/usr/local/include/eigen/\"\n oce_include_path = \"/usr/local/include/oce/\"\n\n\n # Link to OpenCascade runtime libraries\n # Search for all subdirectories under /usr/local/lib\n # Change the directory name if occ is elsewhere\n occ_dir = \"/usr/local/lib\"\n all_dir_libs = os.listdir(occ_dir)\n occ_libs = []\n for i in all_dir_libs:\n lib_suffix = i.split(\".\")[-1]\n if i[:4]==\"libT\" and (lib_suffix != \"a\" and lib_suffix != \"la\" and lib_suffix != \"0\"):\n if \"darwin\" in _os:\n occ_libs.append(i[3:-6])\n elif \"linux\" in _os:\n occ_libs.append(\":\"+i)\n\n found_oce = False\n for i in occ_libs:\n if \"TKernel\" in i:\n found_oce = True\n break\n\n\n if found_oce is False:\n if \"darwin\" in _os:\n version = next(os.walk(\"/usr/local/Cellar/oce/\"))[1][0]\n occ_dir = os.path.join(\"/usr/local/Cellar/oce\",version,\"lib\")\n oce_include_path = os.path.join(\"/usr/local/Cellar/oce\",version,\"include\",\"oce\")\n elif \"linux\" in _os:\n occ_dir = \"/usr/lib/x86_64-linux-gnu\"\n oce_include_path = \"/usr/include/oce/\"\n\n all_dir_libs = os.listdir(occ_dir)\n for i in all_dir_libs:\n lib_suffix = i.split(\".\")[-1]\n if i[:4]==\"libT\" and (lib_suffix != \"a\" and lib_suffix != \"la\" and lib_suffix 
!= \"0\"):\n occ_libs.append(\":\"+i)\n\n\n # Create extension module\n extensions = [\n Extension(\n name = \"PostMeshPy\",\n sources = sourcefiles,\n language=\"c++\",\n include_dirs = [_pwd_,\n _pwd_+\"/include/\",\n eigen_include_path,\n oce_include_path,\n numpy.get_include()],\n libraries= [\"stdc++\"] + occ_libs,\n library_dirs = [_pwd_, os.path.join(\"/usr\",\"local\",\"lib\")],\n extra_compile_args = compiler_args,\n define_macros=[no_deprecated],\n ),\n ]\n\n with open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n setup(\n ext_modules = cythonize(extensions),\n name = \"PostMeshPy\",\n version = \"1.6.1\",\n description = \"A Python wrapper for PostMesh - a high order curvilinear mesh generator based on OpenCascade\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Roman Poya\",\n author_email = \"[email protected]\",\n url = \"https://github.com/romeric/PostMesh\",\n license=\"MIT\",\n install_requires=[\n 'numpy>=1.9',\n 'cython>=0.23'],\n packages=find_packages(),\n include_package_data=True,\n package_data={'': ['bindings/*','src/*','include/*','example/*',\n '*.pyx', '*.pxd', '*.h', '*.hpp', '*.c', '*.cpp', 'Makefile']},\n extra_files = \"LICENSE.md\"\n )\n\n\nif __name__ == \"__main__\":\n setup_package()\n"
] |
[
[
"numpy.get_include"
]
] |
itsAbdulKhadar/Machine-Learning-with-Streamlit
|
[
"c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3",
"c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3",
"c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3",
"c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3",
"c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3",
"c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3",
"c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3",
"c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3"
] |
[
"venv/Lib/site-packages/pandas/tests/series/methods/test_replace.py",
"venv/Lib/site-packages/streamlit/bootstrap.py",
"venv/Lib/site-packages/pandas/tests/scalar/period/test_period.py",
"venv/Lib/site-packages/pandas/io/excel/_openpyxl.py",
"venv/Lib/site-packages/pandas/tests/indexes/period/test_period.py",
"venv/Lib/site-packages/pandas/tests/series/indexing/test_datetime.py",
"venv/Lib/site-packages/pandas/core/generic.py",
"venv/Lib/site-packages/pandas/tests/indexes/common.py"
] |
[
"import re\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\nclass TestSeriesReplace:\n def test_replace(self, datetime_series):\n N = 100\n ser = pd.Series(np.random.randn(N))\n ser[0:4] = np.nan\n ser[6:10] = 0\n\n # replace list with a single value\n return_value = ser.replace([np.nan], -1, inplace=True)\n assert return_value is None\n\n exp = ser.fillna(-1)\n tm.assert_series_equal(ser, exp)\n\n rs = ser.replace(0.0, np.nan)\n ser[ser == 0.0] = np.nan\n tm.assert_series_equal(rs, ser)\n\n ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)\n ser[:5] = np.nan\n ser[6:10] = \"foo\"\n ser[20:30] = \"bar\"\n\n # replace list with a single value\n rs = ser.replace([np.nan, \"foo\", \"bar\"], -1)\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -1).all()\n assert (rs[20:30] == -1).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values\n rs = ser.replace({np.nan: -1, \"foo\": -2, \"bar\": -3})\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -2).all()\n assert (rs[20:30] == -3).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, \"foo\", \"bar\"], [-1, -2, -3])\n tm.assert_series_equal(rs, rs2)\n\n # replace inplace\n return_value = ser.replace([np.nan, \"foo\", \"bar\"], -1, inplace=True)\n assert return_value is None\n\n assert (ser[:5] == -1).all()\n assert (ser[6:10] == -1).all()\n assert (ser[20:30] == -1).all()\n\n ser = pd.Series([np.nan, 0, np.inf])\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n ser = pd.Series([np.nan, 0, \"foo\", \"bar\", np.inf, None, pd.NaT])\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n filled = ser.copy()\n filled[4] = 0\n tm.assert_series_equal(ser.replace(np.inf, 0), filled)\n\n ser = pd.Series(datetime_series.index)\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n # malformed\n msg = r\"Replacement lists must match in length\\. 
Expecting 3 got 2\"\n with pytest.raises(ValueError, match=msg):\n ser.replace([1, 2, 3], [np.nan, 0])\n\n # make sure that we aren't just masking a TypeError because bools don't\n # implement indexing\n with pytest.raises(TypeError, match=\"Cannot compare types .+\"):\n ser.replace([1, 2], [np.nan, 0])\n\n ser = pd.Series([0, 1, 2, 3, 4])\n result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])\n tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))\n\n def test_replace_gh5319(self):\n # API change from 0.12?\n # GH 5319\n ser = pd.Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace([np.nan])\n tm.assert_series_equal(result, expected)\n\n ser = pd.Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace(np.nan)\n tm.assert_series_equal(result, expected)\n # GH 5797\n ser = pd.Series(pd.date_range(\"20130101\", periods=5))\n expected = ser.copy()\n expected.loc[2] = pd.Timestamp(\"20120101\")\n result = ser.replace({pd.Timestamp(\"20130103\"): pd.Timestamp(\"20120101\")})\n tm.assert_series_equal(result, expected)\n result = ser.replace(pd.Timestamp(\"20130103\"), pd.Timestamp(\"20120101\"))\n tm.assert_series_equal(result, expected)\n\n # GH 11792: Test with replacing NaT in a list with tz data\n ts = pd.Timestamp(\"2015/01/01\", tz=\"UTC\")\n s = pd.Series([pd.NaT, pd.Timestamp(\"2015/01/01\", tz=\"UTC\")])\n result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)\n expected = pd.Series([pd.Timestamp.min, ts], dtype=object)\n tm.assert_series_equal(expected, result)\n\n def test_replace_timedelta_td64(self):\n tdi = pd.timedelta_range(0, periods=5)\n ser = pd.Series(tdi)\n\n # Using a single dict argument means we go through replace_list\n result = ser.replace({ser[1]: ser[3]})\n\n expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])\n tm.assert_series_equal(result, expected)\n\n def test_replace_with_single_list(self):\n ser = pd.Series([0, 1, 2, 3, 4])\n result = ser.replace([1, 2, 3])\n tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))\n\n s = ser.copy()\n return_value = s.replace([1, 2, 3], inplace=True)\n assert return_value is None\n tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))\n\n # make sure things don't get corrupted when fillna call fails\n s = ser.copy()\n msg = (\n r\"Invalid fill method\\. Expecting pad \\(ffill\\) or backfill \"\n r\"\\(bfill\\)\\. 
Got crash_cymbal\"\n )\n with pytest.raises(ValueError, match=msg):\n return_value = s.replace([1, 2, 3], inplace=True, method=\"crash_cymbal\")\n assert return_value is None\n tm.assert_series_equal(s, ser)\n\n def test_replace_with_empty_list(self):\n # GH 21977\n s = pd.Series([[1], [2, 3], [], np.nan, [4]])\n expected = s\n result = s.replace([], np.nan)\n tm.assert_series_equal(result, expected)\n\n # GH 19266\n with pytest.raises(ValueError, match=\"cannot assign mismatch\"):\n s.replace({np.nan: []})\n with pytest.raises(ValueError, match=\"cannot assign mismatch\"):\n s.replace({np.nan: [\"dummy\", \"alt\"]})\n\n def test_replace_mixed_types(self):\n s = pd.Series(np.arange(5), dtype=\"int64\")\n\n def check_replace(to_rep, val, expected):\n sc = s.copy()\n r = s.replace(to_rep, val)\n return_value = sc.replace(to_rep, val, inplace=True)\n assert return_value is None\n tm.assert_series_equal(expected, r)\n tm.assert_series_equal(expected, sc)\n\n # MUST upcast to float\n e = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0])\n tr, v = [3], [3.0]\n check_replace(tr, v, e)\n\n # MUST upcast to float\n e = pd.Series([0, 1, 2, 3.5, 4])\n tr, v = [3], [3.5]\n check_replace(tr, v, e)\n\n # casts to object\n e = pd.Series([0, 1, 2, 3.5, \"a\"])\n tr, v = [3, 4], [3.5, \"a\"]\n check_replace(tr, v, e)\n\n # again casts to object\n e = pd.Series([0, 1, 2, 3.5, pd.Timestamp(\"20130101\")])\n tr, v = [3, 4], [3.5, pd.Timestamp(\"20130101\")]\n check_replace(tr, v, e)\n\n # casts to object\n e = pd.Series([0, 1, 2, 3.5, True], dtype=\"object\")\n tr, v = [3, 4], [3.5, True]\n check_replace(tr, v, e)\n\n # test an object with dates + floats + integers + strings\n dr = pd.Series(pd.date_range(\"1/1/2001\", \"1/10/2001\", freq=\"D\"))\n result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, \"a\"])\n expected = pd.Series([1.0, 2, \"a\"] + dr[3:].tolist(), dtype=object)\n tm.assert_series_equal(result, expected)\n\n def test_replace_bool_with_string_no_op(self):\n s = pd.Series([True, False, True])\n result = s.replace(\"fun\", \"in-the-sun\")\n tm.assert_series_equal(s, result)\n\n def test_replace_bool_with_string(self):\n # nonexistent elements\n s = pd.Series([True, False, True])\n result = s.replace(True, \"2u\")\n expected = pd.Series([\"2u\", False, \"2u\"])\n tm.assert_series_equal(expected, result)\n\n def test_replace_bool_with_bool(self):\n s = pd.Series([True, False, True])\n result = s.replace(True, False)\n expected = pd.Series([False] * len(s))\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_dict_with_bool_keys(self):\n s = pd.Series([True, False, True])\n with pytest.raises(TypeError, match=\"Cannot compare types .+\"):\n s.replace({\"asdf\": \"asdb\", True: \"yes\"})\n\n def test_replace2(self):\n N = 100\n ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)\n ser[:5] = np.nan\n ser[6:10] = \"foo\"\n ser[20:30] = \"bar\"\n\n # replace list with a single value\n rs = ser.replace([np.nan, \"foo\", \"bar\"], -1)\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -1).all()\n assert (rs[20:30] == -1).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values\n rs = ser.replace({np.nan: -1, \"foo\": -2, \"bar\": -3})\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -2).all()\n assert (rs[20:30] == -3).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, \"foo\", \"bar\"], [-1, -2, -3])\n tm.assert_series_equal(rs, rs2)\n\n # replace inplace\n 
return_value = ser.replace([np.nan, \"foo\", \"bar\"], -1, inplace=True)\n assert return_value is None\n assert (ser[:5] == -1).all()\n assert (ser[6:10] == -1).all()\n assert (ser[20:30] == -1).all()\n\n def test_replace_with_dictlike_and_string_dtype(self):\n # GH 32621\n s = pd.Series([\"one\", \"two\", np.nan], dtype=\"string\")\n expected = pd.Series([\"1\", \"2\", np.nan])\n result = s.replace({\"one\": \"1\", \"two\": \"2\"})\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_empty_dictlike(self):\n # GH 15289\n s = pd.Series(list(\"abcd\"))\n tm.assert_series_equal(s, s.replace(dict()))\n\n with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):\n empty_series = pd.Series([])\n tm.assert_series_equal(s, s.replace(empty_series))\n\n def test_replace_string_with_number(self):\n # GH 15743\n s = pd.Series([1, 2, 3])\n result = s.replace(\"2\", np.nan)\n expected = pd.Series([1, 2, 3])\n tm.assert_series_equal(expected, result)\n\n def test_replace_replacer_equals_replacement(self):\n # GH 20656\n # make sure all replacers are matching against original values\n s = pd.Series([\"a\", \"b\"])\n expected = pd.Series([\"b\", \"a\"])\n result = s.replace({\"a\": \"b\", \"b\": \"a\"})\n tm.assert_series_equal(expected, result)\n\n def test_replace_unicode_with_number(self):\n # GH 15743\n s = pd.Series([1, 2, 3])\n result = s.replace(\"2\", np.nan)\n expected = pd.Series([1, 2, 3])\n tm.assert_series_equal(expected, result)\n\n def test_replace_mixed_types_with_string(self):\n # Testing mixed\n s = pd.Series([1, 2, 3, \"4\", 4, 5])\n result = s.replace([2, \"4\"], np.nan)\n expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])\n tm.assert_series_equal(expected, result)\n\n @pytest.mark.parametrize(\n \"categorical, numeric\",\n [\n (pd.Categorical(\"A\", categories=[\"A\", \"B\"]), [1]),\n (pd.Categorical((\"A\",), categories=[\"A\", \"B\"]), [1]),\n (pd.Categorical((\"A\", \"B\"), categories=[\"A\", \"B\"]), [1, 2]),\n ],\n )\n def test_replace_categorical(self, categorical, numeric):\n # GH 24971\n # Do not check if dtypes are equal due to a known issue that\n # Categorical.replace sometimes coerces to object (GH 23305)\n s = pd.Series(categorical)\n result = s.replace({\"A\": 1, \"B\": 2})\n expected = pd.Series(numeric)\n tm.assert_series_equal(expected, result)\n\n def test_replace_categorical_single(self):\n # GH 26988\n dti = pd.date_range(\"2016-01-01\", periods=3, tz=\"US/Pacific\")\n s = pd.Series(dti)\n c = s.astype(\"category\")\n\n expected = c.copy()\n expected = expected.cat.add_categories(\"foo\")\n expected[2] = \"foo\"\n expected = expected.cat.remove_unused_categories()\n assert c[2] != \"foo\"\n\n result = c.replace(c[2], \"foo\")\n tm.assert_series_equal(expected, result)\n assert c[2] != \"foo\" # ensure non-inplace call does not alter original\n\n return_value = c.replace(c[2], \"foo\", inplace=True)\n assert return_value is None\n tm.assert_series_equal(expected, c)\n\n first_value = c[0]\n return_value = c.replace(c[1], c[0], inplace=True)\n assert return_value is None\n assert c[0] == c[1] == first_value # test replacing with existing value\n\n def test_replace_with_no_overflowerror(self):\n # GH 25616\n # casts to object without Exception from OverflowError\n s = pd.Series([0, 1, 2, 3, 4])\n result = s.replace([3], [\"100000000000000000000\"])\n expected = pd.Series([0, 1, 2, \"100000000000000000000\", 4])\n tm.assert_series_equal(result, expected)\n\n s = pd.Series([0, \"100000000000000000000\", \"100000000000000000001\"])\n 
result = s.replace([\"100000000000000000000\"], [1])\n expected = pd.Series([0, 1, \"100000000000000000001\"])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"ser, to_replace, exp\",\n [\n ([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]),\n ([\"1\", \"2\", \"3\"], {\"1\": \"2\", \"2\": \"3\", \"3\": \"4\"}, [\"2\", \"3\", \"4\"]),\n ],\n )\n def test_replace_commutative(self, ser, to_replace, exp):\n # GH 16051\n # DataFrame.replace() overwrites when values are non-numeric\n\n series = pd.Series(ser)\n\n expected = pd.Series(exp)\n result = series.replace(to_replace)\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"ser, exp\", [([1, 2, 3], [1, True, 3]), ([\"x\", 2, 3], [\"x\", True, 3])]\n )\n def test_replace_no_cast(self, ser, exp):\n # GH 9113\n # BUG: replace int64 dtype with bool coerces to int64\n\n series = pd.Series(ser)\n result = series.replace(2, True)\n expected = pd.Series(exp)\n\n tm.assert_series_equal(result, expected)\n\n def test_replace_invalid_to_replace(self):\n # GH 18634\n # API: replace() should raise an exception if invalid argument is given\n series = pd.Series([\"a\", \"b\", \"c \"])\n msg = (\n r\"Expecting 'to_replace' to be either a scalar, array-like, \"\n r\"dict or None, got invalid type.*\"\n )\n with pytest.raises(TypeError, match=msg):\n series.replace(lambda x: x.strip())\n\n def test_replace_only_one_dictlike_arg(self):\n # GH#33340\n\n ser = pd.Series([1, 2, \"A\", pd.Timestamp.now(), True])\n to_replace = {0: 1, 2: \"A\"}\n value = \"foo\"\n msg = \"Series.replace cannot use dict-like to_replace and non-None value\"\n with pytest.raises(ValueError, match=msg):\n ser.replace(to_replace, value)\n\n to_replace = 1\n value = {0: \"foo\", 2: \"bar\"}\n msg = \"Series.replace cannot use dict-value and non-None to_replace\"\n with pytest.raises(ValueError, match=msg):\n ser.replace(to_replace, value)\n\n def test_replace_extension_other(self):\n # https://github.com/pandas-dev/pandas/issues/34530\n ser = pd.Series(pd.array([1, 2, 3], dtype=\"Int64\"))\n ser.replace(\"\", \"\") # no exception\n\n def test_replace_with_compiled_regex(self):\n # https://github.com/pandas-dev/pandas/issues/35680\n s = pd.Series([\"a\", \"b\", \"c\"])\n regex = re.compile(\"^a$\")\n result = s.replace({regex: \"z\"}, regex=True)\n expected = pd.Series([\"z\", \"b\", \"c\"])\n tm.assert_series_equal(result, expected)\n",
"# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport signal\nimport sys\nfrom typing import Any, Dict\n\nimport click\nimport tornado.ioloop\nfrom streamlit.git_util import GitRepo, MIN_GIT_VERSION\n\nfrom streamlit import version\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import url_util\nfrom streamlit import env_util\nfrom streamlit import secrets\nfrom streamlit import util\nfrom streamlit.config import CONFIG_FILENAMES\nfrom streamlit.logger import get_logger\nfrom streamlit.report import Report\nfrom streamlit.secrets import SECRETS_FILE_LOC\nfrom streamlit.server.server import Server, server_address_is_unix_socket\nfrom streamlit.watcher.file_watcher import watch_file\nfrom streamlit.watcher.file_watcher import report_watchdog_availability\n\nLOGGER = get_logger(__name__)\n\n# Wait for 1 second before opening a browser. This gives old tabs a chance to\n# reconnect.\n# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.\nBROWSER_WAIT_TIMEOUT_SEC = 1\n\nNEW_VERSION_TEXT = \"\"\"\n %(new_version)s\n\n See what's new at https://discuss.streamlit.io/c/announcements\n\n Enter the following command to upgrade:\n %(prompt)s %(command)s\n\"\"\" % {\n \"new_version\": click.style(\n \"A new version of Streamlit is available.\", fg=\"blue\", bold=True\n ),\n \"prompt\": click.style(\"$\", fg=\"blue\"),\n \"command\": click.style(\"pip install streamlit --upgrade\", bold=True),\n}\n\n\ndef _set_up_signal_handler():\n LOGGER.debug(\"Setting up signal handler\")\n\n def signal_handler(signal_number, stack_frame):\n # The server will shut down its threads and stop the ioloop\n Server.get_current().stop()\n\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGINT, signal_handler)\n if sys.platform == \"win32\":\n signal.signal(signal.SIGBREAK, signal_handler)\n else:\n signal.signal(signal.SIGQUIT, signal_handler)\n\n\ndef _fix_sys_path(script_path):\n \"\"\"Add the script's folder to the sys path.\n\n Python normally does this automatically, but since we exec the script\n ourselves we need to do it instead.\n \"\"\"\n sys.path.insert(0, os.path.dirname(script_path))\n\n\ndef _fix_matplotlib_crash():\n \"\"\"Set Matplotlib backend to avoid a crash.\n\n The default Matplotlib backend crashes Python on OSX when run on a thread\n that's not the main thread, so here we set a safer backend as a fix.\n Users can always disable this behavior by setting the config\n runner.fixMatplotlib = false.\n\n This fix is OS-independent. We didn't see a good reason to make this\n Mac-only. Consistency within Streamlit seemed more important.\n \"\"\"\n if config.get_option(\"runner.fixMatplotlib\"):\n try:\n # TODO: a better option may be to set\n # os.environ[\"MPLBACKEND\"] = \"Agg\". We'd need to do this towards\n # the top of __init__.py, before importing anything that imports\n # pandas (which imports matplotlib). 
Alternately, we could set\n # this environment variable in a new entrypoint defined in\n # setup.py. Both of these introduce additional trickiness: they\n # need to run without consulting streamlit.config.get_option,\n # because this would import streamlit, and therefore matplotlib.\n import matplotlib\n\n matplotlib.use(\"Agg\")\n except ImportError:\n pass\n\n\ndef _fix_tornado_crash():\n \"\"\"Set default asyncio policy to be compatible with Tornado 6.\n\n Tornado 6 (at least) is not compatible with the default\n asyncio implementation on Windows. So here we\n pick the older SelectorEventLoopPolicy when the OS is Windows\n if the known-incompatible default policy is in use.\n\n This has to happen as early as possible to make it a low priority and\n overrideable\n\n See: https://github.com/tornadoweb/tornado/issues/2608\n\n FIXME: if/when tornado supports the defaults in asyncio,\n remove and bump tornado requirement for py38\n \"\"\"\n if env_util.IS_WINDOWS and sys.version_info >= (3, 8):\n import asyncio\n\n try:\n from asyncio import ( # type: ignore[attr-defined]\n WindowsProactorEventLoopPolicy,\n WindowsSelectorEventLoopPolicy,\n )\n except ImportError:\n pass\n # Not affected\n else:\n if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:\n # WindowsProactorEventLoopPolicy is not compatible with\n # Tornado 6 fallback to the pre-3.8 default of Selector\n asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n\n\ndef _fix_sys_argv(script_path, args):\n \"\"\"sys.argv needs to exclude streamlit arguments and parameters\n and be set to what a user's script may expect.\n \"\"\"\n import sys\n\n sys.argv = [script_path] + list(args)\n\n\ndef _on_server_start(server):\n _maybe_print_old_git_warning(server.script_path)\n _print_url(server.is_running_hello)\n report_watchdog_availability()\n _print_new_version_message()\n\n # Load secrets.toml if it exists. If the file doesn't exist, this\n # function will return without raising an exception. 
We catch any parse\n # errors and display them here.\n try:\n secrets.load_if_toml_exists()\n except BaseException as e:\n LOGGER.error(f\"Failed to load {SECRETS_FILE_LOC}\", exc_info=e)\n\n def maybe_open_browser():\n if config.get_option(\"server.headless\"):\n # Don't open browser when in headless mode.\n return\n\n if server.browser_is_connected:\n # Don't auto-open browser if there's already a browser connected.\n # This can happen if there's an old tab repeatedly trying to\n # connect, and it happens to success before we launch the browser.\n return\n\n if config.is_manually_set(\"browser.serverAddress\"):\n addr = config.get_option(\"browser.serverAddress\")\n elif config.is_manually_set(\"server.address\"):\n if server_address_is_unix_socket():\n # Don't open browser when server address is an unix socket\n return\n addr = config.get_option(\"server.address\")\n else:\n addr = \"localhost\"\n\n util.open_browser(Report.get_url(addr))\n\n # Schedule the browser to open using the IO Loop on the main thread, but\n # only if no other browser connects within 1s.\n ioloop = tornado.ioloop.IOLoop.current()\n ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)\n\n\ndef _fix_pydeck_mapbox_api_warning():\n \"\"\"Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception\"\"\"\n\n os.environ[\"MAPBOX_API_KEY\"] = config.get_option(\"mapbox.token\")\n\n\ndef _print_new_version_message():\n if version.should_show_new_version_notice():\n click.secho(NEW_VERSION_TEXT)\n\n\ndef _print_url(is_running_hello):\n if is_running_hello:\n title_message = \"Welcome to Streamlit. Check out our demo in your browser.\"\n else:\n title_message = \"You can now view your Streamlit app in your browser.\"\n\n named_urls = []\n\n if config.is_manually_set(\"browser.serverAddress\"):\n named_urls = [\n (\"URL\", Report.get_url(config.get_option(\"browser.serverAddress\")))\n ]\n\n elif (\n config.is_manually_set(\"server.address\") and not server_address_is_unix_socket()\n ):\n named_urls = [\n (\"URL\", Report.get_url(config.get_option(\"server.address\"))),\n ]\n\n elif config.get_option(\"server.headless\"):\n internal_ip = net_util.get_internal_ip()\n if internal_ip:\n named_urls.append((\"Network URL\", Report.get_url(internal_ip)))\n\n external_ip = net_util.get_external_ip()\n if external_ip:\n named_urls.append((\"External URL\", Report.get_url(external_ip)))\n\n else:\n named_urls = [\n (\"Local URL\", Report.get_url(\"localhost\")),\n ]\n\n internal_ip = net_util.get_internal_ip()\n if internal_ip:\n named_urls.append((\"Network URL\", Report.get_url(internal_ip)))\n\n click.secho(\"\")\n click.secho(\" %s\" % title_message, fg=\"blue\", bold=True)\n click.secho(\"\")\n\n for url_name, url in named_urls:\n url_util.print_url(url_name, url)\n\n click.secho(\"\")\n\n if is_running_hello:\n click.secho(\" Ready to create your own Python apps super quickly?\")\n click.secho(\" Head over to \", nl=False)\n click.secho(\"https://docs.streamlit.io\", bold=True)\n click.secho(\"\")\n click.secho(\" May you create awesome apps!\")\n click.secho(\"\")\n click.secho(\"\")\n\n\ndef _maybe_print_old_git_warning(script_path: str) -> None:\n \"\"\"If our script is running in a Git repo, and we're running a very old\n Git version, print a warning that Git integration will be unavailable.\n \"\"\"\n repo = GitRepo(script_path)\n if (\n not repo.is_valid()\n and repo.git_version is not None\n and repo.git_version < MIN_GIT_VERSION\n ):\n git_version_string = 
\".\".join(str(val) for val in repo.git_version)\n min_version_string = \".\".join(str(val) for val in MIN_GIT_VERSION)\n click.secho(\"\")\n click.secho(\" Git integration is disabled.\", fg=\"yellow\", bold=True)\n click.secho(\"\")\n click.secho(\n f\" Streamlit requires Git {min_version_string} or later, \"\n f\"but you have {git_version_string}.\",\n fg=\"yellow\",\n )\n click.secho(\n \" Git is used by Streamlit Sharing (https://streamlit.io/sharing).\",\n fg=\"yellow\",\n )\n click.secho(\" To enable this feature, please update Git.\", fg=\"yellow\")\n\n\ndef load_config_options(flag_options: Dict[str, Any]):\n \"\"\"Load config options from config.toml files, then overlay the ones set by\n flag_options.\n\n The \"streamlit run\" command supports passing Streamlit's config options\n as flags. This function reads through the config options set via flag,\n massages them, and passes them to get_config_options() so that they\n overwrite config option defaults and those loaded from config.toml files.\n\n Parameters\n ----------\n flag_options : Dict[str, Any]\n A dict of config options where the keys are the CLI flag version of the\n config option names.\n \"\"\"\n options_from_flags = {\n name.replace(\"_\", \".\"): val\n for name, val in flag_options.items()\n if val is not None\n }\n\n # Force a reparse of config files (if they exist). The result is cached\n # for future calls.\n config.get_config_options(force_reparse=True, options_from_flags=options_from_flags)\n\n\ndef _install_config_watchers(flag_options: Dict[str, Any]):\n def on_config_changed(_path):\n load_config_options(flag_options)\n\n for filename in CONFIG_FILENAMES:\n if os.path.exists(filename):\n watch_file(filename, on_config_changed)\n\n\ndef run(script_path, command_line, args, flag_options):\n \"\"\"Run a script in a separate thread and start a server for the app.\n\n This starts a blocking ioloop.\n\n Parameters\n ----------\n script_path : str\n command_line : str\n args : [str]\n flag_options : Dict[str, Any]\n \"\"\"\n _fix_sys_path(script_path)\n _fix_matplotlib_crash()\n _fix_tornado_crash()\n _fix_sys_argv(script_path, args)\n _fix_pydeck_mapbox_api_warning()\n _install_config_watchers(flag_options)\n\n # Install a signal handler that will shut down the ioloop\n # and close all our threads\n _set_up_signal_handler()\n\n ioloop = tornado.ioloop.IOLoop.current()\n\n # Create and start the server.\n server = Server(ioloop, script_path, command_line)\n server.start(_on_server_start)\n\n # (Must come after start(), because this starts a new thread and start()\n # may call sys.exit() which doesn't kill other threads.\n server.add_preheated_report_session()\n\n # Start the ioloop. This function will not return until the\n # server is shut down.\n ioloop.start()\n",
"from datetime import date, datetime, timedelta\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs import iNaT, period as libperiod\nfrom pandas._libs.tslibs.ccalendar import DAYS, MONTHS\nfrom pandas._libs.tslibs.parsing import DateParseError\nfrom pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG, IncompatibleFrequency\nfrom pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz\nfrom pandas.compat.numpy import np_datetime64_compat\n\nimport pandas as pd\nfrom pandas import NaT, Period, Timedelta, Timestamp, offsets\nimport pandas._testing as tm\n\n\nclass TestPeriodConstruction:\n def test_construction(self):\n i1 = Period(\"1/1/2005\", freq=\"M\")\n i2 = Period(\"Jan 2005\")\n\n assert i1 == i2\n\n i1 = Period(\"2005\", freq=\"A\")\n i2 = Period(\"2005\")\n i3 = Period(\"2005\", freq=\"a\")\n\n assert i1 == i2\n assert i1 == i3\n\n i4 = Period(\"2005\", freq=\"M\")\n i5 = Period(\"2005\", freq=\"m\")\n\n msg = r\"Input has different freq=M from Period\\(freq=A-DEC\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n i1 != i4\n assert i4 == i5\n\n i1 = Period.now(\"Q\")\n i2 = Period(datetime.now(), freq=\"Q\")\n i3 = Period.now(\"q\")\n\n assert i1 == i2\n assert i1 == i3\n\n i1 = Period(\"1982\", freq=\"min\")\n i2 = Period(\"1982\", freq=\"MIN\")\n assert i1 == i2\n\n i1 = Period(year=2005, month=3, day=1, freq=\"D\")\n i2 = Period(\"3/1/2005\", freq=\"D\")\n assert i1 == i2\n\n i3 = Period(year=2005, month=3, day=1, freq=\"d\")\n assert i1 == i3\n\n i1 = Period(\"2007-01-01 09:00:00.001\")\n expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq=\"L\")\n assert i1 == expected\n\n expected = Period(np_datetime64_compat(\"2007-01-01 09:00:00.001Z\"), freq=\"L\")\n assert i1 == expected\n\n i1 = Period(\"2007-01-01 09:00:00.00101\")\n expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq=\"U\")\n assert i1 == expected\n\n expected = Period(np_datetime64_compat(\"2007-01-01 09:00:00.00101Z\"), freq=\"U\")\n assert i1 == expected\n\n msg = \"Must supply freq for ordinal value\"\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=200701)\n\n msg = \"Invalid frequency: X\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2007-1-1\", freq=\"X\")\n\n # GH#34703 tuple freq disallowed\n with pytest.raises(TypeError, match=\"pass as a string instead\"):\n Period(\"1982\", freq=(\"Min\", 1))\n\n def test_construction_bday(self):\n\n # Biz day construction, roll forward if non-weekday\n i1 = Period(\"3/10/12\", freq=\"B\")\n i2 = Period(\"3/10/12\", freq=\"D\")\n assert i1 == i2.asfreq(\"B\")\n i2 = Period(\"3/11/12\", freq=\"D\")\n assert i1 == i2.asfreq(\"B\")\n i2 = Period(\"3/12/12\", freq=\"D\")\n assert i1 == i2.asfreq(\"B\")\n\n i3 = Period(\"3/10/12\", freq=\"b\")\n assert i1 == i3\n\n i1 = Period(year=2012, month=3, day=10, freq=\"B\")\n i2 = Period(\"3/12/12\", freq=\"B\")\n assert i1 == i2\n\n def test_construction_quarter(self):\n\n i1 = Period(year=2005, quarter=1, freq=\"Q\")\n i2 = Period(\"1/1/2005\", freq=\"Q\")\n assert i1 == i2\n\n i1 = Period(year=2005, quarter=3, freq=\"Q\")\n i2 = Period(\"9/1/2005\", freq=\"Q\")\n assert i1 == i2\n\n i1 = Period(\"2005Q1\")\n i2 = Period(year=2005, quarter=1, freq=\"Q\")\n i3 = Period(\"2005q1\")\n assert i1 == i2\n assert i1 == i3\n\n i1 = Period(\"05Q1\")\n assert i1 == i2\n lower = Period(\"05q1\")\n assert i1 == lower\n\n i1 = Period(\"1Q2005\")\n assert i1 == i2\n lower = Period(\"1q2005\")\n assert i1 == lower\n\n i1 = Period(\"1Q05\")\n assert i1 == i2\n 
lower = Period(\"1q05\")\n assert i1 == lower\n\n i1 = Period(\"4Q1984\")\n assert i1.year == 1984\n lower = Period(\"4q1984\")\n assert i1 == lower\n\n def test_construction_month(self):\n\n expected = Period(\"2007-01\", freq=\"M\")\n i1 = Period(\"200701\", freq=\"M\")\n assert i1 == expected\n\n i1 = Period(\"200701\", freq=\"M\")\n assert i1 == expected\n\n i1 = Period(200701, freq=\"M\")\n assert i1 == expected\n\n i1 = Period(ordinal=200701, freq=\"M\")\n assert i1.year == 18695\n\n i1 = Period(datetime(2007, 1, 1), freq=\"M\")\n i2 = Period(\"200701\", freq=\"M\")\n assert i1 == i2\n\n i1 = Period(date(2007, 1, 1), freq=\"M\")\n i2 = Period(datetime(2007, 1, 1), freq=\"M\")\n i3 = Period(np.datetime64(\"2007-01-01\"), freq=\"M\")\n i4 = Period(np_datetime64_compat(\"2007-01-01 00:00:00Z\"), freq=\"M\")\n i5 = Period(np_datetime64_compat(\"2007-01-01 00:00:00.000Z\"), freq=\"M\")\n assert i1 == i2\n assert i1 == i3\n assert i1 == i4\n assert i1 == i5\n\n def test_period_constructor_offsets(self):\n assert Period(\"1/1/2005\", freq=offsets.MonthEnd()) == Period(\n \"1/1/2005\", freq=\"M\"\n )\n assert Period(\"2005\", freq=offsets.YearEnd()) == Period(\"2005\", freq=\"A\")\n assert Period(\"2005\", freq=offsets.MonthEnd()) == Period(\"2005\", freq=\"M\")\n assert Period(\"3/10/12\", freq=offsets.BusinessDay()) == Period(\n \"3/10/12\", freq=\"B\"\n )\n assert Period(\"3/10/12\", freq=offsets.Day()) == Period(\"3/10/12\", freq=\"D\")\n\n assert Period(\n year=2005, quarter=1, freq=offsets.QuarterEnd(startingMonth=12)\n ) == Period(year=2005, quarter=1, freq=\"Q\")\n assert Period(\n year=2005, quarter=2, freq=offsets.QuarterEnd(startingMonth=12)\n ) == Period(year=2005, quarter=2, freq=\"Q\")\n\n assert Period(year=2005, month=3, day=1, freq=offsets.Day()) == Period(\n year=2005, month=3, day=1, freq=\"D\"\n )\n assert Period(year=2012, month=3, day=10, freq=offsets.BDay()) == Period(\n year=2012, month=3, day=10, freq=\"B\"\n )\n\n expected = Period(\"2005-03-01\", freq=\"3D\")\n assert Period(year=2005, month=3, day=1, freq=offsets.Day(3)) == expected\n assert Period(year=2005, month=3, day=1, freq=\"3D\") == expected\n\n assert Period(year=2012, month=3, day=10, freq=offsets.BDay(3)) == Period(\n year=2012, month=3, day=10, freq=\"3B\"\n )\n\n assert Period(200701, freq=offsets.MonthEnd()) == Period(200701, freq=\"M\")\n\n i1 = Period(ordinal=200701, freq=offsets.MonthEnd())\n i2 = Period(ordinal=200701, freq=\"M\")\n assert i1 == i2\n assert i1.year == 18695\n assert i2.year == 18695\n\n i1 = Period(datetime(2007, 1, 1), freq=\"M\")\n i2 = Period(\"200701\", freq=\"M\")\n assert i1 == i2\n\n i1 = Period(date(2007, 1, 1), freq=\"M\")\n i2 = Period(datetime(2007, 1, 1), freq=\"M\")\n i3 = Period(np.datetime64(\"2007-01-01\"), freq=\"M\")\n i4 = Period(np_datetime64_compat(\"2007-01-01 00:00:00Z\"), freq=\"M\")\n i5 = Period(np_datetime64_compat(\"2007-01-01 00:00:00.000Z\"), freq=\"M\")\n assert i1 == i2\n assert i1 == i3\n assert i1 == i4\n assert i1 == i5\n\n i1 = Period(\"2007-01-01 09:00:00.001\")\n expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq=\"L\")\n assert i1 == expected\n\n expected = Period(np_datetime64_compat(\"2007-01-01 09:00:00.001Z\"), freq=\"L\")\n assert i1 == expected\n\n i1 = Period(\"2007-01-01 09:00:00.00101\")\n expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq=\"U\")\n assert i1 == expected\n\n expected = Period(np_datetime64_compat(\"2007-01-01 09:00:00.00101Z\"), freq=\"U\")\n assert i1 == expected\n\n def 
test_invalid_arguments(self):\n msg = \"Must supply freq for datetime value\"\n with pytest.raises(ValueError, match=msg):\n Period(datetime.now())\n with pytest.raises(ValueError, match=msg):\n Period(datetime.now().date())\n\n msg = \"Value must be Period, string, integer, or datetime\"\n with pytest.raises(ValueError, match=msg):\n Period(1.6, freq=\"D\")\n msg = \"Ordinal must be an integer\"\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=1.6, freq=\"D\")\n msg = \"Only value or ordinal but not both should be given but not both\"\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=2, value=1, freq=\"D\")\n\n msg = \"If value is None, freq cannot be None\"\n with pytest.raises(ValueError, match=msg):\n Period(month=1)\n\n msg = \"Given date string not likely a datetime\"\n with pytest.raises(ValueError, match=msg):\n Period(\"-2000\", \"A\")\n msg = \"day is out of range for month\"\n with pytest.raises(DateParseError, match=msg):\n Period(\"0\", \"A\")\n msg = \"Unknown datetime string format, unable to parse\"\n with pytest.raises(DateParseError, match=msg):\n Period(\"1/1/-2000\", \"A\")\n\n def test_constructor_corner(self):\n expected = Period(\"2007-01\", freq=\"2M\")\n assert Period(year=2007, month=1, freq=\"2M\") == expected\n\n assert Period(None) is NaT\n\n p = Period(\"2007-01-01\", freq=\"D\")\n\n result = Period(p, freq=\"A\")\n exp = Period(\"2007\", freq=\"A\")\n assert result == exp\n\n def test_constructor_infer_freq(self):\n p = Period(\"2007-01-01\")\n assert p.freq == \"D\"\n\n p = Period(\"2007-01-01 07\")\n assert p.freq == \"H\"\n\n p = Period(\"2007-01-01 07:10\")\n assert p.freq == \"T\"\n\n p = Period(\"2007-01-01 07:10:15\")\n assert p.freq == \"S\"\n\n p = Period(\"2007-01-01 07:10:15.123\")\n assert p.freq == \"L\"\n\n p = Period(\"2007-01-01 07:10:15.123000\")\n assert p.freq == \"L\"\n\n p = Period(\"2007-01-01 07:10:15.123400\")\n assert p.freq == \"U\"\n\n def test_multiples(self):\n result1 = Period(\"1989\", freq=\"2A\")\n result2 = Period(\"1989\", freq=\"A\")\n assert result1.ordinal == result2.ordinal\n assert result1.freqstr == \"2A-DEC\"\n assert result2.freqstr == \"A-DEC\"\n assert result1.freq == offsets.YearEnd(2)\n assert result2.freq == offsets.YearEnd()\n\n assert (result1 + 1).ordinal == result1.ordinal + 2\n assert (1 + result1).ordinal == result1.ordinal + 2\n assert (result1 - 1).ordinal == result2.ordinal - 2\n assert (-1 + result1).ordinal == result2.ordinal - 2\n\n @pytest.mark.parametrize(\"month\", MONTHS)\n def test_period_cons_quarterly(self, month):\n # bugs in scikits.timeseries\n freq = f\"Q-{month}\"\n exp = Period(\"1989Q3\", freq=freq)\n assert \"1989Q3\" in str(exp)\n stamp = exp.to_timestamp(\"D\", how=\"end\")\n p = Period(stamp, freq=freq)\n assert p == exp\n\n stamp = exp.to_timestamp(\"3D\", how=\"end\")\n p = Period(stamp, freq=freq)\n assert p == exp\n\n @pytest.mark.parametrize(\"month\", MONTHS)\n def test_period_cons_annual(self, month):\n # bugs in scikits.timeseries\n freq = f\"A-{month}\"\n exp = Period(\"1989\", freq=freq)\n stamp = exp.to_timestamp(\"D\", how=\"end\") + timedelta(days=30)\n p = Period(stamp, freq=freq)\n\n assert p == exp + 1\n assert isinstance(p, Period)\n\n @pytest.mark.parametrize(\"day\", DAYS)\n @pytest.mark.parametrize(\"num\", range(10, 17))\n def test_period_cons_weekly(self, num, day):\n daystr = f\"2011-02-{num}\"\n freq = f\"W-{day}\"\n\n result = Period(daystr, freq=freq)\n expected = Period(daystr, freq=\"D\").asfreq(freq)\n assert result == expected\n 
assert isinstance(result, Period)\n\n def test_period_from_ordinal(self):\n p = Period(\"2011-01\", freq=\"M\")\n res = Period._from_ordinal(p.ordinal, freq=\"M\")\n assert p == res\n assert isinstance(res, Period)\n\n @pytest.mark.parametrize(\"freq\", [\"A\", \"M\", \"D\", \"H\"])\n def test_construct_from_nat_string_and_freq(self, freq):\n per = Period(\"NaT\", freq=freq)\n assert per is NaT\n\n per = Period(\"NaT\", freq=\"2\" + freq)\n assert per is NaT\n\n per = Period(\"NaT\", freq=\"3\" + freq)\n assert per is NaT\n\n def test_period_cons_nat(self):\n p = Period(\"nat\", freq=\"W-SUN\")\n assert p is NaT\n\n p = Period(iNaT, freq=\"D\")\n assert p is NaT\n\n p = Period(iNaT, freq=\"3D\")\n assert p is NaT\n\n p = Period(iNaT, freq=\"1D1H\")\n assert p is NaT\n\n p = Period(\"NaT\")\n assert p is NaT\n\n p = Period(iNaT)\n assert p is NaT\n\n def test_period_cons_mult(self):\n p1 = Period(\"2011-01\", freq=\"3M\")\n p2 = Period(\"2011-01\", freq=\"M\")\n assert p1.ordinal == p2.ordinal\n\n assert p1.freq == offsets.MonthEnd(3)\n assert p1.freqstr == \"3M\"\n\n assert p2.freq == offsets.MonthEnd()\n assert p2.freqstr == \"M\"\n\n result = p1 + 1\n assert result.ordinal == (p2 + 3).ordinal\n\n assert result.freq == p1.freq\n assert result.freqstr == \"3M\"\n\n result = p1 - 1\n assert result.ordinal == (p2 - 3).ordinal\n assert result.freq == p1.freq\n assert result.freqstr == \"3M\"\n\n msg = \"Frequency must be positive, because it represents span: -3M\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"-3M\")\n\n msg = \"Frequency must be positive, because it represents span: 0M\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"0M\")\n\n def test_period_cons_combined(self):\n p = [\n (\n Period(\"2011-01\", freq=\"1D1H\"),\n Period(\"2011-01\", freq=\"1H1D\"),\n Period(\"2011-01\", freq=\"H\"),\n ),\n (\n Period(ordinal=1, freq=\"1D1H\"),\n Period(ordinal=1, freq=\"1H1D\"),\n Period(ordinal=1, freq=\"H\"),\n ),\n ]\n\n for p1, p2, p3 in p:\n assert p1.ordinal == p3.ordinal\n assert p2.ordinal == p3.ordinal\n\n assert p1.freq == offsets.Hour(25)\n assert p1.freqstr == \"25H\"\n\n assert p2.freq == offsets.Hour(25)\n assert p2.freqstr == \"25H\"\n\n assert p3.freq == offsets.Hour()\n assert p3.freqstr == \"H\"\n\n result = p1 + 1\n assert result.ordinal == (p3 + 25).ordinal\n assert result.freq == p1.freq\n assert result.freqstr == \"25H\"\n\n result = p2 + 1\n assert result.ordinal == (p3 + 25).ordinal\n assert result.freq == p2.freq\n assert result.freqstr == \"25H\"\n\n result = p1 - 1\n assert result.ordinal == (p3 - 25).ordinal\n assert result.freq == p1.freq\n assert result.freqstr == \"25H\"\n\n result = p2 - 1\n assert result.ordinal == (p3 - 25).ordinal\n assert result.freq == p2.freq\n assert result.freqstr == \"25H\"\n\n msg = \"Frequency must be positive, because it represents span: -25H\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"-1D1H\")\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"-1H1D\")\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=1, freq=\"-1D1H\")\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=1, freq=\"-1H1D\")\n\n msg = \"Frequency must be positive, because it represents span: 0D\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"0D0H\")\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=1, freq=\"0D0H\")\n\n # You can only combine together day and intraday offsets\n msg = \"Invalid 
frequency: 1W1D\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"1W1D\")\n msg = \"Invalid frequency: 1D1W\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"1D1W\")\n\n @pytest.mark.parametrize(\"hour\", range(24))\n def test_period_large_ordinal(self, hour):\n # Issue #36430\n # Integer overflow for Period over the maximum timestamp\n p = pd.Period(ordinal=2562048 + hour, freq=\"1H\")\n assert p.hour == hour\n\n\nclass TestPeriodMethods:\n def test_round_trip(self):\n p = Period(\"2000Q1\")\n new_p = tm.round_trip_pickle(p)\n assert new_p == p\n\n def test_hash(self):\n assert hash(Period(\"2011-01\", freq=\"M\")) == hash(Period(\"2011-01\", freq=\"M\"))\n\n assert hash(Period(\"2011-01-01\", freq=\"D\")) != hash(Period(\"2011-01\", freq=\"M\"))\n\n assert hash(Period(\"2011-01\", freq=\"3M\")) != hash(Period(\"2011-01\", freq=\"2M\"))\n\n assert hash(Period(\"2011-01\", freq=\"M\")) != hash(Period(\"2011-02\", freq=\"M\"))\n\n # --------------------------------------------------------------\n # to_timestamp\n\n @pytest.mark.parametrize(\"tzstr\", [\"Europe/Brussels\", \"Asia/Tokyo\", \"US/Pacific\"])\n def test_to_timestamp_tz_arg(self, tzstr):\n # GH#34522 tz kwarg deprecated\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"M\").to_timestamp(tz=tzstr)\n exp = Timestamp(\"1/1/2005\", tz=\"UTC\").tz_convert(tzstr)\n exp_zone = pytz.timezone(tzstr).normalize(p)\n\n assert p == exp\n assert p.tz == exp_zone.tzinfo\n assert p.tz == exp.tz\n\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"3H\").to_timestamp(tz=tzstr)\n exp = Timestamp(\"1/1/2005\", tz=\"UTC\").tz_convert(tzstr)\n exp_zone = pytz.timezone(tzstr).normalize(p)\n\n assert p == exp\n assert p.tz == exp_zone.tzinfo\n assert p.tz == exp.tz\n\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"A\").to_timestamp(freq=\"A\", tz=tzstr)\n exp = Timestamp(\"31/12/2005\", tz=\"UTC\").tz_convert(tzstr)\n exp_zone = pytz.timezone(tzstr).normalize(p)\n\n assert p == exp\n assert p.tz == exp_zone.tzinfo\n assert p.tz == exp.tz\n\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"A\").to_timestamp(freq=\"3H\", tz=tzstr)\n exp = Timestamp(\"1/1/2005\", tz=\"UTC\").tz_convert(tzstr)\n exp_zone = pytz.timezone(tzstr).normalize(p)\n\n assert p == exp\n assert p.tz == exp_zone.tzinfo\n assert p.tz == exp.tz\n\n @pytest.mark.parametrize(\n \"tzstr\",\n [\"dateutil/Europe/Brussels\", \"dateutil/Asia/Tokyo\", \"dateutil/US/Pacific\"],\n )\n def test_to_timestamp_tz_arg_dateutil(self, tzstr):\n tz = maybe_get_tz(tzstr)\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"M\").to_timestamp(tz=tz)\n exp = Timestamp(\"1/1/2005\", tz=\"UTC\").tz_convert(tzstr)\n assert p == exp\n assert p.tz == dateutil_gettz(tzstr.split(\"/\", 1)[1])\n assert p.tz == exp.tz\n\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"M\").to_timestamp(freq=\"3H\", tz=tz)\n exp = Timestamp(\"1/1/2005\", tz=\"UTC\").tz_convert(tzstr)\n assert p == exp\n assert p.tz == dateutil_gettz(tzstr.split(\"/\", 1)[1])\n assert p.tz == exp.tz\n\n def test_to_timestamp_tz_arg_dateutil_from_string(self):\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"M\").to_timestamp(tz=\"dateutil/Europe/Brussels\")\n assert p.tz == dateutil_gettz(\"Europe/Brussels\")\n\n def test_to_timestamp_mult(self):\n p = 
Period(\"2011-01\", freq=\"M\")\n assert p.to_timestamp(how=\"S\") == Timestamp(\"2011-01-01\")\n expected = Timestamp(\"2011-02-01\") - Timedelta(1, \"ns\")\n assert p.to_timestamp(how=\"E\") == expected\n\n p = Period(\"2011-01\", freq=\"3M\")\n assert p.to_timestamp(how=\"S\") == Timestamp(\"2011-01-01\")\n expected = Timestamp(\"2011-04-01\") - Timedelta(1, \"ns\")\n assert p.to_timestamp(how=\"E\") == expected\n\n def test_to_timestamp(self):\n p = Period(\"1982\", freq=\"A\")\n start_ts = p.to_timestamp(how=\"S\")\n aliases = [\"s\", \"StarT\", \"BEGIn\"]\n for a in aliases:\n assert start_ts == p.to_timestamp(\"D\", how=a)\n # freq with mult should not affect to the result\n assert start_ts == p.to_timestamp(\"3D\", how=a)\n\n end_ts = p.to_timestamp(how=\"E\")\n aliases = [\"e\", \"end\", \"FINIsH\"]\n for a in aliases:\n assert end_ts == p.to_timestamp(\"D\", how=a)\n assert end_ts == p.to_timestamp(\"3D\", how=a)\n\n from_lst = [\"A\", \"Q\", \"M\", \"W\", \"B\", \"D\", \"H\", \"Min\", \"S\"]\n\n def _ex(p):\n if p.freq == \"B\":\n return p.start_time + Timedelta(days=1, nanoseconds=-1)\n return Timestamp((p + p.freq).start_time.value - 1)\n\n for i, fcode in enumerate(from_lst):\n p = Period(\"1982\", freq=fcode)\n result = p.to_timestamp().to_period(fcode)\n assert result == p\n\n assert p.start_time == p.to_timestamp(how=\"S\")\n\n assert p.end_time == _ex(p)\n\n # Frequency other than daily\n\n p = Period(\"1985\", freq=\"A\")\n\n result = p.to_timestamp(\"H\", how=\"end\")\n expected = Timestamp(1986, 1, 1) - Timedelta(1, \"ns\")\n assert result == expected\n result = p.to_timestamp(\"3H\", how=\"end\")\n assert result == expected\n\n result = p.to_timestamp(\"T\", how=\"end\")\n expected = Timestamp(1986, 1, 1) - Timedelta(1, \"ns\")\n assert result == expected\n result = p.to_timestamp(\"2T\", how=\"end\")\n assert result == expected\n\n result = p.to_timestamp(how=\"end\")\n expected = Timestamp(1986, 1, 1) - Timedelta(1, \"ns\")\n assert result == expected\n\n expected = datetime(1985, 1, 1)\n result = p.to_timestamp(\"H\", how=\"start\")\n assert result == expected\n result = p.to_timestamp(\"T\", how=\"start\")\n assert result == expected\n result = p.to_timestamp(\"S\", how=\"start\")\n assert result == expected\n result = p.to_timestamp(\"3H\", how=\"start\")\n assert result == expected\n result = p.to_timestamp(\"5S\", how=\"start\")\n assert result == expected\n\n def test_to_timestamp_business_end(self):\n per = pd.Period(\"1990-01-05\", \"B\") # Friday\n result = per.to_timestamp(\"B\", how=\"E\")\n\n expected = pd.Timestamp(\"1990-01-06\") - pd.Timedelta(nanoseconds=1)\n assert result == expected\n\n @pytest.mark.parametrize(\n \"ts, expected\",\n [\n (\"1970-01-01 00:00:00\", 0),\n (\"1970-01-01 00:00:00.000001\", 1),\n (\"1970-01-01 00:00:00.00001\", 10),\n (\"1970-01-01 00:00:00.499\", 499000),\n (\"1999-12-31 23:59:59.999\", 999000),\n (\"1999-12-31 23:59:59.999999\", 999999),\n (\"2050-12-31 23:59:59.5\", 500000),\n (\"2050-12-31 23:59:59.500001\", 500001),\n (\"2050-12-31 23:59:59.123456\", 123456),\n ],\n )\n @pytest.mark.parametrize(\"freq\", [None, \"us\", \"ns\"])\n def test_to_timestamp_microsecond(self, ts, expected, freq):\n # GH 24444\n result = Period(ts).to_timestamp(freq=freq).microsecond\n assert result == expected\n\n # --------------------------------------------------------------\n # Rendering: __repr__, strftime, etc\n\n def test_repr(self):\n p = Period(\"Jan-2000\")\n assert \"2000-01\" in repr(p)\n\n p = Period(\"2000-12-15\")\n assert 
\"2000-12-15\" in repr(p)\n\n def test_repr_nat(self):\n p = Period(\"nat\", freq=\"M\")\n assert repr(NaT) in repr(p)\n\n def test_millisecond_repr(self):\n p = Period(\"2000-01-01 12:15:02.123\")\n\n assert repr(p) == \"Period('2000-01-01 12:15:02.123', 'L')\"\n\n def test_microsecond_repr(self):\n p = Period(\"2000-01-01 12:15:02.123567\")\n\n assert repr(p) == \"Period('2000-01-01 12:15:02.123567', 'U')\"\n\n def test_strftime(self):\n # GH#3363\n p = Period(\"2000-1-1 12:34:12\", freq=\"S\")\n res = p.strftime(\"%Y-%m-%d %H:%M:%S\")\n assert res == \"2000-01-01 12:34:12\"\n assert isinstance(res, str)\n\n\nclass TestPeriodProperties:\n \"\"\"Test properties such as year, month, weekday, etc....\"\"\"\n\n @pytest.mark.parametrize(\"freq\", [\"A\", \"M\", \"D\", \"H\"])\n def test_is_leap_year(self, freq):\n # GH 13727\n p = Period(\"2000-01-01 00:00:00\", freq=freq)\n assert p.is_leap_year\n assert isinstance(p.is_leap_year, bool)\n\n p = Period(\"1999-01-01 00:00:00\", freq=freq)\n assert not p.is_leap_year\n\n p = Period(\"2004-01-01 00:00:00\", freq=freq)\n assert p.is_leap_year\n\n p = Period(\"2100-01-01 00:00:00\", freq=freq)\n assert not p.is_leap_year\n\n def test_quarterly_negative_ordinals(self):\n p = Period(ordinal=-1, freq=\"Q-DEC\")\n assert p.year == 1969\n assert p.quarter == 4\n assert isinstance(p, Period)\n\n p = Period(ordinal=-2, freq=\"Q-DEC\")\n assert p.year == 1969\n assert p.quarter == 3\n assert isinstance(p, Period)\n\n p = Period(ordinal=-2, freq=\"M\")\n assert p.year == 1969\n assert p.month == 11\n assert isinstance(p, Period)\n\n def test_freq_str(self):\n i1 = Period(\"1982\", freq=\"Min\")\n assert i1.freq == offsets.Minute()\n assert i1.freqstr == \"T\"\n\n def test_period_deprecated_freq(self):\n cases = {\n \"M\": [\"MTH\", \"MONTH\", \"MONTHLY\", \"Mth\", \"month\", \"monthly\"],\n \"B\": [\"BUS\", \"BUSINESS\", \"BUSINESSLY\", \"WEEKDAY\", \"bus\"],\n \"D\": [\"DAY\", \"DLY\", \"DAILY\", \"Day\", \"Dly\", \"Daily\"],\n \"H\": [\"HR\", \"HOUR\", \"HRLY\", \"HOURLY\", \"hr\", \"Hour\", \"HRly\"],\n \"T\": [\"minute\", \"MINUTE\", \"MINUTELY\", \"minutely\"],\n \"S\": [\"sec\", \"SEC\", \"SECOND\", \"SECONDLY\", \"second\"],\n \"L\": [\"MILLISECOND\", \"MILLISECONDLY\", \"millisecond\"],\n \"U\": [\"MICROSECOND\", \"MICROSECONDLY\", \"microsecond\"],\n \"N\": [\"NANOSECOND\", \"NANOSECONDLY\", \"nanosecond\"],\n }\n\n msg = INVALID_FREQ_ERR_MSG\n for exp, freqs in cases.items():\n for freq in freqs:\n with pytest.raises(ValueError, match=msg):\n Period(\"2016-03-01 09:00\", freq=freq)\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=1, freq=freq)\n\n # check supported freq-aliases still works\n p1 = Period(\"2016-03-01 09:00\", freq=exp)\n p2 = Period(ordinal=1, freq=exp)\n assert isinstance(p1, Period)\n assert isinstance(p2, Period)\n\n def test_start_time(self):\n freq_lst = [\"A\", \"Q\", \"M\", \"D\", \"H\", \"T\", \"S\"]\n xp = datetime(2012, 1, 1)\n for f in freq_lst:\n p = Period(\"2012\", freq=f)\n assert p.start_time == xp\n assert Period(\"2012\", freq=\"B\").start_time == datetime(2012, 1, 2)\n assert Period(\"2012\", freq=\"W\").start_time == datetime(2011, 12, 26)\n\n def test_end_time(self):\n p = Period(\"2012\", freq=\"A\")\n\n def _ex(*args):\n return Timestamp(Timestamp(datetime(*args)).value - 1)\n\n xp = _ex(2013, 1, 1)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"Q\")\n xp = _ex(2012, 4, 1)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"M\")\n xp = _ex(2012, 2, 1)\n assert xp == 
p.end_time\n\n p = Period(\"2012\", freq=\"D\")\n xp = _ex(2012, 1, 2)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"H\")\n xp = _ex(2012, 1, 1, 1)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"B\")\n xp = _ex(2012, 1, 3)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"W\")\n xp = _ex(2012, 1, 2)\n assert xp == p.end_time\n\n # Test for GH 11738\n p = Period(\"2012\", freq=\"15D\")\n xp = _ex(2012, 1, 16)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"1D1H\")\n xp = _ex(2012, 1, 2, 1)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"1H1D\")\n xp = _ex(2012, 1, 2, 1)\n assert xp == p.end_time\n\n def test_end_time_business_friday(self):\n # GH#34449\n per = Period(\"1990-01-05\", \"B\")\n result = per.end_time\n\n expected = pd.Timestamp(\"1990-01-06\") - pd.Timedelta(nanoseconds=1)\n assert result == expected\n\n def test_anchor_week_end_time(self):\n def _ex(*args):\n return Timestamp(Timestamp(datetime(*args)).value - 1)\n\n p = Period(\"2013-1-1\", \"W-SAT\")\n xp = _ex(2013, 1, 6)\n assert p.end_time == xp\n\n def test_properties_annually(self):\n # Test properties on Periods with annually frequency.\n a_date = Period(freq=\"A\", year=2007)\n assert a_date.year == 2007\n\n def test_properties_quarterly(self):\n # Test properties on Periods with daily frequency.\n qedec_date = Period(freq=\"Q-DEC\", year=2007, quarter=1)\n qejan_date = Period(freq=\"Q-JAN\", year=2007, quarter=1)\n qejun_date = Period(freq=\"Q-JUN\", year=2007, quarter=1)\n #\n for x in range(3):\n for qd in (qedec_date, qejan_date, qejun_date):\n assert (qd + x).qyear == 2007\n assert (qd + x).quarter == x + 1\n\n def test_properties_monthly(self):\n # Test properties on Periods with daily frequency.\n m_date = Period(freq=\"M\", year=2007, month=1)\n for x in range(11):\n m_ival_x = m_date + x\n assert m_ival_x.year == 2007\n if 1 <= x + 1 <= 3:\n assert m_ival_x.quarter == 1\n elif 4 <= x + 1 <= 6:\n assert m_ival_x.quarter == 2\n elif 7 <= x + 1 <= 9:\n assert m_ival_x.quarter == 3\n elif 10 <= x + 1 <= 12:\n assert m_ival_x.quarter == 4\n assert m_ival_x.month == x + 1\n\n def test_properties_weekly(self):\n # Test properties on Periods with daily frequency.\n w_date = Period(freq=\"W\", year=2007, month=1, day=7)\n #\n assert w_date.year == 2007\n assert w_date.quarter == 1\n assert w_date.month == 1\n assert w_date.week == 1\n assert (w_date - 1).week == 52\n assert w_date.days_in_month == 31\n assert Period(freq=\"W\", year=2012, month=2, day=1).days_in_month == 29\n\n def test_properties_weekly_legacy(self):\n # Test properties on Periods with daily frequency.\n w_date = Period(freq=\"W\", year=2007, month=1, day=7)\n assert w_date.year == 2007\n assert w_date.quarter == 1\n assert w_date.month == 1\n assert w_date.week == 1\n assert (w_date - 1).week == 52\n assert w_date.days_in_month == 31\n\n exp = Period(freq=\"W\", year=2012, month=2, day=1)\n assert exp.days_in_month == 29\n\n msg = INVALID_FREQ_ERR_MSG\n with pytest.raises(ValueError, match=msg):\n Period(freq=\"WK\", year=2007, month=1, day=7)\n\n def test_properties_daily(self):\n # Test properties on Periods with daily frequency.\n b_date = Period(freq=\"B\", year=2007, month=1, day=1)\n #\n assert b_date.year == 2007\n assert b_date.quarter == 1\n assert b_date.month == 1\n assert b_date.day == 1\n assert b_date.weekday == 0\n assert b_date.dayofyear == 1\n assert b_date.days_in_month == 31\n assert Period(freq=\"B\", year=2012, month=2, day=1).days_in_month == 29\n\n d_date = Period(freq=\"D\", 
year=2007, month=1, day=1)\n\n assert d_date.year == 2007\n assert d_date.quarter == 1\n assert d_date.month == 1\n assert d_date.day == 1\n assert d_date.weekday == 0\n assert d_date.dayofyear == 1\n assert d_date.days_in_month == 31\n assert Period(freq=\"D\", year=2012, month=2, day=1).days_in_month == 29\n\n def test_properties_hourly(self):\n # Test properties on Periods with hourly frequency.\n h_date1 = Period(freq=\"H\", year=2007, month=1, day=1, hour=0)\n h_date2 = Period(freq=\"2H\", year=2007, month=1, day=1, hour=0)\n\n for h_date in [h_date1, h_date2]:\n assert h_date.year == 2007\n assert h_date.quarter == 1\n assert h_date.month == 1\n assert h_date.day == 1\n assert h_date.weekday == 0\n assert h_date.dayofyear == 1\n assert h_date.hour == 0\n assert h_date.days_in_month == 31\n assert (\n Period(freq=\"H\", year=2012, month=2, day=1, hour=0).days_in_month == 29\n )\n\n def test_properties_minutely(self):\n # Test properties on Periods with minutely frequency.\n t_date = Period(freq=\"Min\", year=2007, month=1, day=1, hour=0, minute=0)\n #\n assert t_date.quarter == 1\n assert t_date.month == 1\n assert t_date.day == 1\n assert t_date.weekday == 0\n assert t_date.dayofyear == 1\n assert t_date.hour == 0\n assert t_date.minute == 0\n assert t_date.days_in_month == 31\n assert (\n Period(freq=\"D\", year=2012, month=2, day=1, hour=0, minute=0).days_in_month\n == 29\n )\n\n def test_properties_secondly(self):\n # Test properties on Periods with secondly frequency.\n s_date = Period(\n freq=\"Min\", year=2007, month=1, day=1, hour=0, minute=0, second=0\n )\n #\n assert s_date.year == 2007\n assert s_date.quarter == 1\n assert s_date.month == 1\n assert s_date.day == 1\n assert s_date.weekday == 0\n assert s_date.dayofyear == 1\n assert s_date.hour == 0\n assert s_date.minute == 0\n assert s_date.second == 0\n assert s_date.days_in_month == 31\n assert (\n Period(\n freq=\"Min\", year=2012, month=2, day=1, hour=0, minute=0, second=0\n ).days_in_month\n == 29\n )\n\n\nclass TestPeriodField:\n def test_get_period_field_array_raises_on_out_of_range(self):\n msg = \"Buffer dtype mismatch, expected 'const int64_t' but got 'double'\"\n with pytest.raises(ValueError, match=msg):\n libperiod.get_period_field_arr(-1, np.empty(1), 0)\n\n\nclass TestPeriodComparisons:\n def test_comparison_same_period_different_object(self):\n # Separate Period objects for the same period\n left = Period(\"2000-01\", \"M\")\n right = Period(\"2000-01\", \"M\")\n\n assert left == right\n assert left >= right\n assert left <= right\n assert not left < right\n assert not left > right\n\n def test_comparison_same_freq(self):\n jan = Period(\"2000-01\", \"M\")\n feb = Period(\"2000-02\", \"M\")\n\n assert not jan == feb\n assert jan != feb\n assert jan < feb\n assert jan <= feb\n assert not jan > feb\n assert not jan >= feb\n\n def test_comparison_mismatched_freq(self):\n jan = Period(\"2000-01\", \"M\")\n day = Period(\"2012-01-01\", \"D\")\n\n msg = r\"Input has different freq=D from Period\\(freq=M\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan == day\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan != day\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan < day\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan <= day\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan > day\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan >= day\n\n def test_comparison_invalid_type(self):\n jan = Period(\"2000-01\", \"M\")\n\n assert not jan == 
1\n assert jan != 1\n\n int_or_per = \"'(Period|int)'\"\n msg = f\"not supported between instances of {int_or_per} and {int_or_per}\"\n for left, right in [(jan, 1), (1, jan)]:\n\n with pytest.raises(TypeError, match=msg):\n left > right\n with pytest.raises(TypeError, match=msg):\n left >= right\n with pytest.raises(TypeError, match=msg):\n left < right\n with pytest.raises(TypeError, match=msg):\n left <= right\n\n def test_sort_periods(self):\n jan = Period(\"2000-01\", \"M\")\n feb = Period(\"2000-02\", \"M\")\n mar = Period(\"2000-03\", \"M\")\n periods = [mar, jan, feb]\n correctPeriods = [jan, feb, mar]\n assert sorted(periods) == correctPeriods\n\n def test_period_cmp_nat(self):\n p = Period(\"2011-01-01\", freq=\"D\")\n\n t = Timestamp(\"2011-01-01\")\n # confirm Period('NaT') work identical with Timestamp('NaT')\n for left, right in [\n (NaT, p),\n (p, NaT),\n (NaT, t),\n (t, NaT),\n ]:\n assert not left < right\n assert not left > right\n assert not left == right\n assert left != right\n assert not left <= right\n assert not left >= right\n\n\nclass TestArithmetic:\n def test_sub_delta(self):\n left, right = Period(\"2011\", freq=\"A\"), Period(\"2007\", freq=\"A\")\n result = left - right\n assert result == 4 * right.freq\n\n msg = r\"Input has different freq=M from Period\\(freq=A-DEC\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n left - Period(\"2007-01\", freq=\"M\")\n\n def test_add_integer(self):\n per1 = Period(freq=\"D\", year=2008, month=1, day=1)\n per2 = Period(freq=\"D\", year=2008, month=1, day=2)\n assert per1 + 1 == per2\n assert 1 + per1 == per2\n\n def test_add_sub_nat(self):\n # GH#13071\n p = Period(\"2011-01\", freq=\"M\")\n assert p + NaT is NaT\n assert NaT + p is NaT\n assert p - NaT is NaT\n assert NaT - p is NaT\n\n def test_add_invalid(self):\n # GH#4731\n per1 = Period(freq=\"D\", year=2008, month=1, day=1)\n per2 = Period(freq=\"D\", year=2008, month=1, day=2)\n\n msg = \"|\".join(\n [\n r\"unsupported operand type\\(s\\)\",\n \"can only concatenate str\",\n \"must be str, not Period\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n per1 + \"str\"\n with pytest.raises(TypeError, match=msg):\n \"str\" + per1\n with pytest.raises(TypeError, match=msg):\n per1 + per2\n\n boxes = [lambda x: x, lambda x: pd.Series([x]), lambda x: pd.Index([x])]\n ids = [\"identity\", \"Series\", \"Index\"]\n\n @pytest.mark.parametrize(\"lbox\", boxes, ids=ids)\n @pytest.mark.parametrize(\"rbox\", boxes, ids=ids)\n def test_add_timestamp_raises(self, rbox, lbox):\n # GH#17983\n ts = Timestamp(\"2017\")\n per = Period(\"2017\", freq=\"M\")\n\n # We may get a different message depending on which class raises\n # the error.\n msg = \"|\".join(\n [\n \"cannot add\",\n \"unsupported operand\",\n \"can only operate on a\",\n \"incompatible type\",\n \"ufunc add cannot use operands\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n lbox(ts) + rbox(per)\n\n with pytest.raises(TypeError, match=msg):\n lbox(per) + rbox(ts)\n\n with pytest.raises(TypeError, match=msg):\n lbox(per) + rbox(per)\n\n def test_sub(self):\n per1 = Period(\"2011-01-01\", freq=\"D\")\n per2 = Period(\"2011-01-15\", freq=\"D\")\n\n off = per1.freq\n assert per1 - per2 == -14 * off\n assert per2 - per1 == 14 * off\n\n msg = r\"Input has different freq=M from Period\\(freq=D\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n per1 - Period(\"2011-02\", freq=\"M\")\n\n @pytest.mark.parametrize(\"n\", [1, 2, 3, 4])\n def test_sub_n_gt_1_ticks(self, tick_classes, n):\n # GH 
23878\n p1 = Period(\"19910905\", freq=tick_classes(n))\n p2 = Period(\"19920406\", freq=tick_classes(n))\n\n expected = Period(str(p2), freq=p2.freq.base) - Period(\n str(p1), freq=p1.freq.base\n )\n\n assert (p2 - p1) == expected\n\n @pytest.mark.parametrize(\"normalize\", [True, False])\n @pytest.mark.parametrize(\"n\", [1, 2, 3, 4])\n @pytest.mark.parametrize(\n \"offset, kwd_name\",\n [\n (offsets.YearEnd, \"month\"),\n (offsets.QuarterEnd, \"startingMonth\"),\n (offsets.MonthEnd, None),\n (offsets.Week, \"weekday\"),\n ],\n )\n def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):\n # GH 23878\n kwds = {kwd_name: 3} if kwd_name is not None else {}\n p1_d = \"19910905\"\n p2_d = \"19920406\"\n p1 = Period(p1_d, freq=offset(n, normalize, **kwds))\n p2 = Period(p2_d, freq=offset(n, normalize, **kwds))\n\n expected = Period(p2_d, freq=p2.freq.base) - Period(p1_d, freq=p1.freq.base)\n\n assert (p2 - p1) == expected\n\n def test_add_offset(self):\n # freq is DateOffset\n for freq in [\"A\", \"2A\", \"3A\"]:\n p = Period(\"2011\", freq=freq)\n exp = Period(\"2013\", freq=freq)\n assert p + offsets.YearEnd(2) == exp\n assert offsets.YearEnd(2) + p == exp\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(365, \"D\"),\n timedelta(365),\n ]:\n msg = \"Input has different freq|Input cannot be converted to Period\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n p + o\n\n if isinstance(o, np.timedelta64):\n msg = \"cannot use operands with types\"\n with pytest.raises(TypeError, match=msg):\n o + p\n else:\n msg = \"|\".join(\n [\n \"Input has different freq\",\n \"Input cannot be converted to Period\",\n ]\n )\n with pytest.raises(IncompatibleFrequency, match=msg):\n o + p\n\n for freq in [\"M\", \"2M\", \"3M\"]:\n p = Period(\"2011-03\", freq=freq)\n exp = Period(\"2011-05\", freq=freq)\n assert p + offsets.MonthEnd(2) == exp\n assert offsets.MonthEnd(2) + p == exp\n\n exp = Period(\"2012-03\", freq=freq)\n assert p + offsets.MonthEnd(12) == exp\n assert offsets.MonthEnd(12) + p == exp\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(365, \"D\"),\n timedelta(365),\n ]:\n msg = \"Input has different freq|Input cannot be converted to Period\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n p + o\n\n if isinstance(o, np.timedelta64):\n msg = \"cannot use operands with types\"\n with pytest.raises(TypeError, match=msg):\n o + p\n else:\n msg = \"|\".join(\n [\n \"Input has different freq\",\n \"Input cannot be converted to Period\",\n ]\n )\n with pytest.raises(IncompatibleFrequency, match=msg):\n o + p\n\n # freq is Tick\n for freq in [\"D\", \"2D\", \"3D\"]:\n p = Period(\"2011-04-01\", freq=freq)\n\n exp = Period(\"2011-04-06\", freq=freq)\n assert p + offsets.Day(5) == exp\n assert offsets.Day(5) + p == exp\n\n exp = Period(\"2011-04-02\", freq=freq)\n assert p + offsets.Hour(24) == exp\n assert offsets.Hour(24) + p == exp\n\n exp = Period(\"2011-04-03\", freq=freq)\n assert p + np.timedelta64(2, \"D\") == exp\n msg = \"cannot use operands with types\"\n with pytest.raises(TypeError, match=msg):\n np.timedelta64(2, \"D\") + p\n\n exp = Period(\"2011-04-02\", freq=freq)\n assert p + np.timedelta64(3600 * 24, \"s\") == exp\n with pytest.raises(TypeError, match=msg):\n np.timedelta64(3600 * 24, \"s\") + p\n\n exp = Period(\"2011-03-30\", freq=freq)\n assert p + timedelta(-2) == exp\n assert timedelta(-2) + p == exp\n\n exp = Period(\"2011-04-03\", freq=freq)\n 
assert p + timedelta(hours=48) == exp\n assert timedelta(hours=48) + p == exp\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(4, \"h\"),\n timedelta(hours=23),\n ]:\n msg = \"Input has different freq|Input cannot be converted to Period\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n p + o\n\n if isinstance(o, np.timedelta64):\n msg = \"cannot use operands with types\"\n with pytest.raises(TypeError, match=msg):\n o + p\n else:\n msg = \"|\".join(\n [\n \"Input has different freq\",\n \"Input cannot be converted to Period\",\n ]\n )\n with pytest.raises(IncompatibleFrequency, match=msg):\n o + p\n\n for freq in [\"H\", \"2H\", \"3H\"]:\n p = Period(\"2011-04-01 09:00\", freq=freq)\n\n exp = Period(\"2011-04-03 09:00\", freq=freq)\n assert p + offsets.Day(2) == exp\n assert offsets.Day(2) + p == exp\n\n exp = Period(\"2011-04-01 12:00\", freq=freq)\n assert p + offsets.Hour(3) == exp\n assert offsets.Hour(3) + p == exp\n\n msg = \"cannot use operands with types\"\n exp = Period(\"2011-04-01 12:00\", freq=freq)\n assert p + np.timedelta64(3, \"h\") == exp\n with pytest.raises(TypeError, match=msg):\n np.timedelta64(3, \"h\") + p\n\n exp = Period(\"2011-04-01 10:00\", freq=freq)\n assert p + np.timedelta64(3600, \"s\") == exp\n with pytest.raises(TypeError, match=msg):\n np.timedelta64(3600, \"s\") + p\n\n exp = Period(\"2011-04-01 11:00\", freq=freq)\n assert p + timedelta(minutes=120) == exp\n assert timedelta(minutes=120) + p == exp\n\n exp = Period(\"2011-04-05 12:00\", freq=freq)\n assert p + timedelta(days=4, minutes=180) == exp\n assert timedelta(days=4, minutes=180) + p == exp\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(3200, \"s\"),\n timedelta(hours=23, minutes=30),\n ]:\n msg = \"Input has different freq|Input cannot be converted to Period\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n p + o\n\n if isinstance(o, np.timedelta64):\n msg = \"cannot use operands with types\"\n with pytest.raises(TypeError, match=msg):\n o + p\n else:\n msg = \"|\".join(\n [\n \"Input has different freq\",\n \"Input cannot be converted to Period\",\n ]\n )\n with pytest.raises(IncompatibleFrequency, match=msg):\n o + p\n\n def test_sub_offset(self):\n # freq is DateOffset\n msg = \"Input has different freq|Input cannot be converted to Period\"\n for freq in [\"A\", \"2A\", \"3A\"]:\n p = Period(\"2011\", freq=freq)\n assert p - offsets.YearEnd(2) == Period(\"2009\", freq=freq)\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(365, \"D\"),\n timedelta(365),\n ]:\n with pytest.raises(IncompatibleFrequency, match=msg):\n p - o\n\n for freq in [\"M\", \"2M\", \"3M\"]:\n p = Period(\"2011-03\", freq=freq)\n assert p - offsets.MonthEnd(2) == Period(\"2011-01\", freq=freq)\n assert p - offsets.MonthEnd(12) == Period(\"2010-03\", freq=freq)\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(365, \"D\"),\n timedelta(365),\n ]:\n with pytest.raises(IncompatibleFrequency, match=msg):\n p - o\n\n # freq is Tick\n for freq in [\"D\", \"2D\", \"3D\"]:\n p = Period(\"2011-04-01\", freq=freq)\n assert p - offsets.Day(5) == Period(\"2011-03-27\", freq=freq)\n assert p - offsets.Hour(24) == Period(\"2011-03-31\", freq=freq)\n assert p - np.timedelta64(2, \"D\") == Period(\"2011-03-30\", freq=freq)\n assert p - np.timedelta64(3600 * 24, \"s\") == Period(\"2011-03-31\", freq=freq)\n assert p 
- timedelta(-2) == Period(\"2011-04-03\", freq=freq)\n assert p - timedelta(hours=48) == Period(\"2011-03-30\", freq=freq)\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(4, \"h\"),\n timedelta(hours=23),\n ]:\n with pytest.raises(IncompatibleFrequency, match=msg):\n p - o\n\n for freq in [\"H\", \"2H\", \"3H\"]:\n p = Period(\"2011-04-01 09:00\", freq=freq)\n assert p - offsets.Day(2) == Period(\"2011-03-30 09:00\", freq=freq)\n assert p - offsets.Hour(3) == Period(\"2011-04-01 06:00\", freq=freq)\n assert p - np.timedelta64(3, \"h\") == Period(\"2011-04-01 06:00\", freq=freq)\n assert p - np.timedelta64(3600, \"s\") == Period(\n \"2011-04-01 08:00\", freq=freq\n )\n assert p - timedelta(minutes=120) == Period(\"2011-04-01 07:00\", freq=freq)\n assert p - timedelta(days=4, minutes=180) == Period(\n \"2011-03-28 06:00\", freq=freq\n )\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(3200, \"s\"),\n timedelta(hours=23, minutes=30),\n ]:\n with pytest.raises(IncompatibleFrequency, match=msg):\n p - o\n\n @pytest.mark.parametrize(\"freq\", [\"M\", \"2M\", \"3M\"])\n def test_period_addsub_nat(self, freq):\n per = Period(\"2011-01\", freq=freq)\n\n # For subtraction, NaT is treated as another Period object\n assert NaT - per is NaT\n assert per - NaT is NaT\n\n # For addition, NaT is treated as offset-like\n assert NaT + per is NaT\n assert per + NaT is NaT\n\n def test_period_ops_offset(self):\n p = Period(\"2011-04-01\", freq=\"D\")\n result = p + offsets.Day()\n exp = Period(\"2011-04-02\", freq=\"D\")\n assert result == exp\n\n result = p - offsets.Day(2)\n exp = Period(\"2011-03-30\", freq=\"D\")\n assert result == exp\n\n msg = r\"Input cannot be converted to Period\\(freq=D\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n p + offsets.Hour(2)\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n p - offsets.Hour(2)\n\n\ndef test_period_immutable():\n # see gh-17116\n msg = \"not writable\"\n\n per = Period(\"2014Q1\")\n with pytest.raises(AttributeError, match=msg):\n per.ordinal = 14\n\n freq = per.freq\n with pytest.raises(AttributeError, match=msg):\n per.freq = 2 * freq\n\n\ndef test_small_year_parsing():\n per1 = Period(\"0001-01-07\", \"D\")\n assert per1.year == 1\n assert per1.day == 7\n\n\ndef test_negone_ordinals():\n freqs = [\"A\", \"M\", \"Q\", \"D\", \"H\", \"T\", \"S\"]\n\n period = Period(ordinal=-1, freq=\"D\")\n for freq in freqs:\n repr(period.asfreq(freq))\n\n for freq in freqs:\n period = Period(ordinal=-1, freq=freq)\n repr(period)\n assert period.year == 1969\n\n period = Period(ordinal=-1, freq=\"B\")\n repr(period)\n period = Period(ordinal=-1, freq=\"W\")\n repr(period)\n",
"from typing import List\n\nimport numpy as np\n\nfrom pandas._typing import FilePathOrBuffer, Scalar\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.io.excel._base import ExcelWriter, _BaseExcelReader\nfrom pandas.io.excel._util import _validate_freeze_panes\n\n\nclass _OpenpyxlWriter(ExcelWriter):\n engine = \"openpyxl\"\n supported_extensions = (\".xlsx\", \".xlsm\")\n\n def __init__(self, path, engine=None, mode=\"w\", **engine_kwargs):\n # Use the openpyxl module as the Excel writer.\n from openpyxl.workbook import Workbook\n\n super().__init__(path, mode=mode, **engine_kwargs)\n\n if self.mode == \"a\": # Load from existing workbook\n from openpyxl import load_workbook\n\n book = load_workbook(self.path)\n self.book = book\n else:\n # Create workbook object with default optimized_write=True.\n self.book = Workbook()\n\n if self.book.worksheets:\n try:\n self.book.remove(self.book.worksheets[0])\n except AttributeError:\n\n # compat - for openpyxl <= 2.4\n self.book.remove_sheet(self.book.worksheets[0])\n\n def save(self):\n \"\"\"\n Save workbook to disk.\n \"\"\"\n return self.book.save(self.path)\n\n @classmethod\n def _convert_to_style(cls, style_dict):\n \"\"\"\n Converts a style_dict to an openpyxl style object.\n\n Parameters\n ----------\n style_dict : style dictionary to convert\n \"\"\"\n from openpyxl.style import Style\n\n xls_style = Style()\n for key, value in style_dict.items():\n for nk, nv in value.items():\n if key == \"borders\":\n (\n xls_style.borders.__getattribute__(nk).__setattr__(\n \"border_style\", nv\n )\n )\n else:\n xls_style.__getattribute__(key).__setattr__(nk, nv)\n\n return xls_style\n\n @classmethod\n def _convert_to_style_kwargs(cls, style_dict):\n \"\"\"\n Convert a style_dict to a set of kwargs suitable for initializing\n or updating-on-copy an openpyxl v2 style object.\n\n Parameters\n ----------\n style_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'font'\n 'fill'\n 'border' ('borders')\n 'alignment'\n 'number_format'\n 'protection'\n\n Returns\n -------\n style_kwargs : dict\n A dict with the same, normalized keys as ``style_dict`` but each\n value has been replaced with a native openpyxl style object of the\n appropriate class.\n \"\"\"\n _style_key_map = {\"borders\": \"border\"}\n\n style_kwargs = {}\n for k, v in style_dict.items():\n if k in _style_key_map:\n k = _style_key_map[k]\n _conv_to_x = getattr(cls, f\"_convert_to_{k}\", lambda x: None)\n new_v = _conv_to_x(v)\n if new_v:\n style_kwargs[k] = new_v\n\n return style_kwargs\n\n @classmethod\n def _convert_to_color(cls, color_spec):\n \"\"\"\n Convert ``color_spec`` to an openpyxl v2 Color object.\n\n Parameters\n ----------\n color_spec : str, dict\n A 32-bit ARGB hex string, or a dict with zero or more of the\n following keys.\n 'rgb'\n 'indexed'\n 'auto'\n 'theme'\n 'tint'\n 'index'\n 'type'\n\n Returns\n -------\n color : openpyxl.styles.Color\n \"\"\"\n from openpyxl.styles import Color\n\n if isinstance(color_spec, str):\n return Color(color_spec)\n else:\n return Color(**color_spec)\n\n @classmethod\n def _convert_to_font(cls, font_dict):\n \"\"\"\n Convert ``font_dict`` to an openpyxl v2 Font object.\n\n Parameters\n ----------\n font_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'name'\n 'size' ('sz')\n 'bold' ('b')\n 'italic' ('i')\n 'underline' ('u')\n 'strikethrough' ('strike')\n 'color'\n 'vertAlign' ('vertalign')\n 'charset'\n 'scheme'\n 'family'\n 'outline'\n 
'shadow'\n 'condense'\n\n Returns\n -------\n font : openpyxl.styles.Font\n \"\"\"\n from openpyxl.styles import Font\n\n _font_key_map = {\n \"sz\": \"size\",\n \"b\": \"bold\",\n \"i\": \"italic\",\n \"u\": \"underline\",\n \"strike\": \"strikethrough\",\n \"vertalign\": \"vertAlign\",\n }\n\n font_kwargs = {}\n for k, v in font_dict.items():\n if k in _font_key_map:\n k = _font_key_map[k]\n if k == \"color\":\n v = cls._convert_to_color(v)\n font_kwargs[k] = v\n\n return Font(**font_kwargs)\n\n @classmethod\n def _convert_to_stop(cls, stop_seq):\n \"\"\"\n Convert ``stop_seq`` to a list of openpyxl v2 Color objects,\n suitable for initializing the ``GradientFill`` ``stop`` parameter.\n\n Parameters\n ----------\n stop_seq : iterable\n An iterable that yields objects suitable for consumption by\n ``_convert_to_color``.\n\n Returns\n -------\n stop : list of openpyxl.styles.Color\n \"\"\"\n return map(cls._convert_to_color, stop_seq)\n\n @classmethod\n def _convert_to_fill(cls, fill_dict):\n \"\"\"\n Convert ``fill_dict`` to an openpyxl v2 Fill object.\n\n Parameters\n ----------\n fill_dict : dict\n A dict with one or more of the following keys (or their synonyms),\n 'fill_type' ('patternType', 'patterntype')\n 'start_color' ('fgColor', 'fgcolor')\n 'end_color' ('bgColor', 'bgcolor')\n or one or more of the following keys (or their synonyms).\n 'type' ('fill_type')\n 'degree'\n 'left'\n 'right'\n 'top'\n 'bottom'\n 'stop'\n\n Returns\n -------\n fill : openpyxl.styles.Fill\n \"\"\"\n from openpyxl.styles import GradientFill, PatternFill\n\n _pattern_fill_key_map = {\n \"patternType\": \"fill_type\",\n \"patterntype\": \"fill_type\",\n \"fgColor\": \"start_color\",\n \"fgcolor\": \"start_color\",\n \"bgColor\": \"end_color\",\n \"bgcolor\": \"end_color\",\n }\n\n _gradient_fill_key_map = {\"fill_type\": \"type\"}\n\n pfill_kwargs = {}\n gfill_kwargs = {}\n for k, v in fill_dict.items():\n pk = gk = None\n if k in _pattern_fill_key_map:\n pk = _pattern_fill_key_map[k]\n if k in _gradient_fill_key_map:\n gk = _gradient_fill_key_map[k]\n if pk in [\"start_color\", \"end_color\"]:\n v = cls._convert_to_color(v)\n if gk == \"stop\":\n v = cls._convert_to_stop(v)\n if pk:\n pfill_kwargs[pk] = v\n elif gk:\n gfill_kwargs[gk] = v\n else:\n pfill_kwargs[k] = v\n gfill_kwargs[k] = v\n\n try:\n return PatternFill(**pfill_kwargs)\n except TypeError:\n return GradientFill(**gfill_kwargs)\n\n @classmethod\n def _convert_to_side(cls, side_spec):\n \"\"\"\n Convert ``side_spec`` to an openpyxl v2 Side object.\n\n Parameters\n ----------\n side_spec : str, dict\n A string specifying the border style, or a dict with zero or more\n of the following keys (or their synonyms).\n 'style' ('border_style')\n 'color'\n\n Returns\n -------\n side : openpyxl.styles.Side\n \"\"\"\n from openpyxl.styles import Side\n\n _side_key_map = {\"border_style\": \"style\"}\n\n if isinstance(side_spec, str):\n return Side(style=side_spec)\n\n side_kwargs = {}\n for k, v in side_spec.items():\n if k in _side_key_map:\n k = _side_key_map[k]\n if k == \"color\":\n v = cls._convert_to_color(v)\n side_kwargs[k] = v\n\n return Side(**side_kwargs)\n\n @classmethod\n def _convert_to_border(cls, border_dict):\n \"\"\"\n Convert ``border_dict`` to an openpyxl v2 Border object.\n\n Parameters\n ----------\n border_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'left'\n 'right'\n 'top'\n 'bottom'\n 'diagonal'\n 'diagonal_direction'\n 'vertical'\n 'horizontal'\n 'diagonalUp' ('diagonalup')\n 
'diagonalDown' ('diagonaldown')\n 'outline'\n\n Returns\n -------\n border : openpyxl.styles.Border\n \"\"\"\n from openpyxl.styles import Border\n\n _border_key_map = {\"diagonalup\": \"diagonalUp\", \"diagonaldown\": \"diagonalDown\"}\n\n border_kwargs = {}\n for k, v in border_dict.items():\n if k in _border_key_map:\n k = _border_key_map[k]\n if k == \"color\":\n v = cls._convert_to_color(v)\n if k in [\"left\", \"right\", \"top\", \"bottom\", \"diagonal\"]:\n v = cls._convert_to_side(v)\n border_kwargs[k] = v\n\n return Border(**border_kwargs)\n\n @classmethod\n def _convert_to_alignment(cls, alignment_dict):\n \"\"\"\n Convert ``alignment_dict`` to an openpyxl v2 Alignment object.\n\n Parameters\n ----------\n alignment_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'horizontal'\n 'vertical'\n 'text_rotation'\n 'wrap_text'\n 'shrink_to_fit'\n 'indent'\n Returns\n -------\n alignment : openpyxl.styles.Alignment\n \"\"\"\n from openpyxl.styles import Alignment\n\n return Alignment(**alignment_dict)\n\n @classmethod\n def _convert_to_number_format(cls, number_format_dict):\n \"\"\"\n Convert ``number_format_dict`` to an openpyxl v2.1.0 number format\n initializer.\n\n Parameters\n ----------\n number_format_dict : dict\n A dict with zero or more of the following keys.\n 'format_code' : str\n\n Returns\n -------\n number_format : str\n \"\"\"\n return number_format_dict[\"format_code\"]\n\n @classmethod\n def _convert_to_protection(cls, protection_dict):\n \"\"\"\n Convert ``protection_dict`` to an openpyxl v2 Protection object.\n\n Parameters\n ----------\n protection_dict : dict\n A dict with zero or more of the following keys.\n 'locked'\n 'hidden'\n\n Returns\n -------\n \"\"\"\n from openpyxl.styles import Protection\n\n return Protection(**protection_dict)\n\n def write_cells(\n self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None\n ):\n # Write the frame cells using openpyxl.\n sheet_name = self._get_sheet_name(sheet_name)\n\n _style_cache = {}\n\n if sheet_name in self.sheets:\n wks = self.sheets[sheet_name]\n else:\n wks = self.book.create_sheet()\n wks.title = sheet_name\n self.sheets[sheet_name] = wks\n\n if _validate_freeze_panes(freeze_panes):\n wks.freeze_panes = wks.cell(\n row=freeze_panes[0] + 1, column=freeze_panes[1] + 1\n )\n\n for cell in cells:\n xcell = wks.cell(\n row=startrow + cell.row + 1, column=startcol + cell.col + 1\n )\n xcell.value, fmt = self._value_with_fmt(cell.val)\n if fmt:\n xcell.number_format = fmt\n\n style_kwargs = {}\n if cell.style:\n key = str(cell.style)\n style_kwargs = _style_cache.get(key)\n if style_kwargs is None:\n style_kwargs = self._convert_to_style_kwargs(cell.style)\n _style_cache[key] = style_kwargs\n\n if style_kwargs:\n for k, v in style_kwargs.items():\n setattr(xcell, k, v)\n\n if cell.mergestart is not None and cell.mergeend is not None:\n\n wks.merge_cells(\n start_row=startrow + cell.row + 1,\n start_column=startcol + cell.col + 1,\n end_column=startcol + cell.mergeend + 1,\n end_row=startrow + cell.mergestart + 1,\n )\n\n # When cells are merged only the top-left cell is preserved\n # The behaviour of the other cells in a merged range is\n # undefined\n if style_kwargs:\n first_row = startrow + cell.row + 1\n last_row = startrow + cell.mergestart + 1\n first_col = startcol + cell.col + 1\n last_col = startcol + cell.mergeend + 1\n\n for row in range(first_row, last_row + 1):\n for col in range(first_col, last_col + 1):\n if row == first_row and col == first_col:\n # 
Ignore first cell. It is already handled.\n continue\n xcell = wks.cell(column=col, row=row)\n for k, v in style_kwargs.items():\n setattr(xcell, k, v)\n\n\nclass _OpenpyxlReader(_BaseExcelReader):\n def __init__(self, filepath_or_buffer: FilePathOrBuffer) -> None:\n \"\"\"\n Reader using openpyxl engine.\n\n Parameters\n ----------\n filepath_or_buffer : string, path object or Workbook\n Object to be parsed.\n \"\"\"\n import_optional_dependency(\"openpyxl\")\n super().__init__(filepath_or_buffer)\n\n @property\n def _workbook_class(self):\n from openpyxl import Workbook\n\n return Workbook\n\n def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):\n from openpyxl import load_workbook\n\n return load_workbook(\n filepath_or_buffer, read_only=True, data_only=True, keep_links=False\n )\n\n def close(self):\n # https://stackoverflow.com/questions/31416842/\n # openpyxl-does-not-close-excel-workbook-in-read-only-mode\n self.book.close()\n\n @property\n def sheet_names(self) -> List[str]:\n return self.book.sheetnames\n\n def get_sheet_by_name(self, name: str):\n return self.book[name]\n\n def get_sheet_by_index(self, index: int):\n return self.book.worksheets[index]\n\n def _convert_cell(self, cell, convert_float: bool) -> Scalar:\n\n # TODO: replace with openpyxl constants\n if cell.is_date:\n return cell.value\n elif cell.data_type == \"e\":\n return np.nan\n elif cell.data_type == \"b\":\n return bool(cell.value)\n elif cell.value is None:\n return \"\" # compat with xlrd\n elif cell.data_type == \"n\":\n # GH5394\n if convert_float:\n val = int(cell.value)\n if val == cell.value:\n return val\n else:\n return float(cell.value)\n\n return cell.value\n\n def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:\n data: List[List[Scalar]] = []\n for row in sheet.rows:\n data.append([self._convert_cell(cell, convert_float) for cell in row])\n\n return data\n",
"import numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.period import IncompatibleFrequency\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n NaT,\n Period,\n PeriodIndex,\n Series,\n date_range,\n offsets,\n period_range,\n)\nimport pandas._testing as tm\n\nfrom ..datetimelike import DatetimeLike\n\n\nclass TestPeriodIndex(DatetimeLike):\n _holder = PeriodIndex\n\n @pytest.fixture(\n params=[\n tm.makePeriodIndex(10),\n period_range(\"20130101\", periods=10, freq=\"D\")[::-1],\n ],\n ids=[\"index_inc\", \"index_dec\"],\n )\n def index(self, request):\n return request.param\n\n def create_index(self) -> PeriodIndex:\n return period_range(\"20130101\", periods=5, freq=\"D\")\n\n def test_pickle_compat_construction(self):\n pass\n\n @pytest.mark.parametrize(\"freq\", [\"D\", \"M\", \"A\"])\n def test_pickle_round_trip(self, freq):\n idx = PeriodIndex([\"2016-05-16\", \"NaT\", NaT, np.NaN], freq=freq)\n result = tm.round_trip_pickle(idx)\n tm.assert_index_equal(result, idx)\n\n def test_where(self):\n # This is handled in test_indexing\n pass\n\n @pytest.mark.parametrize(\"use_numpy\", [True, False])\n @pytest.mark.parametrize(\n \"index\",\n [\n period_range(\"2000-01-01\", periods=3, freq=\"D\"),\n period_range(\"2001-01-01\", periods=3, freq=\"2D\"),\n PeriodIndex([\"2001-01\", \"NaT\", \"2003-01\"], freq=\"M\"),\n ],\n )\n def test_repeat_freqstr(self, index, use_numpy):\n # GH10183\n expected = PeriodIndex([p for p in index for _ in range(3)])\n result = np.repeat(index, 3) if use_numpy else index.repeat(3)\n tm.assert_index_equal(result, expected)\n assert result.freqstr == index.freqstr\n\n def test_no_millisecond_field(self):\n msg = \"type object 'DatetimeIndex' has no attribute 'millisecond'\"\n with pytest.raises(AttributeError, match=msg):\n DatetimeIndex.millisecond\n\n msg = \"'DatetimeIndex' object has no attribute 'millisecond'\"\n with pytest.raises(AttributeError, match=msg):\n DatetimeIndex([]).millisecond\n\n def test_make_time_series(self):\n index = period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2009\")\n series = Series(1, index=index)\n assert isinstance(series, Series)\n\n def test_shallow_copy_empty(self):\n # GH13067\n idx = PeriodIndex([], freq=\"M\")\n result = idx._shallow_copy()\n expected = idx\n\n tm.assert_index_equal(result, expected)\n\n def test_shallow_copy_disallow_i8(self):\n # GH-24391\n pi = period_range(\"2018-01-01\", periods=3, freq=\"2D\")\n with pytest.raises(AssertionError, match=\"ndarray\"):\n pi._shallow_copy(pi.asi8)\n\n def test_shallow_copy_requires_disallow_period_index(self):\n pi = period_range(\"2018-01-01\", periods=3, freq=\"2D\")\n with pytest.raises(AssertionError, match=\"PeriodIndex\"):\n pi._shallow_copy(pi)\n\n def test_view_asi8(self):\n idx = PeriodIndex([], freq=\"M\")\n\n exp = np.array([], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.view(\"i8\"), exp)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n idx = PeriodIndex([\"2011-01\", NaT], freq=\"M\")\n\n exp = np.array([492, -9223372036854775808], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.view(\"i8\"), exp)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n exp = np.array([14975, -9223372036854775808], dtype=np.int64)\n idx = PeriodIndex([\"2011-01-01\", NaT], freq=\"D\")\n tm.assert_numpy_array_equal(idx.view(\"i8\"), exp)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n def test_values(self):\n idx = PeriodIndex([], freq=\"M\")\n\n exp = np.array([], dtype=object)\n 
tm.assert_numpy_array_equal(idx.values, exp)\n tm.assert_numpy_array_equal(idx.to_numpy(), exp)\n\n exp = np.array([], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n idx = PeriodIndex([\"2011-01\", NaT], freq=\"M\")\n\n exp = np.array([Period(\"2011-01\", freq=\"M\"), NaT], dtype=object)\n tm.assert_numpy_array_equal(idx.values, exp)\n tm.assert_numpy_array_equal(idx.to_numpy(), exp)\n exp = np.array([492, -9223372036854775808], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n idx = PeriodIndex([\"2011-01-01\", NaT], freq=\"D\")\n\n exp = np.array([Period(\"2011-01-01\", freq=\"D\"), NaT], dtype=object)\n tm.assert_numpy_array_equal(idx.values, exp)\n tm.assert_numpy_array_equal(idx.to_numpy(), exp)\n exp = np.array([14975, -9223372036854775808], dtype=np.int64)\n tm.assert_numpy_array_equal(idx.asi8, exp)\n\n def test_period_index_length(self):\n pi = period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2009\")\n assert len(pi) == 9\n\n pi = period_range(freq=\"Q\", start=\"1/1/2001\", end=\"12/1/2009\")\n assert len(pi) == 4 * 9\n\n pi = period_range(freq=\"M\", start=\"1/1/2001\", end=\"12/1/2009\")\n assert len(pi) == 12 * 9\n\n start = Period(\"02-Apr-2005\", \"B\")\n i1 = period_range(start=start, periods=20)\n assert len(i1) == 20\n assert i1.freq == start.freq\n assert i1[0] == start\n\n end_intv = Period(\"2006-12-31\", \"W\")\n i1 = period_range(end=end_intv, periods=10)\n assert len(i1) == 10\n assert i1.freq == end_intv.freq\n assert i1[-1] == end_intv\n\n end_intv = Period(\"2006-12-31\", \"1w\")\n i2 = period_range(end=end_intv, periods=10)\n assert len(i1) == len(i2)\n assert (i1 == i2).all()\n assert i1.freq == i2.freq\n\n msg = \"start and end must have same freq\"\n with pytest.raises(ValueError, match=msg):\n period_range(start=start, end=end_intv)\n\n end_intv = Period(\"2005-05-01\", \"B\")\n i1 = period_range(start=start, end=end_intv)\n\n msg = (\n \"Of the three parameters: start, end, and periods, exactly two \"\n \"must be specified\"\n )\n with pytest.raises(ValueError, match=msg):\n period_range(start=start)\n\n # infer freq from first element\n i2 = PeriodIndex([end_intv, Period(\"2005-05-05\", \"B\")])\n assert len(i2) == 2\n assert i2[0] == end_intv\n\n i2 = PeriodIndex(np.array([end_intv, Period(\"2005-05-05\", \"B\")]))\n assert len(i2) == 2\n assert i2[0] == end_intv\n\n # Mixed freq should fail\n vals = [end_intv, Period(\"2006-12-31\", \"w\")]\n msg = r\"Input has different freq=W-SUN from PeriodIndex\\(freq=B\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n PeriodIndex(vals)\n vals = np.array(vals)\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(vals)\n\n def test_fields(self):\n # year, month, day, hour, minute\n # second, weekofyear, week, dayofweek, weekday, dayofyear, quarter\n # qyear\n pi = period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2005\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"Q\", start=\"1/1/2001\", end=\"12/1/2002\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"M\", start=\"1/1/2001\", end=\"1/1/2002\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"D\", start=\"12/1/2001\", end=\"6/1/2001\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"B\", start=\"12/1/2001\", end=\"6/1/2001\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"H\", start=\"12/31/2001\", end=\"1/1/2002 23:00\")\n self._check_all_fields(pi)\n\n pi = period_range(freq=\"Min\", start=\"12/31/2001\", end=\"1/1/2002 00:20\")\n 
self._check_all_fields(pi)\n\n pi = period_range(\n freq=\"S\", start=\"12/31/2001 00:00:00\", end=\"12/31/2001 00:05:00\"\n )\n self._check_all_fields(pi)\n\n end_intv = Period(\"2006-12-31\", \"W\")\n i1 = period_range(end=end_intv, periods=10)\n self._check_all_fields(i1)\n\n def _check_all_fields(self, periodindex):\n fields = [\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"weekofyear\",\n \"week\",\n \"dayofweek\",\n \"dayofyear\",\n \"quarter\",\n \"qyear\",\n \"days_in_month\",\n ]\n\n periods = list(periodindex)\n s = pd.Series(periodindex)\n\n for field in fields:\n field_idx = getattr(periodindex, field)\n assert len(periodindex) == len(field_idx)\n for x, val in zip(periods, field_idx):\n assert getattr(x, field) == val\n\n if len(s) == 0:\n continue\n\n field_s = getattr(s.dt, field)\n assert len(periodindex) == len(field_s)\n for x, val in zip(periods, field_s):\n assert getattr(x, field) == val\n\n def test_period_set_index_reindex(self):\n # GH 6631\n df = DataFrame(np.random.random(6))\n idx1 = period_range(\"2011/01/01\", periods=6, freq=\"M\")\n idx2 = period_range(\"2013\", periods=6, freq=\"A\")\n\n df = df.set_index(idx1)\n tm.assert_index_equal(df.index, idx1)\n df = df.set_index(idx2)\n tm.assert_index_equal(df.index, idx2)\n\n @pytest.mark.parametrize(\n \"p_values, o_values, values, expected_values\",\n [\n (\n [Period(\"2019Q1\", \"Q-DEC\"), Period(\"2019Q2\", \"Q-DEC\")],\n [Period(\"2019Q1\", \"Q-DEC\"), Period(\"2019Q2\", \"Q-DEC\"), \"All\"],\n [1.0, 1.0],\n [1.0, 1.0, np.nan],\n ),\n (\n [Period(\"2019Q1\", \"Q-DEC\"), Period(\"2019Q2\", \"Q-DEC\")],\n [Period(\"2019Q1\", \"Q-DEC\"), Period(\"2019Q2\", \"Q-DEC\")],\n [1.0, 1.0],\n [1.0, 1.0],\n ),\n ],\n )\n def test_period_reindex_with_object(\n self, p_values, o_values, values, expected_values\n ):\n # GH 28337\n period_index = PeriodIndex(p_values)\n object_index = Index(o_values)\n\n s = pd.Series(values, index=period_index)\n result = s.reindex(object_index)\n expected = pd.Series(expected_values, index=object_index)\n tm.assert_series_equal(result, expected)\n\n def test_is_(self):\n create_index = lambda: period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2009\")\n index = create_index()\n assert index.is_(index)\n assert not index.is_(create_index())\n assert index.is_(index.view())\n assert index.is_(index.view().view().view().view().view())\n assert index.view().is_(index)\n ind2 = index.view()\n index.name = \"Apple\"\n assert ind2.is_(index)\n assert not index.is_(index[:])\n assert not index.is_(index.asfreq(\"M\"))\n assert not index.is_(index.asfreq(\"A\"))\n\n assert not index.is_(index - 2)\n assert not index.is_(index - 0)\n\n def test_periods_number_check(self):\n msg = (\n \"Of the three parameters: start, end, and periods, exactly two \"\n \"must be specified\"\n )\n with pytest.raises(ValueError, match=msg):\n period_range(\"2011-1-1\", \"2012-1-1\", \"B\")\n\n def test_index_duplicate_periods(self):\n # monotonic\n idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq=\"A-JUN\")\n ts = Series(np.random.randn(len(idx)), index=idx)\n\n result = ts[\"2007\"]\n expected = ts[1:3]\n tm.assert_series_equal(result, expected)\n result[:] = 1\n assert (ts[1:3] == 1).all()\n\n # not monotonic\n idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq=\"A-JUN\")\n ts = Series(np.random.randn(len(idx)), index=idx)\n\n result = ts[\"2007\"]\n expected = ts[idx == \"2007\"]\n tm.assert_series_equal(result, expected)\n\n def test_index_unique(self):\n idx = 
PeriodIndex([2000, 2007, 2007, 2009, 2009], freq=\"A-JUN\")\n expected = PeriodIndex([2000, 2007, 2009], freq=\"A-JUN\")\n tm.assert_index_equal(idx.unique(), expected)\n assert idx.nunique() == 3\n\n idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq=\"A-JUN\", tz=\"US/Eastern\")\n expected = PeriodIndex([2000, 2007, 2009], freq=\"A-JUN\", tz=\"US/Eastern\")\n tm.assert_index_equal(idx.unique(), expected)\n assert idx.nunique() == 3\n\n def test_shift(self):\n # This is tested in test_arithmetic\n pass\n\n @td.skip_if_32bit\n def test_ndarray_compat_properties(self):\n super().test_ndarray_compat_properties()\n\n def test_negative_ordinals(self):\n Period(ordinal=-1000, freq=\"A\")\n Period(ordinal=0, freq=\"A\")\n\n idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq=\"A\")\n idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq=\"A\")\n tm.assert_index_equal(idx1, idx2)\n\n def test_pindex_fieldaccessor_nat(self):\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"NaT\", \"2012-03\", \"2012-04\"], freq=\"D\", name=\"name\"\n )\n\n exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name=\"name\")\n tm.assert_index_equal(idx.year, exp)\n exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name=\"name\")\n tm.assert_index_equal(idx.month, exp)\n\n def test_pindex_qaccess(self):\n pi = PeriodIndex([\"2Q05\", \"3Q05\", \"4Q05\", \"1Q06\", \"2Q06\"], freq=\"Q\")\n s = Series(np.random.rand(len(pi)), index=pi).cumsum()\n # Todo: fix these accessors!\n assert s[\"05Q4\"] == s[2]\n\n def test_pindex_multiples(self):\n expected = PeriodIndex(\n [\"2011-01\", \"2011-03\", \"2011-05\", \"2011-07\", \"2011-09\", \"2011-11\"],\n freq=\"2M\",\n )\n\n pi = period_range(start=\"1/1/11\", end=\"12/31/11\", freq=\"2M\")\n tm.assert_index_equal(pi, expected)\n assert pi.freq == offsets.MonthEnd(2)\n assert pi.freqstr == \"2M\"\n\n pi = period_range(start=\"1/1/11\", periods=6, freq=\"2M\")\n tm.assert_index_equal(pi, expected)\n assert pi.freq == offsets.MonthEnd(2)\n assert pi.freqstr == \"2M\"\n\n def test_iteration(self):\n index = period_range(start=\"1/1/10\", periods=4, freq=\"B\")\n\n result = list(index)\n assert isinstance(result[0], Period)\n assert result[0].freq == index.freq\n\n def test_is_full(self):\n index = PeriodIndex([2005, 2007, 2009], freq=\"A\")\n assert not index.is_full\n\n index = PeriodIndex([2005, 2006, 2007], freq=\"A\")\n assert index.is_full\n\n index = PeriodIndex([2005, 2005, 2007], freq=\"A\")\n assert not index.is_full\n\n index = PeriodIndex([2005, 2005, 2006], freq=\"A\")\n assert index.is_full\n\n index = PeriodIndex([2006, 2005, 2005], freq=\"A\")\n with pytest.raises(ValueError, match=\"Index is not monotonic\"):\n index.is_full\n\n assert index[:0].is_full\n\n def test_with_multi_index(self):\n # #1705\n index = date_range(\"1/1/2012\", periods=4, freq=\"12H\")\n index_as_arrays = [index.to_period(freq=\"D\"), index.hour]\n\n s = Series([0, 1, 2, 3], index_as_arrays)\n\n assert isinstance(s.index.levels[0], PeriodIndex)\n\n assert isinstance(s.index.values[0][0], Period)\n\n def test_convert_array_of_periods(self):\n rng = period_range(\"1/1/2000\", periods=20, freq=\"D\")\n periods = list(rng)\n\n result = Index(periods)\n assert isinstance(result, PeriodIndex)\n\n def test_append_concat(self):\n # #1815\n d1 = date_range(\"12/31/1990\", \"12/31/1999\", freq=\"A-DEC\")\n d2 = date_range(\"12/31/2000\", \"12/31/2009\", freq=\"A-DEC\")\n\n s1 = Series(np.random.randn(10), d1)\n s2 = Series(np.random.randn(10), d2)\n\n s1 = s1.to_period()\n s2 = s2.to_period()\n\n # 
drops index\n result = pd.concat([s1, s2])\n assert isinstance(result.index, PeriodIndex)\n assert result.index[0] == s1.index[0]\n\n def test_pickle_freq(self):\n # GH2891\n prng = period_range(\"1/1/2011\", \"1/1/2012\", freq=\"M\")\n new_prng = tm.round_trip_pickle(prng)\n assert new_prng.freq == offsets.MonthEnd()\n assert new_prng.freqstr == \"M\"\n\n def test_map(self):\n # test_map_dictlike generally tests\n\n index = PeriodIndex([2005, 2007, 2009], freq=\"A\")\n result = index.map(lambda x: x.ordinal)\n exp = Index([x.ordinal for x in index])\n tm.assert_index_equal(result, exp)\n\n def test_insert(self):\n # GH 18295 (test missing)\n expected = PeriodIndex([\"2017Q1\", NaT, \"2017Q2\", \"2017Q3\", \"2017Q4\"], freq=\"Q\")\n for na in (np.nan, NaT, None):\n result = period_range(\"2017Q1\", periods=4, freq=\"Q\").insert(1, na)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"msg, key\",\n [\n (r\"Period\\('2019', 'A-DEC'\\), 'foo', 'bar'\", (Period(2019), \"foo\", \"bar\")),\n (r\"Period\\('2019', 'A-DEC'\\), 'y1', 'bar'\", (Period(2019), \"y1\", \"bar\")),\n (r\"Period\\('2019', 'A-DEC'\\), 'foo', 'z1'\", (Period(2019), \"foo\", \"z1\")),\n (\n r\"Period\\('2018', 'A-DEC'\\), Period\\('2016', 'A-DEC'\\), 'bar'\",\n (Period(2018), Period(2016), \"bar\"),\n ),\n (r\"Period\\('2018', 'A-DEC'\\), 'foo', 'y1'\", (Period(2018), \"foo\", \"y1\")),\n (\n r\"Period\\('2017', 'A-DEC'\\), 'foo', Period\\('2015', 'A-DEC'\\)\",\n (Period(2017), \"foo\", Period(2015)),\n ),\n (r\"Period\\('2017', 'A-DEC'\\), 'z1', 'bar'\", (Period(2017), \"z1\", \"bar\")),\n ],\n )\n def test_contains_raise_error_if_period_index_is_in_multi_index(self, msg, key):\n # issue 20684\n \"\"\"\n parse_time_string return parameter if type not matched.\n PeriodIndex.get_loc takes returned value from parse_time_string as a tuple.\n If first argument is Period and a tuple has 3 items,\n process go on not raise exception\n \"\"\"\n df = DataFrame(\n {\n \"A\": [Period(2019), \"x1\", \"x2\"],\n \"B\": [Period(2018), Period(2016), \"y1\"],\n \"C\": [Period(2017), \"z1\", Period(2015)],\n \"V1\": [1, 2, 3],\n \"V2\": [10, 20, 30],\n }\n ).set_index([\"A\", \"B\", \"C\"])\n with pytest.raises(KeyError, match=msg):\n df.loc[key]\n\n def test_format_empty(self):\n # GH35712\n empty_idx = self._holder([], freq=\"A\")\n assert empty_idx.format() == []\n assert empty_idx.format(name=True) == [\"\"]\n\n\ndef test_maybe_convert_timedelta():\n pi = PeriodIndex([\"2000\", \"2001\"], freq=\"D\")\n offset = offsets.Day(2)\n assert pi._maybe_convert_timedelta(offset) == 2\n assert pi._maybe_convert_timedelta(2) == 2\n\n offset = offsets.BusinessDay()\n msg = r\"Input has different freq=B from PeriodIndex\\(freq=D\\)\"\n with pytest.raises(ValueError, match=msg):\n pi._maybe_convert_timedelta(offset)\n\n\ndef test_is_monotonic_with_nat():\n # GH#31437\n # PeriodIndex.is_monotonic should behave analogously to DatetimeIndex,\n # in particular never be monotonic when we have NaT\n dti = date_range(\"2016-01-01\", periods=3)\n pi = dti.to_period(\"D\")\n tdi = Index(dti.view(\"timedelta64[ns]\"))\n\n for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]:\n if isinstance(obj, Index):\n # i.e. 
not Engines\n assert obj.is_monotonic\n assert obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n dti1 = dti.insert(0, NaT)\n pi1 = dti1.to_period(\"D\")\n tdi1 = Index(dti1.view(\"timedelta64[ns]\"))\n\n for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert not obj.is_monotonic\n assert not obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n dti2 = dti.insert(3, NaT)\n pi2 = dti2.to_period(\"H\")\n tdi2 = Index(dti2.view(\"timedelta64[ns]\"))\n\n for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert not obj.is_monotonic\n assert not obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n\[email protected](\"array\", [True, False])\ndef test_dunder_array(array):\n obj = PeriodIndex([\"2000-01-01\", \"2001-01-01\"], freq=\"D\")\n if array:\n obj = obj._data\n\n expected = np.array([obj[0], obj[1]], dtype=object)\n result = np.array(obj)\n tm.assert_numpy_array_equal(result, expected)\n\n result = np.asarray(obj)\n tm.assert_numpy_array_equal(result, expected)\n\n expected = obj.asi8\n for dtype in [\"i8\", \"int64\", np.int64]:\n result = np.array(obj, dtype=dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n result = np.asarray(obj, dtype=dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n for dtype in [\"float64\", \"int32\", \"uint64\"]:\n msg = \"argument must be\"\n with pytest.raises(TypeError, match=msg):\n np.array(obj, dtype=dtype)\n with pytest.raises(TypeError, match=msg):\n np.array(obj, dtype=getattr(np, dtype))\n",
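The entry above is the pandas PeriodIndex test module. As a brief orientation to the behaviours those tests exercise — an editorial sketch, not part of the dataset row, and assuming a pandas release contemporary with the tests (roughly the 1.x line):

import numpy as np
import pandas as pd

# period_range requires exactly two of start / end / periods.
pi = pd.period_range(start="2013-01-01", periods=5, freq="D")
assert len(pi) == 5

# NaT entries are stored as the minimum int64 ordinal in .asi8,
# matching the -9223372036854775808 values asserted in the tests.
idx = pd.PeriodIndex(["2011-01", pd.NaT], freq="M")
assert idx.asi8[1] == np.iinfo(np.int64).min

# .values holds Period objects, while .asi8 holds the integer ordinals.
assert isinstance(idx.values[0], pd.Period)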
"from datetime import datetime, timedelta\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import iNaT\nimport pandas._libs.index as _index\n\nimport pandas as pd\nfrom pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range\nimport pandas._testing as tm\n\n\"\"\"\nAlso test support for datetime64[ns] in Series / DataFrame\n\"\"\"\n\n\ndef test_fancy_getitem():\n dti = date_range(\n freq=\"WOM-1FRI\", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)\n )\n\n s = Series(np.arange(len(dti)), index=dti)\n\n assert s[48] == 48\n assert s[\"1/2/2009\"] == 48\n assert s[\"2009-1-2\"] == 48\n assert s[datetime(2009, 1, 2)] == 48\n assert s[Timestamp(datetime(2009, 1, 2))] == 48\n with pytest.raises(KeyError, match=r\"^'2009-1-3'$\"):\n s[\"2009-1-3\"]\n tm.assert_series_equal(\n s[\"3/6/2009\":\"2009-06-05\"], s[datetime(2009, 3, 6) : datetime(2009, 6, 5)]\n )\n\n\ndef test_fancy_setitem():\n dti = date_range(\n freq=\"WOM-1FRI\", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)\n )\n\n s = Series(np.arange(len(dti)), index=dti)\n s[48] = -1\n assert s[48] == -1\n s[\"1/2/2009\"] = -2\n assert s[48] == -2\n s[\"1/2/2009\":\"2009-06-05\"] = -3\n assert (s[48:54] == -3).all()\n\n\ndef test_dti_reset_index_round_trip():\n dti = date_range(start=\"1/1/2001\", end=\"6/1/2001\", freq=\"D\")._with_freq(None)\n d1 = DataFrame({\"v\": np.random.rand(len(dti))}, index=dti)\n d2 = d1.reset_index()\n assert d2.dtypes[0] == np.dtype(\"M8[ns]\")\n d3 = d2.set_index(\"index\")\n tm.assert_frame_equal(d1, d3, check_names=False)\n\n # #2329\n stamp = datetime(2012, 11, 22)\n df = DataFrame([[stamp, 12.1]], columns=[\"Date\", \"Value\"])\n df = df.set_index(\"Date\")\n\n assert df.index[0] == stamp\n assert df.reset_index()[\"Date\"][0] == stamp\n\n\ndef test_series_set_value():\n # #1561\n\n dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]\n index = DatetimeIndex(dates)\n\n s = Series(dtype=object)\n s._set_value(dates[0], 1.0)\n s._set_value(dates[1], np.nan)\n\n expected = Series([1.0, np.nan], index=index)\n\n tm.assert_series_equal(s, expected)\n\n\[email protected]\ndef test_slice_locs_indexerror():\n times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10) for i in range(100000)]\n s = Series(range(100000), times)\n s.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]\n\n\ndef test_slicing_datetimes():\n # GH 7523\n\n # unique\n df = DataFrame(\n np.arange(4.0, dtype=\"float64\"),\n index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 3, 4]],\n )\n result = df.loc[datetime(2001, 1, 1, 10) :]\n tm.assert_frame_equal(result, df)\n result = df.loc[: datetime(2001, 1, 4, 10)]\n tm.assert_frame_equal(result, df)\n result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]\n tm.assert_frame_equal(result, df)\n\n result = df.loc[datetime(2001, 1, 1, 11) :]\n expected = df.iloc[1:]\n tm.assert_frame_equal(result, expected)\n result = df.loc[\"20010101 11\":]\n tm.assert_frame_equal(result, expected)\n\n # duplicates\n df = pd.DataFrame(\n np.arange(5.0, dtype=\"float64\"),\n index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 2, 3, 4]],\n )\n\n result = df.loc[datetime(2001, 1, 1, 10) :]\n tm.assert_frame_equal(result, df)\n result = df.loc[: datetime(2001, 1, 4, 10)]\n tm.assert_frame_equal(result, df)\n result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]\n tm.assert_frame_equal(result, df)\n\n result = df.loc[datetime(2001, 1, 1, 11) :]\n expected = df.iloc[1:]\n tm.assert_frame_equal(result, expected)\n result = df.loc[\"20010101 11\":]\n 
tm.assert_frame_equal(result, expected)\n\n\ndef test_getitem_setitem_datetime_tz_pytz():\n from pytz import timezone as tz\n\n N = 50\n # testing with timezone, GH #2785\n rng = date_range(\"1/1/1990\", periods=N, freq=\"H\", tz=\"US/Eastern\")\n ts = Series(np.random.randn(N), index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result[\"1990-01-01 09:00:00+00:00\"] = 0\n result[\"1990-01-01 09:00:00+00:00\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts.copy()\n result[\"1990-01-01 03:00:00-06:00\"] = 0\n result[\"1990-01-01 03:00:00-06:00\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tz(\"UTC\"))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tz(\"UTC\"))] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts.copy()\n\n # comparison dates with datetime MUST be localized!\n date = tz(\"US/Central\").localize(datetime(1990, 1, 1, 3))\n result[date] = 0\n result[date] = ts[4]\n tm.assert_series_equal(result, ts)\n\n\ndef test_getitem_setitem_datetime_tz_dateutil():\n from dateutil.tz import tzutc\n\n from pandas._libs.tslibs.timezones import dateutil_gettz as gettz\n\n tz = (\n lambda x: tzutc() if x == \"UTC\" else gettz(x)\n ) # handle special case for utc in dateutil\n\n N = 50\n\n # testing with timezone, GH #2785\n rng = date_range(\"1/1/1990\", periods=N, freq=\"H\", tz=\"America/New_York\")\n ts = Series(np.random.randn(N), index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result[\"1990-01-01 09:00:00+00:00\"] = 0\n result[\"1990-01-01 09:00:00+00:00\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts.copy()\n result[\"1990-01-01 03:00:00-06:00\"] = 0\n result[\"1990-01-01 03:00:00-06:00\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tz(\"UTC\"))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tz(\"UTC\"))] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 3, tzinfo=tz(\"America/Chicago\"))] = 0\n result[datetime(1990, 1, 1, 3, tzinfo=tz(\"America/Chicago\"))] = ts[4]\n tm.assert_series_equal(result, ts)\n\n\ndef test_getitem_setitem_datetimeindex():\n N = 50\n # testing with timezone, GH #2785\n rng = date_range(\"1/1/1990\", periods=N, freq=\"H\", tz=\"US/Eastern\")\n ts = Series(np.random.randn(N), index=rng)\n\n result = ts[\"1990-01-01 04:00:00\"]\n expected = ts[4]\n assert result == expected\n\n result = ts.copy()\n result[\"1990-01-01 04:00:00\"] = 0\n result[\"1990-01-01 04:00:00\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"] = 0\n result[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"] = ts[4:8]\n tm.assert_series_equal(result, ts)\n\n lb = \"1990-01-01 04:00:00\"\n rb = \"1990-01-01 07:00:00\"\n # GH#18435 strings get a pass from tzawareness compat\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n lb = \"1990-01-01 04:00:00-0500\"\n rb = \"1990-01-01 07:00:00-0500\"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n # repeat all the above with naive datetimes\n result = ts[datetime(1990, 1, 1, 4)]\n expected = 
ts[4]\n assert result == expected\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 4)] = 0\n result[datetime(1990, 1, 1, 4)] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = 0\n result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = ts[4:8]\n tm.assert_series_equal(result, ts)\n\n lb = datetime(1990, 1, 1, 4)\n rb = datetime(1990, 1, 1, 7)\n msg = \"Cannot compare tz-naive and tz-aware datetime-like objects\"\n with pytest.raises(TypeError, match=msg):\n # tznaive vs tzaware comparison is invalid\n # see GH#18376, GH#18162\n ts[(ts.index >= lb) & (ts.index <= rb)]\n\n lb = pd.Timestamp(datetime(1990, 1, 1, 4)).tz_localize(rng.tzinfo)\n rb = pd.Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo)\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts[ts.index[4]]\n expected = ts[4]\n assert result == expected\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result.iloc[4:8] = ts.iloc[4:8]\n tm.assert_series_equal(result, ts)\n\n # also test partial date slicing\n result = ts[\"1990-01-02\"]\n expected = ts[24:48]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-02\"] = 0\n result[\"1990-01-02\"] = ts[24:48]\n tm.assert_series_equal(result, ts)\n\n\ndef test_getitem_setitem_periodindex():\n from pandas import period_range\n\n N = 50\n rng = period_range(\"1/1/1990\", periods=N, freq=\"H\")\n ts = Series(np.random.randn(N), index=rng)\n\n result = ts[\"1990-01-01 04\"]\n expected = ts[4]\n assert result == expected\n\n result = ts.copy()\n result[\"1990-01-01 04\"] = 0\n result[\"1990-01-01 04\"] = ts[4]\n tm.assert_series_equal(result, ts)\n\n result = ts[\"1990-01-01 04\":\"1990-01-01 07\"]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04\":\"1990-01-01 07\"] = 0\n result[\"1990-01-01 04\":\"1990-01-01 07\"] = ts[4:8]\n tm.assert_series_equal(result, ts)\n\n lb = \"1990-01-01 04\"\n rb = \"1990-01-01 07\"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n # GH 2782\n result = ts[ts.index[4]]\n expected = ts[4]\n assert result == expected\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result.iloc[4:8] = ts.iloc[4:8]\n tm.assert_series_equal(result, ts)\n\n\ndef test_datetime_indexing():\n\n index = date_range(\"1/1/2000\", \"1/7/2000\")\n index = index.repeat(3)\n\n s = Series(len(index), index=index)\n stamp = Timestamp(\"1/8/2000\")\n\n with pytest.raises(KeyError, match=re.escape(repr(stamp))):\n s[stamp]\n s[stamp] = 0\n assert s[stamp] == 0\n\n # not monotonic\n s = Series(len(index), index=index)\n s = s[::-1]\n\n with pytest.raises(KeyError, match=re.escape(repr(stamp))):\n s[stamp]\n s[stamp] = 0\n assert s[stamp] == 0\n\n\n\"\"\"\ntest duplicates in time series\n\"\"\"\n\n\[email protected]\ndef dups():\n dates = [\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 4),\n datetime(2000, 1, 
4),\n datetime(2000, 1, 5),\n ]\n\n return Series(np.random.randn(len(dates)), index=dates)\n\n\ndef test_constructor(dups):\n assert isinstance(dups, Series)\n assert isinstance(dups.index, DatetimeIndex)\n\n\ndef test_is_unique_monotonic(dups):\n assert not dups.index.is_unique\n\n\ndef test_index_unique(dups):\n uniques = dups.index.unique()\n expected = DatetimeIndex(\n [\n datetime(2000, 1, 2),\n datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n ]\n )\n assert uniques.dtype == \"M8[ns]\" # sanity\n tm.assert_index_equal(uniques, expected)\n assert dups.index.nunique() == 4\n\n # #2563\n assert isinstance(uniques, DatetimeIndex)\n\n dups_local = dups.index.tz_localize(\"US/Eastern\")\n dups_local.name = \"foo\"\n result = dups_local.unique()\n expected = DatetimeIndex(expected, name=\"foo\")\n expected = expected.tz_localize(\"US/Eastern\")\n assert result.tz is not None\n assert result.name == \"foo\"\n tm.assert_index_equal(result, expected)\n\n # NaT, note this is excluded\n arr = [1370745748 + t for t in range(20)] + [iNaT]\n idx = DatetimeIndex(arr * 3)\n tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))\n assert idx.nunique() == 20\n assert idx.nunique(dropna=False) == 21\n\n arr = [\n Timestamp(\"2013-06-09 02:42:28\") + timedelta(seconds=t) for t in range(20)\n ] + [NaT]\n idx = DatetimeIndex(arr * 3)\n tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))\n assert idx.nunique() == 20\n assert idx.nunique(dropna=False) == 21\n\n\ndef test_duplicate_dates_indexing(dups):\n ts = dups\n\n uniques = ts.index.unique()\n for date in uniques:\n result = ts[date]\n\n mask = ts.index == date\n total = (ts.index == date).sum()\n expected = ts[mask]\n if total > 1:\n tm.assert_series_equal(result, expected)\n else:\n tm.assert_almost_equal(result, expected[0])\n\n cp = ts.copy()\n cp[date] = 0\n expected = Series(np.where(mask, 0, ts), index=ts.index)\n tm.assert_series_equal(cp, expected)\n\n key = datetime(2000, 1, 6)\n with pytest.raises(KeyError, match=re.escape(repr(key))):\n ts[key]\n\n # new index\n ts[datetime(2000, 1, 6)] = 0\n assert ts[datetime(2000, 1, 6)] == 0\n\n\ndef test_range_slice():\n idx = DatetimeIndex([\"1/1/2000\", \"1/2/2000\", \"1/2/2000\", \"1/3/2000\", \"1/4/2000\"])\n\n ts = Series(np.random.randn(len(idx)), index=idx)\n\n result = ts[\"1/2/2000\":]\n expected = ts[1:]\n tm.assert_series_equal(result, expected)\n\n result = ts[\"1/2/2000\":\"1/3/2000\"]\n expected = ts[1:4]\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_average_dup_values(dups):\n result = dups.groupby(level=0).mean()\n expected = dups.groupby(dups.index).mean()\n tm.assert_series_equal(result, expected)\n\n\ndef test_indexing_over_size_cutoff():\n import datetime\n\n # #1821\n\n old_cutoff = _index._SIZE_CUTOFF\n try:\n _index._SIZE_CUTOFF = 1000\n\n # create large list of non periodic datetime\n dates = []\n sec = datetime.timedelta(seconds=1)\n half_sec = datetime.timedelta(microseconds=500000)\n d = datetime.datetime(2011, 12, 5, 20, 30)\n n = 1100\n for i in range(n):\n dates.append(d)\n dates.append(d + sec)\n dates.append(d + sec + half_sec)\n dates.append(d + sec + sec + half_sec)\n d += 3 * sec\n\n # duplicate some values in the list\n duplicate_positions = np.random.randint(0, len(dates) - 1, 20)\n for p in duplicate_positions:\n dates[p + 1] = dates[p]\n\n df = DataFrame(\n np.random.randn(len(dates), 4), index=dates, columns=list(\"ABCD\")\n )\n\n pos = n * 3\n timestamp = df.index[pos]\n assert timestamp in df.index\n\n # it works!\n 
df.loc[timestamp]\n assert len(df.loc[[timestamp]]) > 0\n finally:\n _index._SIZE_CUTOFF = old_cutoff\n\n\ndef test_indexing_over_size_cutoff_period_index(monkeypatch):\n # GH 27136\n\n monkeypatch.setattr(_index, \"_SIZE_CUTOFF\", 1000)\n\n n = 1100\n idx = pd.period_range(\"1/1/2000\", freq=\"T\", periods=n)\n assert idx._engine.over_size_threshold\n\n s = pd.Series(np.random.randn(len(idx)), index=idx)\n\n pos = n - 1\n timestamp = idx[pos]\n assert timestamp in s.index\n\n # it works!\n s[timestamp]\n assert len(s.loc[[timestamp]]) > 0\n\n\ndef test_indexing_unordered():\n # GH 2437\n rng = date_range(start=\"2011-01-01\", end=\"2011-01-15\")\n ts = Series(np.random.rand(len(rng)), index=rng)\n ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])\n\n for t in ts.index:\n\n expected = ts[t]\n result = ts2[t]\n assert expected == result\n\n # GH 3448 (ranges)\n def compare(slobj):\n result = ts2[slobj].copy()\n result = result.sort_index()\n expected = ts[slobj]\n expected.index = expected.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n\n compare(slice(\"2011-01-01\", \"2011-01-15\"))\n compare(slice(\"2010-12-30\", \"2011-01-15\"))\n compare(slice(\"2011-01-01\", \"2011-01-16\"))\n\n # partial ranges\n compare(slice(\"2011-01-01\", \"2011-01-6\"))\n compare(slice(\"2011-01-06\", \"2011-01-8\"))\n compare(slice(\"2011-01-06\", \"2011-01-12\"))\n\n # single values\n result = ts2[\"2011\"].sort_index()\n expected = ts[\"2011\"]\n expected.index = expected.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n\n # diff freq\n rng = date_range(datetime(2005, 1, 1), periods=20, freq=\"M\")\n ts = Series(np.arange(len(rng)), index=rng)\n ts = ts.take(np.random.permutation(20))\n\n result = ts[\"2005\"]\n for t in result.index:\n assert t.year == 2005\n\n\ndef test_indexing():\n idx = date_range(\"2001-1-1\", periods=20, freq=\"M\")\n ts = Series(np.random.rand(len(idx)), index=idx)\n\n # getting\n\n # GH 3070, make sure semantics work on Series/Frame\n expected = ts[\"2001\"]\n expected.name = \"A\"\n\n df = DataFrame(dict(A=ts))\n result = df[\"2001\"][\"A\"]\n tm.assert_series_equal(expected, result)\n\n # setting\n ts[\"2001\"] = 1\n expected = ts[\"2001\"]\n expected.name = \"A\"\n\n df.loc[\"2001\", \"A\"] = 1\n\n result = df[\"2001\"][\"A\"]\n tm.assert_series_equal(expected, result)\n\n # GH3546 (not including times on the last day)\n idx = date_range(start=\"2013-05-31 00:00\", end=\"2013-05-31 23:00\", freq=\"H\")\n ts = Series(range(len(idx)), index=idx)\n expected = ts[\"2013-05\"]\n tm.assert_series_equal(expected, ts)\n\n idx = date_range(start=\"2013-05-31 00:00\", end=\"2013-05-31 23:59\", freq=\"S\")\n ts = Series(range(len(idx)), index=idx)\n expected = ts[\"2013-05\"]\n tm.assert_series_equal(expected, ts)\n\n idx = [\n Timestamp(\"2013-05-31 00:00\"),\n Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999)),\n ]\n ts = Series(range(len(idx)), index=idx)\n expected = ts[\"2013\"]\n tm.assert_series_equal(expected, ts)\n\n # GH14826, indexing with a seconds resolution string / datetime object\n df = DataFrame(\n np.random.rand(5, 5),\n columns=[\"open\", \"high\", \"low\", \"close\", \"volume\"],\n index=date_range(\"2012-01-02 18:01:00\", periods=5, tz=\"US/Central\", freq=\"s\"),\n )\n expected = df.loc[[df.index[2]]]\n\n # this is a single date, so will raise\n with pytest.raises(KeyError, match=r\"^'2012-01-02 18:01:02'$\"):\n df[\"2012-01-02 18:01:02\"]\n msg = r\"Timestamp\\('2012-01-02 18:01:02-0600', tz='US/Central', freq='S'\\)\"\n with 
pytest.raises(KeyError, match=msg):\n df[df.index[2]]\n\n\n\"\"\"\ntest NaT support\n\"\"\"\n\n\ndef test_setitem_tuple_with_datetimetz():\n # GH 20441\n arr = date_range(\"2017\", periods=4, tz=\"US/Eastern\")\n index = [(0, 1), (0, 2), (0, 3), (0, 4)]\n result = Series(arr, index=index)\n expected = result.copy()\n result[(0, 1)] = np.nan\n expected.iloc[0] = np.nan\n tm.assert_series_equal(result, expected)\n",
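The entry above collects datetime-label indexing tests for Series and DataFrame. A minimal sketch of the slicing behaviour they rely on — again an editorial aside, not part of the dataset row, and assuming a pandas 1.x-era release as used by the tests:

import numpy as np
import pandas as pd

rng = pd.date_range("1990-01-01", periods=50, freq="H", tz="US/Eastern")
ts = pd.Series(np.arange(50), index=rng)

# String label slices are inclusive on both ends: hours 04..07 give 4 rows,
# the same ts[4:8] window the tests compare against.
sub = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
assert len(sub) == 4

# Partial-string indexing with a bare date selects the whole day (24 hourly rows).
assert len(ts["1990-01-01"]) == 24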
"import collections\nfrom datetime import timedelta\nimport functools\nimport gc\nimport json\nimport operator\nimport pickle\nimport re\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n FrozenSet,\n Hashable,\n List,\n Mapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n Union,\n)\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom pandas._config import config\n\nfrom pandas._libs import lib\nfrom pandas._libs.tslibs import Tick, Timestamp, to_offset\nfrom pandas._typing import (\n Axis,\n FilePathOrBuffer,\n FrameOrSeries,\n JSONSerializable,\n Label,\n Level,\n Renamer,\n TimedeltaConvertibleTypes,\n TimestampConvertibleTypes,\n ValueKeyFunc,\n)\nfrom pandas.compat import set_function_name\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError, InvalidIndexError\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n doc,\n rewrite_axis_style_signature,\n)\nfrom pandas.util._validators import (\n validate_bool_kwarg,\n validate_fillna_kwargs,\n validate_percentile,\n)\n\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_object,\n ensure_str,\n is_bool,\n is_bool_dtype,\n is_datetime64_any_dtype,\n is_datetime64tz_dtype,\n is_dict_like,\n is_extension_array_dtype,\n is_float,\n is_list_like,\n is_number,\n is_numeric_dtype,\n is_object_dtype,\n is_re_compilable,\n is_scalar,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCSeries\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import isna, notna\n\nimport pandas as pd\nfrom pandas.core import missing, nanops\nimport pandas.core.algorithms as algos\nfrom pandas.core.base import PandasObject, SelectionMixin\nimport pandas.core.common as com\nfrom pandas.core.construction import create_series_with_explicit_dtype\nfrom pandas.core.indexes.api import Index, MultiIndex, RangeIndex, ensure_index\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.period import Period, PeriodIndex\nimport pandas.core.indexing as indexing\nfrom pandas.core.internals import BlockManager\nfrom pandas.core.missing import find_valid_index\nfrom pandas.core.ops import _align_method_FRAME\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.formats import format as fmt\nfrom pandas.io.formats.format import DataFrameFormatter, format_percentiles\nfrom pandas.io.formats.printing import pprint_thing\n\nif TYPE_CHECKING:\n from pandas.core.resample import Resampler\n from pandas.core.series import Series # noqa: F401\n\n# goal is to be able to define the docs close to function, while still being\n# able to share\n_shared_doc_kwargs = dict(\n axes=\"keywords for axes\",\n klass=\"Series/DataFrame\",\n axes_single_arg=\"int or labels for object\",\n args_transpose=\"axes to permute (int or label for object)\",\n optional_by=\"\"\"\n by : str or list of str\n Name or list of names to sort by\"\"\",\n)\n\n\ndef _single_replace(self, to_replace, method, inplace, limit):\n \"\"\"\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n \"\"\"\n if self.ndim != 1:\n raise TypeError(\n f\"cannot replace {to_replace} with method {method} on a \"\n f\"{type(self).__name__}\"\n )\n\n orig_dtype = self.dtype\n result = self if inplace else self.copy()\n fill_f = 
missing.get_fill_func(method)\n\n mask = missing.mask_missing(result.values, to_replace)\n values = fill_f(result.values, limit=limit, mask=mask)\n\n if values.dtype == orig_dtype and inplace:\n return\n\n result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)\n\n if inplace:\n self._update_inplace(result)\n return\n\n return result\n\n\nbool_t = bool # Need alias because NDFrame has def bool:\n\n\nclass NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):\n \"\"\"\n N-dimensional analogue of DataFrame. Store multi-dimensional in a\n size-mutable, labeled data structure\n\n Parameters\n ----------\n data : BlockManager\n axes : list\n copy : bool, default False\n \"\"\"\n\n _internal_names: List[str] = [\n \"_mgr\",\n \"_cacher\",\n \"_item_cache\",\n \"_cache\",\n \"_is_copy\",\n \"_subtyp\",\n \"_name\",\n \"_index\",\n \"_default_kind\",\n \"_default_fill_value\",\n \"_metadata\",\n \"__array_struct__\",\n \"__array_interface__\",\n ]\n _internal_names_set: Set[str] = set(_internal_names)\n _accessors: Set[str] = set()\n _deprecations: FrozenSet[str] = frozenset([\"get_values\", \"tshift\"])\n _metadata: List[str] = []\n _is_copy = None\n _mgr: BlockManager\n _attrs: Dict[Optional[Hashable], Any]\n _typ: str\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data: BlockManager,\n copy: bool = False,\n attrs: Optional[Mapping[Optional[Hashable], Any]] = None,\n ):\n # copy kwarg is retained for mypy compat, is not used\n\n object.__setattr__(self, \"_is_copy\", None)\n object.__setattr__(self, \"_mgr\", data)\n object.__setattr__(self, \"_item_cache\", {})\n if attrs is None:\n attrs = {}\n else:\n attrs = dict(attrs)\n object.__setattr__(self, \"_attrs\", attrs)\n\n @classmethod\n def _init_mgr(cls, mgr, axes, dtype=None, copy: bool = False) -> BlockManager:\n \"\"\" passed a manager and a axes dict \"\"\"\n for a, axe in axes.items():\n if axe is not None:\n axe = ensure_index(axe)\n bm_axis = cls._get_block_manager_axis(a)\n mgr = mgr.reindex_axis(axe, axis=bm_axis, copy=False)\n\n # make a copy if explicitly requested\n if copy:\n mgr = mgr.copy()\n if dtype is not None:\n # avoid further copies if we can\n if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:\n mgr = mgr.astype(dtype=dtype)\n return mgr\n\n # ----------------------------------------------------------------------\n\n @property\n def attrs(self) -> Dict[Optional[Hashable], Any]:\n \"\"\"\n Dictionary of global attributes on this object.\n\n .. 
warning::\n\n attrs is experimental and may change without warning.\n \"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:\n self._attrs = dict(value)\n\n @classmethod\n def _validate_dtype(cls, dtype):\n \"\"\" validate the passed dtype \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # a compound dtype\n if dtype.kind == \"V\":\n raise NotImplementedError(\n \"compound dtypes are not implemented \"\n f\"in the {cls.__name__} constructor\"\n )\n\n return dtype\n\n # ----------------------------------------------------------------------\n # Construction\n\n @property\n def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:\n \"\"\"\n Used when a manipulation result has the same dimensions as the\n original.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_sliced(self):\n \"\"\"\n Used when a manipulation result has one lower dimension(s) as the\n original, such as DataFrame single columns slicing.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_expanddim(self):\n \"\"\"\n Used when a manipulation result has one higher dimension as the\n original, such as Series.to_frame()\n \"\"\"\n raise NotImplementedError\n\n # ----------------------------------------------------------------------\n # Internals\n\n @property\n def _data(self):\n # GH#33054 retained because some downstream packages uses this,\n # e.g. fastparquet\n return self._mgr\n\n # ----------------------------------------------------------------------\n # Axis\n _stat_axis_number = 0\n _stat_axis_name = \"index\"\n _ix = None\n _AXIS_ORDERS: List[str]\n _AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {0: 0, \"index\": 0, \"rows\": 0}\n _AXIS_REVERSED: bool\n _info_axis_number: int\n _info_axis_name: str\n _AXIS_LEN: int\n\n @property\n def _AXIS_NUMBERS(self) -> Dict[str, int]:\n \"\"\".. deprecated:: 1.1.0\"\"\"\n warnings.warn(\n \"_AXIS_NUMBERS has been deprecated.\", FutureWarning, stacklevel=3,\n )\n return {\"index\": 0}\n\n @property\n def _AXIS_NAMES(self) -> Dict[int, str]:\n \"\"\".. 
deprecated:: 1.1.0\"\"\"\n warnings.warn(\n \"_AXIS_NAMES has been deprecated.\", FutureWarning, stacklevel=3,\n )\n return {0: \"index\"}\n\n def _construct_axes_dict(self, axes=None, **kwargs):\n \"\"\"Return an axes dictionary for myself.\"\"\"\n d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}\n d.update(kwargs)\n return d\n\n @classmethod\n def _construct_axes_from_arguments(\n cls, args, kwargs, require_all: bool = False, sentinel=None\n ):\n \"\"\"\n Construct and returns axes if supplied in args/kwargs.\n\n If require_all, raise if all axis arguments are not supplied\n return a tuple of (axes, kwargs).\n\n sentinel specifies the default parameter when an axis is not\n supplied; useful to distinguish when a user explicitly passes None\n in scenarios where None has special meaning.\n \"\"\"\n # construct the args\n args = list(args)\n for a in cls._AXIS_ORDERS:\n\n # look for a argument by position\n if a not in kwargs:\n try:\n kwargs[a] = args.pop(0)\n except IndexError as err:\n if require_all:\n raise TypeError(\n \"not enough/duplicate arguments specified!\"\n ) from err\n\n axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}\n return axes, kwargs\n\n @classmethod\n def _get_axis_number(cls, axis: Axis) -> int:\n try:\n return cls._AXIS_TO_AXIS_NUMBER[axis]\n except KeyError:\n raise ValueError(f\"No axis named {axis} for object type {cls.__name__}\")\n\n @classmethod\n def _get_axis_name(cls, axis: Axis) -> str:\n axis_number = cls._get_axis_number(axis)\n return cls._AXIS_ORDERS[axis_number]\n\n def _get_axis(self, axis: Axis) -> Index:\n axis_number = self._get_axis_number(axis)\n assert axis_number in {0, 1}\n return self.index if axis_number == 0 else self.columns\n\n @classmethod\n def _get_block_manager_axis(cls, axis: Axis) -> int:\n \"\"\"Map the axis to the block_manager axis.\"\"\"\n axis = cls._get_axis_number(axis)\n if cls._AXIS_REVERSED:\n m = cls._AXIS_LEN - 1\n return m - axis\n return axis\n\n def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:\n # index or columns\n axis_index = getattr(self, axis)\n d = dict()\n prefix = axis[0]\n\n for i, name in enumerate(axis_index.names):\n if name is not None:\n key = level = name\n else:\n # prefix with 'i' or 'c' depending on the input axis\n # e.g., you must do ilevel_0 for the 0th level of an unnamed\n # multiiindex\n key = f\"{prefix}level_{i}\"\n level = i\n\n level_values = axis_index.get_level_values(level)\n s = level_values.to_series()\n s.index = axis_index\n d[key] = s\n\n # put the index/columns itself in the dict\n if isinstance(axis_index, MultiIndex):\n dindex = axis_index\n else:\n dindex = axis_index.to_series()\n\n d[axis] = dindex\n return d\n\n def _get_index_resolvers(self) -> Dict[str, ABCSeries]:\n from pandas.core.computation.parsing import clean_column_name\n\n d: Dict[str, ABCSeries] = {}\n for axis_name in self._AXIS_ORDERS:\n d.update(self._get_axis_resolvers(axis_name))\n\n return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}\n\n def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:\n \"\"\"\n Return the special character free column resolvers of a dataframe.\n\n Column names with special characters are 'cleaned up' so that they can\n be referred to by backtick quoting.\n Used in :meth:`DataFrame.eval`.\n \"\"\"\n from pandas.core.computation.parsing import clean_column_name\n\n if isinstance(self, ABCSeries):\n return {clean_column_name(self.name): self}\n\n return {\n clean_column_name(k): v for k, v in 
self.items() if not isinstance(k, int)\n }\n\n @property\n def _info_axis(self) -> Index:\n return getattr(self, self._info_axis_name)\n\n @property\n def _stat_axis(self) -> Index:\n return getattr(self, self._stat_axis_name)\n\n @property\n def shape(self) -> Tuple[int, ...]:\n \"\"\"\n Return a tuple of axis dimensions\n \"\"\"\n return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)\n\n @property\n def axes(self) -> List[Index]:\n \"\"\"\n Return index label(s) of the internal NDFrame\n \"\"\"\n # we do it this way because if we have reversed axes, then\n # the block manager shows then reversed\n return [self._get_axis(a) for a in self._AXIS_ORDERS]\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of axes / array dimensions.\n\n Return 1 if Series. Otherwise return 2 if DataFrame.\n\n See Also\n --------\n ndarray.ndim : Number of array dimensions.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.ndim\n 1\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.ndim\n 2\n \"\"\"\n return self._mgr.ndim\n\n @property\n def size(self) -> int:\n \"\"\"\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. Otherwise return the number of\n rows times number of columns if DataFrame.\n\n See Also\n --------\n ndarray.size : Number of elements in the array.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.size\n 3\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.size\n 4\n \"\"\"\n return np.prod(self.shape)\n\n @property\n def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n @property\n def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n \"\"\"\n Assign desired index to given axis.\n\n Indexes for%(extended_summary_sub)s row labels can be changed by assigning\n a list-like or Index.\n\n Parameters\n ----------\n labels : list-like, Index\n The values for the new index.\n\n axis : %(axes_single_arg)s, default 0\n The axis to update. 
The value 0 identifies the rows%(axis_description_sub)s.\n\n inplace : bool, default False\n Whether to return a new %(klass)s instance.\n\n Returns\n -------\n renamed : %(klass)s or None\n An object of type %(klass)s if inplace=False, None otherwise.\n\n See Also\n --------\n %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.\n \"\"\"\n if inplace:\n setattr(self, self._get_axis_name(axis), labels)\n else:\n obj = self.copy()\n obj.set_axis(labels, axis=axis, inplace=True)\n return obj\n\n def _set_axis(self, axis: int, labels: Index) -> None:\n labels = ensure_index(labels)\n self._mgr.set_axis(axis, labels)\n self._clear_item_cache()\n\n def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:\n \"\"\"\n Interchange axes and swap values axes appropriately.\n\n Returns\n -------\n y : same as input\n \"\"\"\n i = self._get_axis_number(axis1)\n j = self._get_axis_number(axis2)\n\n if i == j:\n if copy:\n return self.copy()\n return self\n\n mapping = {i: j, j: i}\n\n new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))\n new_values = self.values.swapaxes(i, j)\n if copy:\n new_values = new_values.copy()\n\n # ignore needed because of NDFrame constructor is different than\n # DataFrame/Series constructors.\n return self._constructor(new_values, *new_axes).__finalize__( # type: ignore\n self, method=\"swapaxes\"\n )\n\n def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:\n \"\"\"\n Return DataFrame with requested index / column level(s) removed.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n level : int, str, or list-like\n If a string is given, must be the name of a level\n If list-like, elements must be names or positional indexes\n of levels.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis along which the level(s) is removed:\n\n * 0 or 'index': remove level(s) in column.\n * 1 or 'columns': remove level(s) in row.\n\n Returns\n -------\n DataFrame\n DataFrame with requested index / column level(s) removed.\n\n Examples\n --------\n >>> df = pd.DataFrame([\n ... [1, 2, 3, 4],\n ... [5, 6, 7, 8],\n ... [9, 10, 11, 12]\n ... ]).set_index([0, 1]).rename_axis(['a', 'b'])\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... ], names=['level_1', 'level_2'])\n\n >>> df\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a')\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level_2', axis=1)\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n \"\"\"\n labels = self._get_axis(axis)\n new_labels = labels.droplevel(level)\n result = self.set_axis(new_labels, axis=axis, inplace=False)\n return result\n\n def pop(self, item: Label) -> Union[\"Series\", Any]:\n result = self[item]\n del self[item]\n if self.ndim == 2:\n result._reset_cacher()\n\n return result\n\n def squeeze(self, axis=None):\n \"\"\"\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. 
By default, all length-1 axes are\n squeezed.\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = pd.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_0a = df.loc[df.index < 1, ['a']]\n >>> df_0a\n a\n 0 1\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_0a.squeeze('rows')\n a 1\n Name: 0, dtype: int64\n\n Squeezing all axes will project directly into a scalar:\n\n >>> df_0a.squeeze()\n 1\n \"\"\"\n axis = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),)\n return self.iloc[\n tuple(\n 0 if i in axis and len(a) == 1 else slice(None)\n for i, a in enumerate(self.axes)\n )\n ]\n\n # ----------------------------------------------------------------------\n # Rename\n\n def rename(\n self: FrameOrSeries,\n mapper: Optional[Renamer] = None,\n *,\n index: Optional[Renamer] = None,\n columns: Optional[Renamer] = None,\n axis: Optional[Axis] = None,\n copy: bool = True,\n inplace: bool = False,\n level: Optional[Level] = None,\n errors: str = \"ignore\",\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Alter axes input function or functions. Function / dict values must be\n unique (1-to-1). Labels not contained in a dict / Series will be left\n as-is. Extra labels listed don't throw an error. Alternatively, change\n ``Series.name`` with a scalar value (Series only).\n\n Parameters\n ----------\n %(axes)s : scalar, list-like, dict-like or function, optional\n Scalar or list-like will alter the ``Series.name`` attribute,\n and raise on DataFrame.\n dict-like or functions are transformations to apply to\n that axis' values\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new {klass}. 
If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {'ignore', 'raise'}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If 'ignore', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n renamed : {klass} (new object)\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n \"errors='raise'\".\n\n See Also\n --------\n NDFrame.rename_axis\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n\n Since ``DataFrame`` doesn't have a ``.name`` attribute,\n only mapping-type arguments are allowed.\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(2)\n Traceback (most recent call last):\n ...\n TypeError: 'int' object is not callable\n\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"C\": \"c\"})\n a B\n 0 1 4\n 1 2 5\n 2 3 6\n\n Using axis-style parameters\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n\n See the :ref:`user guide <basics.rename>` for more.\n \"\"\"\n if mapper is None and index is None and columns is None:\n raise TypeError(\"must pass an index to rename\")\n\n if index is not None or columns is not None:\n if axis is not None:\n raise TypeError(\n \"Cannot specify both 'axis' and any of 'index' or 'columns'\"\n )\n elif mapper is not None:\n raise TypeError(\n \"Cannot specify both 'mapper' and any of 'index' or 'columns'\"\n )\n else:\n # use the mapper argument\n if axis and self._get_axis_number(axis) == 1:\n columns = mapper\n else:\n index = mapper\n\n result = self if inplace else self.copy(deep=copy)\n\n for axis_no, replacements in enumerate((index, columns)):\n if replacements is None:\n continue\n\n ax = self._get_axis(axis_no)\n f = com.get_rename_function(replacements)\n\n if level is not None:\n level = ax._get_level_number(level)\n\n # GH 13473\n if not callable(replacements):\n indexer = ax.get_indexer_for(replacements)\n if errors == \"raise\" and len(indexer[indexer == -1]):\n missing_labels = [\n label\n for index, label in enumerate(replacements)\n if indexer[index] == -1\n ]\n raise KeyError(f\"{missing_labels} not found in axis\")\n\n new_index = ax._transform_index(f, level)\n result.set_axis(new_index, axis=axis_no, inplace=True)\n result._clear_item_cache()\n\n if inplace:\n self._update_inplace(result)\n return None\n else:\n return result.__finalize__(self, method=\"rename\")\n\n @rewrite_axis_style_signature(\"mapper\", [(\"copy\", True), (\"inplace\", False)])\n def rename_axis(self, mapper=lib.no_default, **kwargs):\n \"\"\"\n Set the name of the axis 
for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n Value to set the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis' values.\n Note that the ``columns`` parameter is not allowed if the\n object is a Series. This parameter only apply for DataFrame\n type objects.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n\n .. versionchanged:: 0.24.0\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to rename.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Series\n or DataFrame.\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n In this case, the parameter ``copy`` is ignored.\n\n The second calling convention will modify the names of the\n the corresponding index if mapper is a list or a scalar.\n However, if mapper is dict-like or a function, it will use the\n deprecated behavior of modifying the axis *labels*.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([\"dog\", \"cat\", \"monkey\"])\n >>> s\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n >>> s.rename_axis(\"animal\")\n animal\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n\n **DataFrame**\n\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"animal\")\n >>> df\n num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"limbs\", axis=\"columns\")\n >>> df\n limbs num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> df.index = pd.MultiIndex.from_product([['mammal'],\n ... ['dog', 'cat', 'monkey']],\n ... 
names=['type', 'name'])\n >>> df\n limbs num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={'type': 'class'})\n limbs num_legs num_arms\n class name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(columns=str.upper)\n LIMBS num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n \"\"\"\n axes, kwargs = self._construct_axes_from_arguments(\n (), kwargs, sentinel=lib.no_default\n )\n copy = kwargs.pop(\"copy\", True)\n inplace = kwargs.pop(\"inplace\", False)\n axis = kwargs.pop(\"axis\", 0)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n if kwargs:\n raise TypeError(\n \"rename_axis() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if mapper is not lib.no_default:\n # Use v0.23 behavior if a scalar or list\n non_mapper = is_scalar(mapper) or (\n is_list_like(mapper) and not is_dict_like(mapper)\n )\n if non_mapper:\n return self._set_axis_name(mapper, axis=axis, inplace=inplace)\n else:\n raise ValueError(\"Use `.rename` to alter labels with a mapper.\")\n else:\n # Use new behavior. Means that index and/or columns\n # is specified\n result = self if inplace else self.copy(deep=copy)\n\n for axis in range(self._AXIS_LEN):\n v = axes.get(self._get_axis_name(axis))\n if v is lib.no_default:\n continue\n non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))\n if non_mapper:\n newnames = v\n else:\n f = com.get_rename_function(v)\n curnames = self._get_axis(axis).names\n newnames = [f(name) for name in curnames]\n result._set_axis_name(newnames, axis=axis, inplace=True)\n if not inplace:\n return result\n\n def _set_axis_name(self, name, axis=0, inplace=False):\n \"\"\"\n Set the name(s) of the axis.\n\n Parameters\n ----------\n name : str or list of str\n Name(s) to set.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to set the label. The value 0 or 'index' specifies index,\n and the value 1 or 'columns' specifies columns.\n inplace : bool, default False\n If `True`, do operation inplace and return None.\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or `None` if `inplace` is `True`.\n\n See Also\n --------\n DataFrame.rename : Alter the axis labels of :class:`DataFrame`.\n Series.rename : Alter the index labels or set the index name\n of :class:`Series`.\n Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs\n dog 4\n cat 4\n monkey 2\n >>> df._set_axis_name(\"animal\")\n num_legs\n animal\n dog 4\n cat 4\n monkey 2\n >>> df.index = pd.MultiIndex.from_product(\n ... 
[[\"mammal\"], ['dog', 'cat', 'monkey']])\n >>> df._set_axis_name([\"type\", \"name\"])\n num_legs\n type name\n mammal dog 4\n cat 4\n monkey 2\n \"\"\"\n axis = self._get_axis_number(axis)\n idx = self._get_axis(axis).set_names(name)\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n renamed = self if inplace else self.copy()\n renamed.set_axis(idx, axis=axis, inplace=True)\n if not inplace:\n return renamed\n\n # ----------------------------------------------------------------------\n # Comparison Methods\n\n def _indexed_same(self, other) -> bool:\n return all(\n self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS\n )\n\n def equals(self, other):\n \"\"\"\n Test whether two objects contain the same elements.\n\n This function allows two Series or DataFrames to be compared against\n each other to see if they have the same shape and elements. NaNs in\n the same location are considered equal. The column headers do not\n need to have the same type, but the elements within the columns must\n be the same dtype.\n\n Parameters\n ----------\n other : Series or DataFrame\n The other Series or DataFrame to be compared with the first.\n\n Returns\n -------\n bool\n True if all elements are the same in both objects, False\n otherwise.\n\n See Also\n --------\n Series.eq : Compare two Series objects of the same length\n and return a Series where each element is True if the element\n in each Series is equal, False otherwise.\n DataFrame.eq : Compare two DataFrame objects of the same shape and\n return a DataFrame where each element is True if the respective\n element in each DataFrame is equal, False otherwise.\n testing.assert_series_equal : Raises an AssertionError if left and\n right are not equal. Provides an easy interface to ignore\n inequality in dtypes, indexes and precision among others.\n testing.assert_frame_equal : Like assert_series_equal, but targets\n DataFrames.\n numpy.array_equal : Return True if two arrays have the same shape\n and elements, False otherwise.\n\n Notes\n -----\n This function requires that the elements have the same dtype as their\n respective elements in the other Series or DataFrame. 
However, the\n column labels do not need to have the same type, as long as they are\n still considered equal.\n\n Examples\n --------\n >>> df = pd.DataFrame({1: [10], 2: [20]})\n >>> df\n 1 2\n 0 10 20\n\n DataFrames df and exactly_equal have the same types and values for\n their elements and column labels, which will return True.\n\n >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})\n >>> exactly_equal\n 1 2\n 0 10 20\n >>> df.equals(exactly_equal)\n True\n\n DataFrames df and different_column_type have the same element\n types and values, but have different types for the column labels,\n which will still return True.\n\n >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})\n >>> different_column_type\n 1.0 2.0\n 0 10 20\n >>> df.equals(different_column_type)\n True\n\n DataFrames df and different_data_type have different types for the\n same values for their elements, and will return False even though\n their column labels are the same values and types.\n\n >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})\n >>> different_data_type\n 1 2\n 0 10.0 20.0\n >>> df.equals(different_data_type)\n False\n \"\"\"\n if not (isinstance(other, type(self)) or isinstance(self, type(other))):\n return False\n return self._mgr.equals(other._mgr)\n\n # -------------------------------------------------------------------------\n # Unary Methods\n\n def __neg__(self):\n values = self._values\n if is_bool_dtype(values):\n arr = operator.inv(values)\n elif (\n is_numeric_dtype(values)\n or is_timedelta64_dtype(values)\n or is_object_dtype(values)\n ):\n arr = operator.neg(values)\n else:\n raise TypeError(f\"Unary negative expects numeric dtype, not {values.dtype}\")\n return self.__array_wrap__(arr)\n\n def __pos__(self):\n values = self._values\n if is_bool_dtype(values):\n arr = values\n elif (\n is_numeric_dtype(values)\n or is_timedelta64_dtype(values)\n or is_object_dtype(values)\n ):\n arr = operator.pos(values)\n else:\n raise TypeError(\n \"Unary plus expects bool, numeric, timedelta, \"\n f\"or object dtype, not {values.dtype}\"\n )\n return self.__array_wrap__(arr)\n\n def __invert__(self):\n if not self.size:\n # inv fails with 0 len\n return self\n\n new_data = self._mgr.apply(operator.invert)\n result = self._constructor(new_data).__finalize__(self, method=\"__invert__\")\n return result\n\n def __nonzero__(self):\n raise ValueError(\n f\"The truth value of a {type(self).__name__} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n )\n\n __bool__ = __nonzero__\n\n def bool(self):\n \"\"\"\n Return the bool of a single element Series or DataFrame.\n\n This must be a boolean scalar value, either True or False. 
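A typical use is collapsing the result of a scalar comparison into a plain Python bool (an illustrative sketch):\n\n >>> pd.Series([10 > 5]).bool()\n True\n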
It will raise a\n ValueError if the Series or DataFrame does not have exactly 1 element, or that\n element is not boolean (integer values 0 and 1 will also raise an exception).\n\n Returns\n -------\n bool\n The value in the Series or DataFrame.\n\n See Also\n --------\n Series.astype : Change the data type of a Series, including to boolean.\n DataFrame.astype : Change the data type of a DataFrame, including to boolean.\n numpy.bool_ : NumPy boolean data type, used by pandas for boolean values.\n\n Examples\n --------\n The method will only work for single element objects with a boolean value:\n\n >>> pd.Series([True]).bool()\n True\n >>> pd.Series([False]).bool()\n False\n\n >>> pd.DataFrame({'col': [True]}).bool()\n True\n >>> pd.DataFrame({'col': [False]}).bool()\n False\n \"\"\"\n v = self.squeeze()\n if isinstance(v, (bool, np.bool_)):\n return bool(v)\n elif is_scalar(v):\n raise ValueError(\n \"bool cannot act on a non-boolean single element \"\n f\"{type(self).__name__}\"\n )\n\n self.__nonzero__()\n\n def __abs__(self: FrameOrSeries) -> FrameOrSeries:\n return self.abs()\n\n def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:\n return self.round(decimals)\n\n # -------------------------------------------------------------------------\n # Label or Level Combination Helpers\n #\n # A collection of helper methods for DataFrame/Series operations that\n # accept a combination of column/index labels and levels. All such\n # operations should utilize/extend these methods when possible so that we\n # have consistent precedence and validation logic throughout the library.\n\n def _is_level_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a level reference for a given axis.\n\n To be considered a level reference, `key` must be a string that:\n - (axis=0): Matches the name of an index level and does NOT match\n a column label.\n - (axis=1): Matches the name of a column level and does NOT match\n an index label.\n\n Parameters\n ----------\n key : str\n Potential level name for the given axis\n axis : int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_level : bool\n \"\"\"\n axis = self._get_axis_number(axis)\n\n return (\n key is not None\n and is_hashable(key)\n and key in self.axes[axis].names\n and not self._is_label_reference(key, axis=axis)\n )\n\n def _is_label_reference(self, key, axis=0) -> bool_t:\n \"\"\"\n Test whether a key is a label reference for a given axis.\n\n To be considered a label reference, `key` must be a string that:\n - (axis=0): Matches a column label\n - (axis=1): Matches an index label\n\n Parameters\n ----------\n key: str\n Potential label name\n axis: int, default 0\n Axis perpendicular to the axis that labels are associated with\n (0 means search for column labels, 1 means search for index labels)\n\n Returns\n -------\n is_label: bool\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n return (\n key is not None\n and is_hashable(key)\n and any(key in self.axes[ax] for ax in other_axes)\n )\n\n def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:\n \"\"\"\n Test whether a key is a label or level reference for a given axis.\n\n To be considered either a label or a level reference, `key` must be a\n string that:\n - (axis=0): Matches a column label or an index level\n - (axis=1): Matches an index label or a column level\n\n Parameters\n ----------\n key: str\n Potential label or level 
name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_label_or_level: bool\n \"\"\"\n return self._is_level_reference(key, axis=axis) or self._is_label_reference(\n key, axis=axis\n )\n\n def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:\n \"\"\"\n Check whether `key` is ambiguous.\n\n By ambiguous, we mean that it matches both a level of the input\n `axis` and a label of the other axis.\n\n Parameters\n ----------\n key: str or object\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns).\n\n Raises\n ------\n ValueError: `key` is ambiguous\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n if (\n key is not None\n and is_hashable(key)\n and key in self.axes[axis].names\n and any(key in self.axes[ax] for ax in other_axes)\n ):\n\n # Build an informative and grammatical warning\n level_article, level_type = (\n (\"an\", \"index\") if axis == 0 else (\"a\", \"column\")\n )\n\n label_article, label_type = (\n (\"a\", \"column\") if axis == 0 else (\"an\", \"index\")\n )\n\n msg = (\n f\"'{key}' is both {level_article} {level_type} level and \"\n f\"{label_article} {label_type} label, which is ambiguous.\"\n )\n raise ValueError(msg)\n\n def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:\n \"\"\"\n Return a 1-D array of values associated with `key`, a label or level\n from the given `axis`.\n\n Retrieval logic:\n - (axis=0): Return column values if `key` matches a column label.\n Otherwise return index level values if `key` matches an index\n level.\n - (axis=1): Return row values if `key` matches an index label.\n Otherwise return column level values if 'key' matches a column\n level\n\n Parameters\n ----------\n key: str\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n values: np.ndarray\n\n Raises\n ------\n KeyError\n if `key` matches neither a label nor a level\n ValueError\n if `key` matches multiple labels\n FutureWarning\n if `key` is ambiguous. 
This will become an ambiguity error in a\n future version\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]\n\n if self._is_label_reference(key, axis=axis):\n self._check_label_or_level_ambiguity(key, axis=axis)\n values = self.xs(key, axis=other_axes[0])._values\n elif self._is_level_reference(key, axis=axis):\n values = self.axes[axis].get_level_values(key)._values\n else:\n raise KeyError(key)\n\n # Check for duplicates\n if values.ndim > 1:\n\n if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):\n multi_message = (\n \"\\n\"\n \"For a multi-index, the label must be a \"\n \"tuple with elements corresponding to each level.\"\n )\n else:\n multi_message = \"\"\n\n label_axis_name = \"column\" if axis == 0 else \"index\"\n raise ValueError(\n (\n f\"The {label_axis_name} label '{key}' \"\n f\"is not unique.{multi_message}\"\n )\n )\n\n return values\n\n def _drop_labels_or_levels(self, keys, axis: int = 0):\n \"\"\"\n Drop labels and/or levels for the given `axis`.\n\n For each key in `keys`:\n - (axis=0): If key matches a column label then drop the column.\n Otherwise if key matches an index level then drop the level.\n - (axis=1): If key matches an index label then drop the row.\n Otherwise if key matches a column level then drop the level.\n\n Parameters\n ----------\n keys: str or list of str\n labels or levels to drop\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n dropped: DataFrame\n\n Raises\n ------\n ValueError\n if any `keys` match neither a label nor a level\n \"\"\"\n axis = self._get_axis_number(axis)\n\n # Validate keys\n keys = com.maybe_make_list(keys)\n invalid_keys = [\n k for k in keys if not self._is_label_or_level_reference(k, axis=axis)\n ]\n\n if invalid_keys:\n raise ValueError(\n (\n \"The following keys are not valid labels or \"\n f\"levels for axis {axis}: {invalid_keys}\"\n )\n )\n\n # Compute levels and labels to drop\n levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]\n\n labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]\n\n # Perform copy upfront and then use inplace operations below.\n # This ensures that we always perform exactly one copy.\n # ``copy`` and/or ``inplace`` options could be added in the future.\n dropped = self.copy()\n\n if axis == 0:\n # Handle dropping index levels\n if levels_to_drop:\n dropped.reset_index(levels_to_drop, drop=True, inplace=True)\n\n # Handle dropping columns labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=1, inplace=True)\n else:\n # Handle dropping column levels\n if levels_to_drop:\n if isinstance(dropped.columns, MultiIndex):\n # Drop the specified levels from the MultiIndex\n dropped.columns = dropped.columns.droplevel(levels_to_drop)\n else:\n # Drop the last level of Index by replacing with\n # a RangeIndex\n dropped.columns = RangeIndex(dropped.columns.size)\n\n # Handle dropping index labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=0, inplace=True)\n\n return dropped\n\n # ----------------------------------------------------------------------\n # Iteration\n\n def __hash__(self):\n raise TypeError(\n f\"{repr(type(self).__name__)} objects are mutable, \"\n f\"thus they cannot be hashed\"\n )\n\n def __iter__(self):\n \"\"\"\n Iterate over info axis.\n\n Returns\n -------\n iterator\n Info axis as iterator.\n \"\"\"\n return iter(self._info_axis)\n\n # can we get a better 
explanation of this?\n def keys(self):\n \"\"\"\n Get the 'info axis' (see Indexing for more).\n\n This is index for Series, columns for DataFrame.\n\n Returns\n -------\n Index\n Info axis.\n \"\"\"\n return self._info_axis\n\n def items(self):\n \"\"\"\n Iterate over (label, values) on info axis\n\n This is index for Series and columns for DataFrame.\n\n Returns\n -------\n Generator\n \"\"\"\n for h in self._info_axis:\n yield h, self[h]\n\n @doc(items)\n def iteritems(self):\n return self.items()\n\n def __len__(self) -> int:\n \"\"\"Returns length of info axis\"\"\"\n return len(self._info_axis)\n\n def __contains__(self, key) -> bool_t:\n \"\"\"True if the key is in the info axis\"\"\"\n return key in self._info_axis\n\n @property\n def empty(self) -> bool_t:\n \"\"\"\n Indicator whether DataFrame is empty.\n\n True if DataFrame is entirely empty (no items), meaning any of the\n axes are of length 0.\n\n Returns\n -------\n bool\n If DataFrame is empty, return True, if not return False.\n\n See Also\n --------\n Series.dropna : Return series without null values.\n DataFrame.dropna : Return DataFrame with labels on given axis omitted\n where (all or any) data are missing.\n\n Notes\n -----\n If DataFrame contains only NaNs, it is still not considered empty. See\n the example below.\n\n Examples\n --------\n An example of an actual empty DataFrame. Notice the index is empty:\n\n >>> df_empty = pd.DataFrame({'A' : []})\n >>> df_empty\n Empty DataFrame\n Columns: [A]\n Index: []\n >>> df_empty.empty\n True\n\n If we only have NaNs in our DataFrame, it is not considered empty! We\n will need to drop the NaNs to make the DataFrame empty:\n\n >>> df = pd.DataFrame({'A' : [np.nan]})\n >>> df\n A\n 0 NaN\n >>> df.empty\n False\n >>> df.dropna().empty\n True\n \"\"\"\n return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)\n\n # ----------------------------------------------------------------------\n # Array Interface\n\n # This is also set in IndexOpsMixin\n # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented\n __array_priority__ = 1000\n\n def __array__(self, dtype=None) -> np.ndarray:\n return np.asarray(self._values, dtype=dtype)\n\n def __array_wrap__(self, result, context=None):\n result = lib.item_from_zerodim(result)\n if is_scalar(result):\n # e.g. 
we get here with np.ptp(series)\n # ptp also requires the item_from_zerodim\n return result\n d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)\n return self._constructor(result, **d).__finalize__(\n self, method=\"__array_wrap__\"\n )\n\n # ideally we would define this to avoid the getattr checks, but\n # is slower\n # @property\n # def __array_interface__(self):\n # \"\"\" provide numpy array interface method \"\"\"\n # values = self.values\n # return dict(typestr=values.dtype.str,shape=values.shape,data=values)\n\n # ----------------------------------------------------------------------\n # Picklability\n\n def __getstate__(self) -> Dict[str, Any]:\n meta = {k: getattr(self, k, None) for k in self._metadata}\n return dict(\n _mgr=self._mgr,\n _typ=self._typ,\n _metadata=self._metadata,\n attrs=self.attrs,\n **meta,\n )\n\n def __setstate__(self, state):\n\n if isinstance(state, BlockManager):\n self._mgr = state\n elif isinstance(state, dict):\n if \"_data\" in state and \"_mgr\" not in state:\n # compat for older pickles\n state[\"_mgr\"] = state.pop(\"_data\")\n typ = state.get(\"_typ\")\n if typ is not None:\n attrs = state.get(\"_attrs\", {})\n object.__setattr__(self, \"_attrs\", attrs)\n\n # set in the order of internal names\n # to avoid definitional recursion\n # e.g. say fill_value needing _mgr to be\n # defined\n meta = set(self._internal_names + self._metadata)\n for k in list(meta):\n if k in state:\n v = state[k]\n object.__setattr__(self, k, v)\n\n for k, v in state.items():\n if k not in meta:\n object.__setattr__(self, k, v)\n\n else:\n raise NotImplementedError(\"Pre-0.12 pickles are no longer supported\")\n elif len(state) == 2:\n raise NotImplementedError(\"Pre-0.12 pickles are no longer supported\")\n\n self._item_cache = {}\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def __repr__(self) -> str:\n # string representation based upon iterating over self\n # (since, by definition, `PandasContainers` are iterable)\n prepr = f\"[{','.join(map(pprint_thing, self))}]\"\n return f\"{type(self).__name__}({prepr})\"\n\n def _repr_latex_(self):\n \"\"\"\n Returns a LaTeX representation for a particular object.\n Mainly for use with nbconvert (jupyter notebook conversion to pdf).\n \"\"\"\n if config.get_option(\"display.latex.repr\"):\n return self.to_latex()\n else:\n return None\n\n def _repr_data_resource_(self):\n \"\"\"\n Not a real Jupyter special repr method, but we use the same\n naming convention.\n \"\"\"\n if config.get_option(\"display.html.table_schema\"):\n data = self.head(config.get_option(\"display.max_rows\"))\n payload = json.loads(\n data.to_json(orient=\"table\"), object_pairs_hook=collections.OrderedDict\n )\n return payload\n\n # ----------------------------------------------------------------------\n # I/O Methods\n\n @doc(klass=\"object\")\n def to_excel(\n self,\n excel_writer,\n sheet_name=\"Sheet1\",\n na_rep=\"\",\n float_format=None,\n columns=None,\n header=True,\n index=True,\n index_label=None,\n startrow=0,\n startcol=0,\n engine=None,\n merge_cells=True,\n encoding=None,\n inf_rep=\"inf\",\n verbose=True,\n freeze_panes=None,\n ) -> None:\n \"\"\"\n Write {klass} to an Excel sheet.\n\n To write a single {klass} to an Excel .xlsx file it is only necessary to\n specify a target file name. 
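A Series is written the same way, as a single-column sheet (an illustrative call):\n\n >>> pd.Series([1, 2], name='x').to_excel('series.xlsx') # doctest: +SKIP\n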
To write to multiple sheets it is necessary to\n create an `ExcelWriter` object with a target file name, and specify a sheet\n in the file to write to.\n\n Multiple sheets may be written to by specifying unique `sheet_name`.\n With all data written to the file it is necessary to save the changes.\n Note that creating an `ExcelWriter` object with a file name that already\n exists will result in the contents of the existing file being erased.\n\n Parameters\n ----------\n excel_writer : str or ExcelWriter object\n File path or existing ExcelWriter.\n sheet_name : str, default 'Sheet1'\n Name of sheet which will contain DataFrame.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, optional\n Format string for floating point numbers. For example\n ``float_format=\"%.2f\"`` will format 0.1234 to 0.12.\n columns : sequence or list of str, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of string is given it is\n assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, optional\n Column label for index column(s) if desired. If not specified, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : int, default 0\n Upper left cell row to dump data frame.\n startcol : int, default 0\n Upper left cell column to dump data frame.\n engine : str, optional\n Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this\n via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : bool, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding : str, optional\n Encoding of the resulting excel file. Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : str, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel).\n verbose : bool, default True\n Display more information in the error logs.\n freeze_panes : tuple of int (length 2), optional\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen.\n\n See Also\n --------\n to_csv : Write DataFrame to a comma-separated values (csv) file.\n ExcelWriter : Class for writing DataFrame objects into excel sheets.\n read_excel : Read an Excel file into a pandas DataFrame.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Notes\n -----\n For compatibility with :meth:`~DataFrame.to_csv`,\n to_excel serializes lists and dicts to strings before writing.\n\n Once a workbook has been saved it is not possible write further data\n without rewriting the whole workbook.\n\n Examples\n --------\n\n Create, write to and save a workbook:\n\n >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df1.to_excel(\"output.xlsx\") # doctest: +SKIP\n\n To specify the sheet name:\n\n >>> df1.to_excel(\"output.xlsx\",\n ... sheet_name='Sheet_name_1') # doctest: +SKIP\n\n If you wish to write to more than one sheet in the workbook, it is\n necessary to specify an ExcelWriter object:\n\n >>> df2 = df1.copy()\n >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP\n ... df1.to_excel(writer, sheet_name='Sheet_name_1')\n ... 
df2.to_excel(writer, sheet_name='Sheet_name_2')\n\n ExcelWriter can also be used to append to an existing Excel file:\n\n >>> with pd.ExcelWriter('output.xlsx',\n ... mode='a') as writer: # doctest: +SKIP\n ... df.to_excel(writer, sheet_name='Sheet_name_3')\n\n To set the library that is used to write the Excel file,\n you can pass the `engine` keyword (the default engine is\n automatically chosen depending on the file extension):\n\n >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP\n \"\"\"\n\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.excel import ExcelFormatter\n\n formatter = ExcelFormatter(\n df,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep,\n )\n formatter.write(\n excel_writer,\n sheet_name=sheet_name,\n startrow=startrow,\n startcol=startcol,\n freeze_panes=freeze_panes,\n engine=engine,\n )\n\n def to_json(\n self,\n path_or_buf: Optional[FilePathOrBuffer] = None,\n orient: Optional[str] = None,\n date_format: Optional[str] = None,\n double_precision: int = 10,\n force_ascii: bool_t = True,\n date_unit: str = \"ms\",\n default_handler: Optional[Callable[[Any], JSONSerializable]] = None,\n lines: bool_t = False,\n compression: Optional[str] = \"infer\",\n index: bool_t = True,\n indent: Optional[int] = None,\n ) -> Optional[str]:\n \"\"\"\n Convert the object to a JSON string.\n\n Note NaN's and None will be converted to null and datetime objects\n will be converted to UNIX timestamps.\n\n Parameters\n ----------\n path_or_buf : str or file handle, optional\n File path or object. If not specified, the result is returned as\n a string.\n orient : str\n Indication of expected JSON string format.\n\n * Series:\n\n - default is 'index'\n - allowed values are: {'split','records','index','table'}.\n\n * DataFrame:\n\n - default is 'columns'\n - allowed values are: {'split', 'records', 'index', 'columns',\n 'values', 'table'}.\n\n * The format of the JSON string:\n\n - 'split' : dict like {'index' -> [index], 'columns' -> [columns],\n 'data' -> [values]}\n - 'records' : list like [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n - 'columns' : dict like {column -> {index -> value}}\n - 'values' : just the values array\n - 'table' : dict like {'schema': {schema}, 'data': {data}}\n\n Describing the data, where data component is like ``orient='records'``.\n\n .. versionchanged:: 0.20.0\n\n date_format : {None, 'epoch', 'iso'}\n Type of date conversion. 'epoch' = epoch milliseconds,\n 'iso' = ISO8601. The default depends on the `orient`. For\n ``orient='table'``, the default is 'iso'. For all other orients,\n the default is 'epoch'.\n double_precision : int, default 10\n The number of decimal places to use when encoding\n floating point values.\n force_ascii : bool, default True\n Force encoded string to be ASCII.\n date_unit : str, default 'ms' (milliseconds)\n The time unit to encode to, governs timestamp and ISO8601\n precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,\n microsecond, and nanosecond respectively.\n default_handler : callable, default None\n Handler to call if object cannot otherwise be converted to a\n suitable format for JSON. Should receive a single argument which is\n the object to convert and return a serialisable object.\n lines : bool, default False\n If 'orient' is 'records' write out line delimited json format. 
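For example, each record becomes one JSON object on its own line (an illustrative call):\n\n >>> pd.DataFrame({'a': [1, 2]}).to_json(orient='records', lines=True) # doctest: +SKIP\n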
Will\n throw ValueError if incorrect 'orient' since others are not list\n like.\n\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}\n\n A string representing the compression to use in the output file,\n only used when the first argument is a filename. By default, the\n compression is inferred from the filename.\n\n .. versionchanged:: 0.24.0\n 'infer' option added and set to default\n index : bool, default True\n Whether to include the index values in the JSON string. Not\n including the index (``index=False``) is only supported when\n orient is 'split' or 'table'.\n\n .. versionadded:: 0.23.0\n\n indent : int, optional\n Length of whitespace used to indent each record.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting json format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_json : Convert a JSON string to pandas object.\n\n Notes\n -----\n The behavior of ``indent=0`` varies from the stdlib, which does not\n indent the output but does insert newlines. Currently, ``indent=0``\n and the default ``indent=None`` are equivalent in pandas, though this\n may change in a future release.\n\n Examples\n --------\n >>> import json\n >>> df = pd.DataFrame(\n ... [[\"a\", \"b\"], [\"c\", \"d\"]],\n ... index=[\"row 1\", \"row 2\"],\n ... columns=[\"col 1\", \"col 2\"],\n ... )\n\n >>> result = df.to_json(orient=\"split\")\n >>> parsed = json.loads(result)\n >>> json.dumps(parsed, indent=4) # doctest: +SKIP\n {\n \"columns\": [\n \"col 1\",\n \"col 2\"\n ],\n \"index\": [\n \"row 1\",\n \"row 2\"\n ],\n \"data\": [\n [\n \"a\",\n \"b\"\n ],\n [\n \"c\",\n \"d\"\n ]\n ]\n }\n\n Encoding/decoding a Dataframe using ``'records'`` formatted JSON.\n Note that index labels are not preserved with this encoding.\n\n >>> result = df.to_json(orient=\"records\")\n >>> parsed = json.loads(result)\n >>> json.dumps(parsed, indent=4) # doctest: +SKIP\n [\n {\n \"col 1\": \"a\",\n \"col 2\": \"b\"\n },\n {\n \"col 1\": \"c\",\n \"col 2\": \"d\"\n }\n ]\n\n Encoding/decoding a Dataframe using ``'index'`` formatted JSON:\n\n >>> result = df.to_json(orient=\"index\")\n >>> parsed = json.loads(result)\n >>> json.dumps(parsed, indent=4) # doctest: +SKIP\n {\n \"row 1\": {\n \"col 1\": \"a\",\n \"col 2\": \"b\"\n },\n \"row 2\": {\n \"col 1\": \"c\",\n \"col 2\": \"d\"\n }\n }\n\n Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:\n\n >>> result = df.to_json(orient=\"columns\")\n >>> parsed = json.loads(result)\n >>> json.dumps(parsed, indent=4) # doctest: +SKIP\n {\n \"col 1\": {\n \"row 1\": \"a\",\n \"row 2\": \"c\"\n },\n \"col 2\": {\n \"row 1\": \"b\",\n \"row 2\": \"d\"\n }\n }\n\n Encoding/decoding a Dataframe using ``'values'`` formatted JSON:\n\n >>> result = df.to_json(orient=\"values\")\n >>> parsed = json.loads(result)\n >>> json.dumps(parsed, indent=4) # doctest: +SKIP\n [\n [\n \"a\",\n \"b\"\n ],\n [\n \"c\",\n \"d\"\n ]\n ]\n\n Encoding with Table Schema:\n\n >>> result = df.to_json(orient=\"table\")\n >>> parsed = json.loads(result)\n >>> json.dumps(parsed, indent=4) # doctest: +SKIP\n {\n \"schema\": {\n \"fields\": [\n {\n \"name\": \"index\",\n \"type\": \"string\"\n },\n {\n \"name\": \"col 1\",\n \"type\": \"string\"\n },\n {\n \"name\": \"col 2\",\n \"type\": \"string\"\n }\n ],\n \"primaryKey\": [\n \"index\"\n ],\n \"pandas_version\": \"0.20.0\"\n },\n \"data\": [\n {\n \"index\": \"row 1\",\n \"col 1\": \"a\",\n \"col 2\": \"b\"\n },\n {\n \"index\": \"row 2\",\n \"col 1\": \"c\",\n \"col 
2\": \"d\"\n }\n ]\n }\n \"\"\"\n from pandas.io import json\n\n if date_format is None and orient == \"table\":\n date_format = \"iso\"\n elif date_format is None:\n date_format = \"epoch\"\n\n config.is_nonnegative_int(indent)\n indent = indent or 0\n\n return json.to_json(\n path_or_buf=path_or_buf,\n obj=self,\n orient=orient,\n date_format=date_format,\n double_precision=double_precision,\n force_ascii=force_ascii,\n date_unit=date_unit,\n default_handler=default_handler,\n lines=lines,\n compression=compression,\n index=index,\n indent=indent,\n )\n\n def to_hdf(\n self,\n path_or_buf,\n key: str,\n mode: str = \"a\",\n complevel: Optional[int] = None,\n complib: Optional[str] = None,\n append: bool_t = False,\n format: Optional[str] = None,\n index: bool_t = True,\n min_itemsize: Optional[Union[int, Dict[str, int]]] = None,\n nan_rep=None,\n dropna: Optional[bool_t] = None,\n data_columns: Optional[Union[bool_t, List[str]]] = None,\n errors: str = \"strict\",\n encoding: str = \"UTF-8\",\n ) -> None:\n \"\"\"\n Write the contained data to an HDF5 file using HDFStore.\n\n Hierarchical Data Format (HDF) is self-describing, allowing an\n application to interpret the structure and contents of a file with\n no outside information. One HDF file can hold a mix of related objects\n which can be accessed as a group or as individual objects.\n\n In order to add another DataFrame or Series to an existing HDF file\n please use append mode and a different a key.\n\n For more information see the :ref:`user guide <io.hdf5>`.\n\n Parameters\n ----------\n path_or_buf : str or pandas.HDFStore\n File path or HDFStore object.\n key : str\n Identifier for the group in the store.\n mode : {'a', 'w', 'r+'}, default 'a'\n Mode to open file:\n\n - 'w': write, a new file is created (an existing file with\n the same name would be deleted).\n - 'a': append, an existing file is opened for reading and\n writing, and if the file does not exist it is created.\n - 'r+': similar to 'a', but the file must already exist.\n complevel : {0-9}, optional\n Specifies a compression level for data.\n A value of 0 disables compression.\n complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'\n Specifies the compression library to be used.\n As of v0.20.2 these additional compressors for Blosc are supported\n (default if no compressor specified: 'blosc:blosclz'):\n {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',\n 'blosc:zlib', 'blosc:zstd'}.\n Specifying a compression library which is not available issues\n a ValueError.\n append : bool, default False\n For Table formats, append the input data to the existing.\n format : {'fixed', 'table', None}, default 'fixed'\n Possible values:\n\n - 'fixed': Fixed format. Fast writing/reading. Not-appendable,\n nor searchable.\n - 'table': Table format. 
Write as a PyTables Table structure\n which may perform worse but allow more flexible operations\n like searching / selecting subsets of the data.\n - If None, pd.get_option('io.hdf.default_format') is checked,\n followed by fallback to \"fixed\"\n errors : str, default 'strict'\n Specifies how encoding and decoding errors are to be handled.\n See the errors argument for :func:`open` for a full list\n of options.\n encoding : str, default \"UTF-8\"\n min_itemsize : dict or int, optional\n Map column names to minimum string sizes for columns.\n nan_rep : Any, optional\n How to represent null values as str.\n Not allowed with append=True.\n data_columns : list of columns or True, optional\n List of columns to create as indexed data columns for on-disk\n queries, or True to use all columns. By default only the axes\n of the object are indexed. See :ref:`io.hdf5-query-data-columns`.\n Applicable only to format='table'.\n\n See Also\n --------\n DataFrame.read_hdf : Read from HDF file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_feather : Write out feather-format for DataFrames.\n DataFrame.to_csv : Write out to a csv file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n ... index=['a', 'b', 'c'])\n >>> df.to_hdf('data.h5', key='df', mode='w')\n\n We can add another object to the same file:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.to_hdf('data.h5', key='s')\n\n Reading from HDF file:\n\n >>> pd.read_hdf('data.h5', 'df')\n A B\n a 1 4\n b 2 5\n c 3 6\n >>> pd.read_hdf('data.h5', 's')\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n Deleting file with data:\n\n >>> import os\n >>> os.remove('data.h5')\n \"\"\"\n from pandas.io import pytables\n\n pytables.to_hdf(\n path_or_buf,\n key,\n self,\n mode=mode,\n complevel=complevel,\n complib=complib,\n append=append,\n format=format,\n index=index,\n min_itemsize=min_itemsize,\n nan_rep=nan_rep,\n dropna=dropna,\n data_columns=data_columns,\n errors=errors,\n encoding=encoding,\n )\n\n def to_sql(\n self,\n name: str,\n con,\n schema=None,\n if_exists: str = \"fail\",\n index: bool_t = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n ) -> None:\n \"\"\"\n Write records stored in a DataFrame to a SQL database.\n\n Databases supported by SQLAlchemy [1]_ are supported. Tables can be\n newly created, appended to, or overwritten.\n\n Parameters\n ----------\n name : str\n Name of SQL table.\n con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library. Legacy support is provided for sqlite3.Connection objects. The user\n is responsible for engine disposal and connection closure for the SQLAlchemy\n connectable See `here \\\n <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.\n\n schema : str, optional\n Specify the schema (if database flavor supports this). If None, use\n default schema.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists.\n\n * fail: Raise a ValueError.\n * replace: Drop the table before inserting new values.\n * append: Insert new values to the existing table.\n\n index : bool, default True\n Write DataFrame index as a column. Uses `index_label` as the column\n name in the table.\n index_label : str or sequence, default None\n Column label for index column(s). 
If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, optional\n Specify the number of rows in each batch to be written at a time.\n By default, all rows will be written at once.\n dtype : dict or scalar, optional\n Specifying the datatype for columns. If a dictionary is used, the\n keys should be the column names and the values should be the\n SQLAlchemy types or strings for the sqlite3 legacy mode. If a\n scalar is provided, it will be applied to all columns.\n method : {None, 'multi', callable}, optional\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n\n .. versionadded:: 0.24.0\n\n Raises\n ------\n ValueError\n When the table already exists and `if_exists` is 'fail' (the\n default).\n\n See Also\n --------\n read_sql : Read a DataFrame from a table.\n\n Notes\n -----\n Timezone aware datetime columns will be written as\n ``Timestamp with timezone`` type with SQLAlchemy if supported by the\n database. Otherwise, the datetimes will be stored as timezone unaware\n timestamps local to the original timezone.\n\n .. versionadded:: 0.24.0\n\n References\n ----------\n .. [1] https://docs.sqlalchemy.org\n .. [2] https://www.python.org/dev/peps/pep-0249/\n\n Examples\n --------\n Create an in-memory SQLite database.\n\n >>> from sqlalchemy import create_engine\n >>> engine = create_engine('sqlite://', echo=False)\n\n Create a table from scratch with 3 rows.\n\n >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})\n >>> df\n name\n 0 User 1\n 1 User 2\n 2 User 3\n\n >>> df.to_sql('users', con=engine)\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]\n\n An `sqlalchemy.engine.Connection` can also be passed to to `con`:\n >>> with engine.begin() as connection:\n ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})\n ... df1.to_sql('users', con=connection, if_exists='append')\n\n This is allowed to support operations that require that the same\n DBAPI connection is used for the entire operation.\n\n >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']})\n >>> df2.to_sql('users', con=engine, if_exists='append')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),\n (0, 'User 4'), (1, 'User 5'), (0, 'User 6'),\n (1, 'User 7')]\n\n Overwrite the table with just ``df2``.\n\n >>> df2.to_sql('users', con=engine, if_exists='replace',\n ... index_label='id')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 6'), (1, 'User 7')]\n\n Specify the dtype (especially useful for integers with missing values).\n Notice that while pandas is forced to store the data as floating point,\n the database supports nullable integers. When fetching the data with\n Python, we get back integer scalars.\n\n >>> df = pd.DataFrame({\"A\": [1, None, 2]})\n >>> df\n A\n 0 1.0\n 1 NaN\n 2 2.0\n\n >>> from sqlalchemy.types import Integer\n >>> df.to_sql('integers', con=engine, index=False,\n ... 
dtype={\"A\": Integer()})\n\n >>> engine.execute(\"SELECT * FROM integers\").fetchall()\n [(1,), (None,), (2,)]\n \"\"\"\n from pandas.io import sql\n\n sql.to_sql(\n self,\n name,\n con,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n method=method,\n )\n\n def to_pickle(\n self,\n path,\n compression: Optional[str] = \"infer\",\n protocol: int = pickle.HIGHEST_PROTOCOL,\n ) -> None:\n \"\"\"\n Pickle (serialize) object to file.\n\n Parameters\n ----------\n path : str\n File path where the pickled object will be stored.\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \\\n default 'infer'\n A string representing the compression to use in the output file. By\n default, infers from the file extension in specified path.\n protocol : int\n Int which indicates which protocol should be used by the pickler,\n default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible\n values are 0, 1, 2, 3, 4. A negative value for the protocol\n parameter is equivalent to setting its value to HIGHEST_PROTOCOL.\n\n .. [1] https://docs.python.org/3/library/pickle.html.\n\n See Also\n --------\n read_pickle : Load pickled pandas object (or any object) from file.\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_sql : Write DataFrame to a SQL database.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Examples\n --------\n >>> original_df = pd.DataFrame({\"foo\": range(5), \"bar\": range(5, 10)})\n >>> original_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> original_df.to_pickle(\"./dummy.pkl\")\n\n >>> unpickled_df = pd.read_pickle(\"./dummy.pkl\")\n >>> unpickled_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n\n >>> import os\n >>> os.remove(\"./dummy.pkl\")\n \"\"\"\n from pandas.io.pickle import to_pickle\n\n to_pickle(self, path, compression=compression, protocol=protocol)\n\n def to_clipboard(\n self, excel: bool_t = True, sep: Optional[str] = None, **kwargs\n ) -> None:\n r\"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n Parameters\n ----------\n excel : bool, default True\n Produce output in a csv format for easy pasting into excel.\n\n - True, use the provided separator for csv pasting.\n - False, write a string representation of the object to the clipboard.\n\n sep : str, default ``'\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n See Also\n --------\n DataFrame.to_csv : Write a DataFrame to a comma-separated values\n (csv) file.\n read_clipboard : Read text from clipboard and pass to read_table.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])\n\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... 
# 4,5,6\n \"\"\"\n from pandas.io import clipboards\n\n clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)\n\n def to_xarray(self):\n \"\"\"\n Return an xarray object from the pandas object.\n\n Returns\n -------\n xarray.DataArray or xarray.Dataset\n Data in the pandas structure converted to Dataset if the object is\n a DataFrame, or a DataArray if the object is a Series.\n\n See Also\n --------\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Notes\n -----\n See the `xarray docs <https://xarray.pydata.org/en/stable/>`__\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),\n ... ('parrot', 'bird', 24.0, 2),\n ... ('lion', 'mammal', 80.5, 4),\n ... ('monkey', 'mammal', np.nan, 4)],\n ... columns=['name', 'class', 'max_speed',\n ... 'num_legs'])\n >>> df\n name class max_speed num_legs\n 0 falcon bird 389.0 2\n 1 parrot bird 24.0 2\n 2 lion mammal 80.5 4\n 3 monkey mammal NaN 4\n\n >>> df.to_xarray()\n <xarray.Dataset>\n Dimensions: (index: 4)\n Coordinates:\n * index (index) int64 0 1 2 3\n Data variables:\n name (index) object 'falcon' 'parrot' 'lion' 'monkey'\n class (index) object 'bird' 'bird' 'mammal' 'mammal'\n max_speed (index) float64 389.0 24.0 80.5 nan\n num_legs (index) int64 2 2 4 4\n\n >>> df['max_speed'].to_xarray()\n <xarray.DataArray 'max_speed' (index: 4)>\n array([389. , 24. , 80.5, nan])\n Coordinates:\n * index (index) int64 0 1 2 3\n\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',\n ... '2018-01-02', '2018-01-02'])\n >>> df_multiindex = pd.DataFrame({'date': dates,\n ... 'animal': ['falcon', 'parrot',\n ... 'falcon', 'parrot'],\n ... 'speed': [350, 18, 361, 15]})\n >>> df_multiindex = df_multiindex.set_index(['date', 'animal'])\n\n >>> df_multiindex\n speed\n date animal\n 2018-01-01 falcon 350\n parrot 18\n 2018-01-02 falcon 361\n parrot 15\n\n >>> df_multiindex.to_xarray()\n <xarray.Dataset>\n Dimensions: (animal: 2, date: 2)\n Coordinates:\n * date (date) datetime64[ns] 2018-01-01 2018-01-02\n * animal (animal) object 'falcon' 'parrot'\n Data variables:\n speed (date, animal) int64 350 18 361 15\n \"\"\"\n xarray = import_optional_dependency(\"xarray\")\n\n if self.ndim == 1:\n return xarray.DataArray.from_series(self)\n else:\n return xarray.Dataset.from_dataframe(self)\n\n @Substitution(returns=fmt.return_docstring)\n def to_latex(\n self,\n buf=None,\n columns=None,\n col_space=None,\n header=True,\n index=True,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n index_names=True,\n bold_rows=False,\n column_format=None,\n longtable=None,\n escape=None,\n encoding=None,\n decimal=\".\",\n multicolumn=None,\n multicolumn_format=None,\n multirow=None,\n caption=None,\n label=None,\n ):\n r\"\"\"\n Render object to a LaTeX tabular, longtable, or nested table/tabular.\n\n Requires ``\\usepackage{booktabs}``. The output can be copy/pasted\n into a main LaTeX document or read from an external file\n with ``\\input{table.tex}``.\n\n .. versionchanged:: 0.20.2\n Added to Series.\n\n .. versionchanged:: 1.0.0\n Added caption and label arguments.\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. 
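For example, to render a single column (an illustrative call, using a ``df`` like the one in the Examples below):\n\n >>> df.to_latex(columns=['name']) # doctest: +SKIP\n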
Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given,\n it is assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default 'NaN'\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns' elements by position or\n name. The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function or str, optional, default None\n Formatter for floating point numbers. For example\n ``float_format=\"%%.2f\"`` and ``float_format=\"{:0.2f}\".format`` will\n both result in 0.1234 being formatted as 0.12.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row. By default, the value will be\n read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in `LaTeX table format\n <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3\n columns. By default, 'l' will be used for all columns except\n columns of numbers, which default to 'r'.\n longtable : bool, optional\n By default, the value will be read from the pandas config\n module. Use a longtable environment instead of tabular. Requires\n adding a \\usepackage{longtable} to your LaTeX preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config\n module. When set to False prevents from escaping latex special\n characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'utf-8'.\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n multicolumn : bool, default True\n Use \\multicolumn to enhance MultiIndex columns.\n The default will be read from the config module.\n multicolumn_format : str, default 'l'\n The alignment for multicolumns, similar to `column_format`\n The default will be read from the config module.\n multirow : bool, default False\n Use \\multirow to enhance MultiIndex rows. Requires adding a\n \\usepackage{multirow} to your LaTeX preamble. Will print\n centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read\n from the pandas config module.\n caption : str, optional\n The LaTeX caption to be placed inside ``\\caption{}`` in the output.\n\n .. versionadded:: 1.0.0\n\n label : str, optional\n The LaTeX label to be placed inside ``\\label{}`` in the output.\n This is used with ``\\ref{}`` in the main ``.tex`` file.\n\n .. versionadded:: 1.0.0\n %(returns)s\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 
'weapon': ['sai', 'bo staff']})\n >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE\n \\begin{tabular}{lll}\n \\toprule\n name & mask & weapon \\\\\n \\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n \\bottomrule\n \\end{tabular}\n \"\"\"\n # Get defaults from the pandas config\n if self.ndim == 1:\n self = self.to_frame()\n if longtable is None:\n longtable = config.get_option(\"display.latex.longtable\")\n if escape is None:\n escape = config.get_option(\"display.latex.escape\")\n if multicolumn is None:\n multicolumn = config.get_option(\"display.latex.multicolumn\")\n if multicolumn_format is None:\n multicolumn_format = config.get_option(\"display.latex.multicolumn_format\")\n if multirow is None:\n multirow = config.get_option(\"display.latex.multirow\")\n\n formatter = DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n header=header,\n index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n index_names=index_names,\n escape=escape,\n decimal=decimal,\n )\n return formatter.to_latex(\n buf=buf,\n column_format=column_format,\n longtable=longtable,\n encoding=encoding,\n multicolumn=multicolumn,\n multicolumn_format=multicolumn_format,\n multirow=multirow,\n caption=caption,\n label=label,\n )\n\n def to_csv(\n self,\n path_or_buf: Optional[FilePathOrBuffer] = None,\n sep: str = \",\",\n na_rep: str = \"\",\n float_format: Optional[str] = None,\n columns: Optional[Sequence[Label]] = None,\n header: Union[bool_t, List[str]] = True,\n index: bool_t = True,\n index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,\n mode: str = \"w\",\n encoding: Optional[str] = None,\n compression: Optional[Union[str, Mapping[str, str]]] = \"infer\",\n quoting: Optional[int] = None,\n quotechar: str = '\"',\n line_terminator: Optional[str] = None,\n chunksize: Optional[int] = None,\n date_format: Optional[str] = None,\n doublequote: bool_t = True,\n escapechar: Optional[str] = None,\n decimal: Optional[str] = \".\",\n errors: str = \"strict\",\n ) -> Optional[str]:\n r\"\"\"\n Write object to a comma-separated values (csv) file.\n\n .. versionchanged:: 0.24.0\n The order of arguments for Series was changed.\n\n Parameters\n ----------\n path_or_buf : str or file handle, default None\n File path or object, if None is provided the result is returned as\n a string. If a file object is passed it should be opened with\n `newline=''`, disabling universal newlines.\n\n .. versionchanged:: 0.24.0\n\n Was previously named \"path\" for Series.\n\n sep : str, default ','\n String of length 1. Field delimiter for the output file.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, default None\n Format string for floating point numbers.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n\n .. versionchanged:: 0.24.0\n\n Previously defaulted to False for Series.\n\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the object uses MultiIndex. If\n False do not print fields for index names. 
Use index_label=False\n for easier importing in R.\n mode : str\n Python write mode, default 'w'.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'utf-8'.\n compression : str or dict, default 'infer'\n If str, represents compression mode. If dict, value at 'method' is\n the compression mode. Compression mode may be any of the following\n possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If\n compression mode is 'infer' and `path_or_buf` is path-like, then\n detect compression mode from the following extensions: '.gz',\n '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given\n and mode is one of {'zip', 'gzip', 'bz2'}, or inferred as\n one of the above, other entries passed as\n additional compression options.\n\n .. versionchanged:: 1.0.0\n\n May now be a dict with key 'method' as compression mode\n and other entries as additional compression options if\n compression mode is 'zip'.\n\n .. versionchanged:: 1.1.0\n\n Passing compression options as keys in dict is\n supported for compression modes 'gzip' and 'bz2'\n as well as 'zip'.\n\n quoting : optional constant from csv module\n Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`\n then floats are converted to strings and thus csv.QUOTE_NONNUMERIC\n will treat them as non-numeric.\n quotechar : str, default '\\\"'\n String of length 1. Character used to quote fields.\n line_terminator : str, optional\n The newline character or character sequence to use in the output\n file. Defaults to `os.linesep`, which depends on the OS in which\n this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.).\n\n .. versionchanged:: 0.24.0\n chunksize : int or None\n Rows to write at a time.\n date_format : str, default None\n Format string for datetime objects.\n doublequote : bool, default True\n Control quoting of `quotechar` inside a field.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n decimal : str, default '.'\n Character recognized as decimal separator. E.g. use ',' for\n European data.\n errors : str, default 'strict'\n Specifies how encoding and decoding errors are to be handled.\n See the errors argument for :func:`open` for a full list\n of options.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting csv format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_csv : Load a CSV file into a DataFrame.\n to_excel : Write DataFrame to an Excel file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']})\n >>> df.to_csv(index=False)\n 'name,mask,weapon\\nRaphael,red,sai\\nDonatello,purple,bo staff\\n'\n\n Create 'out.zip' containing 'out.csv'\n\n >>> compression_opts = dict(method='zip',\n ... archive_name='out.csv') # doctest: +SKIP\n >>> df.to_csv('out.zip', index=False,\n ... 
compression=compression_opts) # doctest: +SKIP\n \"\"\"\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.csvs import CSVFormatter\n\n formatter = CSVFormatter(\n df,\n path_or_buf,\n line_terminator=line_terminator,\n sep=sep,\n encoding=encoding,\n errors=errors,\n compression=compression,\n quoting=quoting,\n na_rep=na_rep,\n float_format=float_format,\n cols=columns,\n header=header,\n index=index,\n index_label=index_label,\n mode=mode,\n chunksize=chunksize,\n quotechar=quotechar,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar,\n decimal=decimal,\n )\n formatter.save()\n\n if path_or_buf is None:\n return formatter.path_or_buf.getvalue()\n\n return None\n\n # ----------------------------------------------------------------------\n # Lookup Caching\n\n def _set_as_cached(self, item, cacher) -> None:\n \"\"\"\n Set the _cacher attribute on the calling object with a weakref to\n cacher.\n \"\"\"\n self._cacher = (item, weakref.ref(cacher))\n\n def _reset_cacher(self) -> None:\n \"\"\"\n Reset the cacher.\n \"\"\"\n if hasattr(self, \"_cacher\"):\n del self._cacher\n\n def _maybe_cache_changed(self, item, value) -> None:\n \"\"\"\n The object has called back to us saying maybe it has changed.\n \"\"\"\n loc = self._info_axis.get_loc(item)\n self._mgr.iset(loc, value)\n\n @property\n def _is_cached(self) -> bool_t:\n \"\"\"Return boolean indicating if self is cached or not.\"\"\"\n return getattr(self, \"_cacher\", None) is not None\n\n def _get_cacher(self):\n \"\"\"return my cacher or None\"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n cacher = cacher[1]()\n return cacher\n\n def _maybe_update_cacher(\n self, clear: bool_t = False, verify_is_copy: bool_t = True\n ) -> None:\n \"\"\"\n See if we need to update our parent cacher if clear, then clear our\n cache.\n\n Parameters\n ----------\n clear : bool, default False\n Clear the item cache.\n verify_is_copy : bool, default True\n Provide is_copy checks.\n \"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n ref = cacher[1]()\n\n # we are trying to reference a dead referant, hence\n # a copy\n if ref is None:\n del self._cacher\n else:\n if len(self) == len(ref):\n # otherwise, either self or ref has swapped in new arrays\n ref._maybe_cache_changed(cacher[0], self)\n else:\n # GH#33675 we have swapped in a new array, so parent\n # reference to self is now invalid\n ref._item_cache.pop(cacher[0], None)\n\n if verify_is_copy:\n self._check_setitem_copy(stacklevel=5, t=\"referant\")\n\n if clear:\n self._clear_item_cache()\n\n def _clear_item_cache(self) -> None:\n self._item_cache.clear()\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n def take(\n self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs\n ) -> FrameOrSeries:\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. 
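(Illustrative aside, a throwaway frame showing the two ``axis`` values in action; the fuller examples follow below.)

>>> df = pd.DataFrame({'a': [10, 20, 30], 'b': [1, 2, 3]})
>>> df.take([2, 0]).index.tolist()         # axis=0: pick rows by position
[2, 0]
>>> df.take([1], axis=1).columns.tolist()  # axis=1: pick columns by position
['b']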
``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n is_copy : bool\n Before pandas 1.0, ``is_copy=False`` can be specified to ensure\n that the return value is an actual copy. Starting with pandas 1.0,\n ``take`` always returns a copy, and the keyword is therefore\n deprecated.\n\n .. deprecated:: 1.0.0\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3])\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2])\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n \"\"\"\n if is_copy is not None:\n warnings.warn(\n \"is_copy is deprecated and will be removed in a future version. \"\n \"'take' always returns a copy, so there is no need to specify this.\",\n FutureWarning,\n stacklevel=2,\n )\n\n nv.validate_take(tuple(), kwargs)\n\n self._consolidate_inplace()\n\n new_data = self._mgr.take(\n indices, axis=self._get_block_manager_axis(axis), verify=True\n )\n return self._constructor(new_data).__finalize__(self, method=\"take\")\n\n def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:\n \"\"\"\n Internal version of the `take` method that sets the `_is_copy`\n attribute to keep track of the parent dataframe (using in indexing\n for the SettingWithCopyWarning).\n\n See the docstring of `take` for full explanation of the parameters.\n \"\"\"\n result = self.take(indices=indices, axis=axis)\n # Maybe set copy if we didn't actually change the index.\n if not result._get_axis(axis).equals(self._get_axis(axis)):\n result._set_is_copy(self)\n return result\n\n def xs(self, key, axis=0, level=None, drop_level: bool_t = True):\n \"\"\"\n Return cross-section from the Series/DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to retrieve cross-section on.\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. 
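(Illustrative aside, assuming a small MultiIndexed series; the docstring's own examples follow below.)

>>> s = pd.Series([1, 2, 3],
...               index=pd.MultiIndex.from_tuples(
...                   [('a', 'x'), ('a', 'y'), ('b', 'x')]))
>>> s.xs('x', level=1).tolist()   # select by the second level only
[1, 3]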
Levels can be referred by label or position.\n drop_level : bool, default True\n If False, returns object with same levels as self.\n\n Returns\n -------\n Series or DataFrame\n Cross-section from the original Series or DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Notes\n -----\n `xs` can not be used to set values.\n\n MultiIndex Slicers is a generic way to get/set values on\n any level or levels.\n It is a superset of `xs` functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = pd.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal')\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog'))\n num_legs num_wings\n locomotion\n walks 4 0\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1)\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n\n Get values at several indexes and levels\n\n >>> df.xs(('bird', 'walks'),\n ... level=[0, 'locomotion'])\n num_legs num_wings\n animal\n penguin 2 2\n\n Get values at specified column and axis\n\n >>> df.xs('num_wings', axis=1)\n class animal locomotion\n mammal cat walks 0\n dog walks 0\n bat flies 2\n bird penguin walks 2\n Name: num_wings, dtype: int64\n \"\"\"\n axis = self._get_axis_number(axis)\n labels = self._get_axis(axis)\n if level is not None:\n if not isinstance(labels, MultiIndex):\n raise TypeError(\"Index must be a MultiIndex\")\n loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)\n\n # create the tuple of the indexer\n _indexer = [slice(None)] * self.ndim\n _indexer[axis] = loc\n indexer = tuple(_indexer)\n\n result = self.iloc[indexer]\n setattr(result, result._get_axis_name(axis), new_ax)\n return result\n\n if axis == 1:\n return self[key]\n\n self._consolidate_inplace()\n\n index = self.index\n if isinstance(index, MultiIndex):\n loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)\n else:\n loc = self.index.get_loc(key)\n\n if isinstance(loc, np.ndarray):\n if loc.dtype == np.bool_:\n (inds,) = loc.nonzero()\n return self._take_with_is_copy(inds, axis=axis)\n else:\n return self._take_with_is_copy(loc, axis=axis)\n\n if not is_scalar(loc):\n new_index = self.index[loc]\n\n if is_scalar(loc):\n # In this case loc should be an integer\n if self.ndim == 1:\n # if we encounter an array-like and we only have 1 dim\n # that means that their are list/ndarrays inside the Series!\n # so just return them (GH 6394)\n return self._values[loc]\n\n new_values = self._mgr.fast_xs(loc)\n\n result = self._constructor_sliced(\n new_values,\n index=self.columns,\n name=self.index[loc],\n dtype=new_values.dtype,\n )\n\n else:\n result = self.iloc[loc]\n result.index = new_index\n\n # this could be a view\n # but only in a single-dtyped view sliceable case\n result._set_is_copy(self, 
copy=not result._is_view)\n return result\n\n def __getitem__(self, item):\n raise AbstractMethodError(self)\n\n def _get_item_cache(self, item):\n \"\"\"Return the cached item, item represents a label indexer.\"\"\"\n cache = self._item_cache\n res = cache.get(item)\n if res is None:\n # All places that call _get_item_cache have unique columns,\n # pending resolution of GH#33047\n\n loc = self.columns.get_loc(item)\n values = self._mgr.iget(loc)\n res = self._box_col_values(values, loc)\n\n cache[item] = res\n res._set_as_cached(item, self)\n\n # for a chain\n res._is_copy = self._is_copy\n return res\n\n def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:\n \"\"\"\n Construct a slice of this container.\n\n Slicing with this method is *always* positional.\n \"\"\"\n assert isinstance(slobj, slice), type(slobj)\n axis = self._get_block_manager_axis(axis)\n result = self._constructor(self._mgr.get_slice(slobj, axis=axis))\n result = result.__finalize__(self)\n\n # this could be a view\n # but only in a single-dtyped view sliceable case\n is_copy = axis != 0 or result._is_view\n result._set_is_copy(self, copy=is_copy)\n return result\n\n def _iset_item(self, loc: int, value) -> None:\n self._mgr.iset(loc, value)\n self._clear_item_cache()\n\n def _set_item(self, key, value) -> None:\n try:\n loc = self._info_axis.get_loc(key)\n except KeyError:\n # This item wasn't present, just insert at end\n self._mgr.insert(len(self._info_axis), key, value)\n return\n\n NDFrame._iset_item(self, loc, value)\n\n def _set_is_copy(self, ref, copy: bool_t = True) -> None:\n if not copy:\n self._is_copy = None\n else:\n assert ref is not None\n self._is_copy = weakref.ref(ref)\n\n def _check_is_chained_assignment_possible(self) -> bool_t:\n \"\"\"\n Check if we are a view, have a cacher, and are of mixed type.\n If so, then force a setitem_copy check.\n\n Should be called just near setting a value\n\n Will return a boolean if it we are a view and are cached, but a\n single-dtype meaning that the cacher should be updated following\n setting.\n \"\"\"\n if self._is_view and self._is_cached:\n ref = self._get_cacher()\n if ref is not None and ref._is_mixed_type:\n self._check_setitem_copy(stacklevel=4, t=\"referant\", force=True)\n return True\n elif self._is_copy:\n self._check_setitem_copy(stacklevel=4, t=\"referant\")\n return False\n\n def _check_setitem_copy(self, stacklevel=4, t=\"setting\", force=False):\n \"\"\"\n\n Parameters\n ----------\n stacklevel : int, default 4\n the level to show of the stack when the error is output\n t : str, the type of setting error\n force : bool, default False\n If True, then force showing an error.\n\n validate if we are doing a setitem on a chained copy.\n\n If you call this function, be sure to set the stacklevel such that the\n user will see the error *at the level of setting*\n\n It is technically possible to figure out that we are setting on\n a copy even WITH a multi-dtyped pandas object. In other words, some\n blocks may be views while other are not. Currently _is_view will ALWAYS\n return False for multi-blocks to avoid having to handle this case.\n\n df = DataFrame(np.arange(0,9), columns=['count'])\n df['group'] = 'b'\n\n # This technically need not raise SettingWithCopy if both are view\n # (which is not # generally guaranteed but is usually True. 
However,\n # this is in general not a good practice and we recommend using .loc.\n df.iloc[0:5]['group'] = 'a'\n\n \"\"\"\n # return early if the check is not needed\n if not (force or self._is_copy):\n return\n\n value = config.get_option(\"mode.chained_assignment\")\n if value is None:\n return\n\n # see if the copy is not actually referred; if so, then dissolve\n # the copy weakref\n if self._is_copy is not None and not isinstance(self._is_copy, str):\n r = self._is_copy()\n if not gc.get_referents(r) or r.shape == self.shape:\n self._is_copy = None\n return\n\n # a custom message\n if isinstance(self._is_copy, str):\n t = self._is_copy\n\n elif t == \"referant\":\n t = (\n \"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame\\n\\n\"\n \"See the caveats in the documentation: \"\n \"https://pandas.pydata.org/pandas-docs/stable/user_guide/\"\n \"indexing.html#returning-a-view-versus-a-copy\"\n )\n\n else:\n t = (\n \"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame.\\n\"\n \"Try using .loc[row_indexer,col_indexer] = value \"\n \"instead\\n\\nSee the caveats in the documentation: \"\n \"https://pandas.pydata.org/pandas-docs/stable/user_guide/\"\n \"indexing.html#returning-a-view-versus-a-copy\"\n )\n\n if value == \"raise\":\n raise com.SettingWithCopyError(t)\n elif value == \"warn\":\n warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)\n\n def __delitem__(self, key) -> None:\n \"\"\"\n Delete item\n \"\"\"\n deleted = False\n\n maybe_shortcut = False\n if self.ndim == 2 and isinstance(self.columns, MultiIndex):\n try:\n maybe_shortcut = key not in self.columns._engine\n except TypeError:\n pass\n\n if maybe_shortcut:\n # Allow shorthand to delete all columns whose first len(key)\n # elements match key:\n if not isinstance(key, tuple):\n key = (key,)\n for col in self.columns:\n if isinstance(col, tuple) and col[: len(key)] == key:\n del self[col]\n deleted = True\n if not deleted:\n # If the above loop ran and didn't delete anything because\n # there was no match, this call should raise the appropriate\n # exception:\n loc = self.axes[-1].get_loc(key)\n self._mgr.idelete(loc)\n\n # delete from the caches\n try:\n del self._item_cache[key]\n except KeyError:\n pass\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n def get(self, key, default=None):\n \"\"\"\n Get item from object for given key (ex: DataFrame column).\n\n Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n \"\"\"\n try:\n return self[key]\n except (KeyError, ValueError, IndexError):\n return default\n\n @property\n def _is_view(self) -> bool_t:\n \"\"\"Return boolean indicating if self is view of another array \"\"\"\n return self._mgr.is_view\n\n def reindex_like(\n self: FrameOrSeries,\n other,\n method: Optional[str] = None,\n copy: bool_t = True,\n limit=None,\n tolerance=None,\n ) -> FrameOrSeries:\n \"\"\"\n Return an object with matching indices as other object.\n\n Conform the object to the same index on all axes. Optional\n filling logic, placing NaN in locations having no value\n in the previous index. 
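(A minimal sketch of that conforming behaviour, using throwaway frames; richer examples appear further below.)

>>> a = pd.DataFrame({'x': [1, 2]}, index=[0, 1])
>>> b = pd.DataFrame({'x': [10], 'y': [20]}, index=[1])
>>> b.reindex_like(a).shape        # conformed to a's index and columns
(2, 1)
>>> b.reindex_like(a)['x'].tolist()
[nan, 10.0]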
A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : Object of the same data type\n Its row and column indices are used to define the new indices\n of this object.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap.\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, default None\n Maximum number of consecutive labels to fill for inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n Returns\n -------\n Series or DataFrame\n Same type as caller, but with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit',\n ... 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = pd.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... 
'2014-02-15']))\n\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1)\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN NaN\n 2014-02-15 35.1 NaN medium\n \"\"\"\n d = other._construct_axes_dict(\n axes=self._AXIS_ORDERS,\n method=method,\n copy=copy,\n limit=limit,\n tolerance=tolerance,\n )\n\n return self.reindex(**d)\n\n def drop(\n self,\n labels=None,\n axis=0,\n index=None,\n columns=None,\n level=None,\n inplace: bool_t = False,\n errors: str = \"raise\",\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\"Cannot specify both 'labels' and 'index'/'columns'\")\n axis_name = self._get_axis_name(axis)\n axes = {axis_name: labels}\n elif index is not None or columns is not None:\n axes, _ = self._construct_axes_from_arguments((index, columns), {})\n else:\n raise ValueError(\n \"Need to specify at least one of 'labels', 'index' or 'columns'\"\n )\n\n obj = self\n\n for axis, labels in axes.items():\n if labels is not None:\n obj = obj._drop_axis(labels, axis, level=level, errors=errors)\n\n if inplace:\n self._update_inplace(obj)\n else:\n return obj\n\n def _drop_axis(\n self: FrameOrSeries, labels, axis, level=None, errors: str = \"raise\"\n ) -> FrameOrSeries:\n \"\"\"\n Drop labels from specified axis. Used in the ``drop`` method\n internally.\n\n Parameters\n ----------\n labels : single label or list-like\n axis : int or axis name\n level : int or level name, default None\n For MultiIndex\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n \"\"\"\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n axis = self._get_axis(axis)\n\n if axis.is_unique:\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError(\"axis must be a MultiIndex\")\n new_axis = axis.drop(labels, level=level, errors=errors)\n else:\n new_axis = axis.drop(labels, errors=errors)\n result = self.reindex(**{axis_name: new_axis})\n\n # Case for non-unique axis\n else:\n labels = ensure_object(com.index_labels_to_array(labels))\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError(\"axis must be a MultiIndex\")\n indexer = ~axis.get_level_values(level).isin(labels)\n\n # GH 18561 MultiIndex.drop should raise if label is absent\n if errors == \"raise\" and indexer.all():\n raise KeyError(f\"{labels} not found in axis\")\n else:\n indexer = ~axis.isin(labels)\n # Check if label doesn't exist along axis\n labels_missing = (axis.get_indexer_for(labels) == -1).any()\n if errors == \"raise\" and labels_missing:\n raise KeyError(f\"{labels} not found in axis\")\n\n slicer = [slice(None)] * self.ndim\n slicer[self._get_axis_number(axis_name)] = indexer\n\n result = self.loc[tuple(slicer)]\n\n return result\n\n def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:\n \"\"\"\n Replace self internals with result.\n\n Parameters\n ----------\n result : same type as self\n verify_is_copy : bool, default True\n Provide is_copy checks.\n \"\"\"\n # NOTE: This does *not* call __finalize__ and that's an explicit\n # decision that we may revisit in the future.\n self._reset_cache()\n self._clear_item_cache()\n self._mgr = result._mgr\n self._maybe_update_cacher(verify_is_copy=verify_is_copy)\n\n def add_prefix(self: 
FrameOrSeries, prefix: str) -> FrameOrSeries:\n \"\"\"\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_prefix('item_')\n item_0 1\n item_1 2\n item_2 3\n item_3 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial(\"{prefix}{}\".format, prefix=prefix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper) # type: ignore\n\n def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:\n \"\"\"\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_suffix('_item')\n 0_item 1\n 1_item 2\n 2_item 3\n 3_item 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial(\"{}{suffix}\".format, suffix=suffix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper) # type: ignore\n\n def sort_values(\n self,\n axis=0,\n ascending=True,\n inplace: bool_t = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool_t = False,\n key: ValueKeyFunc = None,\n ):\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------%(optional_by)s\n axis : %(axes_single_arg)s, default 0\n Axis to be sorted.\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n end.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n Apply the key function to the values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. 
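(Illustrative aside: a vectorized ``key`` function, applied to a whole column at once rather than element-by-element; the frame is a throwaway.)

>>> pd.DataFrame({'v': ['b', 'A', 'c']}).sort_values(
...     'v', key=lambda col: col.str.lower())['v'].tolist()
['A', 'b', 'c']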
It should expect a\n ``Series`` and return a Series with the same shape as the input.\n It will be applied to each column in `by` independently.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame or None\n DataFrame with sorted values if inplace=False, None otherwise.\n\n See Also\n --------\n DataFrame.sort_index : Sort a DataFrame by the index.\n Series.sort_values : Similar method for a Series.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F']\n ... })\n >>> df\n col1 col2 col3 col4\n 0 A 2 0 a\n 1 A 1 1 B\n 2 B 9 9 c\n 3 NaN 8 4 D\n 4 D 7 2 e\n 5 C 4 3 F\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3 col4\n 0 A 2 0 a\n 1 A 1 1 B\n 2 B 9 9 c\n 5 C 4 3 F\n 4 D 7 2 e\n 3 NaN 8 4 D\n\n Sort by multiple columns\n\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3 col4\n 1 A 1 1 B\n 0 A 2 0 a\n 2 B 9 9 c\n 5 C 4 3 F\n 4 D 7 2 e\n 3 NaN 8 4 D\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3 col4\n 4 D 7 2 e\n 5 C 4 3 F\n 2 B 9 9 c\n 0 A 2 0 a\n 1 A 1 1 B\n 3 NaN 8 4 D\n\n Putting NAs first\n\n >>> df.sort_values(by='col1', ascending=False, na_position='first')\n col1 col2 col3 col4\n 3 NaN 8 4 D\n 4 D 7 2 e\n 5 C 4 3 F\n 2 B 9 9 c\n 0 A 2 0 a\n 1 A 1 1 B\n\n Sorting with a key function\n\n >>> df.sort_values(by='col4', key=lambda col: col.str.lower())\n col1 col2 col3 col4\n 0 A 2 0 a\n 1 A 1 1 B\n 2 B 9 9 c\n 3 NaN 8 4 D\n 4 D 7 2 e\n 5 C 4 3 F\n \"\"\"\n raise AbstractMethodError(self)\n\n @doc(\n klass=_shared_doc_kwargs[\"klass\"],\n axes=_shared_doc_kwargs[\"axes\"],\n optional_labels=\"\",\n optional_axis=\"\",\n )\n def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:\n \"\"\"\n Conform {klass} to new index with optional filling logic.\n\n Places NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n {optional_labels}\n {axes} : array-like, optional\n New labels / index to conform to, should be specified using\n keywords. Preferably an Index object to avoid duplicating data.\n {optional_axis}\n method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: Propagate last valid observation forward to next\n valid.\n * backfill / bfill: Use next valid observation to fill gap.\n * nearest: Use nearest valid observations to fill gap.\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n limit : int, default None\n Maximum number of consecutive elements to forward or backward fill.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. 
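(Illustrative aside: ``method='nearest'`` bounded by ``tolerance``, on a throwaway numeric index. The target 2 is within distance 1 of an existing label, the target 7 is not.)

>>> s = pd.Series([10, 20, 30], index=[1, 5, 9])
>>> s.reindex([2, 7], method='nearest', tolerance=1).tolist()
[10.0, nan]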
The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n Returns\n -------\n {klass} with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={{'index', 'columns'}}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']\n >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301],\n ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}},\n ... index=index)\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',\n ... 'Chrome']\n >>> df.reindex(new_index)\n http_status response_time\n Safari 404.0 0.07\n Iceweasel NaN NaN\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Chrome 200.0 0.02\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``. Because the index is not monotonically\n increasing or decreasing, we cannot use arguments to the keyword\n ``method`` to fill the ``NaN`` values.\n\n >>> df.reindex(new_index, fill_value=0)\n http_status response_time\n Safari 404 0.07\n Iceweasel 0 0.00\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Chrome 200 0.02\n\n >>> df.reindex(new_index, fill_value='missing')\n http_status response_time\n Safari 404 0.07\n Iceweasel missing missing\n Comodo Dragon missing missing\n IE10 404 0.08\n Chrome 200 0.02\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=['http_status', 'user_agent'])\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n Or we can use \"axis-style\" keyword arguments\n\n >>> df.reindex(['http_status', 'user_agent'], axis=\"columns\")\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')\n >>> df2 = pd.DataFrame({{\"prices\": [100, 101, np.nan, 100, 89, 88]}},\n ... 
index=date_index)\n >>> df2\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')\n >>> df2.reindex(date_index2)\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n The index entries that did not have a value in the original data frame\n (for example, '2009-12-29') are by default filled with ``NaN``.\n If desired, we can fill in the missing values using one of several\n options.\n\n For example, to back-propagate the last valid value to fill the ``NaN``\n values, pass ``bfill`` as an argument to the ``method`` keyword.\n\n >>> df2.reindex(date_index2, method='bfill')\n prices\n 2009-12-29 100.0\n 2009-12-30 100.0\n 2009-12-31 100.0\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n Please note that the ``NaN`` value present in the original dataframe\n (at index value 2010-01-03) will not be filled by any of the\n value propagation schemes. This is because filling while reindexing\n does not look at dataframe values, but only compares the original and\n desired indexes. If you do want to fill in the ``NaN`` values present\n in the original dataframe, use the ``fillna()`` method.\n\n See the :ref:`user guide <basics.reindexing>` for more.\n \"\"\"\n # TODO: Decide if we care about having different examples for different\n # kinds\n\n # construct the args\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs)\n method = missing.clean_reindex_fill_method(kwargs.pop(\"method\", None))\n level = kwargs.pop(\"level\", None)\n copy = kwargs.pop(\"copy\", True)\n limit = kwargs.pop(\"limit\", None)\n tolerance = kwargs.pop(\"tolerance\", None)\n fill_value = kwargs.pop(\"fill_value\", None)\n\n # Series.reindex doesn't use / need the axis kwarg\n # We pop and ignore it here, to make writing Series/Frame generic code\n # easier\n kwargs.pop(\"axis\", None)\n\n if kwargs:\n raise TypeError(\n \"reindex() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n self._consolidate_inplace()\n\n # if all axes that are requested to reindex are equal, then only copy\n # if indicated must have index names equal here as well as values\n if all(\n self._get_axis(axis).identical(ax)\n for axis, ax in axes.items()\n if ax is not None\n ):\n if copy:\n return self.copy()\n return self\n\n # check if we are a multi reindex\n if self._needs_reindex_multi(axes, method, level):\n return self._reindex_multi(axes, copy, fill_value)\n\n # perform the reindex on the axes\n return self._reindex_axes(\n axes, level, limit, tolerance, method, fill_value, copy\n ).__finalize__(self, method=\"reindex\")\n\n def _reindex_axes(\n self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy\n ) -> FrameOrSeries:\n \"\"\"Perform the reindex for all the axes.\"\"\"\n obj = self\n for a in self._AXIS_ORDERS:\n labels = axes[a]\n if labels is None:\n continue\n\n ax = self._get_axis(a)\n new_index, indexer = ax.reindex(\n labels, level=level, limit=limit, tolerance=tolerance, method=method\n )\n\n axis = self._get_axis_number(a)\n obj = obj._reindex_with_indexers(\n {axis: [new_index, indexer]},\n fill_value=fill_value,\n copy=copy,\n allow_dups=False,\n 
)\n\n return obj\n\n def _needs_reindex_multi(self, axes, method, level) -> bool_t:\n \"\"\"Check if we do need a multi reindex.\"\"\"\n return (\n (com.count_not_none(*axes.values()) == self._AXIS_LEN)\n and method is None\n and level is None\n and not self._is_mixed_type\n )\n\n def _reindex_multi(self, axes, copy, fill_value):\n raise AbstractMethodError(self)\n\n def _reindex_with_indexers(\n self: FrameOrSeries,\n reindexers,\n fill_value=None,\n copy: bool_t = False,\n allow_dups: bool_t = False,\n ) -> FrameOrSeries:\n \"\"\"allow_dups indicates an internal call here \"\"\"\n # reindex doing multiple operations on different axes if indicated\n new_data = self._mgr\n for axis in sorted(reindexers.keys()):\n index, indexer = reindexers[axis]\n baxis = self._get_block_manager_axis(axis)\n\n if index is None:\n continue\n\n index = ensure_index(index)\n if indexer is not None:\n indexer = ensure_int64(indexer)\n\n # TODO: speed up on homogeneous DataFrame objects\n new_data = new_data.reindex_indexer(\n index,\n indexer,\n axis=baxis,\n fill_value=fill_value,\n allow_dups=allow_dups,\n copy=copy,\n )\n # If we've made a copy once, no need to make another one\n copy = False\n\n if copy and new_data is self._mgr:\n new_data = new_data.copy()\n\n return self._constructor(new_data).__finalize__(self)\n\n def filter(\n self: FrameOrSeries,\n items=None,\n like: Optional[str] = None,\n regex: Optional[str] = None,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Subset the dataframe rows or columns according to the specified index labels.\n\n Note that this routine does not filter a dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n Keep labels from axis which are in items.\n like : str\n Keep labels from axis for which \"like in label == True\".\n regex : str (regular expression)\n Keep labels from axis for which re.search(regex, label) == True.\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default None\n The axis to filter on, expressed either as an index (int)\n or axis name (str). By default this is the info axis,\n 'index' for Series, 'columns' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),\n ... index=['mouse', 'rabbit'],\n ... 
columns=['one', 'two', 'three'])\n >>> df\n one two three\n mouse 1 2 3\n rabbit 4 5 6\n\n >>> # select columns by name\n >>> df.filter(items=['one', 'three'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex='e$', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing 'bbi'\n >>> df.filter(like='bbi', axis=0)\n one two three\n rabbit 4 5 6\n \"\"\"\n nkw = com.count_not_none(items, like, regex)\n if nkw > 1:\n raise TypeError(\n \"Keyword arguments `items`, `like`, or `regex` \"\n \"are mutually exclusive\"\n )\n\n if axis is None:\n axis = self._info_axis_name\n labels = self._get_axis(axis)\n\n if items is not None:\n name = self._get_axis_name(axis)\n return self.reindex(**{name: [r for r in items if r in labels]})\n elif like:\n\n def f(x):\n return like in ensure_str(x)\n\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n elif regex:\n\n def f(x):\n return matcher.search(ensure_str(x)) is not None\n\n matcher = re.compile(regex)\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n else:\n raise TypeError(\"Must pass either `items`, `like`, or `regex`\")\n\n def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n For negative values of `n`, this function returns all rows except\n the last `n` rows, equivalent to ``df[:-n]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n same type as caller\n The first `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.tail: Returns the last `n` rows.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n\n For negative values of `n`\n\n >>> df.head(-3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n \"\"\"\n return self.iloc[:n]\n\n def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:\n \"\"\"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n For negative values of `n`, this function returns all rows except\n the first `n` rows, equivalent to ``df[n:]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 
'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail()\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3)\n animal\n 6 shark\n 7 whale\n 8 zebra\n\n For negative values of `n`\n\n >>> df.tail(-3)\n animal\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n \"\"\"\n if n == 0:\n return self.iloc[0:0]\n return self.iloc[-n:]\n\n def sample(\n self: FrameOrSeries,\n n=None,\n frac=None,\n replace=False,\n weights=None,\n random_state=None,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Return a random sample of items from an axis of object.\n\n You can use `random_state` for reproducibility.\n\n Parameters\n ----------\n n : int, optional\n Number of items from axis to return. Cannot be used with `frac`.\n Default = 1 if `frac` = None.\n frac : float, optional\n Fraction of axis items to return. Cannot be used with `n`.\n replace : bool, default False\n Allow or disallow sampling of the same row more than once.\n weights : str or ndarray-like, optional\n Default 'None' results in equal probability weighting.\n If passed a Series, will align with target object on index. Index\n values in weights not found in sampled object will be ignored and\n index values in sampled object not in weights will be assigned\n weights of zero.\n If called on a DataFrame, will accept the name of a column\n when axis = 0.\n Unless weights are a Series, weights must be same length as axis\n being sampled.\n If weights do not sum to 1, they will be normalized to sum to 1.\n Missing values in the weights column will be treated as zero.\n Infinite values not allowed.\n random_state : int, array-like, BitGenerator, np.random.RandomState, optional\n If int, array-like, or BitGenerator (NumPy>=1.17), seed for\n random number generator\n If np.random.RandomState, use as numpy RandomState object.\n\n .. versionchanged:: 1.1.0\n\n array-like and BitGenerator (for NumPy>=1.17) object now passed to\n np.random.RandomState() as seed\n\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default None\n Axis to sample. Accepts axis number or name. Default is stat axis\n for given data type (0 for Series and DataFrames).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing `n` items randomly\n sampled from the caller object.\n\n See Also\n --------\n DataFrameGroupBy.sample: Generates random samples from each group of a\n DataFrame object.\n SeriesGroupBy.sample: Generates random samples from each group of a\n Series object.\n numpy.random.choice: Generates a random sample from a given 1-D numpy\n array.\n\n Notes\n -----\n If `frac` > 1, `replacement` should be set to `True`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... 
index=['falcon', 'dog', 'spider', 'fish'])\n >>> df\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n Extract 3 random elements from the ``Series`` ``df['num_legs']``:\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df['num_legs'].sample(n=3, random_state=1)\n fish 0\n spider 8\n falcon 2\n Name: num_legs, dtype: int64\n\n A random 50% sample of the ``DataFrame`` with replacement:\n\n >>> df.sample(frac=0.5, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n\n An upsample sample of the ``DataFrame`` with replacement:\n Note that `replace` parameter has to be `True` for `frac` parameter > 1.\n\n >>> df.sample(frac=2, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n falcon 2 2 10\n falcon 2 2 10\n fish 0 0 8\n dog 4 0 2\n fish 0 0 8\n dog 4 0 2\n\n Using a DataFrame column as weights. Rows with larger value in the\n `num_specimen_seen` column are more likely to be sampled.\n\n >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n\n axis = self._get_axis_number(axis)\n axis_length = self.shape[axis]\n\n # Process random_state argument\n rs = com.random_state(random_state)\n\n # Check weights for compliance\n if weights is not None:\n\n # If a series, align with frame\n if isinstance(weights, ABCSeries):\n weights = weights.reindex(self.axes[axis])\n\n # Strings acceptable if a dataframe and axis = 0\n if isinstance(weights, str):\n if isinstance(self, ABCDataFrame):\n if axis == 0:\n try:\n weights = self[weights]\n except KeyError as err:\n raise KeyError(\n \"String passed to weights not a valid column\"\n ) from err\n else:\n raise ValueError(\n \"Strings can only be passed to \"\n \"weights when sampling from rows on \"\n \"a DataFrame\"\n )\n else:\n raise ValueError(\n \"Strings cannot be passed as weights \"\n \"when sampling from a Series.\"\n )\n\n weights = pd.Series(weights, dtype=\"float64\")\n\n if len(weights) != axis_length:\n raise ValueError(\n \"Weights and axis to be sampled must be of same length\"\n )\n\n if (weights == np.inf).any() or (weights == -np.inf).any():\n raise ValueError(\"weight vector may not include `inf` values\")\n\n if (weights < 0).any():\n raise ValueError(\"weight vector many not include negative values\")\n\n # If has nan, set to zero.\n weights = weights.fillna(0)\n\n # Renormalize if don't sum to 1\n if weights.sum() != 1:\n if weights.sum() != 0:\n weights = weights / weights.sum()\n else:\n raise ValueError(\"Invalid weights: weights sum to zero\")\n\n weights = weights._values\n\n # If no frac or n, default to n=1.\n if n is None and frac is None:\n n = 1\n elif frac is not None and frac > 1 and not replace:\n raise ValueError(\n \"Replace has to be set to `True` when \"\n \"upsampling the population `frac` > 1.\"\n )\n elif n is not None and frac is None and n % 1 != 0:\n raise ValueError(\"Only integers accepted as `n` values\")\n elif n is None and frac is not None:\n n = int(round(frac * axis_length))\n elif n is not None and frac is not None:\n raise ValueError(\"Please enter a value for `frac` OR `n`, not both\")\n\n # Check for negative sizes\n if n < 0:\n raise ValueError(\n \"A negative number of rows requested. 
Please provide positive value.\"\n )\n\n locs = rs.choice(axis_length, size=n, replace=replace, p=weights)\n return self.take(locs, axis=axis)\n\n @doc(klass=_shared_doc_kwargs[\"klass\"])\n def pipe(self, func, *args, **kwargs):\n r\"\"\"\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n Function to apply to the {klass}.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the {klass}.\n args : iterable, optional\n Positional arguments passed into ``func``.\n kwargs : mapping, optional\n A dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n See Also\n --------\n DataFrame.apply : Apply a function along input axis of DataFrame.\n DataFrame.applymap : Apply a function elementwise on a whole DataFrame.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n\n Notes\n -----\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. Instead of writing\n\n >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP\n\n You can write\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(func, arg2=b, arg3=c)\n ... ) # doctest: +SKIP\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((func, 'arg2'), arg1=a, arg3=c)\n ... ) # doctest: +SKIP\n \"\"\"\n return com.pipe(self, func, *args, **kwargs)\n\n _shared_docs[\"aggregate\"] = dedent(\n \"\"\"\n Aggregate using one or more operations over the specified axis.\n {versionadded}\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for aggregating the data. If a function, must either\n work when passed a {klass} or when passed to {klass}.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.sum, 'mean']``\n - dict of axis labels -> functions, function names or list of such.\n {axis}\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n scalar, Series or DataFrame\n\n The return can be:\n\n * scalar : when Series.agg is called with single function\n * Series : when DataFrame.agg is called with a single function\n * DataFrame : when DataFrame.agg is called with several functions\n\n Return scalar, Series or DataFrame.\n {see_also}\n Notes\n -----\n `agg` is an alias for `aggregate`. Use the alias.\n\n A passed user-defined-function will be passed a Series for evaluation.\n {examples}\"\"\"\n )\n\n # ----------------------------------------------------------------------\n # Attribute access\n\n def __finalize__(\n self: FrameOrSeries, other, method: Optional[str] = None, **kwargs\n ) -> FrameOrSeries:\n \"\"\"\n Propagate metadata from other to self.\n\n Parameters\n ----------\n other : the object from which to get the attributes that we are going\n to propagate\n method : str, optional\n A passed method name providing context on where ``__finalize__``\n was called.\n\n .. 
warning:\n\n The value passed as `method` are not currently considered\n stable across pandas releases.\n \"\"\"\n if isinstance(other, NDFrame):\n for name in other.attrs:\n self.attrs[name] = other.attrs[name]\n # For subclasses using _metadata.\n for name in self._metadata:\n assert isinstance(name, str)\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def __getattr__(self, name: str):\n \"\"\"\n After regular attribute access, try looking up the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n # Note: obj.x will always call obj.__getattribute__('x') prior to\n # calling obj.__getattr__('x').\n if (\n name in self._internal_names_set\n or name in self._metadata\n or name in self._accessors\n ):\n return object.__getattribute__(self, name)\n else:\n if self._info_axis._can_hold_identifiers_and_holds_name(name):\n return self[name]\n return object.__getattribute__(self, name)\n\n def __setattr__(self, name: str, value) -> None:\n \"\"\"\n After regular attribute access, try setting the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n # first try regular attribute access via __getattribute__, so that\n # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify\n # the same attribute.\n\n try:\n object.__getattribute__(self, name)\n return object.__setattr__(self, name, value)\n except AttributeError:\n pass\n\n # if this fails, go on to more involved attribute setting\n # (note that this matches __getattr__, above).\n if name in self._internal_names_set:\n object.__setattr__(self, name, value)\n elif name in self._metadata:\n object.__setattr__(self, name, value)\n else:\n try:\n existing = getattr(self, name)\n if isinstance(existing, Index):\n object.__setattr__(self, name, value)\n elif name in self._info_axis:\n self[name] = value\n else:\n object.__setattr__(self, name, value)\n except (AttributeError, TypeError):\n if isinstance(self, ABCDataFrame) and (is_list_like(value)):\n warnings.warn(\n \"Pandas doesn't allow columns to be \"\n \"created via a new attribute name - see \"\n \"https://pandas.pydata.org/pandas-docs/\"\n \"stable/indexing.html#attribute-access\",\n stacklevel=2,\n )\n object.__setattr__(self, name, value)\n\n def _dir_additions(self):\n \"\"\"\n add the string-like attributes from the info_axis.\n If info_axis is a MultiIndex, it's first level values are used.\n \"\"\"\n additions = {\n c\n for c in self._info_axis.unique(level=0)[:100]\n if isinstance(c, str) and c.isidentifier()\n }\n return super()._dir_additions().union(additions)\n\n # ----------------------------------------------------------------------\n # Consolidation of internals\n\n def _protect_consolidate(self, f):\n \"\"\"\n Consolidate _mgr -- if the blocks have changed, then clear the\n cache\n \"\"\"\n blocks_before = len(self._mgr.blocks)\n result = f()\n if len(self._mgr.blocks) != blocks_before:\n self._clear_item_cache()\n return result\n\n def _consolidate_inplace(self) -> None:\n \"\"\"Consolidate data in place and return None\"\"\"\n\n def f():\n self._mgr = self._mgr.consolidate()\n\n self._protect_consolidate(f)\n\n def _consolidate(self, inplace: bool_t = False):\n \"\"\"\n Compute NDFrame with \"consolidated\" internals (data of each dtype\n grouped together in a single ndarray).\n\n Parameters\n ----------\n inplace : bool, default False\n If False return new object, otherwise modify existing object.\n\n Returns\n -------\n consolidated : same type as caller\n \"\"\"\n inplace = 
validate_bool_kwarg(inplace, \"inplace\")\n if inplace:\n self._consolidate_inplace()\n else:\n f = lambda: self._mgr.consolidate()\n cons_data = self._protect_consolidate(f)\n return self._constructor(cons_data).__finalize__(self)\n\n @property\n def _is_mixed_type(self) -> bool_t:\n f = lambda: self._mgr.is_mixed_type\n return self._protect_consolidate(f)\n\n def _check_inplace_setting(self, value) -> bool_t:\n \"\"\" check whether we allow in-place setting with this type of value \"\"\"\n if self._is_mixed_type:\n if not self._mgr.is_numeric_mixed_type:\n\n # allow an actual np.nan thru\n if is_float(value) and np.isnan(value):\n return True\n\n raise TypeError(\n \"Cannot do inplace boolean setting on \"\n \"mixed-types with a non np.nan value\"\n )\n\n return True\n\n def _get_numeric_data(self):\n return self._constructor(self._mgr.get_numeric_data()).__finalize__(self)\n\n def _get_bool_data(self):\n return self._constructor(self._mgr.get_bool_data()).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Internal Interface Methods\n\n @property\n def values(self) -> np.ndarray:\n \"\"\"\n Return a Numpy representation of the DataFrame.\n\n .. warning::\n\n We recommend using :meth:`DataFrame.to_numpy` instead.\n\n Only the values in the DataFrame will be returned, the axes labels\n will be removed.\n\n Returns\n -------\n numpy.ndarray\n The values of the DataFrame.\n\n See Also\n --------\n DataFrame.to_numpy : Recommended alternative to this method.\n DataFrame.index : Retrieve the index labels.\n DataFrame.columns : Retrieving the column names.\n\n Notes\n -----\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By :func:`numpy.find_common_type` convention, mixing int64\n and uint64 will result in a float64 dtype.\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results\n in an array of the same type.\n\n >>> df = pd.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]])\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32)\n results in an ndarray of the broadest type that accommodates these\n mixed types (e.g., object).\n\n >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 1),\n ... ('monkey', np.nan, None)],\n ... columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 1],\n ['monkey', nan, None]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n return self._mgr.as_array(transpose=self._AXIS_REVERSED)\n\n @property\n def _values(self) -> np.ndarray:\n \"\"\"internal implementation\"\"\"\n return self.values\n\n @property\n def dtypes(self):\n \"\"\"\n Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. 
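(Illustrative aside, tying this back to the upcasting rule described for ``values`` above; the frames are throwaways.)

>>> pd.DataFrame({'i': [1], 'f': [0.5]}).values.dtype
dtype('float64')
>>> pd.DataFrame({'i': [1], 's': ['a']}).values.dtype
dtype('O')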
Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = pd.DataFrame({'float': [1.0],\n ... 'int': [1],\n ... 'datetime': [pd.Timestamp('20180310')],\n ... 'string': ['foo']})\n >>> df.dtypes\n float float64\n int int64\n datetime datetime64[ns]\n string object\n dtype: object\n \"\"\"\n data = self._mgr.get_dtypes()\n return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_)\n\n def _to_dict_of_blocks(self, copy: bool_t = True):\n \"\"\"\n Return a dict of dtype -> Constructor Types that\n each is a homogeneous dtype.\n\n Internal ONLY\n \"\"\"\n return {\n k: self._constructor(v).__finalize__(self)\n for k, v, in self._mgr.to_dict(copy=copy).items()\n }\n\n def astype(\n self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = \"raise\"\n ) -> FrameOrSeries:\n \"\"\"\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n copy : bool, default True\n Return a copy when ``copy=True`` (be very careful setting\n ``copy=False`` as changes to values then may propagate to other\n pandas objects).\n errors : {'raise', 'ignore'}, default 'raise'\n Control raising of exceptions on invalid data for provided dtype.\n\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n\n Examples\n --------\n Create a DataFrame:\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = pd.DataFrame(data=d)\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n Cast all columns to int32:\n\n >>> df.astype('int32').dtypes\n col1 int32\n col2 int32\n dtype: object\n\n Cast col1 to int32 using a dictionary:\n\n >>> df.astype({'col1': 'int32'}).dtypes\n col1 int32\n col2 int64\n dtype: object\n\n Create a series:\n\n >>> ser = pd.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n Convert to categorical type:\n\n >>> ser.astype('category')\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [1, 2]\n\n Convert to ordered categorical type with custom ordering:\n\n >>> cat_dtype = pd.api.types.CategoricalDtype(\n ... 
categories=[2, 1], ordered=True)\n >>> ser.astype(cat_dtype)\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [2 < 1]\n\n Note that using ``copy=False`` and changing data on a new\n pandas object may propagate changes:\n\n >>> s1 = pd.Series([1, 2])\n >>> s2 = s1.astype('int64', copy=False)\n >>> s2[0] = 10\n >>> s1 # note that s1[0] has changed too\n 0 10\n 1 2\n dtype: int64\n\n Create a series of dates:\n\n >>> ser_date = pd.Series(pd.date_range('20200101', periods=3))\n >>> ser_date\n 0 2020-01-01\n 1 2020-01-02\n 2 2020-01-03\n dtype: datetime64[ns]\n\n Datetimes are localized to UTC first before\n converting to the specified timezone:\n\n >>> ser_date.astype('datetime64[ns, US/Eastern]')\n 0 2019-12-31 19:00:00-05:00\n 1 2020-01-01 19:00:00-05:00\n 2 2020-01-02 19:00:00-05:00\n dtype: datetime64[ns, US/Eastern]\n \"\"\"\n if is_dict_like(dtype):\n if self.ndim == 1: # i.e. Series\n if len(dtype) > 1 or self.name not in dtype:\n raise KeyError(\n \"Only the Series name can be used for \"\n \"the key in Series dtype mappings.\"\n )\n new_type = dtype[self.name]\n return self.astype(new_type, copy, errors)\n\n for col_name in dtype.keys():\n if col_name not in self:\n raise KeyError(\n \"Only a column name can be used for the \"\n \"key in a dtype mappings argument.\"\n )\n results = []\n for col_name, col in self.items():\n if col_name in dtype:\n results.append(\n col.astype(dtype=dtype[col_name], copy=copy, errors=errors)\n )\n else:\n results.append(col.copy() if copy else col)\n\n elif is_extension_array_dtype(dtype) and self.ndim > 1:\n # GH 18099/22869: columnwise conversion to extension dtype\n # GH 24704: use iloc to handle duplicate column names\n results = [\n self.iloc[:, i].astype(dtype, copy=copy)\n for i in range(len(self.columns))\n ]\n\n else:\n # else, only a single dtype is given\n new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors,)\n return self._constructor(new_data).__finalize__(self, method=\"astype\")\n\n # GH 33113: handle empty frame or series\n if not results:\n return self.copy()\n\n # GH 19920: retain column metadata after concat\n result = pd.concat(results, axis=1, copy=False)\n result.columns = self.columns\n return result\n\n def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:\n \"\"\"\n Make a copy of this object's indices and data.\n\n When ``deep=True`` (default), a new object will be created with a\n copy of the calling object's data and indices. Modifications to\n the data or indices of the copy will not be reflected in the\n original object (see notes below).\n\n When ``deep=False``, a new object will be created without copying\n the calling object's data or index (only references to the data\n and index are copied). Any changes to the data of the original\n will be reflected in the shallow copy (and vice versa).\n\n Parameters\n ----------\n deep : bool, default True\n Make a deep copy, including a copy of the data and the indices.\n With ``deep=False`` neither the indices nor the data are copied.\n\n Returns\n -------\n copy : Series or DataFrame\n Object type matches caller.\n\n Notes\n -----\n When ``deep=True``, data is copied but actual Python objects\n will not be copied recursively, only the reference to the object.\n This is in contrast to `copy.deepcopy` in the Standard Library,\n which recursively copies object data (see examples below).\n\n While ``Index`` objects are copied when ``deep=True``, the underlying\n numpy array is not copied for performance reasons. 
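\n\n        (An added, hedged illustration, not from the upstream docstring: the\n        copied ``Index`` is a distinct object that still compares equal.)\n\n        >>> s = pd.Series([1, 2], index=['a', 'b'])\n        >>> s.copy(deep=True).index is s.index\n        False\n        >>> s.copy(deep=True).index.equals(s.index)\n        True\n\n        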
Since ``Index`` is\n immutable, the underlying data can be safely shared and a copy\n is not needed.\n\n Examples\n --------\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> s\n a 1\n b 2\n dtype: int64\n\n >>> s_copy = s.copy()\n >>> s_copy\n a 1\n b 2\n dtype: int64\n\n **Shallow copy versus default (deep) copy:**\n\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> deep = s.copy()\n >>> shallow = s.copy(deep=False)\n\n Shallow copy shares data and index with original.\n\n >>> s is shallow\n False\n >>> s.values is shallow.values and s.index is shallow.index\n True\n\n Deep copy has own copy of data and index.\n\n >>> s is deep\n False\n >>> s.values is deep.values or s.index is deep.index\n False\n\n Updates to the data shared by shallow copy and original is reflected\n in both; deep copy remains unchanged.\n\n >>> s[0] = 3\n >>> shallow[1] = 4\n >>> s\n a 3\n b 4\n dtype: int64\n >>> shallow\n a 3\n b 4\n dtype: int64\n >>> deep\n a 1\n b 2\n dtype: int64\n\n Note that when copying an object containing Python objects, a deep copy\n will copy the data, but will not do so recursively. Updating a nested\n data object will be reflected in the deep copy.\n\n >>> s = pd.Series([[1, 2], [3, 4]])\n >>> deep = s.copy()\n >>> s[0][0] = 10\n >>> s\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n >>> deep\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n \"\"\"\n data = self._mgr.copy(deep=deep)\n self._clear_item_cache()\n return self._constructor(data).__finalize__(self, method=\"copy\")\n\n def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:\n return self.copy(deep=deep)\n\n def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n \"\"\"\n return self.copy(deep=True)\n\n def _convert(\n self: FrameOrSeries,\n datetime: bool_t = False,\n numeric: bool_t = False,\n timedelta: bool_t = False,\n coerce: bool_t = False,\n ) -> FrameOrSeries:\n \"\"\"\n Attempt to infer better dtype for object columns\n\n Parameters\n ----------\n datetime : bool, default False\n If True, convert to date where possible.\n numeric : bool, default False\n If True, attempt to convert to numbers (including strings), with\n unconvertible values becoming NaN.\n timedelta : bool, default False\n If True, convert to timedelta where possible.\n coerce : bool, default False\n If True, force conversion with unconvertible values converted to\n nulls (NaN or NaT).\n\n Returns\n -------\n converted : same as input object\n \"\"\"\n validate_bool_kwarg(datetime, \"datetime\")\n validate_bool_kwarg(numeric, \"numeric\")\n validate_bool_kwarg(timedelta, \"timedelta\")\n validate_bool_kwarg(coerce, \"coerce\")\n return self._constructor(\n self._mgr.convert(\n datetime=datetime,\n numeric=numeric,\n timedelta=timedelta,\n coerce=coerce,\n copy=True,\n )\n ).__finalize__(self)\n\n def infer_objects(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\"\n Attempt to infer better dtypes for object columns.\n\n Attempts soft conversion of object-dtyped\n columns, leaving non-object and unconvertible\n columns unchanged. 
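\n\n        (A brief added illustration, hedged rather than taken from the upstream\n        docstring: a column that genuinely mixes types stays ``object``.)\n\n        >>> pd.Series(['a', 1]).infer_objects().dtype\n        dtype('O')\n\n        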
The inference rules are the\n same as during normal Series/DataFrame construction.\n\n Returns\n -------\n converted : same type as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n convert_dtypes : Convert argument to best possible dtype.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"a\", 1, 2, 3]})\n >>> df = df.iloc[1:]\n >>> df\n A\n 1 1\n 2 2\n 3 3\n\n >>> df.dtypes\n A object\n dtype: object\n\n >>> df.infer_objects().dtypes\n A int64\n dtype: object\n \"\"\"\n # numeric=False necessary to only soft convert;\n # python objects will still be converted to\n # native numpy numeric types\n return self._constructor(\n self._mgr.convert(\n datetime=True, numeric=False, timedelta=True, coerce=False, copy=True\n )\n ).__finalize__(self, method=\"infer_objects\")\n\n def convert_dtypes(\n self: FrameOrSeries,\n infer_objects: bool_t = True,\n convert_string: bool_t = True,\n convert_integer: bool_t = True,\n convert_boolean: bool_t = True,\n ) -> FrameOrSeries:\n \"\"\"\n Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n infer_objects : bool, default True\n Whether object dtypes should be converted to the best possible types.\n convert_string : bool, default True\n Whether object dtypes should be converted to ``StringDtype()``.\n convert_integer : bool, default True\n Whether, if possible, conversion can be done to integer extension types.\n convert_boolean : bool, defaults True\n Whether object dtypes should be converted to ``BooleanDtypes()``.\n\n Returns\n -------\n Series or DataFrame\n Copy of input object with new dtype.\n\n See Also\n --------\n infer_objects : Infer dtypes of objects.\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n\n Notes\n -----\n By default, ``convert_dtypes`` will attempt to convert a Series (or each\n Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options\n ``convert_string``, ``convert_integer``, and ``convert_boolean``, it is\n possible to turn off individual conversions to ``StringDtype``, the integer\n extension types or ``BooleanDtype``, respectively.\n\n For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference\n rules as during normal Series/DataFrame construction. Then, if possible,\n convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension\n type, otherwise leave as ``object``.\n\n If the dtype is integer, convert to an appropriate integer extension type.\n\n If the dtype is numeric, and consists of all integers, convert to an\n appropriate integer extension type.\n\n In the future, as new dtypes are added that support ``pd.NA``, the results\n of this method will change to support those new dtypes.\n\n Examples\n --------\n >>> df = pd.DataFrame(\n ... {\n ... \"a\": pd.Series([1, 2, 3], dtype=np.dtype(\"int32\")),\n ... \"b\": pd.Series([\"x\", \"y\", \"z\"], dtype=np.dtype(\"O\")),\n ... \"c\": pd.Series([True, False, np.nan], dtype=np.dtype(\"O\")),\n ... \"d\": pd.Series([\"h\", \"i\", np.nan], dtype=np.dtype(\"O\")),\n ... \"e\": pd.Series([10, np.nan, 20], dtype=np.dtype(\"float\")),\n ... \"f\": pd.Series([np.nan, 100.5, 200], dtype=np.dtype(\"float\")),\n ... }\n ... 
)\n\n Start with a DataFrame with default dtypes.\n\n >>> df\n a b c d e f\n 0 1 x True h 10.0 NaN\n 1 2 y False i NaN 100.5\n 2 3 z NaN NaN 20.0 200.0\n\n >>> df.dtypes\n a int32\n b object\n c object\n d object\n e float64\n f float64\n dtype: object\n\n Convert the DataFrame to use best possible dtypes.\n\n >>> dfn = df.convert_dtypes()\n >>> dfn\n a b c d e f\n 0 1 x True h 10 NaN\n 1 2 y False i <NA> 100.5\n 2 3 z <NA> <NA> 20 200.0\n\n >>> dfn.dtypes\n a Int32\n b string\n c boolean\n d string\n e Int64\n f float64\n dtype: object\n\n Start with a Series of strings and missing data represented by ``np.nan``.\n\n >>> s = pd.Series([\"a\", \"b\", np.nan])\n >>> s\n 0 a\n 1 b\n 2 NaN\n dtype: object\n\n Obtain a Series with dtype ``StringDtype``.\n\n >>> s.convert_dtypes()\n 0 a\n 1 b\n 2 <NA>\n dtype: string\n \"\"\"\n if self.ndim == 1:\n return self._convert_dtypes(\n infer_objects, convert_string, convert_integer, convert_boolean\n )\n else:\n results = [\n col._convert_dtypes(\n infer_objects, convert_string, convert_integer, convert_boolean\n )\n for col_name, col in self.items()\n ]\n result = pd.concat(results, axis=1, copy=False)\n return result\n\n # ----------------------------------------------------------------------\n # Filling NA's\n\n @doc(**_shared_doc_kwargs)\n def fillna(\n self: FrameOrSeries,\n value=None,\n method=None,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list.\n method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use next valid observation to fill gap.\n axis : {axes_single_arg}\n Axis along which to fill missing values.\n inplace : bool, default False\n If True, fill in-place. Note: this will modify any\n other views on this object (e.g., a no-copy slice for a column in a\n DataFrame).\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n {klass} or None\n Object with missing values filled or None if ``inplace=True``.\n\n See Also\n --------\n interpolate : Fill NaN values using interpolation.\n reindex : Conform object to new index.\n asfreq : Convert TimeSeries to specified frequency.\n\n Examples\n --------\n >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],\n ... [3, 4, np.nan, 1],\n ... [np.nan, np.nan, np.nan, 5],\n ... [np.nan, 3, np.nan, 4]],\n ... 
columns=list('ABCD'))\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 NaN 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 0.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 NaN 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 2.0 4\n\n Only replace the first NaN element.\n\n >>> df.fillna(value=values, limit=1)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 NaN 1\n 2 NaN 1.0 NaN 5\n 3 NaN 3.0 NaN 4\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n value, method = validate_fillna_kwargs(value, method)\n\n self._consolidate_inplace()\n\n # set the default here, so functions examining the signaure\n # can detect if something was set (e.g. in groupby) (GH9221)\n if axis is None:\n axis = 0\n axis = self._get_axis_number(axis)\n\n if value is None:\n\n if self._is_mixed_type and axis == 1:\n if inplace:\n raise NotImplementedError()\n result = self.T.fillna(method=method, limit=limit).T\n\n # need to downcast here because of all of the transposes\n result._mgr = result._mgr.downcast()\n\n return result\n\n new_data = self._mgr.interpolate(\n method=method,\n axis=axis,\n limit=limit,\n inplace=inplace,\n coerce=True,\n downcast=downcast,\n )\n else:\n if self.ndim == 1:\n if isinstance(value, (dict, ABCSeries)):\n value = create_series_with_explicit_dtype(\n value, dtype_if_empty=object\n )\n value = value.reindex(self.index, copy=False)\n value = value._values\n elif not is_list_like(value):\n pass\n else:\n raise TypeError(\n '\"value\" parameter must be a scalar, dict '\n \"or Series, but you passed a \"\n f'\"{type(value).__name__}\"'\n )\n\n new_data = self._mgr.fillna(\n value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n\n elif isinstance(value, (dict, ABCSeries)):\n if axis == 1:\n raise NotImplementedError(\n \"Currently only can fill \"\n \"with dict/Series column \"\n \"by column\"\n )\n\n result = self if inplace else self.copy()\n for k, v in value.items():\n if k not in result:\n continue\n obj = result[k]\n obj.fillna(v, limit=limit, inplace=True, downcast=downcast)\n return result if not inplace else None\n\n elif not is_list_like(value):\n new_data = self._mgr.fillna(\n value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n elif isinstance(value, ABCDataFrame) and self.ndim == 2:\n new_data = self.where(self.notna(), value)._data\n else:\n raise ValueError(f\"invalid fill value with a {type(value)}\")\n\n result = self._constructor(new_data)\n if inplace:\n return self._update_inplace(result)\n else:\n return result.__finalize__(self, method=\"fillna\")\n\n def ffill(\n self: FrameOrSeries,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.\n\n Returns\n -------\n {klass} or None\n Object with missing values filled or None if ``inplace=True``.\n \"\"\"\n return self.fillna(\n method=\"ffill\", axis=axis, inplace=inplace, limit=limit, downcast=downcast\n )\n\n pad = ffill\n\n def bfill(\n self: FrameOrSeries,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n 
downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.\n\n Returns\n -------\n {klass} or None\n Object with missing values filled or None if ``inplace=True``.\n \"\"\"\n return self.fillna(\n method=\"bfill\", axis=axis, inplace=inplace, limit=limit, downcast=downcast\n )\n\n backfill = bfill\n\n @doc(klass=_shared_doc_kwargs[\"klass\"])\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n ):\n \"\"\"\n Replace values given in `to_replace` with `value`.\n\n Values of the {klass} are replaced with other values dynamically.\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\n\n Parameters\n ----------\n to_replace : str, regex, list, dict, Series, int, float, or None\n How to find the values that will be replaced.\n\n * numeric, str or regex:\n\n - numeric: numeric values equal to `to_replace` will be\n replaced with `value`\n - str: string exactly matching `to_replace` will be replaced\n with `value`\n - regex: regexs matching `to_replace` will be replaced with\n `value`\n\n * list of str, regex, or numeric:\n\n - First, if `to_replace` and `value` are both lists, they\n **must** be the same length.\n - Second, if ``regex=True`` then all of the strings in **both**\n lists will be interpreted as regexs otherwise they will match\n directly. This doesn't matter much for `value` since there\n are only a few possible substitution regexes you can use.\n - str, regex and numeric rules apply as above.\n\n * dict:\n\n - Dicts can be used to specify different replacement values\n for different existing values. For example,\n ``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and\n 'y' with 'z'. To use a dict in this way the `value`\n parameter should be `None`.\n - For a DataFrame a dict can specify that different values\n should be replaced in different columns. For example,\n ``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a'\n and the value 'z' in column 'b' and replaces these values\n with whatever is specified in `value`. The `value` parameter\n should not be ``None`` in this case. You can treat this as a\n special case of passing two lists except that you are\n specifying the column to search in.\n - For a DataFrame nested dictionaries, e.g.,\n ``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column\n 'a' for the value 'b' and replace it with NaN. The `value`\n parameter should be ``None`` to use a nested dict in this\n way. You can nest regular expressions as well. Note that\n column names (the top-level dictionary keys in a nested\n dictionary) **cannot** be regular expressions.\n\n * None:\n\n - This means that the `regex` argument must be a string,\n compiled regular expression, or list, dict, ndarray or\n Series of such elements. If `value` is also ``None`` then\n this **must** be a nested dictionary or Series.\n\n See the examples section for examples of each of these.\n value : scalar, dict, list, str, regex, default None\n Value to replace any values matching `to_replace` with.\n For a DataFrame a dict of values can be used to specify which\n value to use for each column (columns not in the dict will not be\n filled). Regular expressions, strings and lists or dicts of such\n objects are also allowed.\n inplace : bool, default False\n If True, in place. Note: this will modify any\n other views on this object (e.g. 
a column from a DataFrame).\n Returns the caller if this is True.\n limit : int, default None\n Maximum size gap to forward or backward fill.\n regex : bool or same types as `to_replace`, default False\n Whether to interpret `to_replace` and/or `value` as regular\n expressions. If this is ``True`` then `to_replace` *must* be a\n string. Alternatively, this could be a regular expression or a\n list, dict, or array of regular expressions in which case\n `to_replace` must be ``None``.\n method : {{'pad', 'ffill', 'bfill', `None`}}\n The method to use when for replacement, when `to_replace` is a\n scalar, list or tuple and `value` is ``None``.\n\n .. versionchanged:: 0.23.0\n Added to DataFrame.\n\n Returns\n -------\n {klass}\n Object after replacement.\n\n Raises\n ------\n AssertionError\n * If `regex` is not a ``bool`` and `to_replace` is not\n ``None``.\n\n TypeError\n * If `to_replace` is not a scalar, array-like, ``dict``, or ``None``\n * If `to_replace` is a ``dict`` and `value` is not a ``list``,\n ``dict``, ``ndarray``, or ``Series``\n * If `to_replace` is ``None`` and `regex` is not compilable\n into a regular expression or is a list, dict, ndarray, or\n Series.\n * When replacing multiple ``bool`` or ``datetime64`` objects and\n the arguments to `to_replace` does not match the type of the\n value being replaced\n\n ValueError\n * If a ``list`` or an ``ndarray`` is passed to `to_replace` and\n `value` but they are not the same length.\n\n See Also\n --------\n {klass}.fillna : Fill NA values.\n {klass}.where : Replace values based on boolean condition.\n Series.str.replace : Simple string replacement.\n\n Notes\n -----\n * Regex substitution is performed under the hood with ``re.sub``. The\n rules for substitution for ``re.sub`` are the same.\n * Regular expressions will only substitute on strings, meaning you\n cannot provide, for example, a regular expression matching floating\n point numbers and expect the columns in your frame that have a\n numeric dtype to be matched. However, if those floating point\n numbers *are* strings, then you can do this.\n * This method has *a lot* of options. You are encouraged to experiment\n and play with this method to gain intuition about how it works.\n * When dict is used as the `to_replace` value, it is like\n key(s) in the dict are the to_replace part and\n value(s) in the dict are the value parameter.\n\n Examples\n --------\n\n **Scalar `to_replace` and `value`**\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.replace(0, 5)\n 0 5\n 1 1\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame({{'A': [0, 1, 2, 3, 4],\n ... 'B': [5, 6, 7, 8, 9],\n ... 'C': ['a', 'b', 'c', 'd', 'e']}})\n >>> df.replace(0, 5)\n A B C\n 0 5 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n **List-like `to_replace`**\n\n >>> df.replace([0, 1, 2, 3], 4)\n A B C\n 0 4 5 a\n 1 4 6 b\n 2 4 7 c\n 3 4 8 d\n 4 4 9 e\n\n >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])\n A B C\n 0 4 5 a\n 1 3 6 b\n 2 2 7 c\n 3 1 8 d\n 4 4 9 e\n\n >>> s.replace([1, 2], method='bfill')\n 0 0\n 1 3\n 2 3\n 3 3\n 4 4\n dtype: int64\n\n **dict-like `to_replace`**\n\n >>> df.replace({{0: 10, 1: 100}})\n A B C\n 0 10 5 a\n 1 100 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({{'A': 0, 'B': 5}}, 100)\n A B C\n 0 100 100 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({{'A': {{0: 100, 4: 400}}}})\n A B C\n 0 100 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 400 9 e\n\n **Regular expression `to_replace`**\n\n >>> df = pd.DataFrame({{'A': ['bat', 'foo', 'bait'],\n ... 
'B': ['abc', 'bar', 'xyz']}})\n >>> df.replace(to_replace=r'^ba.$', value='new', regex=True)\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace({{'A': r'^ba.$'}}, {{'A': 'new'}}, regex=True)\n A B\n 0 new abc\n 1 foo bar\n 2 bait xyz\n\n >>> df.replace(regex=r'^ba.$', value='new')\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace(regex={{r'^ba.$': 'new', 'foo': 'xyz'}})\n A B\n 0 new abc\n 1 xyz new\n 2 bait xyz\n\n >>> df.replace(regex=[r'^ba.$', 'foo'], value='new')\n A B\n 0 new abc\n 1 new new\n 2 bait xyz\n\n Note that when replacing multiple ``bool`` or ``datetime64`` objects,\n the data types in the `to_replace` parameter must match the data\n type of the value being replaced:\n\n >>> df = pd.DataFrame({{'A': [True, False, True],\n ... 'B': [False, True, False]}})\n >>> df.replace({{'a string': 'new value', True: False}}) # raises\n Traceback (most recent call last):\n ...\n TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'\n\n This raises a ``TypeError`` because one of the ``dict`` keys is not of\n the correct type for replacement.\n\n Compare the behavior of ``s.replace({{'a': None}})`` and\n ``s.replace('a', None)`` to understand the peculiarities\n of the `to_replace` parameter:\n\n >>> s = pd.Series([10, 'a', 'a', 'b', 'a'])\n\n When one uses a dict as the `to_replace` value, it is like the\n value(s) in the dict are equal to the `value` parameter.\n ``s.replace({{'a': None}})`` is equivalent to\n ``s.replace(to_replace={{'a': None}}, value=None, method=None)``:\n\n >>> s.replace({{'a': None}})\n 0 10\n 1 None\n 2 None\n 3 b\n 4 None\n dtype: object\n\n When ``value=None`` and `to_replace` is a scalar, list or\n tuple, `replace` uses the method parameter (default 'pad') to do the\n replacement. So this is why the 'a' values are being replaced by 10\n in rows 1 and 2 and 'b' in row 4 in this case.\n The command ``s.replace('a', None)`` is actually equivalent to\n ``s.replace(to_replace='a', value=None, method='pad')``:\n\n >>> s.replace('a', None)\n 0 10\n 1 10\n 2 10\n 3 b\n 4 b\n dtype: object\n \"\"\"\n if not (\n is_scalar(to_replace)\n or is_re_compilable(to_replace)\n or is_list_like(to_replace)\n ):\n raise TypeError(\n \"Expecting 'to_replace' to be either a scalar, array-like, \"\n \"dict or None, got invalid type \"\n f\"{repr(type(to_replace).__name__)}\"\n )\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not is_bool(regex) and to_replace is not None:\n raise AssertionError(\"'to_replace' must be 'None' if 'regex' is not a bool\")\n\n self._consolidate_inplace()\n\n if value is None:\n # passing a single value that is scalar like\n # when value is None (GH5319), for compat\n if not is_dict_like(to_replace) and not is_dict_like(regex):\n to_replace = [to_replace]\n\n if isinstance(to_replace, (tuple, list)):\n if isinstance(self, ABCDataFrame):\n return self.apply(\n _single_replace, args=(to_replace, method, inplace, limit)\n )\n return _single_replace(self, to_replace, method, inplace, limit)\n\n if not is_dict_like(to_replace):\n if not is_dict_like(regex):\n raise TypeError(\n 'If \"to_replace\" and \"value\" are both None '\n 'and \"to_replace\" is not a list, then '\n \"regex must be a mapping\"\n )\n to_replace = regex\n regex = True\n\n items = list(to_replace.items())\n keys, values = zip(*items) if items else ([], [])\n\n are_mappings = [is_dict_like(v) for v in values]\n\n if any(are_mappings):\n if not all(are_mappings):\n raise TypeError(\n \"If a nested mapping is passed, all values \"\n \"of the top 
level mapping must be mappings\"\n )\n # passed a nested dict/Series\n to_rep_dict = {}\n value_dict = {}\n\n for k, v in items:\n keys, values = list(zip(*v.items())) or ([], [])\n\n to_rep_dict[k] = list(keys)\n value_dict[k] = list(values)\n\n to_replace, value = to_rep_dict, value_dict\n else:\n to_replace, value = keys, values\n\n return self.replace(\n to_replace, value, inplace=inplace, limit=limit, regex=regex\n )\n else:\n\n # need a non-zero len on all axes\n if not self.size:\n return self\n\n if is_dict_like(to_replace):\n if is_dict_like(value): # {'A' : NA} -> {'A' : 0}\n # Note: Checking below for `in foo.keys()` instead of\n # `in foo`is needed for when we have a Series and not dict\n mapping = {\n col: (to_replace[col], value[col])\n for col in to_replace.keys()\n if col in value.keys() and col in self\n }\n return self._replace_columnwise(mapping, inplace, regex)\n\n # {'A': NA} -> 0\n elif not is_list_like(value):\n # Operate column-wise\n if self.ndim == 1:\n raise ValueError(\n \"Series.replace cannot use dict-like to_replace \"\n \"and non-None value\"\n )\n mapping = {\n col: (to_rep, value) for col, to_rep in to_replace.items()\n }\n return self._replace_columnwise(mapping, inplace, regex)\n else:\n raise TypeError(\"value argument must be scalar, dict, or Series\")\n\n elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']\n if is_list_like(value):\n if len(to_replace) != len(value):\n raise ValueError(\n f\"Replacement lists must match in length. \"\n f\"Expecting {len(to_replace)} got {len(value)} \"\n )\n self._consolidate_inplace()\n new_data = self._mgr.replace_list(\n src_list=to_replace,\n dest_list=value,\n inplace=inplace,\n regex=regex,\n )\n\n else: # [NA, ''] -> 0\n new_data = self._mgr.replace(\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n elif to_replace is None:\n if not (\n is_re_compilable(regex)\n or is_list_like(regex)\n or is_dict_like(regex)\n ):\n raise TypeError(\n f\"'regex' must be a string or a compiled regular expression \"\n f\"or a list or dict of strings or regular expressions, \"\n f\"you passed a {repr(type(regex).__name__)}\"\n )\n return self.replace(\n regex, value, inplace=inplace, limit=limit, regex=True\n )\n else:\n\n # dest iterable dict-like\n if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}\n # Operate column-wise\n if self.ndim == 1:\n raise ValueError(\n \"Series.replace cannot use dict-value and \"\n \"non-None to_replace\"\n )\n mapping = {col: (to_replace, val) for col, val in value.items()}\n return self._replace_columnwise(mapping, inplace, regex)\n\n elif not is_list_like(value): # NA -> 0\n new_data = self._mgr.replace(\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n else:\n raise TypeError(\n f'Invalid \"to_replace\" type: {repr(type(to_replace).__name__)}'\n )\n\n result = self._constructor(new_data)\n if inplace:\n return self._update_inplace(result)\n else:\n return result.__finalize__(self, method=\"replace\")\n\n def interpolate(\n self: FrameOrSeries,\n method: str = \"linear\",\n axis: Axis = 0,\n limit: Optional[int] = None,\n inplace: bool_t = False,\n limit_direction: Optional[str] = None,\n limit_area: Optional[str] = None,\n downcast: Optional[str] = None,\n **kwargs,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Please note that only ``method='linear'`` is supported for\n DataFrame/Series with a MultiIndex.\n\n Parameters\n ----------\n method : str, default 'linear'\n Interpolation technique to use. 
One of:\n\n * 'linear': Ignore the index and treat the values as equally\n spaced. This is the only method supported on MultiIndexes.\n * 'time': Works on daily and higher resolution data to interpolate\n given length of interval.\n * 'index', 'values': use the actual numerical values of the index.\n * 'pad': Fill in NaNs using existing values.\n * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',\n 'barycentric', 'polynomial': Passed to\n `scipy.interpolate.interp1d`. These methods use the numerical\n values of the index. Both 'polynomial' and 'spline' require that\n you also specify an `order` (int), e.g.\n ``df.interpolate(method='polynomial', order=5)``.\n * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',\n 'cubicspline': Wrappers around the SciPy interpolation methods of\n similar names. See `Notes`.\n * 'from_derivatives': Refers to\n `scipy.interpolate.BPoly.from_derivatives` which\n replaces 'piecewise_polynomial' interpolation method in\n scipy 0.18.\n axis : {{0 or 'index', 1 or 'columns', None}}, default None\n Axis to interpolate along.\n limit : int, optional\n Maximum number of consecutive NaNs to fill. Must be greater than\n 0.\n inplace : bool, default False\n Update the data in place if possible.\n limit_direction : {{'forward', 'backward', 'both'}}, Optional\n Consecutive NaNs will be filled in this direction.\n\n If limit is specified:\n * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'.\n * If 'method' is 'backfill' or 'bfill', 'limit_direction' must be\n 'backwards'.\n\n If 'limit' is not specified:\n * If 'method' is 'backfill' or 'bfill', the default is 'backward'\n * else the default is 'forward'\n\n .. versionchanged:: 1.1.0\n raises ValueError if `limit_direction` is 'forward' or 'both' and\n method is 'backfill' or 'bfill'.\n raises ValueError if `limit_direction` is 'backward' or 'both' and\n method is 'pad' or 'ffill'.\n\n limit_area : {{`None`, 'inside', 'outside'}}, default None\n If limit is specified, consecutive NaNs will be filled with this\n restriction.\n\n * ``None``: No fill restriction.\n * 'inside': Only fill NaNs surrounded by valid values\n (interpolate).\n * 'outside': Only fill NaNs outside valid values (extrapolate).\n\n .. versionadded:: 0.23.0\n\n downcast : optional, 'infer' or None, defaults to None\n Downcast dtypes if possible.\n **kwargs\n Keyword arguments to pass on to the interpolating function.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller, interpolated at\n some or all ``NaN`` values.\n\n See Also\n --------\n fillna : Fill missing values using different methods.\n scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials\n (Akima interpolator).\n scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the\n Bernstein basis.\n scipy.interpolate.interp1d : Interpolate a 1-D function.\n scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh\n interpolator).\n scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic\n interpolation.\n scipy.interpolate.CubicSpline : Cubic spline data interpolator.\n\n Notes\n -----\n The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'\n methods are wrappers around the respective SciPy implementations of\n similar names. 
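\n\n        (An added, hedged aside, not part of the upstream docstring, assuming the\n        usual ``import numpy as np`` used elsewhere in these examples: the\n        practical difference between the default 'linear' method and the\n        index-aware methods is whether the index spacing is consulted at all.)\n\n        >>> s = pd.Series([0.0, np.nan, 10.0], index=[0, 1, 10])\n        >>> s.interpolate().loc[1]                # 'linear' ignores the index\n        5.0\n        >>> s.interpolate(method='index').loc[1]  # uses the index values\n        1.0\n\n        The SciPy-backed wrappers named above behave like 'index' in this\n        respect. 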
These use the actual numerical values of the index.\n For more information on their behavior, see the\n `SciPy documentation\n <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__\n and `SciPy tutorial\n <https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.\n\n Examples\n --------\n Filling in ``NaN`` in a :class:`~pandas.Series` via linear\n interpolation.\n\n >>> s = pd.Series([0, 1, np.nan, 3])\n >>> s\n 0 0.0\n 1 1.0\n 2 NaN\n 3 3.0\n dtype: float64\n >>> s.interpolate()\n 0 0.0\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n Filling in ``NaN`` in a Series by padding, but filling at most two\n consecutive ``NaN`` at a time.\n\n >>> s = pd.Series([np.nan, \"single_one\", np.nan,\n ... \"fill_two_more\", np.nan, np.nan, np.nan,\n ... 4.71, np.nan])\n >>> s\n 0 NaN\n 1 single_one\n 2 NaN\n 3 fill_two_more\n 4 NaN\n 5 NaN\n 6 NaN\n 7 4.71\n 8 NaN\n dtype: object\n >>> s.interpolate(method='pad', limit=2)\n 0 NaN\n 1 single_one\n 2 single_one\n 3 fill_two_more\n 4 fill_two_more\n 5 fill_two_more\n 6 NaN\n 7 4.71\n 8 4.71\n dtype: object\n\n Filling in ``NaN`` in a Series via polynomial interpolation or splines:\n Both 'polynomial' and 'spline' methods require that you also specify\n an ``order`` (int).\n\n >>> s = pd.Series([0, 2, np.nan, 8])\n >>> s.interpolate(method='polynomial', order=2)\n 0 0.000000\n 1 2.000000\n 2 4.666667\n 3 8.000000\n dtype: float64\n\n Fill the DataFrame forward (that is, going down) along each column\n using linear interpolation.\n\n Note how the last entry in column 'a' is interpolated differently,\n because there is no entry after it to use for interpolation.\n Note how the first entry in column 'b' remains ``NaN``, because there\n is no entry before it to use for interpolation.\n\n >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),\n ... (np.nan, 2.0, np.nan, np.nan),\n ... (2.0, 3.0, np.nan, 9.0),\n ... (np.nan, 4.0, -4.0, 16.0)],\n ... 
columns=list('abcd'))\n >>> df\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 NaN 2.0 NaN NaN\n 2 2.0 3.0 NaN 9.0\n 3 NaN 4.0 -4.0 16.0\n >>> df.interpolate(method='linear', limit_direction='forward', axis=0)\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 1.0 2.0 -2.0 5.0\n 2 2.0 3.0 -3.0 9.0\n 3 2.0 4.0 -4.0 16.0\n\n Using polynomial interpolation.\n\n >>> df['d'].interpolate(method='polynomial', order=2)\n 0 1.0\n 1 4.0\n 2 9.0\n 3 16.0\n Name: d, dtype: float64\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = self._get_axis_number(axis)\n\n fillna_methods = [\"ffill\", \"bfill\", \"pad\", \"backfill\"]\n should_transpose = axis == 1 and method not in fillna_methods\n\n obj = self.T if should_transpose else self\n\n if obj.empty:\n return self.copy()\n\n if method not in fillna_methods:\n axis = self._info_axis_number\n\n if isinstance(obj.index, MultiIndex) and method != \"linear\":\n raise ValueError(\n \"Only `method=linear` interpolation is supported on MultiIndexes.\"\n )\n\n # Set `limit_direction` depending on `method`\n if limit_direction is None:\n limit_direction = (\n \"backward\" if method in (\"backfill\", \"bfill\") else \"forward\"\n )\n else:\n if method in (\"pad\", \"ffill\") and limit_direction != \"forward\":\n raise ValueError(\n f\"`limit_direction` must be 'forward' for method `{method}`\"\n )\n if method in (\"backfill\", \"bfill\") and limit_direction != \"backward\":\n raise ValueError(\n f\"`limit_direction` must be 'backward' for method `{method}`\"\n )\n\n if obj.ndim == 2 and np.all(obj.dtypes == np.dtype(object)):\n raise TypeError(\n \"Cannot interpolate with all object-dtype columns \"\n \"in the DataFrame. Try setting at least one \"\n \"column to a numeric dtype.\"\n )\n\n # create/use the index\n if method == \"linear\":\n # prior default\n index = np.arange(len(obj.index))\n else:\n index = obj.index\n methods = {\"index\", \"values\", \"nearest\", \"time\"}\n is_numeric_or_datetime = (\n is_numeric_dtype(index.dtype)\n or is_datetime64_any_dtype(index.dtype)\n or is_timedelta64_dtype(index.dtype)\n )\n if method not in methods and not is_numeric_or_datetime:\n raise ValueError(\n \"Index column must be numeric or datetime type when \"\n f\"using {method} method other than linear. \"\n \"Try setting a numeric or datetime index column before \"\n \"interpolating.\"\n )\n\n if isna(index).any():\n raise NotImplementedError(\n \"Interpolation with NaNs in the index \"\n \"has not been implemented. 
Try filling \"\n \"those NaNs before interpolating.\"\n )\n new_data = obj._mgr.interpolate(\n method=method,\n axis=axis,\n index=index,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n inplace=inplace,\n downcast=downcast,\n **kwargs,\n )\n\n result = self._constructor(new_data)\n if should_transpose:\n result = result.T\n if inplace:\n return self._update_inplace(result)\n else:\n return result.__finalize__(self, method=\"interpolate\")\n\n # ----------------------------------------------------------------------\n # Timeseries methods Methods\n\n def asof(self, where, subset=None):\n \"\"\"\n Return the last row(s) without any NaNs before `where`.\n\n The last row (for each element in `where`, if list) without any\n NaN is taken.\n In case of a :class:`~pandas.DataFrame`, the last row without NaN\n considering only the subset of columns (if not `None`)\n\n If there is no good value, NaN is returned for a Series or\n a Series of NaN values for a DataFrame\n\n Parameters\n ----------\n where : date or array-like of dates\n Date(s) before which the last row(s) are returned.\n subset : str or array-like of str, default `None`\n For DataFrame, if not `None`, only use these columns to\n check for NaNs.\n\n Returns\n -------\n scalar, Series, or DataFrame\n\n The return can be:\n\n * scalar : when `self` is a Series and `where` is a scalar\n * Series: when `self` is a Series and `where` is an array-like,\n or when `self` is a DataFrame and `where` is a scalar\n * DataFrame : when `self` is a DataFrame and `where` is an\n array-like\n\n Return scalar, Series, or DataFrame.\n\n See Also\n --------\n merge_asof : Perform an asof merge. Similar to left join.\n\n Notes\n -----\n Dates are assumed to be sorted. Raises if this is not the case.\n\n Examples\n --------\n A Series and a scalar `where`.\n\n >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])\n >>> s\n 10 1.0\n 20 2.0\n 30 NaN\n 40 4.0\n dtype: float64\n\n >>> s.asof(20)\n 2.0\n\n For a sequence `where`, a Series is returned. The first value is\n NaN, because the first element of `where` is before the first\n index value.\n\n >>> s.asof([5, 20])\n 5 NaN\n 20 2.0\n dtype: float64\n\n Missing values are not considered. The following is ``2.0``, not\n NaN, even though NaN is at the index location for ``30``.\n\n >>> s.asof(30)\n 2.0\n\n Take all columns into consideration\n\n >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],\n ... 'b': [None, None, None, None, 500]},\n ... index=pd.DatetimeIndex(['2018-02-27 09:01:00',\n ... '2018-02-27 09:02:00',\n ... '2018-02-27 09:03:00',\n ... '2018-02-27 09:04:00',\n ... '2018-02-27 09:05:00']))\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']))\n a b\n 2018-02-27 09:03:30 NaN NaN\n 2018-02-27 09:04:30 NaN NaN\n\n Take a single column into consideration\n\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']),\n ... 
subset=['a'])\n a b\n 2018-02-27 09:03:30 30.0 NaN\n 2018-02-27 09:04:30 40.0 NaN\n \"\"\"\n if isinstance(where, str):\n where = Timestamp(where)\n\n if not self.index.is_monotonic:\n raise ValueError(\"asof requires a sorted index\")\n\n is_series = isinstance(self, ABCSeries)\n if is_series:\n if subset is not None:\n raise ValueError(\"subset is not valid for Series\")\n else:\n if subset is None:\n subset = self.columns\n if not is_list_like(subset):\n subset = [subset]\n\n is_list = is_list_like(where)\n if not is_list:\n start = self.index[0]\n if isinstance(self.index, PeriodIndex):\n where = Period(where, freq=self.index.freq)\n\n if where < start:\n if not is_series:\n return self._constructor_sliced(\n index=self.columns, name=where, dtype=np.float64\n )\n return np.nan\n\n # It's always much faster to use a *while* loop here for\n # Series than pre-computing all the NAs. However a\n # *while* loop is extremely expensive for DataFrame\n # so we later pre-compute all the NAs and use the same\n # code path whether *where* is a scalar or list.\n # See PR: https://github.com/pandas-dev/pandas/pull/14476\n if is_series:\n loc = self.index.searchsorted(where, side=\"right\")\n if loc > 0:\n loc -= 1\n\n values = self._values\n while loc > 0 and isna(values[loc]):\n loc -= 1\n return values[loc]\n\n if not isinstance(where, Index):\n where = Index(where) if is_list else Index([where])\n\n nulls = self.isna() if is_series else self[subset].isna().any(1)\n if nulls.all():\n if is_series:\n return self._constructor(np.nan, index=where, name=self.name)\n elif is_list:\n return self._constructor(np.nan, index=where, columns=self.columns)\n else:\n return self._constructor_sliced(\n np.nan, index=self.columns, name=where[0]\n )\n\n locs = self.index.asof_locs(where, ~(nulls._values))\n\n # mask the missing\n missing = locs == -1\n data = self.take(locs)\n data.index = where\n data.loc[missing] = np.nan\n return data if is_list else data.iloc[-1]\n\n # ----------------------------------------------------------------------\n # Action Methods\n\n @doc(klass=_shared_doc_kwargs[\"klass\"])\n def isna(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or :attr:`numpy.NaN`, gets mapped to True\n values.\n Everything else gets mapped to False values. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n Returns\n -------\n {klass}\n Mask of bool values for each element in {klass} that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n {klass}.isnull : Alias of isna.\n {klass}.notna : Boolean inverse of isna.\n {klass}.dropna : Omit axes labels with missing values.\n isna : Top-level isna.\n\n Examples\n --------\n Show which entries in a DataFrame are NA.\n\n >>> df = pd.DataFrame({{'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 
'toy': [None, 'Batmobile', 'Joker']}})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.isna()\n age born name toy\n 0 False True False True\n 1 False False False False\n 2 True False False False\n\n Show which entries in a Series are NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.isna()\n 0 False\n 1 False\n 2 True\n dtype: bool\n \"\"\"\n return isna(self).__finalize__(self, method=\"isna\")\n\n @doc(isna, klass=_shared_doc_kwargs[\"klass\"])\n def isnull(self: FrameOrSeries) -> FrameOrSeries:\n return isna(self).__finalize__(self, method=\"isnull\")\n\n @doc(klass=_shared_doc_kwargs[\"klass\"])\n def notna(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to False\n values.\n\n Returns\n -------\n {klass}\n Mask of bool values for each element in {klass} that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n {klass}.notnull : Alias of notna.\n {klass}.isna : Boolean inverse of notna.\n {klass}.dropna : Omit axes labels with missing values.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in a DataFrame are not NA.\n\n >>> df = pd.DataFrame({{'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 'toy': [None, 'Batmobile', 'Joker']}})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.notna()\n age born name toy\n 0 True False True False\n 1 True True True True\n 2 False True True True\n\n Show which entries in a Series are not NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n \"\"\"\n return notna(self).__finalize__(self, method=\"notna\")\n\n @doc(notna, klass=_shared_doc_kwargs[\"klass\"])\n def notnull(self: FrameOrSeries) -> FrameOrSeries:\n return notna(self).__finalize__(self, method=\"notnull\")\n\n def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):\n if (lower is not None and np.any(isna(lower))) or (\n upper is not None and np.any(isna(upper))\n ):\n raise ValueError(\"Cannot use an NA value as a clip threshold\")\n\n result = self\n mask = isna(self._values)\n\n with np.errstate(all=\"ignore\"):\n if upper is not None:\n subset = self.to_numpy() <= upper\n result = result.where(subset, upper, axis=None, inplace=False)\n if lower is not None:\n subset = self.to_numpy() >= lower\n result = result.where(subset, lower, axis=None, inplace=False)\n\n if np.any(mask):\n result[mask] = np.nan\n\n if inplace:\n return self._update_inplace(result)\n else:\n return result\n\n def _clip_with_one_bound(self, threshold, method, axis, inplace):\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # method is self.le for upper bound and self.ge for lower bound\n if is_scalar(threshold) and is_number(threshold):\n if method.__name__ == \"le\":\n return self._clip_with_scalar(None, threshold, inplace=inplace)\n return 
self._clip_with_scalar(threshold, None, inplace=inplace)\n\n subset = method(threshold, axis=axis) | isna(self)\n\n # GH #15390\n # In order for where method to work, the threshold must\n # be transformed to NDFrame from other array like structure.\n if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):\n if isinstance(self, ABCSeries):\n threshold = self._constructor(threshold, index=self.index)\n else:\n threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]\n return self.where(subset, threshold, axis=axis, inplace=inplace)\n\n def clip(\n self: FrameOrSeries,\n lower=None,\n upper=None,\n axis=None,\n inplace: bool_t = False,\n *args,\n **kwargs,\n ) -> FrameOrSeries:\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values. Thresholds\n can be singular values or array like, and in the latter case\n the clipping is performed element-wise in the specified axis.\n\n Parameters\n ----------\n lower : float or array_like, default None\n Minimum threshold value. All values below this\n threshold will be set to it.\n upper : float or array_like, default None\n Maximum threshold value. All values above this\n threshold will be set to it.\n axis : int or str axis name, optional\n Align object with lower and upper along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n *args, **kwargs\n Additional keywords have no effect but might be accepted\n for compatibility with numpy.\n\n Returns\n -------\n Series or DataFrame\n Same type as calling object with the values outside the\n clip boundaries replaced.\n\n See Also\n --------\n Series.clip : Trim values at input threshold in series.\n DataFrame.clip : Trim values at input threshold in dataframe.\n numpy.clip : Clip (limit) the values in an array.\n\n Examples\n --------\n >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}\n >>> df = pd.DataFrame(data)\n >>> df\n col_0 col_1\n 0 9 -2\n 1 -3 -7\n 2 0 6\n 3 -1 8\n 4 5 -5\n\n Clips per column using lower and upper thresholds:\n\n >>> df.clip(-4, 6)\n col_0 col_1\n 0 6 -2\n 1 -3 -4\n 2 0 6\n 3 -1 6\n 4 5 -4\n\n Clips using specific lower and upper thresholds per column element:\n\n >>> t = pd.Series([2, -4, -1, 6, 3])\n >>> t\n 0 2\n 1 -4\n 2 -1\n 3 6\n 4 3\n dtype: int64\n\n >>> df.clip(t, t + 4, axis=0)\n col_0 col_1\n 0 6 2\n 1 -3 -4\n 2 0 3\n 3 6 8\n 4 5 3\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = nv.validate_clip_with_axis(axis, args, kwargs)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # GH 17276\n # numpy doesn't like NaN as a clip value\n # so ignore\n # GH 19992\n # numpy doesn't drop a list-like bound containing NaN\n if not is_list_like(lower) and np.any(isna(lower)):\n lower = None\n if not is_list_like(upper) and np.any(isna(upper)):\n upper = None\n\n # GH 2747 (arguments were reversed)\n if lower is not None and upper is not None:\n if is_scalar(lower) and is_scalar(upper):\n lower, upper = min(lower, upper), max(lower, upper)\n\n # fast-path for scalars\n if (lower is None or (is_scalar(lower) and is_number(lower))) and (\n upper is None or (is_scalar(upper) and is_number(upper))\n ):\n return self._clip_with_scalar(lower, upper, inplace=inplace)\n\n result = self\n if lower is not None:\n result = result._clip_with_one_bound(\n lower, method=self.ge, axis=axis, inplace=inplace\n )\n if upper is not None:\n if inplace:\n result = self\n result = result._clip_with_one_bound(\n upper, 
method=self.le, axis=axis, inplace=inplace\n )\n\n return result\n\n _shared_docs[\n \"groupby\"\n ] = \"\"\"\n Group %(klass)s using a mapper or by a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : mapping, function, label, or list of labels\n Used to determine the groups for the groupby.\n If ``by`` is a function, it's called on each value of the object's\n index. If a dict or Series is passed, the Series or dict VALUES\n will be used to determine the groups (the Series' values are first\n aligned; see ``.align()`` method). If an ndarray is passed, the\n values are used as-is determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``. Notice\n that a tuple is interpreted as a (single) key.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Split along rows (0) or columns (1).\n level : int, level name, or sequence of such, default None\n If the axis is a MultiIndex (hierarchical), group by a particular\n level or levels.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively \"SQL-style\" grouped output.\n sort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within each\n group. Groupby preserves the order of rows within each group.\n group_keys : bool, default True\n When calling apply, add group keys to index to identify pieces.\n squeeze : bool, default False\n Reduce the dimensionality of the return type if possible,\n otherwise return a consistent type.\n\n .. deprecated:: 1.1.0\n\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionadded:: 0.23.0\n dropna : bool, default True\n If True, and if group keys contain NA values, NA values together\n with row/column will be dropped.\n If False, NA values will also be treated as the key in groups\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n %(klass)sGroupBy\n Returns a groupby object that contains information about the groups.\n\n See Also\n --------\n resample : Convenience method for frequency conversion and resampling\n of time series.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.\n \"\"\"\n\n def asfreq(\n self: FrameOrSeries,\n freq,\n method=None,\n how: Optional[str] = None,\n normalize: bool_t = False,\n fill_value=None,\n ) -> FrameOrSeries:\n \"\"\"\n Convert TimeSeries to specified frequency.\n\n Optionally provide filling method to pad/backfill missing values.\n\n Returns the original data conformed to a new index with the specified\n frequency. 
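\n\n        (A small added sketch, hedged and not part of the upstream docstring, of\n        how this differs from aggregating with ``resample``.)\n\n        >>> idx = pd.date_range('2000-01-01', periods=4, freq='T')\n        >>> s = pd.Series(range(4), index=idx)\n        >>> s.asfreq('2T').tolist()          # picks the existing observations\n        [0, 2]\n        >>> s.resample('2T').sum().tolist()  # aggregates each new bin\n        [1, 5]\n\n        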
``resample`` is more appropriate if an operation, such as\n summarization, is necessary to represent the data at the new frequency.\n\n Parameters\n ----------\n freq : DateOffset or str\n Frequency DateOffset or string.\n method : {'backfill'/'bfill', 'pad'/'ffill'}, default None\n Method to use for filling holes in reindexed Series (note this\n does not fill NaNs that already were present):\n\n * 'pad' / 'ffill': propagate last valid observation forward to next\n valid\n * 'backfill' / 'bfill': use NEXT valid observation to fill.\n how : {'start', 'end'}, default end\n For PeriodIndex only (see PeriodIndex.asfreq).\n normalize : bool, default False\n Whether to reset output index to midnight.\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n Returns\n -------\n Same type as caller\n Object converted to the specified frequency.\n\n See Also\n --------\n reindex : Conform DataFrame to new index with optional filling logic.\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Start by creating a series with 4 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)\n >>> df = pd.DataFrame({'s':series})\n >>> df\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:03:00 3.0\n\n Upsample the series into 30 second bins.\n\n >>> df.asfreq(freq='30S')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 NaN\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``fill value``.\n\n >>> df.asfreq(freq='30S', fill_value=9.0)\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 9.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 9.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 9.0\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``method``.\n\n >>> df.asfreq(freq='30S', method='bfill')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 2.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 3.0\n 2000-01-01 00:03:00 3.0\n \"\"\"\n from pandas.core.resample import asfreq\n\n return asfreq(\n self,\n freq,\n method=method,\n how=how,\n normalize=normalize,\n fill_value=fill_value,\n )\n\n def at_time(\n self: FrameOrSeries, time, asof: bool_t = False, axis=None\n ) -> FrameOrSeries:\n \"\"\"\n Select values at particular time of day (e.g., 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> ts.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n\n if not isinstance(index, DatetimeIndex):\n raise TypeError(\"Index must be DatetimeIndex\")\n\n indexer = index.indexer_at_time(time, asof=asof)\n return self._take_with_is_copy(indexer, axis=axis)\n\n def between_time(\n self: FrameOrSeries,\n start_time,\n end_time,\n include_start: bool_t = True,\n include_end: bool_t = True,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Select values between particular times of the day (e.g., 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n Initial time as a time filter limit.\n end_time : datetime.time or str\n End time as a time filter limit.\n include_start : bool, default True\n Whether the start time needs to be included in the result.\n include_end : bool, default True\n Whether the end time needs to be included in the result.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine range time on index or columns value.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Data from the original object filtered to the specified dates range.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> ts.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> ts.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n if not isinstance(index, DatetimeIndex):\n raise TypeError(\"Index must be DatetimeIndex\")\n\n indexer = index.indexer_between_time(\n start_time, end_time, include_start=include_start, include_end=include_end,\n )\n return self._take_with_is_copy(indexer, axis=axis)\n\n def resample(\n self,\n rule,\n axis=0,\n closed: Optional[str] = None,\n label: Optional[str] = None,\n convention: str = \"start\",\n kind: Optional[str] = None,\n loffset=None,\n base: Optional[int] = None,\n on=None,\n level=None,\n origin: Union[str, TimestampConvertibleTypes] = \"start_day\",\n offset: Optional[TimedeltaConvertibleTypes] = None,\n ) -> \"Resampler\":\n \"\"\"\n Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time\n series. Object must have a datetime-like index (`DatetimeIndex`,\n `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values\n to the `on` or `level` keyword.\n\n Parameters\n ----------\n rule : DateOffset, Timedelta or str\n The offset string or object representing target conversion.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Which axis to use for up- or down-sampling. For `Series` this\n will default to 0, i.e. along the rows. Must be\n `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.\n closed : {'right', 'left'}, default None\n Which side of bin interval is closed. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n label : {'right', 'left'}, default None\n Which bin edge label to label bucket with. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n convention : {'start', 'end', 's', 'e'}, default 'start'\n For `PeriodIndex` only, controls whether to use the start or\n end of `rule`.\n kind : {'timestamp', 'period'}, optional, default None\n Pass 'timestamp' to convert the resulting index to a\n `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.\n By default the input representation is retained.\n loffset : timedelta, default None\n Adjust the resampled time labels.\n\n .. 
deprecated:: 1.1.0\n You should add the loffset to the `df.index` after the resample.\n See below.\n\n base : int, default 0\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. Defaults to 0.\n\n .. deprecated:: 1.1.0\n The new arguments that you should use are 'offset' or 'origin'.\n\n on : str, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n level : str or int, optional\n For a MultiIndex, level (name or number) to use for\n resampling. `level` must be datetime-like.\n origin : {'epoch', 'start', 'start_day'}, Timestamp or str, default 'start_day'\n The timestamp on which to adjust the grouping. The timezone of origin\n must match the timezone of the index.\n If a timestamp is not used, these values are also supported:\n\n - 'epoch': `origin` is 1970-01-01\n - 'start': `origin` is the first value of the timeseries\n - 'start_day': `origin` is the first day at midnight of the timeseries\n\n .. versionadded:: 1.1.0\n\n offset : Timedelta or str, default is None\n An offset timedelta added to the origin.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n Resampler object\n\n See Also\n --------\n groupby : Group by mapping, function, label, or list of labels.\n Series.resample : Resample a Series.\n DataFrame.resample: Resample a DataFrame.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_\n for more.\n\n To learn more about the offset strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.\n\n Examples\n --------\n Start by creating a series with 9 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=9, freq='T')\n >>> series = pd.Series(range(9), index=index)\n >>> series\n 2000-01-01 00:00:00 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:02:00 2\n 2000-01-01 00:03:00 3\n 2000-01-01 00:04:00 4\n 2000-01-01 00:05:00 5\n 2000-01-01 00:06:00 6\n 2000-01-01 00:07:00 7\n 2000-01-01 00:08:00 8\n Freq: T, dtype: int64\n\n Downsample the series into 3 minute bins and sum the values\n of the timestamps falling into a bin.\n\n >>> series.resample('3T').sum()\n 2000-01-01 00:00:00 3\n 2000-01-01 00:03:00 12\n 2000-01-01 00:06:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but label each\n bin using the right edge instead of the left. Please note that the\n value in the bucket used as the label is not included in the bucket,\n which it labels. 
For example, in the original series the\n bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed\n value in the resampled bucket with the label ``2000-01-01 00:03:00``\n does not include 3 (if it did, the summed value would be 6, not 3).\n To include this value close the right side of the bin interval as\n illustrated in the example below this one.\n\n >>> series.resample('3T', label='right').sum()\n 2000-01-01 00:03:00 3\n 2000-01-01 00:06:00 12\n 2000-01-01 00:09:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> series.resample('3T', label='right', closed='right').sum()\n 2000-01-01 00:00:00 0\n 2000-01-01 00:03:00 6\n 2000-01-01 00:06:00 15\n 2000-01-01 00:09:00 15\n Freq: 3T, dtype: int64\n\n Upsample the series into 30 second bins.\n\n >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 1.0\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n Freq: 30S, dtype: float64\n\n Upsample the series into 30 second bins and fill the ``NaN``\n values using the ``pad`` method.\n\n >>> series.resample('30S').pad()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 1\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Upsample the series into 30 second bins and fill the\n ``NaN`` values using the ``bfill`` method.\n\n >>> series.resample('30S').bfill()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 1\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 2\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Pass a custom function via ``apply``\n\n >>> def custom_resampler(array_like):\n ... return np.sum(array_like) + 5\n ...\n >>> series.resample('3T').apply(custom_resampler)\n 2000-01-01 00:00:00 8\n 2000-01-01 00:03:00 17\n 2000-01-01 00:06:00 26\n Freq: 3T, dtype: int64\n\n For a Series with a PeriodIndex, the keyword `convention` can be\n used to control whether to use the start or end of `rule`.\n\n Resample a year by quarter using 'start' `convention`. Values are\n assigned to the first quarter of the period.\n\n >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',\n ... freq='A',\n ... periods=2))\n >>> s\n 2012 1\n 2013 2\n Freq: A-DEC, dtype: int64\n >>> s.resample('Q', convention='start').asfreq()\n 2012Q1 1.0\n 2012Q2 NaN\n 2012Q3 NaN\n 2012Q4 NaN\n 2013Q1 2.0\n 2013Q2 NaN\n 2013Q3 NaN\n 2013Q4 NaN\n Freq: Q-DEC, dtype: float64\n\n Resample quarters by month using 'end' `convention`. Values are\n assigned to the last month of the period.\n\n >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',\n ... freq='Q',\n ... periods=4))\n >>> q\n 2018Q1 1\n 2018Q2 2\n 2018Q3 3\n 2018Q4 4\n Freq: Q-DEC, dtype: int64\n >>> q.resample('M', convention='end').asfreq()\n 2018-03 1.0\n 2018-04 NaN\n 2018-05 NaN\n 2018-06 2.0\n 2018-07 NaN\n 2018-08 NaN\n 2018-09 3.0\n 2018-10 NaN\n 2018-11 NaN\n 2018-12 4.0\n Freq: M, dtype: float64\n\n For DataFrame objects, the keyword `on` can be used to specify the\n column instead of the index for resampling.\n\n >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df = pd.DataFrame(d)\n >>> df['week_starting'] = pd.date_range('01/01/2018',\n ... periods=8,\n ... 
freq='W')\n >>> df\n price volume week_starting\n 0 10 50 2018-01-07\n 1 11 60 2018-01-14\n 2 9 40 2018-01-21\n 3 13 100 2018-01-28\n 4 14 50 2018-02-04\n 5 18 100 2018-02-11\n 6 17 40 2018-02-18\n 7 19 50 2018-02-25\n >>> df.resample('M', on='week_starting').mean()\n price volume\n week_starting\n 2018-01-31 10.75 62.5\n 2018-02-28 17.00 60.0\n\n For a DataFrame with MultiIndex, the keyword `level` can be used to\n specify on which level the resampling needs to take place.\n\n >>> days = pd.date_range('1/1/2000', periods=4, freq='D')\n >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df2 = pd.DataFrame(d2,\n ... index=pd.MultiIndex.from_product([days,\n ... ['morning',\n ... 'afternoon']]\n ... ))\n >>> df2\n price volume\n 2000-01-01 morning 10 50\n afternoon 11 60\n 2000-01-02 morning 9 40\n afternoon 13 100\n 2000-01-03 morning 14 50\n afternoon 18 100\n 2000-01-04 morning 17 40\n afternoon 19 50\n >>> df2.resample('D', level=0).sum()\n price volume\n 2000-01-01 21 110\n 2000-01-02 22 140\n 2000-01-03 32 150\n 2000-01-04 36 90\n\n If you want to adjust the start of the bins based on a fixed timestamp:\n\n >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'\n >>> rng = pd.date_range(start, end, freq='7min')\n >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)\n >>> ts\n 2000-10-01 23:30:00 0\n 2000-10-01 23:37:00 3\n 2000-10-01 23:44:00 6\n 2000-10-01 23:51:00 9\n 2000-10-01 23:58:00 12\n 2000-10-02 00:05:00 15\n 2000-10-02 00:12:00 18\n 2000-10-02 00:19:00 21\n 2000-10-02 00:26:00 24\n Freq: 7T, dtype: int64\n\n >>> ts.resample('17min').sum()\n 2000-10-01 23:14:00 0\n 2000-10-01 23:31:00 9\n 2000-10-01 23:48:00 21\n 2000-10-02 00:05:00 54\n 2000-10-02 00:22:00 24\n Freq: 17T, dtype: int64\n\n >>> ts.resample('17min', origin='epoch').sum()\n 2000-10-01 23:18:00 0\n 2000-10-01 23:35:00 18\n 2000-10-01 23:52:00 27\n 2000-10-02 00:09:00 39\n 2000-10-02 00:26:00 24\n Freq: 17T, dtype: int64\n\n >>> ts.resample('17min', origin='2000-01-01').sum()\n 2000-10-01 23:24:00 3\n 2000-10-01 23:41:00 15\n 2000-10-01 23:58:00 45\n 2000-10-02 00:15:00 45\n Freq: 17T, dtype: int64\n\n If you want to adjust the start of the bins with an `offset` Timedelta, the two\n following lines are equivalent:\n\n >>> ts.resample('17min', origin='start').sum()\n 2000-10-01 23:30:00 9\n 2000-10-01 23:47:00 21\n 2000-10-02 00:04:00 54\n 2000-10-02 00:21:00 24\n Freq: 17T, dtype: int64\n\n >>> ts.resample('17min', offset='23h30min').sum()\n 2000-10-01 23:30:00 9\n 2000-10-01 23:47:00 21\n 2000-10-02 00:04:00 54\n 2000-10-02 00:21:00 24\n Freq: 17T, dtype: int64\n\n To replace the use of the deprecated `base` argument, you can now use `offset`,\n in this example it is equivalent to have `base=2`:\n\n >>> ts.resample('17min', offset='2min').sum()\n 2000-10-01 23:16:00 0\n 2000-10-01 23:33:00 9\n 2000-10-01 23:50:00 36\n 2000-10-02 00:07:00 39\n 2000-10-02 00:24:00 24\n Freq: 17T, dtype: int64\n\n To replace the use of the deprecated `loffset` argument:\n\n >>> from pandas.tseries.frequencies import to_offset\n >>> loffset = '19min'\n >>> ts_out = ts.resample('17min').sum()\n >>> ts_out.index = ts_out.index + to_offset(loffset)\n >>> ts_out\n 2000-10-01 23:33:00 0\n 2000-10-01 23:50:00 9\n 2000-10-02 00:07:00 21\n 2000-10-02 00:24:00 54\n 2000-10-02 00:41:00 24\n Freq: 17T, dtype: int64\n \"\"\"\n from pandas.core.resample import get_resampler\n\n axis = self._get_axis_number(axis)\n return get_resampler(\n self,\n freq=rule,\n label=label,\n 
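# get_resampler wraps these arguments in a TimeGrouper and returns the Resampler\n            # subclass matching this object's datetime-like axis\n            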
closed=closed,\n axis=axis,\n kind=kind,\n loffset=loffset,\n convention=convention,\n base=base,\n key=on,\n level=level,\n origin=origin,\n offset=offset,\n )\n\n def first(self: FrameOrSeries, offset) -> FrameOrSeries:\n \"\"\"\n Select initial periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the first few rows based on a date offset.\n\n Parameters\n ----------\n offset : str, DateOffset or dateutil.relativedelta\n The offset length of the data that will be selected. For instance,\n '1M' will display all the rows having their index within the first month.\n\n Returns\n -------\n Series or DataFrame\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n last : Select final periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the first 3 days:\n\n >>> ts.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for 3 first calendar days were returned, not the first\n 3 days observed in the dataset, and therefore data for 2018-04-13 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'first' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n end_date = end = self.index[0] + offset\n\n # Tick-like, e.g. 3 weeks\n if isinstance(offset, Tick):\n if end_date in self.index:\n end = self.index.searchsorted(end_date, side=\"left\")\n return self.iloc[:end]\n\n return self.loc[:end]\n\n def last(self: FrameOrSeries, offset) -> FrameOrSeries:\n \"\"\"\n Select final periods of time series data based on a date offset.\n\n When having a DataFrame with dates as index, this function can\n select the last few rows based on a date offset.\n\n Parameters\n ----------\n offset : str, DateOffset, dateutil.relativedelta\n The offset length of the data that will be selected. 
For instance,\n '3D' will display all the rows having their index within the last 3 days.\n\n Returns\n -------\n Series or DataFrame\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n first : Select initial periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> ts.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for 3 last calendar days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'last' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n\n start_date = self.index[-1] - offset\n start = self.index.searchsorted(start_date, side=\"right\")\n return self.iloc[start:]\n\n def rank(\n self: FrameOrSeries,\n axis=0,\n method: str = \"average\",\n numeric_only: Optional[bool_t] = None,\n na_option: str = \"keep\",\n ascending: bool_t = True,\n pct: bool_t = False,\n ) -> FrameOrSeries:\n \"\"\"\n Compute numerical data ranks (1 through n) along axis.\n\n By default, equal values are assigned a rank that is the average of the\n ranks of those values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Index to direct ranking.\n method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n How to rank the group of records that have the same value (i.e. ties):\n\n * average: average rank of the group\n * min: lowest rank in the group\n * max: highest rank in the group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups.\n\n numeric_only : bool, optional\n For DataFrame objects, rank only numeric columns if set to True.\n na_option : {'keep', 'top', 'bottom'}, default 'keep'\n How to rank NaN values:\n\n * keep: assign NaN rank to NaN values\n * top: assign smallest rank to NaN values if ascending\n * bottom: assign highest rank to NaN values if ascending.\n\n ascending : bool, default True\n Whether or not the elements should be ranked in ascending order.\n pct : bool, default False\n Whether or not to display the returned rankings in percentile\n form.\n\n Returns\n -------\n same type as caller\n Return a Series or DataFrame with data ranks as values.\n\n See Also\n --------\n core.groupby.GroupBy.rank : Rank of values within each group.\n\n Examples\n --------\n >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',\n ... 'spider', 'snake'],\n ... 
'Number_legs': [4, 2, 4, 8, np.nan]})\n >>> df\n Animal Number_legs\n 0 cat 4.0\n 1 penguin 2.0\n 2 dog 4.0\n 3 spider 8.0\n 4 snake NaN\n\n The following example shows how the method behaves with the above\n parameters:\n\n * default_rank: this is the default behaviour obtained without using\n any parameter.\n * max_rank: setting ``method = 'max'`` the records that have the\n same values are ranked using the highest rank (e.g.: since 'cat'\n and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)\n * NA_bottom: choosing ``na_option = 'bottom'``, if there are records\n with NaN values they are placed at the bottom of the ranking.\n * pct_rank: when setting ``pct = True``, the ranking is expressed as\n percentile rank.\n\n >>> df['default_rank'] = df['Number_legs'].rank()\n >>> df['max_rank'] = df['Number_legs'].rank(method='max')\n >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')\n >>> df['pct_rank'] = df['Number_legs'].rank(pct=True)\n >>> df\n Animal Number_legs default_rank max_rank NA_bottom pct_rank\n 0 cat 4.0 2.5 3.0 2.5 0.625\n 1 penguin 2.0 1.0 1.0 1.0 0.250\n 2 dog 4.0 2.5 3.0 2.5 0.625\n 3 spider 8.0 4.0 4.0 4.0 1.000\n 4 snake NaN NaN NaN 5.0 NaN\n \"\"\"\n axis = self._get_axis_number(axis)\n\n if na_option not in {\"keep\", \"top\", \"bottom\"}:\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n raise ValueError(msg)\n\n def ranker(data):\n ranks = algos.rank(\n data.values,\n axis=axis,\n method=method,\n ascending=ascending,\n na_option=na_option,\n pct=pct,\n )\n ranks = self._constructor(ranks, **data._construct_axes_dict())\n return ranks.__finalize__(self, method=\"rank\")\n\n # if numeric_only is None, and we can't get anything, we try with\n # numeric_only=True\n if numeric_only is None:\n try:\n return ranker(self)\n except TypeError:\n numeric_only = True\n\n if numeric_only:\n data = self._get_numeric_data()\n else:\n data = self\n\n return ranker(data)\n\n _shared_docs[\n \"compare\"\n ] = \"\"\"\n Compare to another %(klass)s and show the differences.\n\n .. 
versionadded:: 1.1.0\n\n Parameters\n ----------\n other : %(klass)s\n Object to compare with.\n\n align_axis : {0 or 'index', 1 or 'columns'}, default 1\n Determine which axis to align the comparison on.\n\n * 0, or 'index' : Resulting differences are stacked vertically\n with rows drawn alternately from self and other.\n * 1, or 'columns' : Resulting differences are aligned horizontally\n with columns drawn alternately from self and other.\n\n keep_shape : bool, default False\n If true, all rows and columns are kept.\n Otherwise, only the ones with different values are kept.\n\n keep_equal : bool, default False\n If true, the result keeps values that are equal.\n Otherwise, equal values are shown as NaNs.\n \"\"\"\n\n @Appender(_shared_docs[\"compare\"] % _shared_doc_kwargs)\n def compare(\n self,\n other,\n align_axis: Axis = 1,\n keep_shape: bool_t = False,\n keep_equal: bool_t = False,\n ):\n from pandas.core.reshape.concat import concat\n\n if type(self) is not type(other):\n cls_self, cls_other = type(self).__name__, type(other).__name__\n raise TypeError(\n f\"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'\"\n )\n\n mask = ~((self == other) | (self.isna() & other.isna()))\n keys = [\"self\", \"other\"]\n\n if not keep_equal:\n self = self.where(mask)\n other = other.where(mask)\n\n if not keep_shape:\n if isinstance(self, ABCDataFrame):\n cmask = mask.any()\n rmask = mask.any(axis=1)\n self = self.loc[rmask, cmask]\n other = other.loc[rmask, cmask]\n else:\n self = self[mask]\n other = other[mask]\n\n if align_axis in (1, \"columns\"): # This is needed for Series\n axis = 1\n else:\n axis = self._get_axis_number(align_axis)\n\n diff = concat([self, other], axis=axis, keys=keys)\n\n if axis >= self.ndim:\n # No need to reorganize data if stacking on new axis\n # This currently applies for stacking two Series on columns\n return diff\n\n ax = diff._get_axis(axis)\n ax_names = np.array(ax.names)\n\n # set index names to positions to avoid confusion\n ax.names = np.arange(len(ax_names))\n\n # bring self-other to inner level\n order = list(range(1, ax.nlevels)) + [0]\n if isinstance(diff, ABCDataFrame):\n diff = diff.reorder_levels(order, axis=axis)\n else:\n diff = diff.reorder_levels(order)\n\n # restore the index names in order\n diff._get_axis(axis=axis).names = ax_names[order]\n\n # reorder axis to keep things organized\n indices = (\n np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten()\n )\n diff = diff.take(indices, axis=axis)\n\n return diff\n\n @doc(**_shared_doc_kwargs)\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ):\n \"\"\"\n Align two objects on their axes with the specified join method.\n\n Join method is specified for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None).\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n copy : bool, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n fill_value : scalar, default np.NaN\n Value to use for missing values. 
Defaults to NaN, but can be any\n \"compatible\" value.\n method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None\n Method to use for filling holes in reindexed Series:\n\n - pad / ffill: propagate last valid observation forward to next valid.\n - backfill / bfill: use NEXT valid observation to fill gap.\n\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n fill_axis : {axes_single_arg}, default 0\n Filling axis, method and limit.\n broadcast_axis : {axes_single_arg}, default None\n Broadcast values along this axis, if aligning two objects of\n different dimensions.\n\n Returns\n -------\n (left, right) : ({klass}, type of other)\n Aligned objects.\n \"\"\"\n\n method = missing.clean_fill_method(method)\n\n if broadcast_axis == 1 and self.ndim != other.ndim:\n if isinstance(self, ABCSeries):\n # this means other is a DataFrame, and we need to broadcast\n # self\n cons = self._constructor_expanddim\n df = cons(\n {c: self for c in other.columns}, **other._construct_axes_dict()\n )\n return df._align_frame(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n elif isinstance(other, ABCSeries):\n # this means self is a DataFrame, and we need to broadcast\n # other\n cons = other._constructor_expanddim\n df = cons(\n {c: other for c in self.columns}, **self._construct_axes_dict()\n )\n return self._align_frame(\n df,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n if isinstance(other, ABCDataFrame):\n return self._align_frame(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n elif isinstance(other, ABCSeries):\n return self._align_series(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def _align_frame(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy: bool_t = True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n ):\n # defaults\n join_index, join_columns = None, None\n ilidx, iridx = None, None\n clidx, cridx = None, None\n\n is_series = isinstance(self, ABCSeries)\n\n if axis is None or axis == 0:\n if not self.index.equals(other.index):\n join_index, ilidx, iridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if axis is None or axis == 1:\n if not is_series and not self.columns.equals(other.columns):\n join_columns, clidx, cridx = self.columns.join(\n other.columns, how=join, level=level, return_indexers=True\n )\n\n if is_series:\n reindexers = {0: [join_index, ilidx]}\n else:\n reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}\n\n left = self._reindex_with_indexers(\n reindexers, copy=copy, fill_value=fill_value, allow_dups=True\n )\n # other must be always DataFrame\n right = other._reindex_with_indexers(\n 
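# reindex ``other`` onto the same joined index/columns, using the right-hand\n            # indexers (iridx, cridx) produced by the joins above\n            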
{0: [join_index, iridx], 1: [join_columns, cridx]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=True,\n )\n\n if method is not None:\n _left = left.fillna(method=method, axis=fill_axis, limit=limit)\n assert _left is not None # needed for mypy\n left = _left\n right = right.fillna(method=method, axis=fill_axis, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_datetime64tz_dtype(left.index.dtype):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = join_index\n right.index = join_index\n\n return (\n left.__finalize__(self),\n right.__finalize__(other),\n )\n\n def _align_series(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy: bool_t = True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n ):\n\n is_series = isinstance(self, ABCSeries)\n\n # series/series compat, other must always be a Series\n if is_series:\n if axis:\n raise ValueError(\"cannot align series to a series other than axis 0\")\n\n # equal\n if self.index.equals(other.index):\n join_index, lidx, ridx = None, None, None\n else:\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n left = self._reindex_indexer(join_index, lidx, copy)\n right = other._reindex_indexer(join_index, ridx, copy)\n\n else:\n # one has > 1 ndim\n fdata = self._mgr\n if axis == 0:\n join_index = self.index\n lidx, ridx = None, None\n if not self.index.equals(other.index):\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=1)\n\n elif axis == 1:\n join_index = self.columns\n lidx, ridx = None, None\n if not self.columns.equals(other.index):\n join_index, lidx, ridx = self.columns.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=0)\n else:\n raise ValueError(\"Must specify axis=0 or 1\")\n\n if copy and fdata is self._mgr:\n fdata = fdata.copy()\n\n left = self._constructor(fdata)\n\n if ridx is None:\n right = other\n else:\n right = other.reindex(join_index, level=level)\n\n # fill\n fill_na = notna(fill_value) or (method is not None)\n if fill_na:\n left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)\n right = right.fillna(fill_value, method=method, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_series or (not is_series and axis == 0):\n if is_datetime64tz_dtype(left.index.dtype):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = join_index\n right.index = join_index\n\n return (\n left.__finalize__(self),\n right.__finalize__(other),\n )\n\n def _where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n \"\"\"\n Equivalent to public method `where`, except that `other` is not\n applied as a function even if callable. 
Used in __setitem__.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # align the cond to same shape as myself\n cond = com.apply_if_callable(cond, self)\n if isinstance(cond, NDFrame):\n cond, _ = cond.align(self, join=\"right\", broadcast_axis=1)\n else:\n if not hasattr(cond, \"shape\"):\n cond = np.asanyarray(cond)\n if cond.shape != self.shape:\n raise ValueError(\"Array conditional must be same shape as self\")\n cond = self._constructor(cond, **self._construct_axes_dict())\n\n # make sure we are boolean\n fill_value = bool(inplace)\n cond = cond.fillna(fill_value)\n\n msg = \"Boolean array expected for the condition, not {dtype}\"\n\n if not cond.empty:\n if not isinstance(cond, ABCDataFrame):\n # This is a single-dimensional object.\n if not is_bool_dtype(cond):\n raise ValueError(msg.format(dtype=cond.dtype))\n else:\n for dt in cond.dtypes:\n if not is_bool_dtype(dt):\n raise ValueError(msg.format(dtype=dt))\n else:\n # GH#21947 we have an empty DataFrame/Series, could be object-dtype\n cond = cond.astype(bool)\n\n cond = -cond if inplace else cond\n\n # try to align with other\n try_quick = True\n if isinstance(other, NDFrame):\n\n # align with me\n if other.ndim <= self.ndim:\n\n _, other = self.align(\n other, join=\"left\", axis=axis, level=level, fill_value=np.nan\n )\n\n # if we are NOT aligned, raise as we cannot where index\n if axis is None and not all(\n other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)\n ):\n raise InvalidIndexError\n\n # slice me out of the other\n else:\n raise NotImplementedError(\n \"cannot align with a higher dimensional NDFrame\"\n )\n\n if isinstance(other, np.ndarray):\n\n if other.shape != self.shape:\n\n if self.ndim == 1:\n\n icond = cond._values\n\n # GH 2745 / GH 4192\n # treat like a scalar\n if len(other) == 1:\n other = other[0]\n\n # GH 3235\n # match True cond to other\n elif len(cond[icond]) == len(other):\n\n # try to not change dtype at first (if try_quick)\n if try_quick:\n new_other = np.asarray(self)\n new_other = new_other.copy()\n new_other[icond] = other\n other = new_other\n\n else:\n raise ValueError(\n \"Length of replacements must equal series length\"\n )\n\n else:\n raise ValueError(\n \"other must be the same shape as self when an ndarray\"\n )\n\n # we are the same shape, so create an actual object for alignment\n else:\n other = self._constructor(other, **self._construct_axes_dict())\n\n if axis is None:\n axis = 0\n\n if self.ndim == getattr(other, \"ndim\", 0):\n align = True\n else:\n align = self._get_axis_number(axis) == 1\n\n if align and isinstance(other, NDFrame):\n other = other.reindex(self._info_axis, axis=self._info_axis_number)\n if isinstance(cond, NDFrame):\n cond = cond.reindex(self._info_axis, axis=self._info_axis_number)\n\n block_axis = self._get_block_manager_axis(axis)\n\n if inplace:\n # we may have different type blocks come out of putmask, so\n # reconstruct the block manager\n\n self._check_inplace_setting(other)\n new_data = self._mgr.putmask(\n mask=cond, new=other, align=align, axis=block_axis,\n )\n result = self._constructor(new_data)\n return self._update_inplace(result)\n\n else:\n new_data = self._mgr.where(\n other=other,\n cond=cond,\n align=align,\n errors=errors,\n try_cast=try_cast,\n axis=block_axis,\n )\n result = self._constructor(new_data)\n return result.__finalize__(self)\n\n @doc(\n klass=_shared_doc_kwargs[\"klass\"],\n cond=\"True\",\n cond_rev=\"False\",\n name=\"where\",\n name_other=\"mask\",\n )\n def where(\n self,\n cond,\n 
other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n \"\"\"\n Replace values where the condition is {cond_rev}.\n\n Parameters\n ----------\n cond : bool {klass}, array-like, or callable\n Where `cond` is {cond}, keep the original value. Where\n {cond_rev}, replace with corresponding value from `other`.\n If `cond` is callable, it is computed on the {klass} and\n should return boolean {klass} or array. The callable must\n not change input {klass} (though pandas doesn't check it).\n other : scalar, {klass}, or callable\n Entries where `cond` is {cond_rev} are replaced with\n corresponding value from `other`.\n If other is callable, it is computed on the {klass} and\n should return scalar or {klass}. The callable must not\n change input {klass} (though pandas doesn't check it).\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n axis : int, default None\n Alignment axis if needed.\n level : int, default None\n Alignment level if needed.\n errors : str, {{'raise', 'ignore'}}, default 'raise'\n Note that currently this parameter won't affect\n the results and will always coerce to a suitable dtype.\n\n - 'raise' : allow exceptions to be raised.\n - 'ignore' : suppress exceptions. On error return original object.\n\n try_cast : bool, default False\n Try to cast the result back to the input type (if possible).\n\n Returns\n -------\n Same type as caller\n\n See Also\n --------\n :func:`DataFrame.{name_other}` : Return an object of same shape as\n self.\n\n Notes\n -----\n The {name} method is an application of the if-then idiom. For each\n element in the calling DataFrame, if ``cond`` is ``{cond}`` the\n element is used; otherwise the corresponding element from the DataFrame\n ``other`` is used.\n\n The signature for :func:`DataFrame.where` differs from\n :func:`numpy.where`. 
Roughly ``df1.where(m, df2)`` is equivalent to\n ``np.where(m, df1, df2)``.\n\n For further details and examples see the ``{name}`` documentation in\n :ref:`indexing <indexing.where_mask>`.\n\n Examples\n --------\n >>> s = pd.Series(range(5))\n >>> s.where(s > 0)\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n 4 4.0\n dtype: float64\n\n >>> s.mask(s > 0)\n 0 0.0\n 1 NaN\n 2 NaN\n 3 NaN\n 4 NaN\n dtype: float64\n\n >>> s.where(s > 1, 10)\n 0 10\n 1 10\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])\n >>> df\n A B\n 0 0 1\n 1 2 3\n 2 4 5\n 3 6 7\n 4 8 9\n >>> m = df % 3 == 0\n >>> df.where(m, -df)\n A B\n 0 0 -1\n 1 -2 3\n 2 -4 -5\n 3 6 -7\n 4 -8 9\n >>> df.where(m, -df) == np.where(m, df, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n >>> df.where(m, -df) == df.mask(~m, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n \"\"\"\n other = com.apply_if_callable(other, self)\n return self._where(\n cond, other, inplace, axis, level, errors=errors, try_cast=try_cast\n )\n\n @doc(\n where,\n klass=_shared_doc_kwargs[\"klass\"],\n cond=\"False\",\n cond_rev=\"True\",\n name=\"mask\",\n name_other=\"where\",\n )\n def mask(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n cond = com.apply_if_callable(cond, self)\n\n # see gh-21891\n if not hasattr(cond, \"__invert__\"):\n cond = np.array(cond)\n\n return self.where(\n ~cond,\n other=other,\n inplace=inplace,\n axis=axis,\n level=level,\n try_cast=try_cast,\n errors=errors,\n )\n\n @doc(klass=_shared_doc_kwargs[\"klass\"])\n def shift(\n self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None\n ) -> FrameOrSeries:\n \"\"\"\n Shift index by desired number of periods with an optional time `freq`.\n\n When `freq` is not passed, shift the index without realigning the data.\n If `freq` is passed (in this case, the index must be date or datetime,\n or it will raise a `NotImplementedError`), the index will be\n increased using the periods and the `freq`. `freq` can be inferred\n when specified as \"infer\" as long as either freq or inferred_freq\n attribute is set in the index.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n freq : DateOffset, tseries.offsets, timedelta, or str, optional\n Offset to use from the tseries module or time rule (e.g. 'EOM').\n If `freq` is specified then the index values are shifted but the\n data is not realigned. That is, use `freq` if you would like to\n extend the index when shifting and preserve the original data.\n If `freq` is specified as \"infer\" then it will be inferred from\n the freq or inferred_freq attributes of the index. If neither of\n those attributes exist, a ValueError is thrown\n axis : {{0 or 'index', 1 or 'columns', None}}, default None\n Shift direction.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n the default depends on the dtype of `self`.\n For numeric data, ``np.nan`` is used.\n For datetime, timedelta, or period data, etc. :attr:`NaT` is used.\n For extension dtypes, ``self.dtype.na_value`` is used.\n\n .. 
versionchanged:: 1.1.0\n\n Returns\n -------\n {klass}\n Copy of input object, shifted.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n DatetimeIndex.shift : Shift values of DatetimeIndex.\n PeriodIndex.shift : Shift values of PeriodIndex.\n tshift : Shift the time index, using the index's frequency if\n available.\n\n Examples\n --------\n >>> df = pd.DataFrame({{\"Col1\": [10, 20, 15, 30, 45],\n ... \"Col2\": [13, 23, 18, 33, 48],\n ... \"Col3\": [17, 27, 22, 37, 52]}},\n ... index=pd.date_range(\"2020-01-01\", \"2020-01-05\"))\n >>> df\n Col1 Col2 Col3\n 2020-01-01 10 13 17\n 2020-01-02 20 23 27\n 2020-01-03 15 18 22\n 2020-01-04 30 33 37\n 2020-01-05 45 48 52\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 2020-01-01 NaN NaN NaN\n 2020-01-02 NaN NaN NaN\n 2020-01-03 NaN NaN NaN\n 2020-01-04 10.0 13.0 17.0\n 2020-01-05 20.0 23.0 27.0\n\n >>> df.shift(periods=1, axis=\"columns\")\n Col1 Col2 Col3\n 2020-01-01 NaN 10.0 13.0\n 2020-01-02 NaN 20.0 23.0\n 2020-01-03 NaN 15.0 18.0\n 2020-01-04 NaN 30.0 33.0\n 2020-01-05 NaN 45.0 48.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 2020-01-01 0 0 0\n 2020-01-02 0 0 0\n 2020-01-03 0 0 0\n 2020-01-04 10 13 17\n 2020-01-05 20 23 27\n\n >>> df.shift(periods=3, freq=\"D\")\n Col1 Col2 Col3\n 2020-01-04 10 13 17\n 2020-01-05 20 23 27\n 2020-01-06 15 18 22\n 2020-01-07 30 33 37\n 2020-01-08 45 48 52\n\n >>> df.shift(periods=3, freq=\"infer\")\n Col1 Col2 Col3\n 2020-01-04 10 13 17\n 2020-01-05 20 23 27\n 2020-01-06 15 18 22\n 2020-01-07 30 33 37\n 2020-01-08 45 48 52\n \"\"\"\n if periods == 0:\n return self.copy()\n\n if freq is None:\n # when freq is None, data is shifted, index is not\n block_axis = self._get_block_manager_axis(axis)\n new_data = self._mgr.shift(\n periods=periods, axis=block_axis, fill_value=fill_value\n )\n return self._constructor(new_data).__finalize__(self, method=\"shift\")\n\n # when freq is given, index is shifted, data is not\n index = self._get_axis(axis)\n\n if freq == \"infer\":\n freq = getattr(index, \"freq\", None)\n\n if freq is None:\n freq = getattr(index, \"inferred_freq\", None)\n\n if freq is None:\n msg = \"Freq was not set in the index hence cannot be inferred\"\n raise ValueError(msg)\n\n elif isinstance(freq, str):\n freq = to_offset(freq)\n\n if isinstance(index, PeriodIndex):\n orig_freq = to_offset(index.freq)\n if freq != orig_freq:\n assert orig_freq is not None # for mypy\n raise ValueError(\n f\"Given freq {freq.rule_code} does not match \"\n f\"PeriodIndex freq {orig_freq.rule_code}\"\n )\n new_ax = index.shift(periods)\n else:\n new_ax = index.shift(periods, freq)\n\n result = self.set_axis(new_ax, axis)\n return result.__finalize__(self, method=\"shift\")\n\n def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:\n \"\"\"\n Equivalent to `shift` without copying data.\n\n The shifted data will not include the dropped periods and the\n shifted axis will be smaller than the original.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative.\n\n Returns\n -------\n shifted : same type as caller\n\n Notes\n -----\n While the `slice_shift` is faster than `shift`, you may pay for it\n later during alignment.\n \"\"\"\n if periods == 0:\n return self\n\n if periods > 0:\n vslicer = slice(None, -periods)\n islicer = slice(periods, None)\n else:\n vslicer = slice(-periods, None)\n islicer = slice(None, periods)\n\n new_obj = self._slice(vslicer, axis=axis)\n shifted_axis = self._get_axis(axis)[islicer]\n 
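# attach the complementary slice of labels to the sliced values: the result is\n        # shorter by abs(periods) and, unlike ``shift``, has no NaN padding\n        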
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)\n\n return new_obj.__finalize__(self, method=\"slice_shift\")\n\n def tshift(\n self: FrameOrSeries, periods: int = 1, freq=None, axis: Axis = 0\n ) -> FrameOrSeries:\n \"\"\"\n Shift the time index, using the index's frequency if available.\n\n .. deprecated:: 1.1.0\n Use `shift` instead.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative.\n freq : DateOffset, timedelta, or str, default None\n Increment to use from the tseries module\n or time rule expressed as a string (e.g. 'EOM').\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default 0\n Corresponds to the axis that contains the Index.\n\n Returns\n -------\n shifted : Series/DataFrame\n\n Notes\n -----\n If freq is not specified then tries to use the freq or inferred_freq\n attributes of the index. If neither of those attributes exist, a\n ValueError is thrown\n \"\"\"\n warnings.warn(\n (\n \"tshift is deprecated and will be removed in a future version. \"\n \"Please use shift instead.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n\n if freq is None:\n freq = \"infer\"\n\n return self.shift(periods, freq, axis)\n\n def truncate(\n self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True\n ) -> FrameOrSeries:\n \"\"\"\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n Parameters\n ----------\n before : date, str, int\n Truncate all rows before this index value.\n after : date, str, int\n Truncate all rows after this index value.\n axis : {0 or 'index', 1 or 'columns'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : bool, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Notes\n -----\n If the index being truncated contains only datetime values,\n `before` and `after` may be specified as strings instead of\n Timestamps.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],\n ... 'B': ['f', 'g', 'h', 'i', 'j'],\n ... 'C': ['k', 'l', 'm', 'n', 'o']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before=\"A\", after=\"B\", axis=\"columns\")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df['A'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n The index values in ``truncate`` can be datetimes or string\n dates.\n\n >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')\n >>> df = pd.DataFrame(index=dates, data={'A': 1})\n >>> df.tail()\n A\n 2016-01-31 23:59:56 1\n 2016-01-31 23:59:57 1\n 2016-01-31 23:59:58 1\n 2016-01-31 23:59:59 1\n 2016-02-01 00:00:00 1\n\n >>> df.truncate(before=pd.Timestamp('2016-01-05'),\n ... after=pd.Timestamp('2016-01-10')).tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Because the index is a DatetimeIndex containing only dates, we can\n specify `before` and `after` as strings. 
They will be coerced to\n Timestamps before truncation.\n\n >>> df.truncate('2016-01-05', '2016-01-10').tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Note that ``truncate`` assumes a 0 value for any unspecified time\n component (midnight). This differs from partial string slicing, which\n returns any partially matching dates.\n\n >>> df.loc['2016-01-05':'2016-01-10', :].tail()\n A\n 2016-01-10 23:59:55 1\n 2016-01-10 23:59:56 1\n 2016-01-10 23:59:57 1\n 2016-01-10 23:59:58 1\n 2016-01-10 23:59:59 1\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n # GH 17935\n # Check that index is sorted\n if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:\n raise ValueError(\"truncate requires a sorted index\")\n\n # if we have a date index, convert to dates, otherwise\n # treat like a slice\n if ax.is_all_dates:\n from pandas.core.tools.datetimes import to_datetime\n\n before = to_datetime(before)\n after = to_datetime(after)\n\n if before is not None and after is not None:\n if before > after:\n raise ValueError(f\"Truncate: {after} must be after {before}\")\n\n if len(ax) > 1 and ax.is_monotonic_decreasing:\n before, after = after, before\n\n slicer = [slice(None, None)] * self._AXIS_LEN\n slicer[axis] = slice(before, after)\n result = self.loc[tuple(slicer)]\n\n if isinstance(ax, MultiIndex):\n setattr(result, self._get_axis_name(axis), ax.truncate(before, after))\n\n if copy:\n result = result.copy()\n\n return result\n\n def tz_convert(\n self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True\n ) -> FrameOrSeries:\n \"\"\"\n Convert tz-aware axis to target time zone.\n\n Parameters\n ----------\n tz : str or tzinfo object\n axis : the axis to convert\n level : int, str, default None\n If axis is a MultiIndex, convert a specific level. Otherwise\n must be None.\n copy : bool, default True\n Also make a copy of the underlying data.\n\n Returns\n -------\n {klass}\n Object with time zone converted axis.\n\n Raises\n ------\n TypeError\n If the axis is tz-naive.\n \"\"\"\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_convert(ax, tz):\n if not hasattr(ax, \"tz_convert\"):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError(\n f\"{ax_name} is not a valid DatetimeIndex or PeriodIndex\"\n )\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_convert(tz)\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_convert(ax.levels[level], tz)\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(f\"The level {level} is not valid\")\n ax = _tz_convert(ax, tz)\n\n result = self.copy(deep=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self, method=\"tz_convert\")\n\n def tz_localize(\n self: FrameOrSeries,\n tz,\n axis=0,\n level=None,\n copy: bool_t = True,\n ambiguous=\"raise\",\n nonexistent: str = \"raise\",\n ) -> FrameOrSeries:\n \"\"\"\n Localize tz-naive index of a Series or DataFrame to target time zone.\n\n This operation localizes the Index. 
To localize the values in a\n timezone-naive Series, use :meth:`Series.dt.tz_localize`.\n\n Parameters\n ----------\n tz : str or tzinfo\n axis : the axis to localize\n level : int, str, default None\n If axis ia a MultiIndex, localize a specific level. Otherwise\n must be None.\n copy : bool, default True\n Also make a copy of the underlying data.\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times.\n nonexistent : str, default 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST. Valid values are:\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Same type as the input.\n\n Raises\n ------\n TypeError\n If the TimeSeries is tz-aware and tz is not None.\n\n Examples\n --------\n Localize local times:\n\n >>> s = pd.Series([1],\n ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))\n >>> s.tz_localize('CET')\n 2018-09-15 01:30:00+02:00 1\n dtype: int64\n\n Be careful with DST changes. When there is sequential data, pandas\n can infer the DST time:\n\n >>> s = pd.Series(range(7),\n ... index=pd.DatetimeIndex(['2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.tz_localize('CET', ambiguous='infer')\n 2018-10-28 01:30:00+02:00 0\n 2018-10-28 02:00:00+02:00 1\n 2018-10-28 02:30:00+02:00 2\n 2018-10-28 02:00:00+01:00 3\n 2018-10-28 02:30:00+01:00 4\n 2018-10-28 03:00:00+01:00 5\n 2018-10-28 03:30:00+01:00 6\n dtype: int64\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly\n\n >>> s = pd.Series(range(3),\n ... index=pd.DatetimeIndex(['2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... '2018-10-28 03:46:00']))\n >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 2018-10-28 01:20:00+02:00 0\n 2018-10-28 02:36:00+02:00 1\n 2018-10-28 03:46:00+01:00 2\n dtype: int64\n\n If the DST transition causes nonexistent times, you can shift these\n dates forward or backward with a timedelta object or `'shift_forward'`\n or `'shift_backward'`.\n\n >>> s = pd.Series(range(2),\n ... index=pd.DatetimeIndex(['2015-03-29 02:30:00',\n ... 
'2015-03-29 03:30:00']))\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n 2015-03-29 03:00:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n 2015-03-29 01:59:59.999999999+01:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n 2015-03-29 03:30:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n \"\"\"\n nonexistent_options = (\"raise\", \"NaT\", \"shift_forward\", \"shift_backward\")\n if nonexistent not in nonexistent_options and not isinstance(\n nonexistent, timedelta\n ):\n raise ValueError(\n \"The nonexistent argument must be one of 'raise', \"\n \"'NaT', 'shift_forward', 'shift_backward' or \"\n \"a timedelta object\"\n )\n\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_localize(ax, tz, ambiguous, nonexistent):\n if not hasattr(ax, \"tz_localize\"):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError(\n f\"{ax_name} is not a valid DatetimeIndex or PeriodIndex\"\n )\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(f\"The level {level} is not valid\")\n ax = _tz_localize(ax, tz, ambiguous, nonexistent)\n\n result = self.copy(deep=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self, method=\"tz_localize\")\n\n # ----------------------------------------------------------------------\n # Numeric Methods\n def abs(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\"\n Return a Series/DataFrame with absolute numeric value of each element.\n\n This function only applies to elements that are all numeric.\n\n Returns\n -------\n abs\n Series/DataFrame containing the absolute value of each element.\n\n See Also\n --------\n numpy.absolute : Calculate the absolute value element-wise.\n\n Notes\n -----\n For ``complex`` inputs, ``1.2 + 1j``, the absolute value is\n :math:`\\\\sqrt{ a^2 + b^2 }`.\n\n Examples\n --------\n Absolute numeric values in a Series.\n\n >>> s = pd.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a Series with complex numbers.\n\n >>> s = pd.Series([1.2 + 1j])\n >>> s.abs()\n 0 1.56205\n dtype: float64\n\n Absolute numeric values in a Series with a Timedelta element.\n\n >>> s = pd.Series([pd.Timedelta('1 days')])\n >>> s.abs()\n 0 1 days\n dtype: timedelta64[ns]\n\n Select rows with data closest to certain value using argsort (from\n `StackOverflow <https://stackoverflow.com/a/17758115>`__).\n\n >>> df = pd.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... 
})\n >>> df\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 -30\n 3 7 40 -50\n >>> df.loc[(df.c - 43).abs().argsort()]\n a b c\n 1 5 20 50\n 0 4 10 100\n 2 6 30 -30\n 3 7 40 -50\n \"\"\"\n return np.abs(self)\n\n def describe(\n self: FrameOrSeries,\n percentiles=None,\n include=None,\n exclude=None,\n datetime_is_numeric=False,\n ) -> FrameOrSeries:\n \"\"\"\n Generate descriptive statistics.\n\n Descriptive statistics include those that summarize the central\n tendency, dispersion and shape of a\n dataset's distribution, excluding ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list-like of numbers, optional\n The percentiles to include in the output. All should\n fall between 0 and 1. The default is\n ``[.25, .5, .75]``, which returns the 25th, 50th, and\n 75th percentiles.\n include : 'all', list-like of dtypes or None (default), optional\n A white list of data types to include in the result. Ignored\n for ``Series``. Here are the options:\n\n - 'all' : All columns of the input will be included in the output.\n - A list-like of dtypes : Limits the results to the\n provided data types.\n To limit the result to numeric types submit\n ``numpy.number``. To limit it instead to object columns submit\n the ``numpy.object`` data type. Strings\n can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To\n select pandas categorical columns, use ``'category'``\n - None (default) : The result will include all numeric columns.\n exclude : list-like of dtypes or None (default), optional,\n A black list of data types to omit from the result. Ignored\n for ``Series``. Here are the options:\n\n - A list-like of dtypes : Excludes the provided data types\n from the result. To exclude numeric types submit\n ``numpy.number``. To exclude object columns submit the data\n type ``numpy.object``. Strings can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To\n exclude pandas categorical columns, use ``'category'``\n - None (default) : The result will exclude nothing.\n datetime_is_numeric : bool, default False\n Whether to treat datetime dtypes as numeric. This affects statistics\n calculated for the column. For DataFrame input, this also\n controls whether datetime columns are included by default.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n Series or DataFrame\n Summary statistics of the Series or Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n DataFrame.select_dtypes: Subset of a DataFrame including/excluding\n columns based on their dtype.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and\n upper percentiles. By default the lower percentile is ``25`` and the\n upper percentile is ``75``. The ``50`` percentile is the\n same as the median.\n\n For object data (e.g. strings or timestamps), the result's index\n will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``\n is the most common value. The ``freq`` is the most common value's\n frequency. 
Timestamps also include the ``first`` and ``last`` items.\n\n If multiple object values have the highest count, then the\n ``count`` and ``top`` results will be arbitrarily chosen from\n among those with the highest count.\n\n For mixed data types provided via a ``DataFrame``, the default is to\n return only an analysis of numeric columns. If the dataframe consists\n only of object and categorical data without any numeric columns, the\n default is to return an analysis of both the object and categorical\n columns. If ``include='all'`` is provided as an option, the result\n will include a union of attributes of each type.\n\n The `include` and `exclude` parameters can be used to limit\n which columns in a ``DataFrame`` are analyzed for the output.\n The parameters are ignored when analyzing a ``Series``.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n dtype: float64\n\n Describing a categorical ``Series``.\n\n >>> s = pd.Series(['a', 'a', 'b', 'c'])\n >>> s.describe()\n count 4\n unique 3\n top a\n freq 2\n dtype: object\n\n Describing a timestamp ``Series``.\n\n >>> s = pd.Series([\n ... np.datetime64(\"2000-01-01\"),\n ... np.datetime64(\"2010-01-01\"),\n ... np.datetime64(\"2010-01-01\")\n ... ])\n >>> s.describe(datetime_is_numeric=True)\n count 3\n mean 2006-09-01 08:00:00\n min 2000-01-01 00:00:00\n 25% 2004-12-31 12:00:00\n 50% 2010-01-01 00:00:00\n 75% 2010-01-01 00:00:00\n max 2010-01-01 00:00:00\n dtype: object\n\n Describing a ``DataFrame``. By default only numeric fields\n are returned.\n\n >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),\n ... 'numeric': [1, 2, 3],\n ... 'object': ['a', 'b', 'c']\n ... 
})\n >>> df.describe()\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Describing all columns of a ``DataFrame`` regardless of data type.\n\n >>> df.describe(include='all') # doctest: +SKIP\n categorical numeric object\n count 3 3.0 3\n unique 3 NaN 3\n top f NaN a\n freq 1 NaN 1\n mean NaN 2.0 NaN\n std NaN 1.0 NaN\n min NaN 1.0 NaN\n 25% NaN 1.5 NaN\n 50% NaN 2.0 NaN\n 75% NaN 2.5 NaN\n max NaN 3.0 NaN\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n Name: numeric, dtype: float64\n\n Including only numeric columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.number])\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Including only string columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[object]) # doctest: +SKIP\n object\n count 3\n unique 3\n top a\n freq 1\n\n Including only categorical columns from a ``DataFrame`` description.\n\n >>> df.describe(include=['category'])\n categorical\n count 3\n unique 3\n top f\n freq 1\n\n Excluding numeric columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.number]) # doctest: +SKIP\n categorical object\n count 3 3\n unique 3 3\n top f a\n freq 1 1\n\n Excluding object columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[object]) # doctest: +SKIP\n categorical numeric\n count 3 3.0\n unique 3 NaN\n top f NaN\n freq 1 NaN\n mean NaN 2.0\n std NaN 1.0\n min NaN 1.0\n 25% NaN 1.5\n 50% NaN 2.0\n 75% NaN 2.5\n max NaN 3.0\n \"\"\"\n if self.ndim == 2 and self.columns.size == 0:\n raise ValueError(\"Cannot describe a DataFrame without columns\")\n\n if percentiles is not None:\n # explicit conversion of `percentiles` to list\n percentiles = list(percentiles)\n\n # get them all to be in [0, 1]\n validate_percentile(percentiles)\n\n # median should always be included\n if 0.5 not in percentiles:\n percentiles.append(0.5)\n percentiles = np.asarray(percentiles)\n else:\n percentiles = np.array([0.25, 0.5, 0.75])\n\n # sort and check for duplicates\n unique_pcts = np.unique(percentiles)\n if len(unique_pcts) < len(percentiles):\n raise ValueError(\"percentiles cannot contain duplicates\")\n percentiles = unique_pcts\n\n formatted_percentiles = format_percentiles(percentiles)\n\n def describe_numeric_1d(series):\n stat_index = (\n [\"count\", \"mean\", \"std\", \"min\"] + formatted_percentiles + [\"max\"]\n )\n d = (\n [series.count(), series.mean(), series.std(), series.min()]\n + series.quantile(percentiles).tolist()\n + [series.max()]\n )\n return pd.Series(d, index=stat_index, name=series.name)\n\n def describe_categorical_1d(data):\n names = [\"count\", \"unique\"]\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n result = [data.count(), count_unique]\n dtype = None\n if result[1] > 0:\n top, freq = objcounts.index[0], objcounts.iloc[0]\n if is_datetime64_any_dtype(data.dtype):\n if self.ndim == 1:\n stacklevel = 4\n else:\n stacklevel = 5\n warnings.warn(\n \"Treating datetime data as categorical rather than numeric in \"\n \"`.describe` is deprecated and will be removed in a future \"\n \"version of pandas. 
Specify `datetime_is_numeric=True` to \"\n \"silence this warning and adopt the future behavior now.\",\n FutureWarning,\n stacklevel=stacklevel,\n )\n tz = data.dt.tz\n asint = data.dropna().values.view(\"i8\")\n top = Timestamp(top)\n if top.tzinfo is not None and tz is not None:\n # Don't tz_localize(None) if key is already tz-aware\n top = top.tz_convert(tz)\n else:\n top = top.tz_localize(tz)\n names += [\"top\", \"freq\", \"first\", \"last\"]\n result += [\n top,\n freq,\n Timestamp(asint.min(), tz=tz),\n Timestamp(asint.max(), tz=tz),\n ]\n else:\n names += [\"top\", \"freq\"]\n result += [top, freq]\n\n # If the DataFrame is empty, set 'top' and 'freq' to None\n # to maintain output shape consistency\n else:\n names += [\"top\", \"freq\"]\n result += [np.nan, np.nan]\n dtype = \"object\"\n\n return pd.Series(result, index=names, name=data.name, dtype=dtype)\n\n def describe_timestamp_1d(data):\n # GH-30164\n stat_index = [\"count\", \"mean\", \"min\"] + formatted_percentiles + [\"max\"]\n d = (\n [data.count(), data.mean(), data.min()]\n + data.quantile(percentiles).tolist()\n + [data.max()]\n )\n return pd.Series(d, index=stat_index, name=data.name)\n\n def describe_1d(data):\n if is_bool_dtype(data.dtype):\n return describe_categorical_1d(data)\n elif is_numeric_dtype(data):\n return describe_numeric_1d(data)\n elif is_datetime64_any_dtype(data.dtype) and datetime_is_numeric:\n return describe_timestamp_1d(data)\n elif is_timedelta64_dtype(data.dtype):\n return describe_numeric_1d(data)\n else:\n return describe_categorical_1d(data)\n\n if self.ndim == 1:\n return describe_1d(self)\n elif (include is None) and (exclude is None):\n # when some numerics are found, keep only numerics\n default_include = [np.number]\n if datetime_is_numeric:\n default_include.append(\"datetime\")\n data = self.select_dtypes(include=default_include)\n if len(data.columns) == 0:\n data = self\n elif include == \"all\":\n if exclude is not None:\n msg = \"exclude must be None when include is 'all'\"\n raise ValueError(msg)\n data = self\n else:\n data = self.select_dtypes(include=include, exclude=exclude)\n\n ldesc = [describe_1d(s) for _, s in data.items()]\n # set a convenient order for rows\n names: List[Label] = []\n ldesc_indexes = sorted((x.index for x in ldesc), key=len)\n for idxnames in ldesc_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n\n d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)\n d.columns = data.columns.copy()\n return d\n\n def pct_change(\n self: FrameOrSeries,\n periods=1,\n fill_method=\"pad\",\n limit=None,\n freq=None,\n **kwargs,\n ) -> FrameOrSeries:\n \"\"\"\n Percentage change between the current and a prior element.\n\n Computes the percentage change from the immediately previous row by\n default. This is useful in comparing the percentage of change in a time\n series of elements.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n fill_method : str, default 'pad'\n How to handle NAs before computing percent changes.\n limit : int, default None\n The number of consecutive NAs to fill before stopping.\n freq : DateOffset, timedelta, or str, optional\n Increment to use from time series API (e.g. 
'M' or BDay()).\n **kwargs\n Additional keyword arguments are passed into\n `DataFrame.shift` or `Series.shift`.\n\n Returns\n -------\n chg : Series or DataFrame\n The same type as the calling object.\n\n See Also\n --------\n Series.diff : Compute the difference of two elements in a Series.\n DataFrame.diff : Compute the difference of two elements in a DataFrame.\n Series.shift : Shift the index by some number of periods.\n DataFrame.shift : Shift the index by some number of periods.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([90, 91, 85])\n >>> s\n 0 90\n 1 91\n 2 85\n dtype: int64\n\n >>> s.pct_change()\n 0 NaN\n 1 0.011111\n 2 -0.065934\n dtype: float64\n\n >>> s.pct_change(periods=2)\n 0 NaN\n 1 NaN\n 2 -0.055556\n dtype: float64\n\n See the percentage change in a Series where filling NAs with last\n valid observation forward to next valid.\n\n >>> s = pd.Series([90, 91, None, 85])\n >>> s\n 0 90.0\n 1 91.0\n 2 NaN\n 3 85.0\n dtype: float64\n\n >>> s.pct_change(fill_method='ffill')\n 0 NaN\n 1 0.011111\n 2 0.000000\n 3 -0.065934\n dtype: float64\n\n **DataFrame**\n\n Percentage change in French franc, Deutsche Mark, and Italian lira from\n 1980-01-01 to 1980-03-01.\n\n >>> df = pd.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n Percentage of change in GOOG and APPL stock volume. Shows computing\n the percentage change between columns.\n\n >>> df = pd.DataFrame({\n ... '2016': [1769950, 30586265],\n ... '2015': [1500923, 40912316],\n ... '2014': [1371819, 41403351]},\n ... 
index=['GOOG', 'APPL'])\n >>> df\n 2016 2015 2014\n GOOG 1769950 1500923 1371819\n APPL 30586265 40912316 41403351\n\n >>> df.pct_change(axis='columns')\n 2016 2015 2014\n GOOG NaN -0.151997 -0.086016\n APPL NaN 0.337604 0.012002\n \"\"\"\n axis = self._get_axis_number(kwargs.pop(\"axis\", self._stat_axis_name))\n if fill_method is None:\n data = self\n else:\n _data = self.fillna(method=fill_method, axis=axis, limit=limit)\n assert _data is not None # needed for mypy\n data = _data\n\n rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1\n if freq is not None:\n # Shift method is implemented differently when freq is not None\n # We want to restore the original index\n rs = rs.loc[~rs.index.duplicated()]\n rs = rs.reindex_like(data)\n return rs\n\n def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):\n if axis is None:\n raise ValueError(\"Must specify 'axis' when aggregating by level.\")\n grouped = self.groupby(level=level, axis=axis, sort=False)\n if hasattr(grouped, name) and skipna:\n return getattr(grouped, name)(**kwargs)\n axis = self._get_axis_number(axis)\n method = getattr(type(self), name)\n applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)\n return grouped.aggregate(applyf)\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\"\n Add the operations to the cls; evaluate the doc strings again\n \"\"\"\n axis_descr, name1, name2 = _doc_parms(cls)\n\n cls.any = _make_logical_function(\n cls,\n \"any\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=_any_desc,\n func=nanops.nanany,\n see_also=_any_see_also,\n examples=_any_examples,\n empty_value=False,\n )\n cls.all = _make_logical_function(\n cls,\n \"all\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=_all_desc,\n func=nanops.nanall,\n see_also=_all_see_also,\n examples=_all_examples,\n empty_value=True,\n )\n\n @doc(\n desc=\"Return the mean absolute deviation of the values \"\n \"for the requested axis.\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n see_also=\"\",\n examples=\"\",\n )\n def mad(self, axis=None, skipna=None, level=None):\n \"\"\"\n {desc}\n\n Parameters\n ----------\n axis : {axis_descr}\n Axis for the function to be applied on.\n skipna : bool, default None\n Exclude NA/null values when computing the result.\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a {name1}.\n\n Returns\n -------\n {name1} or {name2} (if level specified)\\\n {see_also}\\\n {examples}\n \"\"\"\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\"mad\", axis=axis, level=level, skipna=skipna)\n\n data = self._get_numeric_data()\n if axis == 0:\n demeaned = data - data.mean(axis=0)\n else:\n demeaned = data.sub(data.mean(axis=1), axis=0)\n return np.abs(demeaned).mean(axis=axis, skipna=skipna)\n\n cls.mad = mad\n\n cls.sem = _make_stat_function_ddof(\n cls,\n \"sem\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased standard error of the mean over requested \"\n \"axis.\\n\\nNormalized by N-1 by default. This can be changed \"\n \"using the ddof argument\",\n func=nanops.nansem,\n )\n cls.var = _make_stat_function_ddof(\n cls,\n \"var\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased variance over requested axis.\\n\\nNormalized by \"\n \"N-1 by default. 
This can be changed using the ddof argument\",\n func=nanops.nanvar,\n )\n cls.std = _make_stat_function_ddof(\n cls,\n \"std\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return sample standard deviation over requested axis.\"\n \"\\n\\nNormalized by N-1 by default. This can be changed using the \"\n \"ddof argument\",\n func=nanops.nanstd,\n )\n\n cls.cummin = _make_cum_function(\n cls,\n \"cummin\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"minimum\",\n accum_func=np.minimum.accumulate,\n accum_func_name=\"min\",\n examples=_cummin_examples,\n )\n cls.cumsum = _make_cum_function(\n cls,\n \"cumsum\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"sum\",\n accum_func=np.cumsum,\n accum_func_name=\"sum\",\n examples=_cumsum_examples,\n )\n cls.cumprod = _make_cum_function(\n cls,\n \"cumprod\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"product\",\n accum_func=np.cumprod,\n accum_func_name=\"prod\",\n examples=_cumprod_examples,\n )\n cls.cummax = _make_cum_function(\n cls,\n \"cummax\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"maximum\",\n accum_func=np.maximum.accumulate,\n accum_func_name=\"max\",\n examples=_cummax_examples,\n )\n\n cls.sum = _make_min_count_stat_function(\n cls,\n \"sum\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the sum of the values for the requested axis.\\n\\n\"\n \"This is equivalent to the method ``numpy.sum``.\",\n func=nanops.nansum,\n see_also=_stat_func_see_also,\n examples=_sum_examples,\n )\n cls.mean = _make_stat_function(\n cls,\n \"mean\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the mean of the values for the requested axis.\",\n func=nanops.nanmean,\n )\n cls.skew = _make_stat_function(\n cls,\n \"skew\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased skew over requested axis.\\n\\nNormalized by N-1.\",\n func=nanops.nanskew,\n )\n cls.kurt = _make_stat_function(\n cls,\n \"kurt\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased kurtosis over requested axis.\\n\\n\"\n \"Kurtosis obtained using Fisher's definition of\\n\"\n \"kurtosis (kurtosis of normal == 0.0). Normalized \"\n \"by N-1.\",\n func=nanops.nankurt,\n )\n cls.kurtosis = cls.kurt\n cls.prod = _make_min_count_stat_function(\n cls,\n \"prod\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the product of the values for the requested axis.\",\n func=nanops.nanprod,\n examples=_prod_examples,\n )\n cls.product = cls.prod\n cls.median = _make_stat_function(\n cls,\n \"median\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the median of the values for the requested axis.\",\n func=nanops.nanmedian,\n )\n cls.max = _make_stat_function(\n cls,\n \"max\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the maximum of the values for the requested axis.\\n\\n\"\n \"If you want the *index* of the maximum, use ``idxmax``. This is\"\n \"the equivalent of the ``numpy.ndarray`` method ``argmax``.\",\n func=nanops.nanmax,\n see_also=_stat_func_see_also,\n examples=_max_examples,\n )\n cls.min = _make_stat_function(\n cls,\n \"min\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the minimum of the values for the requested axis.\\n\\n\"\n \"If you want the *index* of the minimum, use ``idxmin``. 
This is\"\n \"the equivalent of the ``numpy.ndarray`` method ``argmin``.\",\n func=nanops.nanmin,\n see_also=_stat_func_see_also,\n examples=_min_examples,\n )\n\n @classmethod\n def _add_series_or_dataframe_operations(cls):\n \"\"\"\n Add the series or dataframe only operations to the cls; evaluate\n the doc strings again.\n \"\"\"\n from pandas.core.window import (\n Expanding,\n ExponentialMovingWindow,\n Rolling,\n Window,\n )\n\n @doc(Rolling)\n def rolling(\n self,\n window,\n min_periods=None,\n center=False,\n win_type=None,\n on=None,\n axis=0,\n closed=None,\n ):\n axis = self._get_axis_number(axis)\n\n if win_type is not None:\n return Window(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n on=on,\n axis=axis,\n closed=closed,\n )\n\n return Rolling(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n on=on,\n axis=axis,\n closed=closed,\n )\n\n cls.rolling = rolling\n\n @doc(Expanding)\n def expanding(self, min_periods=1, center=None, axis=0):\n axis = self._get_axis_number(axis)\n if center is not None:\n warnings.warn(\n \"The `center` argument on `expanding` \"\n \"will be removed in the future\",\n FutureWarning,\n stacklevel=2,\n )\n else:\n center = False\n\n return Expanding(self, min_periods=min_periods, center=center, axis=axis)\n\n cls.expanding = expanding\n\n @doc(ExponentialMovingWindow)\n def ewm(\n self,\n com=None,\n span=None,\n halflife=None,\n alpha=None,\n min_periods=0,\n adjust=True,\n ignore_na=False,\n axis=0,\n times=None,\n ):\n axis = self._get_axis_number(axis)\n return ExponentialMovingWindow(\n self,\n com=com,\n span=span,\n halflife=halflife,\n alpha=alpha,\n min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na,\n axis=axis,\n times=times,\n )\n\n cls.ewm = ewm\n\n @doc(klass=_shared_doc_kwargs[\"klass\"], axis=\"\")\n def transform(self, func, *args, **kwargs):\n \"\"\"\n Call ``func`` on self producing a {klass} with transformed values.\n\n Produced {klass} will have same axis length as self.\n\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for transforming the data. If a function, must either\n work when passed a {klass} or when passed to {klass}.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.exp. 
'sqrt']``\n - dict of axis labels -> functions, function names or list of such.\n {axis}\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n {klass}\n A {klass} that must have the same length as self.\n\n Raises\n ------\n ValueError : If the returned {klass} has a different length than self.\n\n See Also\n --------\n {klass}.agg : Only perform aggregating type operations.\n {klass}.apply : Invoke function on a {klass}.\n\n Examples\n --------\n >>> df = pd.DataFrame({{'A': range(3), 'B': range(1, 4)}})\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n >>> df.transform(lambda x: x + 1)\n A B\n 0 1 2\n 1 2 3\n 2 3 4\n\n Even though the resulting {klass} must have the same length as the\n input {klass}, it is possible to provide several input functions:\n\n >>> s = pd.Series(range(3))\n >>> s\n 0 0\n 1 1\n 2 2\n dtype: int64\n >>> s.transform([np.sqrt, np.exp])\n sqrt exp\n 0 0.000000 1.000000\n 1 1.000000 2.718282\n 2 1.414214 7.389056\n \"\"\"\n result = self.agg(func, *args, **kwargs)\n if is_scalar(result) or len(result) != len(self):\n raise ValueError(\"transforms cannot produce aggregated results\")\n\n return result\n\n # ----------------------------------------------------------------------\n # Misc methods\n\n def _find_valid_index(self, how: str):\n \"\"\"\n Retrieves the index of the first valid value.\n\n Parameters\n ----------\n how : {'first', 'last'}\n Use this parameter to change between the first or last valid index.\n\n Returns\n -------\n idx_first_valid : type of index\n \"\"\"\n idxpos = find_valid_index(self._values, how)\n if idxpos is None:\n return None\n return self.index[idxpos]\n\n @doc(position=\"first\", klass=_shared_doc_kwargs[\"klass\"])\n def first_valid_index(self):\n \"\"\"\n Return index for {position} non-NA/null value.\n\n Returns\n -------\n scalar : type of index\n\n Notes\n -----\n If all elements are non-NA/null, returns None.\n Also returns None for empty {klass}.\n \"\"\"\n return self._find_valid_index(\"first\")\n\n @doc(first_valid_index, position=\"last\", klass=_shared_doc_kwargs[\"klass\"])\n def last_valid_index(self):\n return self._find_valid_index(\"last\")\n\n\ndef _doc_parms(cls):\n \"\"\"Return a tuple of the doc parms.\"\"\"\n axis_descr = (\n f\"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}\"\n )\n name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else \"scalar\"\n name2 = cls.__name__\n return axis_descr, name, name2\n\n\n_num_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\n Axis for the function to be applied on.\nskipna : bool, default True\n Exclude NA/null values when computing the result.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n%(min_count)s\\\n**kwargs\n Additional keyword arguments to be passed to the function.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\\n%(see_also)s\\\n%(examples)s\n\"\"\"\n\n_num_ddof_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\nskipna : bool, default True\n Exclude NA/null values. 
If an entire row/column is NA, the result\n will be NA.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\n\"\"\"\n\n_bool_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns', None}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n * 1 / 'columns' : reduce the columns, return a Series whose index is the\n original index.\n * None : reduce all axes, return a scalar.\n\nbool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data. Not implemented for Series.\nskipna : bool, default True\n Exclude NA/null values. If the entire row/column is NA and skipna is\n True, then the result will be %(empty_value)s, as for an empty row/column.\n If skipna is False, then NA are treated as True, because these are not\n equal to zero.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\n**kwargs : any, default None\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n If level is specified, then, %(name2)s is returned; otherwise, %(name1)s\n is returned.\n\n%(see_also)s\n%(examples)s\"\"\"\n\n_all_desc = \"\"\"\\\nReturn whether all elements are True, potentially over an axis.\n\nReturns True unless there at least one element within a series or\nalong a Dataframe axis that is False or equivalent (e.g. zero or\nempty).\"\"\"\n\n_all_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> pd.Series([True, True]).all()\nTrue\n>>> pd.Series([True, False]).all()\nFalse\n>>> pd.Series([]).all()\nTrue\n>>> pd.Series([np.nan]).all()\nTrue\n>>> pd.Series([np.nan]).all(skipna=False)\nTrue\n\n**DataFrames**\n\nCreate a dataframe from a dictionary.\n\n>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})\n>>> df\n col1 col2\n0 True True\n1 True False\n\nDefault behaviour checks if column-wise values all return True.\n\n>>> df.all()\ncol1 True\ncol2 False\ndtype: bool\n\nSpecify ``axis='columns'`` to check if row-wise values all return True.\n\n>>> df.all(axis='columns')\n0 True\n1 False\ndtype: bool\n\nOr ``axis=None`` for whether every value is True.\n\n>>> df.all(axis=None)\nFalse\n\"\"\"\n\n_all_see_also = \"\"\"\\\nSee Also\n--------\nSeries.all : Return True if all elements are True.\nDataFrame.any : Return True if one (or more) elements are True.\n\"\"\"\n\n_cnum_doc = \"\"\"\nReturn cumulative %(desc)s over a DataFrame or Series axis.\n\nReturns a DataFrame or Series of the same size containing the cumulative\n%(desc)s.\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns'}, default 0\n The index or the name of the axis. 0 is equivalent to None or 'index'.\nskipna : bool, default True\n Exclude NA/null values. 
If an entire row/column is NA, the result\n will be NA.\n*args, **kwargs\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n Return cumulative %(desc)s of %(name1)s or %(name2)s.\n\nSee Also\n--------\ncore.window.Expanding.%(accum_func_name)s : Similar functionality\n but ignores ``NaN`` values.\n%(name2)s.%(accum_func_name)s : Return the %(desc)s over\n %(name2)s axis.\n%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.\n%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.\n%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.\n%(name2)s.cumprod : Return cumulative product over %(name2)s axis.\n\n%(examples)s\"\"\"\n\n_cummin_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummin()\n0 2.0\n1 NaN\n2 2.0\n3 -1.0\n4 -1.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummin(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the minimum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummin()\n A B\n0 2.0 1.0\n1 2.0 NaN\n2 1.0 0.0\n\nTo iterate over columns and find the minimum in each row,\nuse ``axis=1``\n\n>>> df.cummin(axis=1)\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cumsum_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumsum()\n0 2.0\n1 NaN\n2 7.0\n3 6.0\n4 6.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumsum(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the sum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumsum()\n A B\n0 2.0 1.0\n1 5.0 NaN\n2 6.0 1.0\n\nTo iterate over columns and find the sum in each row,\nuse ``axis=1``\n\n>>> df.cumsum(axis=1)\n A B\n0 2.0 3.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_cumprod_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumprod()\n0 2.0\n1 NaN\n2 10.0\n3 -10.0\n4 -0.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumprod(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the product\nin each column. 
This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumprod()\n A B\n0 2.0 1.0\n1 6.0 NaN\n2 6.0 0.0\n\nTo iterate over columns and find the product in each row,\nuse ``axis=1``\n\n>>> df.cumprod(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cummax_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummax()\n0 2.0\n1 NaN\n2 5.0\n3 5.0\n4 5.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummax(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the maximum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummax()\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 3.0 1.0\n\nTo iterate over columns and find the maximum in each row,\nuse ``axis=1``\n\n>>> df.cummax(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_any_see_also = \"\"\"\\\nSee Also\n--------\nnumpy.any : Numpy version of this method.\nSeries.any : Return whether any element is True.\nSeries.all : Return whether all elements are True.\nDataFrame.any : Return whether any element is True over requested axis.\nDataFrame.all : Return whether all elements are True over requested axis.\n\"\"\"\n\n_any_desc = \"\"\"\\\nReturn whether any element is True, potentially over an axis.\n\nReturns False unless there at least one element within a series or\nalong a Dataframe axis that is True or equivalent (e.g. non-zero or\nnon-empty).\"\"\"\n\n_any_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\nFor Series input, the output is a scalar indicating whether any element\nis True.\n\n>>> pd.Series([False, False]).any()\nFalse\n>>> pd.Series([True, False]).any()\nTrue\n>>> pd.Series([]).any()\nFalse\n>>> pd.Series([np.nan]).any()\nFalse\n>>> pd.Series([np.nan]).any(skipna=False)\nTrue\n\n**DataFrame**\n\nWhether each column contains at least one True element (the default).\n\n>>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [0, 2], \"C\": [0, 0]})\n>>> df\n A B C\n0 1 0 0\n1 2 2 0\n\n>>> df.any()\nA True\nB True\nC False\ndtype: bool\n\nAggregating over the columns.\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 2]})\n>>> df\n A B\n0 True 1\n1 False 2\n\n>>> df.any(axis='columns')\n0 True\n1 True\ndtype: bool\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 0]})\n>>> df\n A B\n0 True 1\n1 False 0\n\n>>> df.any(axis='columns')\n0 True\n1 False\ndtype: bool\n\nAggregating over the entire DataFrame with ``axis=None``.\n\n>>> df.any(axis=None)\nTrue\n\n`any` for an empty DataFrame is an empty Series.\n\n>>> pd.DataFrame([]).any()\nSeries([], dtype: bool)\n\"\"\"\n\n_shared_docs[\n \"stat_func_example\"\n] = \"\"\"\n\nExamples\n--------\n>>> idx = pd.MultiIndex.from_arrays([\n... ['warm', 'warm', 'cold', 'cold'],\n... ['dog', 'falcon', 'fish', 'spider']],\n... 
names=['blooded', 'animal'])\n>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)\n>>> s\nblooded animal\nwarm dog 4\n falcon 2\ncold fish 0\n spider 8\nName: legs, dtype: int64\n\n>>> s.{stat_func}()\n{default_output}\n\n{verb} using level names, as well as indices.\n\n>>> s.{stat_func}(level='blooded')\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\n\n>>> s.{stat_func}(level=0)\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\"\"\"\n\n_sum_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"sum\", verb=\"Sum\", default_output=14, level_output_0=6, level_output_1=8\n)\n\n_sum_examples += \"\"\"\n\nBy default, the sum of an empty or all-NA Series is ``0``.\n\n>>> pd.Series([]).sum() # min_count=0 is the default\n0.0\n\nThis can be controlled with the ``min_count`` parameter. For example, if\nyou'd like the sum of an empty series to be NaN, pass ``min_count=1``.\n\n>>> pd.Series([]).sum(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).sum()\n0.0\n\n>>> pd.Series([np.nan]).sum(min_count=1)\nnan\"\"\"\n\n_max_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"max\", verb=\"Max\", default_output=8, level_output_0=4, level_output_1=8\n)\n\n_min_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"min\", verb=\"Min\", default_output=0, level_output_0=2, level_output_1=0\n)\n\n_stat_func_see_also = \"\"\"\n\nSee Also\n--------\nSeries.sum : Return the sum.\nSeries.min : Return the minimum.\nSeries.max : Return the maximum.\nSeries.idxmin : Return the index of the minimum.\nSeries.idxmax : Return the index of the maximum.\nDataFrame.sum : Return the sum over the requested axis.\nDataFrame.min : Return the minimum over the requested axis.\nDataFrame.max : Return the maximum over the requested axis.\nDataFrame.idxmin : Return the index of the minimum over the requested axis.\nDataFrame.idxmax : Return the index of the maximum over the requested axis.\"\"\"\n\n_prod_examples = \"\"\"\n\nExamples\n--------\nBy default, the product of an empty or all-NA Series is ``1``\n\n>>> pd.Series([]).prod()\n1.0\n\nThis can be controlled with the ``min_count`` parameter\n\n>>> pd.Series([]).prod(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).prod()\n1.0\n\n>>> pd.Series([np.nan]).prod(min_count=1)\nnan\"\"\"\n\n_min_count_stub = \"\"\"\\\nmin_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n .. versionadded:: 0.22.0\n\n Added with the default being 0. 
This means the sum of an all-NA\n or empty Series is 0, and the product of an all-NA or empty\n Series is 1.\n\"\"\"\n\n\ndef _make_min_count_stat_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n func: Callable,\n see_also: str = \"\",\n examples: str = \"\",\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=_min_count_stub,\n see_also=see_also,\n examples=examples,\n )\n @Appender(_num_doc)\n def stat_func(\n self,\n axis=None,\n skipna=None,\n level=None,\n numeric_only=None,\n min_count=0,\n **kwargs,\n ):\n if name == \"sum\":\n nv.validate_sum(tuple(), kwargs)\n elif name == \"prod\":\n nv.validate_prod(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\n name, axis=axis, level=level, skipna=skipna, min_count=min_count\n )\n return self._reduce(\n func,\n name=name,\n axis=axis,\n skipna=skipna,\n numeric_only=numeric_only,\n min_count=min_count,\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n func: Callable,\n see_also: str = \"\",\n examples: str = \"\",\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=\"\",\n see_also=see_also,\n examples=examples,\n )\n @Appender(_num_doc)\n def stat_func(\n self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs\n ):\n if name == \"median\":\n nv.validate_median(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)\n return self._reduce(\n func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function_ddof(\n cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable\n) -> Callable:\n @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)\n @Appender(_num_ddof_doc)\n def stat_func(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\n name, axis=axis, level=level, skipna=skipna, ddof=ddof\n )\n return self._reduce(\n func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_cum_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n accum_func: Callable,\n accum_func_name: str,\n examples: str,\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n accum_func_name=accum_func_name,\n examples=examples,\n )\n @Appender(_cnum_doc)\n def cum_func(self, axis=None, skipna=True, *args, **kwargs):\n skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)\n if axis is None:\n axis = self._stat_axis_number\n else:\n axis = self._get_axis_number(axis)\n\n if axis == 1:\n return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T\n\n def 
block_accum_func(blk_values):\n values = blk_values.T if hasattr(blk_values, \"T\") else blk_values\n\n result = nanops.na_accum_func(values, accum_func, skipna=skipna)\n\n result = result.T if hasattr(result, \"T\") else result\n return result\n\n result = self._mgr.apply(block_accum_func)\n\n return self._constructor(result).__finalize__(self, method=name)\n\n return set_function_name(cum_func, name, cls)\n\n\ndef _make_logical_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n func: Callable,\n see_also: str,\n examples: str,\n empty_value: bool,\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n see_also=see_also,\n examples=examples,\n empty_value=empty_value,\n )\n @Appender(_bool_doc)\n def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):\n nv.validate_logical_func(tuple(), kwargs, fname=name)\n if level is not None:\n if bool_only is not None:\n raise NotImplementedError(\n \"Option bool_only is not implemented with option level.\"\n )\n return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)\n return self._reduce(\n func,\n name=name,\n axis=axis,\n skipna=skipna,\n numeric_only=bool_only,\n filter_type=\"bool\",\n )\n\n return set_function_name(logical_func, name, cls)\n",
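The `generic.py` source in the row above documents, among other things, DST handling in `tz_localize` and column-wise `pct_change`. As a minimal sketch (assuming pandas >= 1.0 and that the `Europe/Warsaw` timezone data is available), the snippet below simply re-runs the data from those docstrings to show the documented behaviour end to end; it is an illustration, not part of the recorded source.

import pandas as pd

# Nonexistent local times in the spring-forward gap are shifted to the closest
# existing time instead of raising, per the tz_localize docstring above.
s = pd.Series(
    range(2),
    index=pd.DatetimeIndex(["2015-03-29 02:30:00", "2015-03-29 03:30:00"]),
)
print(s.tz_localize("Europe/Warsaw", nonexistent="shift_forward"))

# Percentage change computed between columns, as in the pct_change docstring above.
df = pd.DataFrame(
    {"2016": [1769950, 30586265], "2015": [1500923, 40912316], "2014": [1371819, 41403351]},
    index=["GOOG", "APPL"],
)
print(df.pct_change(axis="columns"))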
"import gc\nfrom typing import Type\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import iNaT\nfrom pandas.errors import InvalidIndexError\n\nfrom pandas.core.dtypes.common import is_datetime64tz_dtype\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nimport pandas as pd\nfrom pandas import (\n CategoricalIndex,\n DatetimeIndex,\n Index,\n Int64Index,\n IntervalIndex,\n MultiIndex,\n PeriodIndex,\n RangeIndex,\n Series,\n TimedeltaIndex,\n UInt64Index,\n isna,\n)\nimport pandas._testing as tm\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\n\n\nclass Base:\n \"\"\" base class for index sub-class tests \"\"\"\n\n _holder: Type[Index]\n _compat_props = [\"shape\", \"ndim\", \"size\", \"nbytes\"]\n\n def create_index(self) -> Index:\n raise NotImplementedError(\"Method not implemented\")\n\n def test_pickle_compat_construction(self):\n # need an object to create with\n msg = (\n r\"Index\\(\\.\\.\\.\\) must be called with a collection of some \"\n r\"kind, None was passed|\"\n r\"__new__\\(\\) missing 1 required positional argument: 'data'|\"\n r\"__new__\\(\\) takes at least 2 arguments \\(1 given\\)\"\n )\n with pytest.raises(TypeError, match=msg):\n self._holder()\n\n @pytest.mark.parametrize(\"name\", [None, \"new_name\"])\n def test_to_frame(self, name):\n # see GH-15230, GH-22580\n idx = self.create_index()\n\n if name:\n idx_name = name\n else:\n idx_name = idx.name or 0\n\n df = idx.to_frame(name=idx_name)\n\n assert df.index is idx\n assert len(df.columns) == 1\n assert df.columns[0] == idx_name\n assert df[idx_name].values is not idx.values\n\n df = idx.to_frame(index=False, name=idx_name)\n assert df.index is not idx\n\n def test_shift(self):\n\n # GH8083 test the base class for shift\n idx = self.create_index()\n msg = f\"Not supported for type {type(idx).__name__}\"\n with pytest.raises(NotImplementedError, match=msg):\n idx.shift(1)\n with pytest.raises(NotImplementedError, match=msg):\n idx.shift(1, 2)\n\n def test_constructor_name_unhashable(self):\n # GH#29069 check that name is hashable\n # See also same-named test in tests.series.test_constructors\n idx = self.create_index()\n with pytest.raises(TypeError, match=\"Index.name must be a hashable type\"):\n type(idx)(idx, name=[])\n\n def test_create_index_existing_name(self):\n\n # GH11193, when an existing index is passed, and a new name is not\n # specified, the new index should inherit the previous object name\n expected = self.create_index()\n if not isinstance(expected, MultiIndex):\n expected.name = \"foo\"\n result = pd.Index(expected)\n tm.assert_index_equal(result, expected)\n\n result = pd.Index(expected, name=\"bar\")\n expected.name = \"bar\"\n tm.assert_index_equal(result, expected)\n else:\n expected.names = [\"foo\", \"bar\"]\n result = pd.Index(expected)\n tm.assert_index_equal(\n result,\n Index(\n Index(\n [\n (\"foo\", \"one\"),\n (\"foo\", \"two\"),\n (\"bar\", \"one\"),\n (\"baz\", \"two\"),\n (\"qux\", \"one\"),\n (\"qux\", \"two\"),\n ],\n dtype=\"object\",\n ),\n names=[\"foo\", \"bar\"],\n ),\n )\n\n result = pd.Index(expected, names=[\"A\", \"B\"])\n tm.assert_index_equal(\n result,\n Index(\n Index(\n [\n (\"foo\", \"one\"),\n (\"foo\", \"two\"),\n (\"bar\", \"one\"),\n (\"baz\", \"two\"),\n (\"qux\", \"one\"),\n (\"qux\", \"two\"),\n ],\n dtype=\"object\",\n ),\n names=[\"A\", \"B\"],\n ),\n )\n\n def test_numeric_compat(self):\n\n idx = self.create_index()\n # Check that this doesn't cover MultiIndex case, if/when it does,\n # we can remove 
multi.test_compat.test_numeric_compat\n assert not isinstance(idx, MultiIndex)\n\n with pytest.raises(TypeError, match=\"cannot perform __mul__\"):\n idx * 1\n with pytest.raises(TypeError, match=\"cannot perform __rmul__\"):\n 1 * idx\n\n div_err = \"cannot perform __truediv__\"\n with pytest.raises(TypeError, match=div_err):\n idx / 1\n\n div_err = div_err.replace(\" __\", \" __r\")\n with pytest.raises(TypeError, match=div_err):\n 1 / idx\n with pytest.raises(TypeError, match=\"cannot perform __floordiv__\"):\n idx // 1\n with pytest.raises(TypeError, match=\"cannot perform __rfloordiv__\"):\n 1 // idx\n\n def test_logical_compat(self):\n idx = self.create_index()\n with pytest.raises(TypeError, match=\"cannot perform all\"):\n idx.all()\n with pytest.raises(TypeError, match=\"cannot perform any\"):\n idx.any()\n\n def test_reindex_base(self):\n idx = self.create_index()\n expected = np.arange(idx.size, dtype=np.intp)\n\n actual = idx.get_indexer(idx)\n tm.assert_numpy_array_equal(expected, actual)\n\n with pytest.raises(ValueError, match=\"Invalid fill method\"):\n idx.get_indexer(idx, method=\"invalid\")\n\n def test_get_indexer_consistency(self, index):\n # See GH 16819\n if isinstance(index, IntervalIndex):\n return\n\n if index.is_unique or isinstance(index, CategoricalIndex):\n indexer = index.get_indexer(index[0:2])\n assert isinstance(indexer, np.ndarray)\n assert indexer.dtype == np.intp\n else:\n e = \"Reindexing only valid with uniquely valued Index objects\"\n with pytest.raises(InvalidIndexError, match=e):\n index.get_indexer(index[0:2])\n\n indexer, _ = index.get_indexer_non_unique(index[0:2])\n assert isinstance(indexer, np.ndarray)\n assert indexer.dtype == np.intp\n\n def test_ndarray_compat_properties(self):\n idx = self.create_index()\n assert idx.T.equals(idx)\n assert idx.transpose().equals(idx)\n\n values = idx.values\n for prop in self._compat_props:\n assert getattr(idx, prop) == getattr(values, prop)\n\n # test for validity\n idx.nbytes\n idx.values.nbytes\n\n def test_repr_roundtrip(self):\n\n idx = self.create_index()\n tm.assert_index_equal(eval(repr(idx)), idx)\n\n def test_repr_max_seq_item_setting(self):\n # GH10182\n idx = self.create_index()\n idx = idx.repeat(50)\n with pd.option_context(\"display.max_seq_items\", None):\n repr(idx)\n assert \"...\" not in str(idx)\n\n def test_copy_name(self, index):\n # gh-12309: Check that the \"name\" argument\n # passed at initialization is honored.\n if isinstance(index, MultiIndex):\n return\n\n first = type(index)(index, copy=True, name=\"mario\")\n second = type(first)(first, copy=False)\n\n # Even though \"copy=False\", we want a new object.\n assert first is not second\n\n # Not using tm.assert_index_equal() since names differ.\n assert index.equals(first)\n\n assert first.name == \"mario\"\n assert second.name == \"mario\"\n\n s1 = Series(2, index=first)\n s2 = Series(3, index=second[:-1])\n\n if not isinstance(index, CategoricalIndex):\n # See gh-13365\n s3 = s1 * s2\n assert s3.index.name == \"mario\"\n\n def test_ensure_copied_data(self, index):\n # Check the \"copy\" argument of each Index.__new__ is honoured\n # GH12309\n init_kwargs = {}\n if isinstance(index, PeriodIndex):\n # Needs \"freq\" specification:\n init_kwargs[\"freq\"] = index.freq\n elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):\n # RangeIndex cannot be initialized from data\n # MultiIndex and CategoricalIndex are tested separately\n return\n\n index_type = type(index)\n result = index_type(index.values, copy=True, 
**init_kwargs)\n if is_datetime64tz_dtype(index.dtype):\n result = result.tz_localize(\"UTC\").tz_convert(index.tz)\n if isinstance(index, (DatetimeIndex, TimedeltaIndex)):\n index = index._with_freq(None)\n\n tm.assert_index_equal(index, result)\n\n if isinstance(index, PeriodIndex):\n # .values an object array of Period, thus copied\n result = index_type(ordinal=index.asi8, copy=False, **init_kwargs)\n tm.assert_numpy_array_equal(index.asi8, result.asi8, check_same=\"same\")\n elif isinstance(index, IntervalIndex):\n # checked in test_interval.py\n pass\n else:\n result = index_type(index.values, copy=False, **init_kwargs)\n tm.assert_numpy_array_equal(index.values, result.values, check_same=\"same\")\n\n def test_memory_usage(self, index):\n index._engine.clear_mapping()\n result = index.memory_usage()\n if index.empty:\n # we report 0 for no-length\n assert result == 0\n return\n\n # non-zero length\n index.get_loc(index[0])\n result2 = index.memory_usage()\n result3 = index.memory_usage(deep=True)\n\n # RangeIndex, IntervalIndex\n # don't have engines\n if not isinstance(index, (RangeIndex, IntervalIndex)):\n assert result2 > result\n\n if index.inferred_type == \"object\":\n assert result3 > result2\n\n def test_argsort(self, request, index):\n # separately tested\n if isinstance(index, CategoricalIndex):\n return\n\n result = index.argsort()\n expected = np.array(index).argsort()\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n def test_numpy_argsort(self, index):\n result = np.argsort(index)\n expected = index.argsort()\n tm.assert_numpy_array_equal(result, expected)\n\n # these are the only two types that perform\n # pandas compatibility input validation - the\n # rest already perform separate (or no) such\n # validation via their 'values' attribute as\n # defined in pandas.core.indexes/base.py - they\n # cannot be changed at the moment due to\n # backwards compatibility concerns\n if isinstance(type(index), (CategoricalIndex, RangeIndex)):\n msg = \"the 'axis' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.argsort(index, axis=1)\n\n msg = \"the 'kind' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.argsort(index, kind=\"mergesort\")\n\n msg = \"the 'order' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.argsort(index, order=(\"a\", \"b\"))\n\n def test_take(self, index):\n indexer = [4, 3, 0, 2]\n if len(index) < 5:\n # not enough elements; ignore\n return\n\n result = index.take(indexer)\n expected = index[indexer]\n assert result.equals(expected)\n\n if not isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):\n # GH 10791\n msg = r\"'(.*Index)' object has no attribute 'freq'\"\n with pytest.raises(AttributeError, match=msg):\n index.freq\n\n def test_take_invalid_kwargs(self):\n idx = self.create_index()\n indices = [1, 2]\n\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n with pytest.raises(TypeError, match=msg):\n idx.take(indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, mode=\"clip\")\n\n def test_repeat(self):\n rep = 2\n i = self.create_index()\n expected = pd.Index(i.values.repeat(rep), name=i.name)\n tm.assert_index_equal(i.repeat(rep), expected)\n\n i = self.create_index()\n rep = np.arange(len(i))\n expected = 
pd.Index(i.values.repeat(rep), name=i.name)\n tm.assert_index_equal(i.repeat(rep), expected)\n\n def test_numpy_repeat(self):\n rep = 2\n i = self.create_index()\n expected = i.repeat(rep)\n tm.assert_index_equal(np.repeat(i, rep), expected)\n\n msg = \"the 'axis' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.repeat(i, rep, axis=0)\n\n @pytest.mark.parametrize(\"klass\", [list, tuple, np.array, Series])\n def test_where(self, klass):\n i = self.create_index()\n if isinstance(i, (pd.DatetimeIndex, pd.TimedeltaIndex)):\n # where does not preserve freq\n i = i._with_freq(None)\n\n cond = [True] * len(i)\n result = i.where(klass(cond))\n expected = i\n tm.assert_index_equal(result, expected)\n\n cond = [False] + [True] * len(i[1:])\n expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)\n result = i.where(klass(cond))\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\"case\", [0.5, \"xxx\"])\n @pytest.mark.parametrize(\n \"method\", [\"intersection\", \"union\", \"difference\", \"symmetric_difference\"]\n )\n def test_set_ops_error_cases(self, case, method, index):\n # non-iterable input\n msg = \"Input must be Index or array-like\"\n with pytest.raises(TypeError, match=msg):\n getattr(index, method)(case)\n\n def test_intersection_base(self, index):\n if isinstance(index, CategoricalIndex):\n return\n\n first = index[:5]\n second = index[:3]\n intersect = first.intersection(second)\n assert tm.equalContents(intersect, second)\n\n if is_datetime64tz_dtype(index.dtype):\n # The second.values below will drop tz, so the rest of this test\n # is not applicable.\n return\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n result = first.intersection(case)\n assert tm.equalContents(result, second)\n\n if isinstance(index, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with pytest.raises(TypeError, match=msg):\n first.intersection([1, 2, 3])\n\n def test_union_base(self, index):\n first = index[3:]\n second = index[:5]\n everything = index\n union = first.union(second)\n assert tm.equalContents(union, everything)\n\n if is_datetime64tz_dtype(index.dtype):\n # The second.values below will drop tz, so the rest of this test\n # is not applicable.\n return\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n if not isinstance(index, CategoricalIndex):\n result = first.union(case)\n assert tm.equalContents(result, everything)\n\n if isinstance(index, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with pytest.raises(TypeError, match=msg):\n first.union([1, 2, 3])\n\n def test_difference_base(self, sort, index):\n first = index[2:]\n second = index[:4]\n if isinstance(index, CategoricalIndex) or index.is_boolean():\n answer = []\n else:\n answer = index[4:]\n result = first.difference(second, sort)\n assert tm.equalContents(result, answer)\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n if isinstance(index, (DatetimeIndex, TimedeltaIndex)):\n assert type(result) == type(answer)\n tm.assert_numpy_array_equal(\n result.sort_values().asi8, answer.sort_values().asi8\n )\n else:\n result = first.difference(case, sort)\n assert tm.equalContents(result, answer)\n\n if isinstance(index, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with pytest.raises(TypeError, match=msg):\n first.difference([1, 
2, 3], sort)\n\n def test_symmetric_difference(self, index):\n if isinstance(index, CategoricalIndex):\n return\n\n first = index[1:]\n second = index[:-1]\n answer = index[[0, -1]]\n result = first.symmetric_difference(second)\n assert tm.equalContents(result, answer)\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n result = first.symmetric_difference(case)\n assert tm.equalContents(result, answer)\n\n if isinstance(index, MultiIndex):\n msg = \"other must be a MultiIndex or a list of tuples\"\n with pytest.raises(TypeError, match=msg):\n first.symmetric_difference([1, 2, 3])\n\n def test_insert_base(self, index):\n result = index[1:4]\n\n if not len(index):\n return\n\n # test 0th element\n assert index[0:4].equals(result.insert(0, index[0]))\n\n def test_delete_base(self, index):\n if not len(index):\n return\n\n if isinstance(index, RangeIndex):\n # tested in class\n return\n\n expected = index[1:]\n result = index.delete(0)\n assert result.equals(expected)\n assert result.name == expected.name\n\n expected = index[:-1]\n result = index.delete(-1)\n assert result.equals(expected)\n assert result.name == expected.name\n\n length = len(index)\n msg = f\"index {length} is out of bounds for axis 0 with size {length}\"\n with pytest.raises(IndexError, match=msg):\n index.delete(length)\n\n def test_equals(self, index):\n if isinstance(index, IntervalIndex):\n # IntervalIndex tested separately\n return\n\n assert index.equals(index)\n assert index.equals(index.copy())\n assert index.equals(index.astype(object))\n\n assert not index.equals(list(index))\n assert not index.equals(np.array(index))\n\n # Cannot pass in non-int64 dtype to RangeIndex\n if not isinstance(index, RangeIndex):\n same_values = Index(index, dtype=object)\n assert index.equals(same_values)\n assert same_values.equals(index)\n\n if index.nlevels == 1:\n # do not test MultiIndex\n assert not index.equals(Series(index))\n\n def test_equals_op(self):\n # GH9947, GH10637\n index_a = self.create_index()\n if isinstance(index_a, PeriodIndex):\n pytest.skip(\"Skip check for PeriodIndex\")\n\n n = len(index_a)\n index_b = index_a[0:-1]\n index_c = index_a[0:-1].append(index_a[-2:-1])\n index_d = index_a[0:1]\n\n msg = \"Lengths must match|could not be broadcast\"\n with pytest.raises(ValueError, match=msg):\n index_a == index_b\n expected1 = np.array([True] * n)\n expected2 = np.array([True] * (n - 1) + [False])\n tm.assert_numpy_array_equal(index_a == index_a, expected1)\n tm.assert_numpy_array_equal(index_a == index_c, expected2)\n\n # test comparisons with numpy arrays\n array_a = np.array(index_a)\n array_b = np.array(index_a[0:-1])\n array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))\n array_d = np.array(index_a[0:1])\n with pytest.raises(ValueError, match=msg):\n index_a == array_b\n tm.assert_numpy_array_equal(index_a == array_a, expected1)\n tm.assert_numpy_array_equal(index_a == array_c, expected2)\n\n # test comparisons with Series\n series_a = Series(array_a)\n series_b = Series(array_b)\n series_c = Series(array_c)\n series_d = Series(array_d)\n with pytest.raises(ValueError, match=msg):\n index_a == series_b\n\n tm.assert_numpy_array_equal(index_a == series_a, expected1)\n tm.assert_numpy_array_equal(index_a == series_c, expected2)\n\n # cases where length is 1 for one of them\n with pytest.raises(ValueError, match=\"Lengths must match\"):\n index_a == index_d\n with pytest.raises(ValueError, match=\"Lengths must match\"):\n index_a == series_d\n with 
pytest.raises(ValueError, match=\"Lengths must match\"):\n index_a == array_d\n msg = \"Can only compare identically-labeled Series objects\"\n with pytest.raises(ValueError, match=msg):\n series_a == series_d\n with pytest.raises(ValueError, match=\"Lengths must match\"):\n series_a == array_d\n\n # comparing with a scalar should broadcast; note that we are excluding\n # MultiIndex because in this case each item in the index is a tuple of\n # length 2, and therefore is considered an array of length 2 in the\n # comparison instead of a scalar\n if not isinstance(index_a, MultiIndex):\n expected3 = np.array([False] * (len(index_a) - 2) + [True, False])\n # assuming the 2nd to last item is unique in the data\n item = index_a[-2]\n tm.assert_numpy_array_equal(index_a == item, expected3)\n tm.assert_series_equal(series_a == item, Series(expected3))\n\n def test_format(self):\n # GH35439\n idx = self.create_index()\n expected = [str(x) for x in idx]\n assert idx.format() == expected\n\n def test_format_empty(self):\n # GH35712\n empty_idx = self._holder([])\n assert empty_idx.format() == []\n assert empty_idx.format(name=True) == [\"\"]\n\n def test_hasnans_isnans(self, index):\n # GH 11343, added tests for hasnans / isnans\n if isinstance(index, MultiIndex):\n return\n\n # cases in indices doesn't include NaN\n idx = index.copy(deep=True)\n expected = np.array([False] * len(idx), dtype=bool)\n tm.assert_numpy_array_equal(idx._isnan, expected)\n assert idx.hasnans is False\n\n idx = index.copy(deep=True)\n values = np.asarray(idx.values)\n\n if len(index) == 0:\n return\n elif isinstance(index, DatetimeIndexOpsMixin):\n values[1] = iNaT\n elif isinstance(index, (Int64Index, UInt64Index)):\n return\n else:\n values[1] = np.nan\n\n if isinstance(index, PeriodIndex):\n idx = type(index)(values, freq=index.freq)\n else:\n idx = type(index)(values)\n\n expected = np.array([False] * len(idx), dtype=bool)\n expected[1] = True\n tm.assert_numpy_array_equal(idx._isnan, expected)\n assert idx.hasnans is True\n\n def test_fillna(self, index):\n # GH 11343\n if len(index) == 0:\n pass\n elif isinstance(index, MultiIndex):\n idx = index.copy(deep=True)\n msg = \"isna is not defined for MultiIndex\"\n with pytest.raises(NotImplementedError, match=msg):\n idx.fillna(idx[0])\n else:\n idx = index.copy(deep=True)\n result = idx.fillna(idx[0])\n tm.assert_index_equal(result, idx)\n assert result is not idx\n\n msg = \"'value' must be a scalar, passed: \"\n with pytest.raises(TypeError, match=msg):\n idx.fillna([idx[0]])\n\n idx = index.copy(deep=True)\n values = np.asarray(idx.values)\n\n if isinstance(index, DatetimeIndexOpsMixin):\n values[1] = iNaT\n elif isinstance(index, (Int64Index, UInt64Index)):\n return\n else:\n values[1] = np.nan\n\n if isinstance(index, PeriodIndex):\n idx = type(index)(values, freq=index.freq)\n else:\n idx = type(index)(values)\n\n expected = np.array([False] * len(idx), dtype=bool)\n expected[1] = True\n tm.assert_numpy_array_equal(idx._isnan, expected)\n assert idx.hasnans is True\n\n def test_nulls(self, index):\n # this is really a smoke test for the methods\n # as these are adequately tested for function elsewhere\n if len(index) == 0:\n tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool))\n elif isinstance(index, MultiIndex):\n idx = index.copy()\n msg = \"isna is not defined for MultiIndex\"\n with pytest.raises(NotImplementedError, match=msg):\n idx.isna()\n elif not index.hasnans:\n tm.assert_numpy_array_equal(index.isna(), np.zeros(len(index), 
dtype=bool))\n tm.assert_numpy_array_equal(index.notna(), np.ones(len(index), dtype=bool))\n else:\n result = isna(index)\n tm.assert_numpy_array_equal(index.isna(), result)\n tm.assert_numpy_array_equal(index.notna(), ~result)\n\n def test_empty(self):\n # GH 15270\n index = self.create_index()\n assert not index.empty\n assert index[:0].empty\n\n def test_join_self_unique(self, join_type):\n index = self.create_index()\n if index.is_unique:\n joined = index.join(index, how=join_type)\n assert (index == joined).all()\n\n def test_map(self):\n # callable\n index = self.create_index()\n\n # we don't infer UInt64\n if isinstance(index, pd.UInt64Index):\n expected = index.astype(\"int64\")\n else:\n expected = index\n\n result = index.map(lambda x: x)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"mapper\",\n [\n lambda values, index: {i: e for e, i in zip(values, index)},\n lambda values, index: pd.Series(values, index),\n ],\n )\n def test_map_dictlike(self, mapper):\n\n index = self.create_index()\n if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)):\n pytest.skip(f\"skipping tests for {type(index)}\")\n\n identity = mapper(index.values, index)\n\n # we don't infer to UInt64 for a dict\n if isinstance(index, pd.UInt64Index) and isinstance(identity, dict):\n expected = index.astype(\"int64\")\n else:\n expected = index\n\n result = index.map(identity)\n tm.assert_index_equal(result, expected)\n\n # empty mappable\n expected = pd.Index([np.nan] * len(index))\n result = index.map(mapper(expected, index))\n tm.assert_index_equal(result, expected)\n\n def test_map_str(self):\n # GH 31202\n index = self.create_index()\n result = index.map(str)\n expected = Index([str(x) for x in index], dtype=object)\n tm.assert_index_equal(result, expected)\n\n def test_putmask_with_wrong_mask(self):\n # GH18368\n index = self.create_index()\n\n msg = \"putmask: mask and data must be the same size\"\n with pytest.raises(ValueError, match=msg):\n index.putmask(np.ones(len(index) + 1, np.bool_), 1)\n\n with pytest.raises(ValueError, match=msg):\n index.putmask(np.ones(len(index) - 1, np.bool_), 1)\n\n with pytest.raises(ValueError, match=msg):\n index.putmask(\"foo\", 1)\n\n @pytest.mark.parametrize(\"copy\", [True, False])\n @pytest.mark.parametrize(\"name\", [None, \"foo\"])\n @pytest.mark.parametrize(\"ordered\", [True, False])\n def test_astype_category(self, copy, name, ordered):\n # GH 18630\n index = self.create_index()\n if name:\n index = index.rename(name)\n\n # standard categories\n dtype = CategoricalDtype(ordered=ordered)\n result = index.astype(dtype, copy=copy)\n expected = CategoricalIndex(index.values, name=name, ordered=ordered)\n tm.assert_index_equal(result, expected)\n\n # non-standard categories\n dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered)\n result = index.astype(dtype, copy=copy)\n expected = CategoricalIndex(index.values, name=name, dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n if ordered is False:\n # dtype='category' defaults to ordered=False, so only test once\n result = index.astype(\"category\", copy=copy)\n expected = CategoricalIndex(index.values, name=name)\n tm.assert_index_equal(result, expected)\n\n def test_is_unique(self):\n # initialize a unique index\n index = self.create_index().drop_duplicates()\n assert index.is_unique is True\n\n # empty index should be unique\n index_empty = index[:0]\n assert index_empty.is_unique is True\n\n # test basic dupes\n index_dup = index.insert(0, index[0])\n assert 
index_dup.is_unique is False\n\n # single NA should be unique\n index_na = index.insert(0, np.nan)\n assert index_na.is_unique is True\n\n # multiple NA should not be unique\n index_na_dup = index_na.insert(0, np.nan)\n assert index_na_dup.is_unique is False\n\n def test_engine_reference_cycle(self):\n # GH27585\n index = self.create_index()\n nrefs_pre = len(gc.get_referrers(index))\n index._engine\n assert len(gc.get_referrers(index)) == nrefs_pre\n\n def test_getitem_2d_deprecated(self):\n # GH#30588\n idx = self.create_index()\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n res = idx[:, None]\n\n assert isinstance(res, np.ndarray), type(res)\n\n def test_contains_requires_hashable_raises(self):\n idx = self.create_index()\n\n msg = \"unhashable type: 'list'\"\n with pytest.raises(TypeError, match=msg):\n [] in idx\n\n msg = \"|\".join(\n [\n r\"unhashable type: 'dict'\",\n r\"must be real number, not dict\",\n r\"an integer is required\",\n r\"\\{\\}\",\n r\"pandas\\._libs\\.interval\\.IntervalTree' is not iterable\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n {} in idx._engine\n\n def test_copy_copies_cache(self):\n # GH32898\n idx = self.create_index()\n idx.get_loc(idx[0]) # populates the _cache.\n copy = idx.copy()\n\n # check that the copied cache is a copy of the original\n assert idx._cache == copy._cache\n assert idx._cache is not copy._cache\n # cache values should reference the same object\n for key, val in idx._cache.items():\n assert copy._cache[key] is val, key\n\n def test_shallow_copy_copies_cache(self):\n # GH32669\n idx = self.create_index()\n idx.get_loc(idx[0]) # populates the _cache.\n shallow_copy = idx._shallow_copy()\n\n # check that the shallow_copied cache is a copy of the original\n assert idx._cache == shallow_copy._cache\n assert idx._cache is not shallow_copy._cache\n # cache values should reference the same object\n for key, val in idx._cache.items():\n assert shallow_copy._cache[key] is val, key\n"
] |
[
[
"pandas.timedelta_range",
"pandas._testing.assert_produces_warning",
"pandas.Series",
"numpy.arange",
"pandas.Categorical",
"pandas.array",
"pandas.isna",
"numpy.random.randn",
"pandas.Timestamp.now",
"pandas.date_range",
"pandas._testing.makeDateIndex",
"pandas._testing.assert_series_equal",
"pandas.Timestamp"
],
[
"matplotlib.use"
],
[
"pandas.Series",
"pandas._libs.tslibs.timezones.maybe_get_tz",
"pandas.offsets.Day",
"pandas.Period._from_ordinal",
"pandas.Period.now",
"pandas.Timestamp",
"pandas.offsets.MonthBegin",
"pandas.Index",
"pandas.offsets.BDay",
"pandas.offsets.QuarterEnd",
"pandas.offsets.MonthEnd",
"pandas._libs.tslibs.timezones.dateutil_gettz",
"pandas._testing.assert_produces_warning",
"pandas.compat.numpy.np_datetime64_compat",
"pandas.Timedelta",
"pandas.offsets.YearBegin",
"numpy.timedelta64",
"pandas._testing.round_trip_pickle",
"pandas.offsets.YearEnd",
"pandas.offsets.BusinessDay",
"numpy.datetime64",
"pandas.offsets.Minute",
"pandas.Period",
"pandas.offsets.Hour",
"numpy.empty"
],
[
"pandas.io.excel._util._validate_freeze_panes",
"pandas.compat._optional.import_optional_dependency"
],
[
"pandas.PeriodIndex",
"pandas.Series",
"pandas.offsets.Day",
"numpy.asarray",
"numpy.random.randn",
"pandas._testing.makePeriodIndex",
"pandas._testing.assert_numpy_array_equal",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.offsets.MonthEnd",
"numpy.repeat",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas.concat",
"pandas._testing.round_trip_pickle",
"pandas.date_range",
"numpy.array",
"numpy.random.random",
"pandas.period_range",
"pandas.Period",
"pandas.offsets.BusinessDay"
],
[
"pandas._testing.assert_almost_equal",
"pandas.concat",
"pandas.Series",
"pandas.period_range",
"pandas.Timestamp",
"numpy.arange",
"pandas.DatetimeIndex",
"pandas.DataFrame",
"numpy.dtype",
"numpy.random.permutation",
"numpy.random.randn",
"numpy.random.rand",
"pandas.date_range",
"pandas._libs.tslibs.timezones.dateutil_gettz",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_frame_equal",
"numpy.where",
"pandas._testing.assert_index_equal"
],
[
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.dtypes.inference.is_hashable",
"numpy.unique",
"numpy.asanyarray",
"pandas.core.dtypes.common.is_re_compilable",
"pandas.concat",
"pandas.core.dtypes.common.is_list_like",
"pandas.compat.numpy.function.validate_cum_func_with_skipna",
"pandas.io.pickle.to_pickle",
"numpy.array",
"pandas.io.formats.format.DataFrameFormatter",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.window.Window",
"pandas.core.dtypes.missing.isna",
"pandas.io.sql.to_sql",
"pandas.Series",
"pandas._libs.tslibs.Timestamp",
"numpy.asarray",
"pandas.io.json.to_json",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.common.SettingWithCopyError",
"pandas.compat._optional.import_optional_dependency",
"pandas._config.config.is_nonnegative_int",
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas.core.dtypes.common.ensure_str",
"pandas.core.indexes.api.Index",
"numpy.errstate",
"pandas.core.computation.parsing.clean_column_name",
"pandas.core.common.random_state",
"pandas.core.resample.get_resampler",
"pandas.util._decorators.doc",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.indexes.datetimes.DatetimeIndex",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.missing.find_valid_index",
"pandas.core.dtypes.missing.notna",
"pandas.core.common.pipe",
"pandas.core.nanops.na_accum_func",
"pandas.core.indexes.api.RangeIndex",
"pandas.io.pytables.to_hdf",
"pandas.util._decorators.Substitution",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.errors.AbstractMethodError",
"pandas.core.dtypes.common.is_number",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.window.Rolling",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"numpy.isnan",
"pandas.core.algorithms.rank",
"pandas.core.missing.mask_missing",
"pandas.core.dtypes.common.is_bool",
"pandas.core.missing.get_fill_func",
"pandas.util._decorators.rewrite_axis_style_signature",
"pandas.core.missing.clean_fill_method",
"pandas.core.common.get_rename_function",
"pandas.core.dtypes.common.is_scalar",
"pandas.io.formats.csvs.CSVFormatter",
"pandas.core.window.ExponentialMovingWindow",
"pandas.io.formats.format.format_percentiles",
"pandas.util._validators.validate_fillna_kwargs",
"pandas.core.indexes.api.ensure_index",
"pandas.core.resample.asfreq",
"pandas.compat.numpy.function.validate_clip_with_axis",
"pandas.util._validators.validate_percentile",
"numpy.dtype",
"pandas.core.indexes.period.Period",
"numpy.any",
"pandas._libs.tslibs.to_offset",
"pandas.io.clipboards.to_clipboard",
"numpy.arange",
"pandas.core.window.Expanding",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.reshape.concat.concat",
"pandas.core.tools.datetimes.to_datetime",
"pandas.core.common.maybe_make_list",
"pandas._config.config.get_option",
"pandas.core.common.count_not_none",
"numpy.abs",
"pandas.core.ops._align_method_FRAME",
"pandas.io.formats.excel.ExcelFormatter",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.common.apply_if_callable",
"numpy.prod",
"pandas.core.common.index_labels_to_array"
],
[
"pandas._testing.equalContents",
"pandas.CategoricalIndex",
"pandas._testing.assert_produces_warning",
"pandas._testing.assert_numpy_array_equal",
"pandas.Series",
"numpy.asarray",
"numpy.arange",
"pandas.Index",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.option_context",
"pandas.isna",
"pandas.core.dtypes.dtypes.CategoricalDtype",
"numpy.argsort",
"numpy.repeat",
"numpy.array",
"pandas._testing.assert_index_equal"
]
] |
octaviomtz/inpaint_melanoma
|
[
"19cf85a0d51f04ad3e1e3ef68ddf1cc5e27a0b84",
"19cf85a0d51f04ad3e1e3ef68ddf1cc5e27a0b84"
] |
[
"inpaint_melanoma/core.py",
"inpaint_melanoma/models/skip.py"
] |
[
"# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).\n\n__all__ = ['rgb2gray', 'otsu_thresh_4largest_component', 'component_closest_center', 'get_center', 'denormalizePatches',\n 'figs_horizontal2', 'figs_comparison', 'figs_horizontal3', 'plot_inpaints_pairs', 'channels_first_last',\n 'plot_distributions', 'get_saved_images', 'get_sample_distributions_per_channel']\n\n# Cell\nimport argparse\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport torch\nimport torch.optim\nfrom torch import nn\nfrom copy import copy, deepcopy\nimport time\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nimport pandas as pd\nfrom skimage import measure, morphology\nfrom itertools import groupby, count\nimport matplotlib.patches as patches\nfrom skimage.morphology import watershed\nfrom skimage.feature import peak_local_max\nfrom torch.autograd import Variable\nfrom scipy.spatial import distance\nimport sys\nfrom PIL import Image\nfrom matplotlib.gridspec import GridSpec\nimport random\n\n# Cell\n# from models.skip import skip\nfrom .skip import *\nfrom .inpainting_utils import *\nfrom .common_utils import *\nfrom .inpainting_nodules_functions import *\n\n# Cell\nimport warnings\nfrom torch.autograd import Variable\n# from google.colab import drive\nfrom scipy import ndimage\nfrom skimage import filters\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n# Cell\ndef rgb2gray(rgb):\n '''https://stackoverflow.com/questions/12201577/how-can-i-convert-an-rgb-image-into-grayscale-in-python'''\n return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])\n\ndef otsu_thresh_4largest_component(img2):\n val = filters.threshold_otsu(img2)\n mask_otsu_orig = img2<val\n mask_otsu = ndimage.morphology.binary_erosion(mask_otsu_orig, iterations=20)\n mask_otsu = ndimage.morphology.binary_dilation(mask_otsu, iterations=80)\n mask_otsu = ndimage.morphology.binary_fill_holes(mask_otsu)\n labeled_mask, cc_num = ndimage.label(mask_otsu)\n sorted_comp = np.bincount(labeled_mask.flat)\n sorted_comp = np.sort(sorted_comp)[::-1]\n mask_lesions = []\n for i in np.arange(1, np.min([len(sorted_comp), 4])):\n mask_lesions.append((labeled_mask == np.where(np.bincount(labeled_mask.flat) == sorted_comp[i])[0][0]))\n return mask_lesions\n\ndef component_closest_center(img2, masks_lesions):\n y_half, x_half = [i//2 for i in np.shape(img2)]\n y_half_x_half = np.asarray([y_half, x_half])\n ml_closest = masks_lesions[0] # default\n dist_min = 10000\n for i in masks_lesions:\n yy,xx = np.where(i==1)\n ymed_xmed = np.asarray([np.median(yy), np.median(xx)])\n dist_new = distance.cdist(np.expand_dims(y_half_x_half,0), np.expand_dims(ymed_xmed,0))\n if dist_new < dist_min:\n dist_min = dist_new\n ml_closest = i\n return ml_closest\n\ndef get_center(img, part=.25):\n factor = 32\n y_half, x_half, _ = [i//2 for i in np.shape(img)]\n y_include, x_include = np.asarray([y_half, x_half])* part\n y_include = y_include + (factor - y_include % factor)\n x_include = x_include + (factor - x_include % factor)\n y_part1, x_part1 = int(y_half - y_include), int(x_half - x_include)\n y_part2, x_part2 = int(y_half + y_include), int(x_half + x_include)\n y_part1, y_part2, x_part1, x_part2\n return img[y_part1: y_part2, x_part1: x_part2,:], y_part1, x_part1\n\ndef denormalizePatches(img):\n img = img * 255.\n img = img.astype('int16')\n return img\n\n# Cell\ndef figs_horizontal2(ff, names_selected, suffix_available, path_source):\n f1 = [names_selected+j for j in suffix_available if names_selected+j in 
ff]\n f1 = np.unique(f1)\n f1 = np.sort(f1)\n n_img = len(f1)\n fig, ax = plt.subplots(1,n_img,figsize=(24,5))\n for idx,i in enumerate(f1):\n # name_display = '_'.join(names_selected.split('_')[1:])\n name_display = i.split('_ISIC')[0].split('_')[-1]\n a = np.fromfile(f'{path_source}{i}',dtype='int16')\n a = a/255\n a = np.clip(a,0,1)\n a = np.reshape(a, (3,512,512))\n a = np.swapaxes(a,0,1)\n a = np.swapaxes(a,1,2)\n ax[idx].imshow(a)\n ax[idx].text(10,50,name_display)\n for axx in ax.ravel(): axx.axis('off')\n plt.tight_layout()\n print(names_selected)\n return f1\n\n# Cell\ndef figs_comparison(ff, names_selected, suffix_available, gen_idx, folder='/mnt/90cf2a10-3cf8-48a6-9234-9973231cadc6/Kaggle/melanoma/datasets_preprocessed/size_512/'):\n f1 = [names_selected+j for j in suffix_available if names_selected+j in ff]\n f1 = np.unique(f1)\n f1 = np.sort(f1)\n n_img = len(f1)\n i = f1[gen_idx]\n\n key = 'ISIC'+suffix_available[0].split('.raw')[0].split('ISIC')[-1]\n orig = plt.imread(f'{folder}{key}.jpg')\n mask = np.load(f'{folder}mask_{key}.npz')\n mask = mask.f.arr_0\n\n fig, ax = plt.subplots(1,4,figsize=(12,5))\n name_display = i.split('_ISIC')[0].split('_')[-1]\n inpain = np.fromfile(f'{path_source}{i}',dtype='int16')\n inpain = inpain/255\n inpain = np.clip(inpain,0,1)\n inpain = np.reshape(inpain, (3,512,512))\n inpain = np.swapaxes(inpain,0,1)\n inpain = np.swapaxes(inpain,1,2)\n ax[1].imshow(orig)\n ax[0].imshow(orig)\n ax[0].imshow(mask, alpha=.3)\n ax[2].imshow(inpain)\n ax[3].imshow(inpain)\n ax[3].imshow(mask, alpha=.3)\n for axx in ax.ravel(): axx.axis('off')\n plt.tight_layout()\n return key, inpain\n\n# Cell\ndef figs_horizontal3(ff, names_selected, suffix_available, path_results):\n f1 = [names_selected+j for j in suffix_available if names_selected+j in ff]\n f1 = np.unique(f1)\n f1 = np.sort(f1)\n n_img = len(f1)\n fig, ax = plt.subplots(1,n_img,figsize=(24,5))\n for idx,i in enumerate(f1):\n name_display = i.split('_ISIC')[0].split('_')[-1]\n a = Image.open(f'{path_results}{i}')\n ax[idx].imshow(a)\n ax[idx].text(10,50,name_display)\n for axx in ax.ravel(): axx.axis('off')\n plt.tight_layout()\n print(names_selected)\n return f1\n\n# Cell\ndef plot_inpaints_pairs(mse_error, images_raw, images_combined, epochs_saved, filename, archi, params, path_save=''):\n fontsize = 20\n color1 = \"#3F5D7D\"\n color2 = \"#990F02\"\n color3 = \"#ffe84f\"\n widths = [1,2,2,2,2]\n fig=plt.figure(figsize=(18,8));\n gs=GridSpec(2,5, width_ratios=widths)\n ax1=fig.add_subplot(gs[:,0]) # First row, first column\n ax2=fig.add_subplot(gs[0,1]) # First row, second column\n ax3=fig.add_subplot(gs[0,2]) # First row, third column\n ax4=fig.add_subplot(gs[0,3])\n ax5=fig.add_subplot(gs[0,4])\n ax6=fig.add_subplot(gs[1,1])\n ax7=fig.add_subplot(gs[1,2])\n ax8=fig.add_subplot(gs[1,3])\n ax9=fig.add_subplot(gs[1,4])\n\n count=0\n for i, ax_ in zip(images_raw, [ax2, ax4, ax6, ax8]):\n ax_.imshow(i)\n ax_.text(10, 50, str(epochs_saved[-4+count]*10), fontsize=fontsize)\n count+=1\n for i, ax_ in zip(images_combined, [ax3, ax5, ax7, ax9]): ax_.imshow(i)\n for i in [ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9]: i.axis('off')\n\n name = f'{archi}\\n{params}'\n name = name.replace('_LR','\\nLR')\n ax9.text(10,140,name, fontsize=fontsize)\n ax1.semilogy(mse_error, color=color1)\n epochs_saved = np.asarray(epochs_saved)*10\n ax1.semilogy(np.asarray(epochs_saved)[-4:],np.asarray(mse_error)[np.asarray(epochs_saved)][-4:], marker='.', linestyle='None', markersize=20, color=color1)\n fig.tight_layout()\n\n if 
len(path_save)>0:\n fig.savefig(f'{path_save}ov_{filename}_{name}.png' )\n plt.close()\n\n# Cell\ndef channels_first_last(img, output='first'):\n '''just change an img to channels last or to channels first'''\n if output=='last':\n img = np.swapaxes(img,0,1)\n img = np.swapaxes(img,1,2)\n else:\n img = np.swapaxes(img,1,2)\n img = np.swapaxes(img,0,1)\n return img\n\n# Cell\ndef plot_distributions(img, images, mask_var):\n a = channels_first_last(img,'last')\n inp = images[-1]\n mask_ = mask_var.detach().cpu().numpy()\n mask_ = channels_first_last(mask_,'last')\n _mask_ = -mask_+1\n fig ,ax = plt.subplots(2,5, figsize=(18,7))\n ax[0,0].imshow(a)\n ax[1,0].hist(a.flatten()); ax[1,0].set_xlim([0,1]);\n ax[0,1].imshow(inp)\n ax[1,1].hist(inp.flatten()); ax[1,0].set_xlim([0,1]);\n ax[0,2].imshow(a*mask_)\n ax[1,2].hist(((a*mask_)[np.where(a*mask_>0)]).flatten()); ax[1,2].set_xlim([0,1]);\n ax[0,3].imshow(a*_mask_)\n ax[1,3].hist(((a*_mask_)[np.where(a*_mask_>0)]).flatten()); ax[1,3].set_xlim([0,1]);\n ax[0,4].imshow(inp*_mask_)\n ax[1,4].hist(((inp*_mask_)[np.where(a*_mask_>0)]).flatten()); ax[1,4].set_xlim([0,1]);\n for i in [ax[0,0], ax[0,1], ax[0,2], ax[0,3], ax[0,4]]: i.axis('off')\n for i in ax.ravel(): i.ticklabel_format(style='sci', scilimits=(0,0))\n\n# Cell\ndef get_saved_images(path_img_dest, filename, name):\n ii = np.load(f'{path_img_dest}mse/{filename}_{name}.npy')\n tmp = os.listdir(f'{path_img_dest}final/')\n tmp = [i for i in tmp if f'{filename}_{name}' in i]\n final_names = np.sort(tmp)\n tmp = os.listdir(f'{path_img_dest}final/')\n tmp = [i for i in tmp if f'{filename}_{name}' in i]\n raw_names = np.sort(tmp)\n final = [np.asarray(Image.open(f'{path_img_dest}final/{i}'))/255 for i in final_names]\n raw = [np.asarray(Image.open(f'{path_img_dest}raw/{i}'))/255 for i in raw_names]\n epochs = [int(i.split('_')[-1].split('.')[0]) for i in final_names]\n return ii, final, raw, epochs\n\n# Cell\ndef get_sample_distributions_per_channel(img, mask_var, len_lesion):\n '''For each channel, get a sample distribution of the outside skin\n of the same size of the lesion'''\n skin_only_ch0 = img[0][np.where((img[0] * mask_var[0])>0)]\n skin_only_ch1 = img[1][np.where((img[1] * mask_var[0])>0)]\n skin_only_ch2 = img[2][np.where((img[2] * mask_var[0])>0)]\n skin_sample_ch0 = random.sample(list(skin_only_ch0), len_lesion)\n skin_sample_ch1 = random.sample(list(skin_only_ch1), len_lesion)\n skin_sample_ch2 = random.sample(list(skin_only_ch2), len_lesion)\n return skin_sample_ch0, skin_sample_ch1, skin_sample_ch2",
"# AUTOGENERATED! DO NOT EDIT! File to edit: 01_models.skip.ipynb (unless otherwise specified).\n\n__all__ = ['skip']\n\n# Cell\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\n# Cell\ndef skip(\n num_input_channels=2, num_output_channels=1,\n num_channels_down=[16, 32, 64, 128, 128], num_channels_up=[16, 32, 64, 128, 128], num_channels_skip=[4, 4, 4, 4, 4],\n filter_size_down=3, filter_size_up=3, filter_skip_size=1,\n need_sigmoid=True, need_bias=True,\n pad='zero', upsample_mode='nearest', downsample_mode='stride', act_fun='LeakyReLU',\n need1x1_up=True):\n \"\"\"Assembles encoder-decoder with skip connections.\n Arguments:\n act_fun: Either string 'LeakyReLU|Swish|ELU|none' or module (e.g. nn.ReLU)\n pad (string): zero|reflection (default: 'zero')\n upsample_mode (string): 'nearest|bilinear' (default: 'nearest')\n downsample_mode (string): 'stride|avg|max|lanczos2' (default: 'stride')\n \"\"\"\n assert len(num_channels_down) == len(num_channels_up) == len(num_channels_skip)\n\n n_scales = len(num_channels_down)\n\n if not (isinstance(upsample_mode, list) or isinstance(upsample_mode, tuple)) :\n upsample_mode = [upsample_mode]*n_scales\n\n if not (isinstance(downsample_mode, list)or isinstance(downsample_mode, tuple)):\n downsample_mode = [downsample_mode]*n_scales\n\n if not (isinstance(filter_size_down, list) or isinstance(filter_size_down, tuple)) :\n filter_size_down = [filter_size_down]*n_scales\n\n if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)) :\n filter_size_up = [filter_size_up]*n_scales\n\n last_scale = n_scales - 1\n\n cur_depth = None\n\n model = nn.Sequential()\n model_tmp = model\n\n input_depth = num_input_channels\n for i in range(len(num_channels_down)):\n\n deeper = nn.Sequential()\n skip = nn.Sequential()\n\n if num_channels_skip[i] != 0:\n model_tmp.add(Concat(1, skip, deeper))\n else:\n model_tmp.add(deeper)\n\n model_tmp.add(bn(num_channels_skip[i] + (num_channels_up[i + 1] if i < last_scale else num_channels_down[i])))\n\n if num_channels_skip[i] != 0:\n skip.add(conv(input_depth, num_channels_skip[i], filter_skip_size, bias=need_bias, pad=pad))\n skip.add(bn(num_channels_skip[i]))\n skip.add(act(act_fun))\n\n # skip.add(Concat(2, GenNoise(nums_noise[i]), skip_part))\n\n deeper.add(conv(input_depth, num_channels_down[i], filter_size_down[i], 2, bias=need_bias, pad=pad, downsample_mode=downsample_mode[i]))\n deeper.add(bn(num_channels_down[i]))\n deeper.add(act(act_fun))\n\n deeper.add(conv(num_channels_down[i], num_channels_down[i], filter_size_down[i], bias=need_bias, pad=pad))\n deeper.add(bn(num_channels_down[i]))\n deeper.add(act(act_fun))\n\n deeper_main = nn.Sequential()\n\n if i == len(num_channels_down) - 1:\n # The deepest\n k = num_channels_down[i]\n else:\n deeper.add(deeper_main)\n k = num_channels_up[i + 1]\n\n deeper.add(nn.Upsample(scale_factor=2, mode=upsample_mode[i]))\n\n model_tmp.add(conv(num_channels_skip[i] + k, num_channels_up[i], filter_size_up[i], 1, bias=need_bias, pad=pad))\n model_tmp.add(bn(num_channels_up[i]))\n model_tmp.add(act(act_fun))\n\n\n if need1x1_up:\n model_tmp.add(conv(num_channels_up[i], num_channels_up[i], 1, bias=need_bias, pad=pad))\n model_tmp.add(bn(num_channels_up[i]))\n model_tmp.add(act(act_fun))\n\n input_depth = num_channels_down[i]\n model_tmp = deeper_main\n\n model.add(conv(num_channels_up[0], num_output_channels, 1, bias=need_bias, pad=pad))\n if need_sigmoid:\n model.add(nn.Sigmoid())\n\n return model"
] |
[
[
"numpy.dot",
"scipy.ndimage.morphology.binary_dilation",
"numpy.expand_dims",
"numpy.asarray",
"matplotlib.pyplot.imread",
"numpy.where",
"numpy.swapaxes",
"matplotlib.pyplot.tight_layout",
"scipy.ndimage.morphology.binary_fill_holes",
"numpy.unique",
"numpy.clip",
"numpy.reshape",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.close",
"numpy.load",
"matplotlib.pyplot.figure",
"numpy.median",
"scipy.ndimage.morphology.binary_erosion",
"scipy.ndimage.label",
"numpy.fromfile",
"matplotlib.pyplot.subplots",
"numpy.sort",
"numpy.shape",
"numpy.bincount"
],
[
"torch.nn.Sequential",
"torch.nn.Upsample",
"torch.nn.Sigmoid"
]
] |
maxibor/coproID
|
[
"7dc3362267bc89ce658651d47534455e01dc152b"
] |
[
"bin/merge_bp_sp.py"
] |
[
"#!/usr/bin/env python3\n\n\nimport argparse\nimport pandas as pd\nimport sys\n\n\ndef get_args():\n '''This function parses and return arguments passed in'''\n parser = argparse.ArgumentParser(\n prog='normalizedReadCount',\n description='Counts reads aligned to genome, and normalize by genome size')\n parser.add_argument(\n '-c',\n dest='countfile',\n default=None,\n help=\"normalized read count csv file\")\n parser.add_argument(\n '-s',\n dest='sourcepredict',\n default=None,\n help=\"sourcepredict csv file\")\n parser.add_argument(\n '-o',\n dest='output',\n default=None,\n help=\"output csv file\")\n\n args = parser.parse_args()\n cf = args.countfile\n sp = args.sourcepredict\n out = args.output\n\n return(cf, sp, out)\n\n\ndef indicator(x):\n if x > 0.5:\n return(0)\n return(1)\n\n\ndef check_learning(orga, col_list):\n if orga not in col_list:\n print(f\"{orga} not in machine learning dataset\")\n sys.exit(1)\n\n\ndef compute_coproba(indic, nrr, sp):\n return(indic*nrr*sp)\n\n\nif __name__ == \"__main__\":\n CF, SP, OUTPUT = get_args()\n\n dcf = pd.read_csv(CF, index_col=0)\n print(dcf.shape)\n orga1 = dcf['Organism_name1'][0]\n orga2 = dcf['Organism_name2'][0]\n try:\n orga3 = dcf['Organism_name3'][0]\n except:\n orga3 = None\n\n dsp = pd.read_csv(SP, index_col=0).T\n\n if orga3:\n check_learning(orga1, dsp.columns)\n check_learning(orga2, dsp.columns)\n check_learning(orga3, dsp.columns)\n else:\n check_learning(orga1, dsp.columns)\n check_learning(orga2, dsp.columns)\n\n d = dcf.merge(dsp, left_index=True, right_index=True)\n\n coproba_list_orga1 = [compute_coproba(\n indic=indicator(a), nrr=b, sp=c) for a, b, c in zip(list(d['unknown']), list(d['NormalizedReadRatio_1']), list(d[orga1]))]\n coproba_list_orga2 = [compute_coproba(\n indic=indicator(a), nrr=b, sp=c) for a, b, c in zip(list(d['unknown']), list(d['NormalizedReadRatio_2']), list(d[orga2]))]\n if orga3:\n coproba_list_orga3 = [compute_coproba(indic=indicator(a), nrr=b, sp=c) for a, b, c in zip(\n list(d['unknown']), list(d['NormalizedReadRatio_3']), list(d[orga3]))]\n\n d2 = pd.DataFrame()\n d2[f\"normalized_bp_proportion_aligned_{orga1}\"] = d['NormalizedReadRatio_1']\n d2[f\"normalized_bp_proportion_aligned_{orga2}\"] = d['NormalizedReadRatio_2']\n if orga3:\n d2[f\"normalized_bp_aligned_{orga3}\"] = d['NormalizedReadRatio_3']\n d2[f\"metagenomic_proportion_{orga1}\"] = d[orga1]\n d2[f\"metagenomic_proportion_{orga2}\"] = d[orga2]\n if orga3:\n d2[f\"metagenomic_proportion_{orga3}\"] = d[orga3]\n d2[f\"coproID_proba_{orga1}\"] = coproba_list_orga1\n d2[f\"coproID_proba_{orga2}\"] = coproba_list_orga2\n if orga3:\n d2[f\"coproID_proba_{orga3}\"] = coproba_list_orga3\n d2.index = d.index\n d2.to_csv(OUTPUT)\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
pashatab1/tablab_fish
|
[
"4e49c19ca9eb94f059fa1c15231401ffc4405195"
] |
[
"common/find_pixel_size.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Determine pixel/inch ratio from linescan across ruler\n\nInputs:\nfilename - full path to csv file containing Position and Intensity Value\n\nAssumes:\nImage is taken of inch side of ruler, and smallest ticks are 1/8 inch increment\n\n@author: tabatabai\n\"\"\"\n\nimport numpy as np\nfrom math import nan\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Read in data\n# filename = '/Users/tabatabai/Desktop/linescan_closestToWindow.csv'\n# filename = '/Volumes/storage/pasha/2020/201210/linescan_rulerOnAcrylicSheetOnFiberBoardOnRecycleBin.csv'\n\n\ndef main(filepath,unit,unit_str):\n \"\"\"Calculates the pixel size for a given image based on a ruler\n inputs: filepath- full path to file containing ruler information\n (ex: '/Volumes/storage/pasha/2021/210226' )\n unit - number corresponding to repeating distance\n (ex: 0.125 for 1/8 ticks)\n unit_str - string of actual unit value corresponding to unit\n (ex: 'inch')\n \n outputs: saves file 'pixel_conversion.txt' in filepath\n saved file contains conversion information\n \n Example Execution\n main('/Volumes/storage/pasha/2021/210226',.125,'inch')\n \n \"\"\"\n fullfile = filepath + '/ruler_linescan.csv' #expects this naming for filename\n df = pd.read_csv(fullfile) \n \n # Plot raw data\n ax1 = plt.subplot(1,2,1)\n plt.plot(df['Distance_(pixels)'], df.Gray_Value) \n ax1.set_xlim(200,300)\n #ax1.set_ylim(60,90)\n ax1.set_ylabel('Pixel Gray Value')\n ax1.set_xlabel('Pixel Position')\n \n # Calculate FFT of Intensities\n y = np.fft.fft(df.Gray_Value.to_numpy()) #calculates fast fourier transform\n y[0] = nan #First position due to data shift (not about 0)\n yystar = y*np.conj(y) # Multiply by complex conjugate - now everything real\n \n # Generate frequencies corresponding to FFT\n xf = np.linspace(0,.5,int(np.floor(len(df.Gray_Value)/2))) # frequencies used in fft\n \n # Plot Power Spectrum\n ax2 = plt.subplot(1,2,2)\n plt.plot(xf,yystar[0:int(np.floor(len(df.Gray_Value)/2))])\n ax2.set_xlim(0, .25)\n ax2.set_ylabel('Power Spectrum')\n ax2.set_xlabel('Frequency (1/d)')\n # plt.savefig('Linescan.png')\n plt.show()\n \n # Convert from frequency to px/inch\n indx = np.nanargmax(yystar[2:int(np.floor(len(df.Gray_Value)/2))]) # Max of power spectrum occurs at this index\n frequency = xf[indx]\n repeating_distance = 1/frequency\n \n # Creates value for cm_per_pixel depending on the unit you used\n if unit_str == 'inch':\n cm_per_pixel = unit/repeating_distance * 2.54\n elif unit_str == 'cm':\n cm_per_pixel = unit/repeating_distance\n else:\n print('You have a unit that your code wasnt ready for')\n \n print('Max Frequency = ', str(frequency))\n print('Repeating distance = ', str(repeating_distance), ' pixels')\n file1 = open(filepath + '/pixel_conversion.txt',\"w\") \n \n #write required informational file\n L = ['The repeating distance is: ' + str(repeating_distance) + ' pixels\\n',\n 'The repeating unit in my image is: ' + str(unit) + unit_str + '\\n',\n 'Therefore, the pixel conversion is: ' + str(unit/repeating_distance) + ' ' +unit_str +' per pixel\\n',\n 'For trex, the cm_per_pixel parameter is: ' + str(cm_per_pixel) + '\\n'] \n file1.writelines(L) \n file1.close() #to change file access modes \n \n \n# inches_per_pixel = 1/(8*repeating_distance) # this is where 1/8 inch increment comes in\n# pixels_per_inch = 1/inches_per_pixel\n\n# # Print to screen relevant information \n# print('Repeating distance = ', str(repeating_distance))\n# print('Inches per pixel = ', 
str(inches_per_pixel))\n# print('Pixels per inch = ', str(pixels_per_inch))\n\n\n# Example Execution\n# main('/Volumes/storage/pasha/2021/210226',.125,'inch')\n\n"
] |
[
[
"pandas.read_csv",
"numpy.conj",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
]
] |
GeoffKriston/deep-learning-v2-pytorch
|
[
"92f7b12e8afeb12753bc990829bfa8307b26ef6c"
] |
[
"intro-to-pytorch/fc_model.py"
] |
[
"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass Network(nn.Module):\n def __init__(self, input_size, output_size, hidden_layers, drop_p=0.5):\n ''' Builds a feedforward network with arbitrary hidden layers.\n \n Arguments\n ---------\n input_size: integer, size of the input layer\n output_size: integer, size of the output layer\n hidden_layers: list of integers, the sizes of the hidden layers\n \n '''\n super(Network,self).__init__()\n # Input to a hidden layer\n self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])\n \n # Add a variable number of more hidden layers\n layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])\n self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])\n \n self.output = nn.Linear(hidden_layers[-1], output_size)\n \n self.dropout = nn.Dropout(p=drop_p)\n \n def forward(self, x):\n ''' Forward pass through the network, returns the output logits '''\n \n for each in self.hidden_layers:\n x = F.relu(each(x))\n x = self.dropout(x)\n x = self.output(x)\n \n return F.log_softmax(x, dim=1)\n\n\ndef validation(model, testloader, criterion):\n accuracy = 0\n test_loss = 0\n for images, labels in testloader:\n\n images = images.resize_(images.size()[0], 784)\n\n output = model.forward(images)\n test_loss += criterion(output, labels).item()\n\n ## Calculating the accuracy \n # Model's output is log-softmax, take exponential to get the probabilities\n ps = torch.exp(output)\n # Class with highest probability is our predicted class, compare with true label\n equality = (labels.data == ps.max(1)[1])\n # Accuracy is number of correct predictions divided by all predictions, just take the mean\n accuracy += equality.type_as(torch.FloatTensor()).mean()\n\n return test_loss, accuracy\n\n\ndef train(model, trainloader, testloader, criterion, optimizer, epochs=5, print_every=40):\n \n steps = 0\n running_loss = 0\n for e in range(epochs):\n # Model in training mode, dropout is on\n model.train()\n for images, labels in trainloader:\n steps += 1\n \n # Flatten images into a 784 long vector\n images.resize_(images.size()[0], 784)\n \n optimizer.zero_grad()\n \n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n\n if steps % print_every == 0:\n # Model in inference mode, dropout is off\n model.eval()\n \n # Turn off gradients for validation, will speed up inference\n with torch.no_grad():\n test_loss, accuracy = validation(model, testloader, criterion)\n \n print(\"Epoch: {}/{}.. \".format(e+1, epochs),\n \"Training Loss: {:.3f}.. \".format(running_loss/print_every),\n \"Test Loss: {:.3f}.. \".format(test_loss/len(testloader)),\n \"Test Accuracy: {:.3f}\".format(accuracy/len(testloader)))\n \n running_loss = 0\n \n # Make sure dropout and grads are on for training\n model.train()\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.functional.log_softmax",
"torch.exp",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.no_grad"
]
] |
lauromoraes/promoter_paper
|
[
"62aea776cb318a13e142f84dd84bb0a29fb0e83f"
] |
[
"mymodels/parent_models.py"
] |
[
"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n@ide: PyCharm\n@author: Lauro Ângelo Gonçalves de Moraes\n@contact: [email protected]\n@created: 20/06/2020\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow.keras import models\nfrom tensorflow.keras.layers import (\n Input,\n Embedding,\n Conv2D,\n Conv1D,\n MaxPooling1D,\n MaxPooling2D,\n AveragePooling1D,\n AveragePooling2D,\n Flatten,\n Dense,\n)\nfrom tensorflow.keras.optimizers import (Adam, Nadam, )\nfrom kerastuner import HyperModel\n\n\ndef conv_pool_block(input_tensor, n_filters=100, k_size=15, pad='same', p_size=2, p_stride=2, activ='relu'):\n x = input_tensor\n input_dim = tf.keras.backend.shape(x).shape[0]\n block1 = Conv2D(\n filters=n_filters,\n kernel_size=(k_size, input_dim),\n padding=pad,\n activation=activ)(x)\n block1 = MaxPooling2D(\n pool_size=(p_size, 1),\n strides=(p_stride, 1))(block1)\n output_tensor = block1\n return output_tensor\n\n\nclass BaseModel(object):\n def __init__(self, data_list, num_classes):\n self.num_classes = num_classes\n self.input_shapes = list()\n self.input_types = list()\n for d in data_list:\n self.input_shapes.append(d.shape()[1:])\n self.input_types.append(d.get_encode())\n self.num_branches = len(data_list)\n self.inputs = self.setup_input()\n self.inputs_tensors = list()\n self.outputs_tensors = list()\n\n def setup_input(self):\n inputs = list()\n for i, t in enumerate(self.input_types):\n # Setup input for this branch\n input_shape = self.input_shapes[i]\n # print('input_shape', input_shape)\n x = Input(shape=input_shape, name='Input_{}'.format(i))\n if self.input_types[i] == 'categorical':\n n_words = self.k ** 4\n emb_size = (n_words * 2) + 1\n x = Embedding(n_words, emb_size, input_length=input_shape[0])(x)\n inputs.append(x)\n self.inputs_tensors = inputs\n return inputs\n\n def build(self):\n raise NotImplementedError()\n\n\nclass BaseHyperModel(BaseModel, HyperModel):\n def __init__(self, data_list, num_classes):\n super(HyperModel, self).__init__()\n super(BaseModel, self).__init__(data_list, num_classes)\n\n def define_search_space(self):\n raise NotImplementedError()\n\n def build(self, hp):\n raise NotImplementedError()\n\n\nclass BaselineHotCNN(BaseModel):\n def __init__(self, data_list, num_classes):\n super(BaselineHotCNN, self).__init__(data_list, num_classes)\n\n def build(self):\n input_tensor = self.setup_input()[0]\n block1 = conv_pool_block(input_tensor, n_filters=100, k_size=15, pad='same', p_size=2, p_stride=2, activ='relu')\n block2 = conv_pool_block(block1, n_filters=250, k_size=17, pad='same', p_size=2, p_stride=2, activ='relu')\n\n # Flat tensors\n flat = Flatten()(block2)\n\n # Fully connected layers\n dense1 = Dense(128, activation='relu', name='fully_con')(flat)\n\n # Classification layer\n activ = 'sigmoid' if self.num_classes == 1 else 'softmax'\n output = Dense(self.num_classes, activation=activ, name='classification_layer')(dense1)\n self.outputs_tensors.append(output)\n\n # Create model object\n model = models.Model(inputs=self.inputs_tensors, outputs=self.outputs_tensors, name='Baseline_HotCNN_Bacillus')\n return model\n\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.backend.shape",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten"
]
] |
TariniHardikar/OpenFermion
|
[
"1a1538c976d3c867c66c04a7b63766910ed73bf1",
"1a1538c976d3c867c66c04a7b63766910ed73bf1"
] |
[
"src/openfermion/ops/_quadratic_hamiltonian.py",
"src/openfermion/utils/_operator_utils_test.py"
] |
[
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Class and functions to store and manipulate Hamiltonians that are quadratic\nin the fermionic ladder operators.\"\"\"\nfrom __future__ import absolute_import\nfrom scipy.linalg import schur\n\nimport numpy\n\nfrom openfermion.config import EQ_TOLERANCE\nfrom openfermion.ops import FermionOperator, PolynomialTensor\n\n\nclass QuadraticHamiltonianError(Exception):\n pass\n\n\nclass QuadraticHamiltonian(PolynomialTensor):\n \"\"\"Class for storing Hamiltonians that are quadratic in the fermionic\n ladder operators. The operators stored in this class take the form\n\n .. math::\n\n \\sum_{p, q} (M_{pq} - \\mu \\delta_{pq}) a^\\dagger_p a_q\n + \\\\frac12 \\sum_{p, q}\n (\\\\Delta_{pq} a^\\dagger_p a^\\dagger_q + \\\\text{h.c.})\n + \\\\text{constant}\n\n where\n\n - :math:`M` is a Hermitian `n_qubits` x `n_qubits` matrix.\n - :math:`\\\\Delta` is an antisymmetric `n_qubits` x `n_qubits` matrix.\n - :math:`\\mu` is a real number representing the chemical potential.\n - :math:`\\delta_{pq}` is the Kronecker delta symbol.\n\n We separate the chemical potential :math:`\\mu` from :math:`M` so that\n we can use it to adjust the expectation value of the total number of\n particles.\n\n Attributes:\n chemical_potential(float): The chemical potential :math:`\\mu`.\n \"\"\"\n\n def __init__(self, constant, hermitian_part,\n antisymmetric_part=None, chemical_potential=0.):\n \"\"\"\n Initialize the QuadraticHamiltonian class.\n\n Args:\n constant(float): A constant term in the operator.\n hermitian_part(ndarray): The matrix :math:`M`, which represents the\n coefficients of the particle-number-conserving terms.\n This is an `n_qubits` x `n_qubits` numpy array of complex\n numbers.\n antisymmetric_part(ndarray): The matrix :math:`\\\\Delta`,\n which represents the coefficients of the\n non-particle-number-conserving terms.\n This is an `n_qubits` x `n_qubits` numpy array of complex\n numbers.\n chemical_potential(float): The chemical potential :math:`\\mu`.\n \"\"\"\n n_qubits = hermitian_part.shape[0]\n\n # Initialize combined Hermitian part\n if not chemical_potential:\n combined_hermitian_part = hermitian_part\n else:\n combined_hermitian_part = (\n hermitian_part - chemical_potential * numpy.eye(n_qubits))\n\n # Initialize the PolynomialTensor\n if antisymmetric_part is None:\n super(QuadraticHamiltonian, self).__init__(\n {(): constant, (1, 0): combined_hermitian_part})\n else:\n super(QuadraticHamiltonian, self).__init__(\n {(): constant, (1, 0): combined_hermitian_part,\n (1, 1): 0.5 * antisymmetric_part,\n (0, 0): -0.5 * antisymmetric_part.conj()})\n\n # Add remaining attributes\n self.chemical_potential = chemical_potential\n\n @property\n def combined_hermitian_part(self):\n \"\"\"The Hermitian part including the chemical potential.\"\"\"\n return self.n_body_tensors[1, 0]\n\n @property\n def antisymmetric_part(self):\n \"\"\"The antisymmetric part.\"\"\"\n if (1, 1) in self.n_body_tensors:\n return 2. 
* self.n_body_tensors[1, 1]\n else:\n return numpy.zeros((self.n_qubits, self.n_qubits), complex)\n\n @property\n def hermitian_part(self):\n \"\"\"The Hermitian part not including the chemical potential.\"\"\"\n return (self.combined_hermitian_part +\n self.chemical_potential * numpy.eye(self.n_qubits))\n\n @property\n def conserves_particle_number(self):\n \"\"\"Whether this Hamiltonian conserves particle number.\"\"\"\n discrepancy = numpy.max(numpy.abs(self.antisymmetric_part))\n return discrepancy < EQ_TOLERANCE\n\n def add_chemical_potential(self, chemical_potential):\n \"\"\"Increase (or decrease) the chemical potential by some value.\"\"\"\n self.n_body_tensors[1, 0] -= (chemical_potential *\n numpy.eye(self.n_qubits))\n self.chemical_potential += chemical_potential\n\n def orbital_energies(self, non_negative=False):\n \"\"\"Return the orbital energies.\n\n Any quadratic Hamiltonian is unitarily equivalent to a Hamiltonian\n of the form\n\n .. math::\n\n \\sum_{j} \\\\varepsilon_j b^\\dagger_j b_j + \\\\text{constant}.\n\n We call the :math:`\\\\varepsilon_j` the orbital energies.\n The eigenvalues of the Hamiltonian are sums of subsets of the\n orbital energies (up to the additive constant).\n\n Args:\n non_negative(bool): If True, always return a list of orbital\n energies that are non-negative. This option is ignored if\n the Hamiltonian does not conserve particle number, in which\n case the returned orbital energies are always non-negative.\n\n Returns\n -------\n orbital_energies(ndarray)\n A one-dimensional array containing the :math:`\\\\varepsilon_j`\n constant(float)\n The constant\n \"\"\"\n if self.conserves_particle_number and not non_negative:\n hermitian_matrix = self.combined_hermitian_part\n orbital_energies, diagonalizing_unitary = numpy.linalg.eigh(\n hermitian_matrix)\n constant = self.constant\n else:\n majorana_matrix, majorana_constant = self.majorana_form()\n canonical, orthogonal = antisymmetric_canonical_form(\n majorana_matrix)\n orbital_energies = canonical[\n range(self.n_qubits), range(self.n_qubits, 2 * self.n_qubits)]\n constant = -0.5 * numpy.sum(orbital_energies) + majorana_constant\n\n return orbital_energies, constant\n\n def ground_energy(self):\n \"\"\"Return the ground energy.\"\"\"\n _, constant = self.orbital_energies(non_negative=True)\n return constant\n\n def majorana_form(self):\n \"\"\"Return the Majorana represention of the Hamiltonian.\n\n Any quadratic Hamiltonian can be written in the form\n\n .. math::\n\n \\\\frac{i}{2} \\sum_{j, k} A_{jk} f_j f_k + \\\\text{constant}\n\n where the :math:`f_i` are normalized Majorana fermion operators:\n\n .. math::\n\n f_j = \\\\frac{1}{\\\\sqrt{2}} (a^\\dagger_j + a_j)\n\n f_{j + N} = \\\\frac{i}{\\\\sqrt{2}} (a^\\dagger_j - a_j)\n\n and :math:`A` is a (2 * `n_qubits`) x (2 * `n_qubits`) real\n antisymmetric matrix. 
This function returns the matrix\n :math:`A` and the constant.\n \"\"\"\n hermitian_part = self.combined_hermitian_part\n antisymmetric_part = self.antisymmetric_part\n\n # Compute the Majorana matrix using block matrix manipulations\n majorana_matrix = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits))\n # Set upper left block\n majorana_matrix[:self.n_qubits, :self.n_qubits] = numpy.real(-0.5j * (\n hermitian_part - hermitian_part.conj() +\n antisymmetric_part - antisymmetric_part.conj()))\n # Set upper right block\n majorana_matrix[:self.n_qubits, self.n_qubits:] = numpy.real(0.5 * (\n hermitian_part + hermitian_part.conj() -\n antisymmetric_part - antisymmetric_part.conj()))\n # Set lower left block\n majorana_matrix[self.n_qubits:, :self.n_qubits] = numpy.real(-0.5 * (\n hermitian_part + hermitian_part.conj() +\n antisymmetric_part + antisymmetric_part.conj()))\n # Set lower right block\n majorana_matrix[self.n_qubits:, self.n_qubits:] = numpy.real(-0.5j * (\n hermitian_part - hermitian_part.conj() -\n antisymmetric_part + antisymmetric_part.conj()))\n\n # Compute the constant\n majorana_constant = (0.5 * numpy.real(numpy.trace(hermitian_part)) +\n self.n_body_tensors[()])\n\n return majorana_matrix, majorana_constant\n\n def diagonalizing_bogoliubov_transform(self):\n \"\"\"Compute the unitary that diagonalizes a quadratic Hamiltonian.\n\n Any quadratic Hamiltonian can be rewritten in the form\n\n .. math::\n\n \\sum_{j} \\\\varepsilon_j b^\\dagger_j b_j + \\\\text{constant},\n\n where the :math:`b_j` are a new set fermionic operators\n that satisfy the canonical anticommutation relations.\n The new fermionic operators are linear combinations of the\n original ones:\n\n .. math::\n\n \\\\begin{pmatrix}\n b^\\dagger_1 \\\\\\\\\n \\\\vdots \\\\\\\\\n b^\\dagger_N \\\\\\\\\n b_1 \\\\\\\\\n \\\\vdots \\\\\\\\\n b_N\n \\\\end{pmatrix}\n = W\n \\\\begin{pmatrix}\n a^\\dagger_1 \\\\\\\\\n \\\\vdots \\\\\\\\\n a^\\dagger_N \\\\\\\\\n a_1 \\\\\\\\\n \\\\vdots \\\\\\\\\n a_N\n \\\\end{pmatrix},\n\n where :math:`W` is a :math:`2N \\\\times 2N` unitary matrix.\n This method returns the matrix :math:`W`.\n\n Returns:\n diagonalizing_unitary (ndarray):\n A (2 * `n_qubits`) x (2 * `n_qubits`) matrix representing\n the transformation :math:`W` of the fermionic ladder operators.\n \"\"\"\n majorana_matrix, majorana_constant = self.majorana_form()\n\n # Get the orthogonal transformation that puts majorana_matrix\n # into canonical form\n canonical, orthogonal = antisymmetric_canonical_form(majorana_matrix)\n\n # Create the matrix that converts between fermionic ladder and\n # Majorana bases\n normalized_identity = (numpy.eye(self.n_qubits, dtype=complex) /\n numpy.sqrt(2.))\n majorana_basis_change = numpy.eye(\n 2 * self.n_qubits, dtype=complex) / numpy.sqrt(2.)\n majorana_basis_change[self.n_qubits:, self.n_qubits:] *= -1.j\n majorana_basis_change[:self.n_qubits,\n self.n_qubits:] = normalized_identity\n majorana_basis_change[self.n_qubits:,\n :self.n_qubits] = 1.j * normalized_identity\n\n # Compute the unitary and return\n diagonalizing_unitary = majorana_basis_change.T.conj().dot(\n orthogonal.dot(majorana_basis_change))\n\n return diagonalizing_unitary\n\n\ndef antisymmetric_canonical_form(antisymmetric_matrix):\n \"\"\"Compute the canonical form of an antisymmetric matrix.\n\n The input is a real, antisymmetric n x n matrix A, where n is even.\n Its canonical form is::\n\n A = R^T C R\n\n where R is a real, orthogonal matrix and C has the form::\n\n [ 0 D ]\n [ -D 0 ]\n\n where D is a diagonal 
matrix with nonnegative entries.\n\n Args:\n antisymmetric_matrix(ndarray): An antisymmetric matrix with even\n dimension.\n\n Returns:\n canonical(ndarray): The canonical form C of antisymmetric_matrix\n orthogonal(ndarray): The orthogonal transformation R.\n \"\"\"\n m, p = antisymmetric_matrix.shape\n\n if m != p or p % 2 != 0:\n raise ValueError('The input matrix must be square with even '\n 'dimension.')\n\n # Check that input matrix is antisymmetric\n matrix_plus_transpose = antisymmetric_matrix + antisymmetric_matrix.T\n maxval = numpy.max(numpy.abs(matrix_plus_transpose))\n if maxval > EQ_TOLERANCE:\n raise ValueError('The input matrix must be antisymmetric.')\n\n # Compute Schur decomposition\n canonical, orthogonal = schur(antisymmetric_matrix, output='real')\n\n # The returned form is block diagonal; we need to permute rows and columns\n # to put it into the form we want\n n = p // 2\n for i in range(1, n, 2):\n swap_rows(canonical, i, n + i - 1)\n swap_columns(canonical, i, n + i - 1)\n swap_columns(orthogonal, i, n + i - 1)\n if n % 2 != 0:\n swap_rows(canonical, n - 1, n + i)\n swap_columns(canonical, n - 1, n + i)\n swap_columns(orthogonal, n - 1, n + i)\n\n # Now we permute so that the upper right block is non-negative\n for i in range(n):\n if canonical[i, n + i] < -EQ_TOLERANCE:\n swap_rows(canonical, i, n + i)\n swap_columns(canonical, i, n + i)\n swap_columns(orthogonal, i, n + i)\n\n # Now we permute so that the nonzero entries are ordered by magnitude\n # We use insertion sort\n diagonal = canonical[range(n), range(n, 2 * n)]\n for i in range(n):\n # Insert the smallest element from the unsorted part of the list into\n # index i\n arg_min = numpy.argmin(diagonal[i:]) + i\n if arg_min != i:\n # Permute the upper right block\n swap_rows(canonical, i, arg_min)\n swap_columns(canonical, n + i, n + arg_min)\n swap_columns(orthogonal, n + i, n + arg_min)\n # Permute the lower left block\n swap_rows(canonical, n + i, n + arg_min)\n swap_columns(canonical, i, arg_min)\n swap_columns(orthogonal, i, arg_min)\n # Update diagonal\n swap_rows(diagonal, i, arg_min)\n\n return canonical, orthogonal.T\n\n\ndef swap_rows(M, i, j):\n \"\"\"Swap rows i and j of matrix M.\"\"\"\n if len(M.shape) == 1:\n M[i], M[j] = M[j], M[i]\n else:\n row_i = M[i, :].copy()\n row_j = M[j, :].copy()\n M[i, :], M[j, :] = row_j, row_i\n\n\ndef swap_columns(M, i, j):\n \"\"\"Swap columns i and j of matrix M.\"\"\"\n if len(M.shape) == 1:\n M[i], M[j] = M[j], M[i]\n else:\n column_i = M[:, i].copy()\n column_j = M[:, j].copy()\n M[:, i], M[:, j] = column_j, column_i\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for operator_utils.\"\"\"\nfrom __future__ import absolute_import\n\nimport numpy\nimport os\nimport unittest\n\nfrom openfermion.config import *\nfrom openfermion.hamiltonians import plane_wave_hamiltonian\nfrom openfermion.ops import *\nfrom openfermion.transforms import jordan_wigner, get_interaction_operator\nfrom openfermion.utils import Grid\nfrom openfermion.utils._operator_utils import *\n\n\nclass OperatorUtilsTest(unittest.TestCase):\n\n def setUp(self):\n self.n_qubits = 5\n self.fermion_term = FermionOperator('1^ 2^ 3 4', -3.17)\n self.fermion_operator = self.fermion_term + hermitian_conjugated(\n self.fermion_term)\n self.qubit_operator = jordan_wigner(self.fermion_operator)\n self.interaction_operator = get_interaction_operator(\n self.fermion_operator)\n\n def test_n_qubits_single_fermion_term(self):\n self.assertEqual(self.n_qubits,\n count_qubits(self.fermion_term))\n\n def test_n_qubits_fermion_operator(self):\n self.assertEqual(self.n_qubits,\n count_qubits(self.fermion_operator))\n\n def test_n_qubits_qubit_operator(self):\n self.assertEqual(self.n_qubits,\n count_qubits(self.qubit_operator))\n\n def test_n_qubits_interaction_operator(self):\n self.assertEqual(self.n_qubits,\n count_qubits(self.interaction_operator))\n\n def test_n_qubits_bad_type(self):\n with self.assertRaises(TypeError):\n count_qubits('twelve')\n\n def test_eigenspectrum(self):\n fermion_eigenspectrum = eigenspectrum(self.fermion_operator)\n qubit_eigenspectrum = eigenspectrum(self.qubit_operator)\n interaction_eigenspectrum = eigenspectrum(self.interaction_operator)\n for i in range(2 ** self.n_qubits):\n self.assertAlmostEqual(fermion_eigenspectrum[i],\n qubit_eigenspectrum[i])\n self.assertAlmostEqual(fermion_eigenspectrum[i],\n interaction_eigenspectrum[i])\n\n def test_is_identity_unit_fermionoperator(self):\n self.assertTrue(is_identity(FermionOperator(())))\n\n def test_is_identity_double_of_unit_fermionoperator(self):\n self.assertTrue(is_identity(2. 
* FermionOperator(())))\n\n def test_is_identity_unit_qubitoperator(self):\n self.assertTrue(is_identity(QubitOperator(())))\n\n def test_is_identity_double_of_unit_qubitoperator(self):\n self.assertTrue(is_identity(QubitOperator((), 2.)))\n\n def test_not_is_identity_single_term_fermionoperator(self):\n self.assertFalse(is_identity(FermionOperator('1^')))\n\n def test_not_is_identity_single_term_qubitoperator(self):\n self.assertFalse(is_identity(QubitOperator('X1')))\n\n def test_not_is_identity_zero_fermionoperator(self):\n self.assertFalse(is_identity(FermionOperator()))\n\n def test_not_is_identity_zero_qubitoperator(self):\n self.assertFalse(is_identity(QubitOperator()))\n\n def test_is_identity_bad_type(self):\n with self.assertRaises(TypeError):\n is_identity('eleven')\n\n\nclass SaveLoadOperatorTest(unittest.TestCase):\n def setUp(self):\n self.n_qubits = 5\n self.fermion_term = FermionOperator('1^ 2^ 3 4', -3.17)\n self.fermion_operator = self.fermion_term + hermitian_conjugated(\n self.fermion_term)\n self.qubit_operator = jordan_wigner(self.fermion_operator)\n self.file_name = \"test_file\"\n\n def tearDown(self):\n file_path = os.path.join(DATA_DIRECTORY, self.file_name + '.data')\n if os.path.isfile(file_path):\n os.remove(file_path)\n\n def test_save_and_load_fermion_operators(self):\n save_operator(self.fermion_operator, self.file_name)\n loaded_fermion_operator = load_operator(self.file_name)\n self.assertEqual(self.fermion_operator.terms,\n loaded_fermion_operator.terms,\n msg=str(self.fermion_operator -\n loaded_fermion_operator))\n\n def test_save_and_load_qubit_operators(self):\n save_operator(self.qubit_operator, self.file_name)\n loaded_qubit_operator = load_operator(self.file_name)\n self.assertEqual(self.qubit_operator.terms,\n loaded_qubit_operator.terms)\n\n def test_save_no_filename_operator_utils_error(self):\n with self.assertRaises(OperatorUtilsError):\n save_operator(self.fermion_operator)\n\n def test_basic_save(self):\n save_operator(self.fermion_operator, self.file_name)\n\n def test_save_interaction_operator_not_implemented(self):\n constant = 100.0\n one_body = numpy.zeros((self.n_qubits, self.n_qubits), float)\n two_body = numpy.zeros((self.n_qubits, self.n_qubits,\n self.n_qubits, self.n_qubits), float)\n one_body[1, 1] = 10.0\n two_body[1, 2, 3, 4] = 12.0\n interaction_operator = InteractionOperator(\n constant, one_body, two_body)\n with self.assertRaises(NotImplementedError):\n save_operator(interaction_operator, self.file_name)\n\n def test_save_on_top_of_existing_operator_utils_error(self):\n save_operator(self.fermion_operator, self.file_name)\n with self.assertRaises(OperatorUtilsError):\n save_operator(self.fermion_operator, self.file_name)\n\n def test_save_on_top_of_existing_operator_error_with_explicit_flag(self):\n save_operator(self.fermion_operator, self.file_name)\n with self.assertRaises(OperatorUtilsError):\n save_operator(self.fermion_operator, self.file_name,\n allow_overwrite=False)\n\n def test_overwrite_flag_save_on_top_of_existing_operator(self):\n save_operator(self.fermion_operator, self.file_name)\n save_operator(self.fermion_operator, self.file_name,\n allow_overwrite=True)\n fermion_operator = load_operator(self.file_name)\n\n self.assertTrue(fermion_operator.isclose(self.fermion_operator))\n\n def test_load_bad_type(self):\n with self.assertRaises(TypeError):\n load_operator('bad_type_operator')\n\n def test_save_bad_type(self):\n with self.assertRaises(TypeError):\n save_operator('ping', 'somewhere')\n\n\nclass 
FourierTransformTest(unittest.TestCase):\n\n def test_fourier_transform(self):\n grid = Grid(dimensions=1, scale=1.5, length=3)\n spinless_set = [True, False]\n geometry = [('H', (0,)), ('H', (0.5,))]\n for spinless in spinless_set:\n h_plane_wave = plane_wave_hamiltonian(\n grid, geometry, spinless, True)\n h_dual_basis = plane_wave_hamiltonian(\n grid, geometry, spinless, False)\n h_plane_wave_t = fourier_transform(h_plane_wave, grid, spinless)\n self.assertTrue(normal_ordered(h_plane_wave_t).isclose(\n normal_ordered(h_dual_basis)))\n\n def test_inverse_fourier_transform_1d(self):\n grid = Grid(dimensions=1, scale=1.5, length=4)\n spinless_set = [True, False]\n geometry = [('H', (0,)), ('H', (0.5,))]\n for spinless in spinless_set:\n h_plane_wave = plane_wave_hamiltonian(\n grid, geometry, spinless, True)\n h_dual_basis = plane_wave_hamiltonian(\n grid, geometry, spinless, False)\n h_dual_basis_t = inverse_fourier_transform(\n h_dual_basis, grid, spinless)\n self.assertTrue(normal_ordered(h_dual_basis_t).isclose(\n normal_ordered(h_plane_wave)))\n\n def test_inverse_fourier_transform_2d(self):\n grid = Grid(dimensions=2, scale=1.5, length=3)\n spinless = True\n geometry = [('H', (0, 0)), ('H', (0.5, 0.8))]\n h_plane_wave = plane_wave_hamiltonian(grid, geometry, spinless, True)\n h_dual_basis = plane_wave_hamiltonian(grid, geometry, spinless, False)\n h_dual_basis_t = inverse_fourier_transform(\n h_dual_basis, grid, spinless)\n self.assertTrue(normal_ordered(h_dual_basis_t).isclose(\n normal_ordered(h_plane_wave)))\n"
] |
[
[
"numpy.abs",
"numpy.sqrt",
"scipy.linalg.schur",
"numpy.eye",
"numpy.trace",
"numpy.linalg.eigh",
"numpy.argmin",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.zeros"
]
] |
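An illustrative aside on the record above (not part of the dataset row): the antisymmetric_canonical_form helper builds on the real Schur decomposition, which for a real antisymmetric matrix already produces an antisymmetric, block-diagonal factor with 2x2 blocks of the form [[0, b], [-b, 0]]; the routine then only permutes and sorts those blocks. A minimal, self-contained sketch of that starting point (random test matrix assumed):

import numpy as np
from scipy.linalg import schur

rng = np.random.default_rng(0)
M = rng.standard_normal((6, 6))
A = M - M.T                                  # real antisymmetric, even dimension

T, Q = schur(A, output='real')               # A = Q T Q^T with Q orthogonal
assert np.allclose(Q @ T @ Q.T, A)           # factorization holds
assert np.allclose(T, -T.T)                  # T is itself antisymmetric
assert np.allclose(np.diag(T), 0.0)          # so its diagonal is zero
# The +/-b pairs on the first off-diagonal are what the routine permutes
# into the [[0, D], [-D, 0]] canonical form described in its docstring.
print(np.round(T, 3))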
vijayrgopu/neo4j-lib
|
[
"45a5abc43ee057ea0908fba0746727c36ab8f444"
] |
[
"neo_lib.py"
] |
[
"from contextlib import nullcontext\nimport pandas as pd\nfrom pprint import pprint\n\nfrom neo4j import GraphDatabase, basic_auth\nfrom py2neo import Graph\nempty_cq = \"\"\"\n// Your query goes here\n\n\"\"\"\n'''\nThis is a neo4j library 1.0\n'''\n\nclass Neo_lib:\n def __init__(self, neo_url, neo_user, neo_pwd, neo_database):\n self.driver = GraphDatabase.driver(neo_url, auth=basic_auth(neo_user, neo_pwd))\n self.neo_database = neo_database\n self.graph = Graph(neo_url, auth=(neo_user, neo_pwd))\n\n\n\n def run_cypher(self,cq,parameters=None,limit=10):\n try:\n tran = lambda tx: tx.run(cq,parameters=parameters,limit=limit).data()\n with self.driver.session(database=self.neo_database) as session:\n results = session.write_transaction(tran)\n except Exception as e:\n results = e.message \n return results\n\n def run_cypher_pd(self,cq,parameters=None):\n if cq == empty_cq:\n data = {'Empty CQ': [\"Please enter query and try again\"]}\n result_pd = pd.DataFrame.from_dict(data)\n else:\n try:\n result_pd = self.graph.run(cq,parameters).to_data_frame()\n except Exception as e:\n data = {'Message':[ e.message]}\n result_pd = pd.DataFrame.from_dict(data)\n return result_pd\n\n def reset_db(self):\n self.drop_constraints()\n cq = \"match (n) detach delete n\"\n return self.run_cypher(cq)\n \n\n def get_stats(self):\n cq = \"\"\"\n call apoc.meta.stats() yield labelCount, relTypeCount, propertyKeyCount, nodeCount, relCount\n with labelCount, relTypeCount, propertyKeyCount, nodeCount, relCount\n return labelCount, relTypeCount,propertyKeyCount, nodeCount,relCount\n \"\"\"\n return self.run_cypher_pd(cq)\n\n def checksum(self):\n cq = \"\"\"\n call apoc.meta.stats() yield labelCount, relTypeCount, propertyKeyCount, nodeCount, relCount\n with labelCount, relTypeCount, propertyKeyCount, nodeCount, relCount\n return labelCount+relTypeCount+propertyKeyCount+nodeCount+relCount as checksum\n \"\"\"\n res = self.run_cypher(cq)\n return res[0]['checksum']\n\n def node_count(self):\n cq = \"\"\"\n match (n) return count(n) as count\n \"\"\"\n return self.run_cypher_pd(cq)\n\n\n def get_stats_all(self):\n cq = \"\"\"\n call apoc.meta.stats()\n \"\"\"\n return self.run_cypher_pd(cq)\n\n\n def schema_view(self):\n cq = \"CALL db.schema.visualization()\"\n print (\"Run {} in Neo4j Browser to see a graphical view\".format(cq))\n return self.run_cypher(cq)\n\n\n def label_count(self):\n result = {\"Label\": [], \"Count\": []}\n for label in self.graph.run(\"CALL db.labels()\").to_series():\n query = f\"MATCH (:`{label}`) RETURN count(*) AS count\"\n count = self.graph.run(query).to_data_frame().iloc[0]['count']\n result[\"Label\"].append(label)\n result[\"Count\"].append(count)\n nodes_df = pd.DataFrame(data=result)\n return nodes_df\n\n def relationship_count(self):\n result = {\"From\":[], \"Relationship\": [], \"To\":[], \"Count\": []}\n x = self.schema_view()\n y = x[0]['relationships']\n for i in y:\n rel = i[1]\n query = f\"MATCH ()-[r:`{rel}`]-() RETURN count(r) AS count\"\n count = self.graph.run(query).to_data_frame().iloc[0]['count']\n result[\"From\"].append(i[0]['name'])\n result[\"Relationship\"].append(rel)\n result[\"To\"].append(i[2]['name'])\n result[\"Count\"].append(count)\n rels_df = pd.DataFrame(data=result)\n return rels_df\n\n def drop_constraints(self):\n cq = \"SHOW CONSTRAINTS\"\n x = self.run_cypher(cq)\n for c in x:\n cq = \"drop constraint \" + c[\"name\"]\n print(\"Dropping Constraint \", c[\"name\"])\n self.run_cypher(cq)"
] |
[
[
"pandas.DataFrame",
"pandas.DataFrame.from_dict"
]
] |
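Illustrative aside (not part of the row above): run_cypher returns what the neo4j driver's Result.data() yields, i.e. a list of dicts, and run_cypher_pd's error path wraps a message dict with pandas.DataFrame.from_dict. A tiny sketch of both shapes, with made-up values:

import pandas as pd

rows = [{'labelCount': 3, 'relTypeCount': 2, 'propertyKeyCount': 7,
         'nodeCount': 10, 'relCount': 12}]        # assumed sample output
print(pd.DataFrame(rows))
print(pd.DataFrame.from_dict({'Message': ['example error text']}))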
wsustcid/FlowDriveNet
|
[
"3604495269ae45e5b43964046104f685ec66e383"
] |
[
"eval.py"
] |
[
"'''\n@Author: Shuai Wang\n@Github: https://github.com/wsustcid\n@Version: 1.0.0\n@Date: 2020-09-11 23:42:23\n@LastEditTime: 2020-10-13 22:32:20\n'''\n\nimport os\nimport sys\nimport argparse\nfrom datetime import datetime\nimport time\nfrom tqdm import tqdm\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(base_dir)\nfrom utils.tf_util import log_string\n\nfrom data_gen import DataLoader\nfrom models.flowdrivenet import FlowDriveNet\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_root', default='/media/ubuntu16/Documents/Datasets/Udacity/CH2',\n help='data_root path [default: local path]')\nparser.add_argument('--input_cfg', default='GRAY', \n help='Input type: GRAY, GRAYF, GRAYF-T, XYZ, XYZF, XYZF-T, GRAYF-XYZF-T')\nparser.add_argument('--model_cfg', default='VFE',\n help='Model type: VFE, VFE-TFP, PFE, PFE-TFP, VFE-PFE-TFP')\nparser.add_argument('--height', type=int, default=200, help='img height')\nparser.add_argument('--width', type=int, default=200, help='img width')\nparser.add_argument('--seq_len', type=int, default=5, help='sel length')\nparser.add_argument('--aug_cfg', default='None', help='None, IA, RP, SC, BA, BS')\n#parser.add_argument('--use_side_cam', default=False, action='store_true')\nparser.add_argument('--num_point', type=int, default=10000, help='Point N')\nparser.add_argument('--log_dir', default='test',\n help='Log dir [default: test]')\nparser.add_argument('--batch_size', type=int, default=1,\n help='Batch Size during training [default: 16]')\nparser.add_argument('--decay_steps', type=int, default=300000,\n help='Decay step for lr decay [default: 200000]') # decay_steps = n_train * epochs\nparser.add_argument('--decay_rate', type=float, default=0.7,\n help='Decay rate for lr decay [default: 0.7]')\nparser.add_argument('--model_file', default='/media/ubuntu16/F/FlowDriveNet/logs/VFE/gray_base/model_best.ckpt',\n help='the model path to be evaluated')\n\n\nFLAGS = parser.parse_args()\n\nBATCH_SIZE = FLAGS.batch_size\n\nlog_dir = os.path.join(base_dir, 'logs', FLAGS.log_dir)\nos.makedirs(log_dir, exist_ok=True)\ntest_log_dir = os.path.join(log_dir, 'log_test.txt')\nlog_string(test_log_dir, str(FLAGS)+'\\n')\n\n# \ndataloader = DataLoader(FLAGS.data_root, FLAGS.input_cfg, \n FLAGS.height, FLAGS.width,\n FLAGS.seq_len, \n FLAGS.num_point,\n FLAGS.aug_cfg)\nmodel = FlowDriveNet(FLAGS.input_cfg, FLAGS.model_cfg, \n FLAGS.height, FLAGS.width, FLAGS.seq_len, FLAGS.num_point)\n\ndef get_bn_decay(batch):\n bn_momentum = tf.train.exponential_decay(\n 0.5,\n batch*BATCH_SIZE,\n float(FLAGS.decay_steps),\n 0.5,\n staircase=True)\n bn_decay = tf.minimum(0.99, 1 - bn_momentum)\n return bn_decay\n\ndef eval():\n with tf.Graph().as_default():\n image_pl, points_pl, _ = model.get_inputs_pl(BATCH_SIZE)\n is_training_pl = tf.placeholder(tf.bool, shape=())\n # define global_step; optimizer will increase it in every training loop\n batch = tf.get_variable('batch', [], \n initializer=tf.constant_initializer(0),\n trainable=False)\n bn_decay = get_bn_decay(batch) \n \n pred = model.get_model(image_pl, points_pl, is_training_pl, bn_decay)\n\n # Create a session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n config.log_device_placement = False\n sess = tf.Session(config=config)\n\n # Init variables\n init = tf.global_variables_initializer()\n sess.run(init)\n \n # restore model\n saver = tf.train.Saver()\n saver.restore(sess, 
FLAGS.model_file)\n\n # save all tensor\n ops = {'image_pl': image_pl,\n 'points_pl': points_pl,\n 'is_training_pl': is_training_pl,\n 'pred': pred}\n\n ## evaluation \n is_training = False\n num_batches = dataloader.num_test // BATCH_SIZE\n rmse_angle_sum = 0.0\n rmse_speed_sum = 0.0\n result_all = np.zeros((0,4)) # pred_a, pred_s, label_a, label_s\n \n time_sum = 0.0\n for i in tqdm(range(num_batches)):\n X_image_batch, X_cloud_batch, y_batch = dataloader.load_test_batch(BATCH_SIZE)\n \n feed_dict = {ops['image_pl']: X_image_batch,\n ops['points_pl']: X_cloud_batch,\n ops['is_training_pl']: is_training}\n t1 = time.time()\n pred_batch = sess.run(ops['pred'],feed_dict=feed_dict)\n t2 = time.time()\n time_sum += (t2-t1) \n result_batch = np.hstack((pred_batch, y_batch))\n result_all = np.concatenate((result_all, result_batch), axis=0)\n \n \n np.savetxt(os.path.join(log_dir, 'results.csv'), result_all, delimiter=\",\")\n # b = np.loadtxt(\"temp.csv\", delimiter=\",\")\n\n rmse_angle = np.sqrt(np.mean(np.square(result_all[:,0] - result_all[:,2])))\n rmse_speed = np.sqrt(np.mean(np.square(result_all[:,1] - result_all[:,3])))\n log_string(test_log_dir, 'Test rmse_angle: %f' % (rmse_angle))\n log_string(test_log_dir, 'Test rmse_speed: %f' % (rmse_speed))\n log_string(test_log_dir, 'Test rmse_average: %f' % ((rmse_angle+rmse_speed)/2))\n log_string(test_log_dir, 'Test FPS: %f' % (1/(time_sum/num_batches)))\n\n\nif __name__ == \"__main__\":\n eval()\n"
] |
[
[
"numpy.square",
"numpy.hstack",
"tensorflow.Graph",
"tensorflow.minimum",
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"numpy.concatenate",
"tensorflow.constant_initializer",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.zeros"
]
] |
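Illustrative aside (not part of the row above): the evaluation loop in eval.py stacks predictions and labels into result_all with columns [pred_angle, pred_speed, label_angle, label_speed] and then reports per-column RMSE. The same computation on a tiny made-up array:

import numpy as np

result_all = np.array([[0.10, 5.0, 0.12, 5.5],     # assumed sample rows
                       [0.05, 6.0, 0.00, 5.8]])
rmse_angle = np.sqrt(np.mean(np.square(result_all[:, 0] - result_all[:, 2])))
rmse_speed = np.sqrt(np.mean(np.square(result_all[:, 1] - result_all[:, 3])))
print(rmse_angle, rmse_speed, (rmse_angle + rmse_speed) / 2)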
flowmatters/veneer-py
|
[
"af551b49038f5f93358b510fb893015c590bf6d4"
] |
[
"veneer/losses.py"
] |
[
"from types import MethodType\nimport pandas as pd\nimport numpy as np\nfrom .server_side import VeneerNetworkElementActions\nfrom .utils import _quote_string\n\nGET_LOSS_TABLE_SCRIPTLET='''\nignoreExceptions=False\nfn = target.lossFct\nfor row in fn:\n result.append((row.Key,row.Value))\n'''\n\nclass VeneerLossNodeActions(VeneerNetworkElementActions):\n def __init__(self,node_actions):\n self.node_actions = node_actions\n self._name_accessor = self.node_actions._name_accessor\n super(VeneerLossNodeActions, self).__init__(node_actions._ironpy)\n def _build_accessor(self, parameter=None, nodes=None):\n return self.node_actions._build_accessor(parameter,nodes=nodes,node_types='LossNodeModel')\n\n def loss_table(self,node):\n '''\n Retrieve the Loss table for a given loss node\n '''\n code = GET_LOSS_TABLE_SCRIPTLET\n vals = self.apply(code,init='[]',nodes=[node])\n return pd.DataFrame(vals,columns=['inflow','loss'])\n"
] |
[
[
"pandas.DataFrame"
]
] |
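Illustrative aside (not part of the row above): loss_table collects (Key, Value) pairs from the server-side scriptlet and hands them to pandas as a two-column table. With assumed sample pairs:

import pandas as pd

vals = [(0.0, 0.0), (10.0, 1.5), (20.0, 4.0)]      # assumed (inflow, loss) pairs
print(pd.DataFrame(vals, columns=['inflow', 'loss']))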
rudolfspetrovs/benchml
|
[
"896673f387a6bb9b185664ddd54f569a1ba54e51"
] |
[
"benchml/models/mod_basic.py"
] |
[
"import numpy as np\n\nimport benchml.transforms as btf\nfrom benchml.hyper import BayesianHyper, GridHyper, Hyper\nfrom benchml.models.mod_dscribe import compile_dscribe, compile_dscribe_periodic\n\n\ndef compile_null(**kwargs):\n return []\n\n\ndef compile_physchem(custom_fields=None, with_hyper=False, **kwargs):\n if custom_fields is None:\n custom_fields = []\n if with_hyper:\n hyper = BayesianHyper(\n Hyper(\n {\n \"pred.n_estimators\": [10, 200],\n \"pred.max_depth\": [2, 16],\n }\n ),\n convert={\"pred.n_estimators\": \"lambda x: int(x)\", \"pred.max_depth\": \"lambda x: int(x)\"},\n init_points=10,\n n_iter=30,\n )\n else:\n hyper = GridHyper(Hyper({\"pred.max_depth\": [None]}))\n return [\n btf.Module(\n tag=\"physchem\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.Physchem2D(tag=\"Physchem2D\", inputs={\"configs\": \"input.configs\"}),\n btf.PhyschemUser(\n tag=\"PhyschemUser\",\n args={\"fields\": custom_fields},\n inputs={\"configs\": \"input.configs\"},\n ),\n btf.Concatenate(tag=\"desc\", inputs={\"X\": [\"Physchem2D.X\", \"PhyschemUser.X\"]}),\n btf.RandomForestRegressor(tag=\"pred\", inputs={\"X\": \"desc.X\", \"y\": \"input.y\"}),\n ],\n hyper=hyper,\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"pred.y\"},\n ),\n ]\n\n\ndef make_soap_krr(tag):\n return btf.Module(\n tag=tag,\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.UniversalSoapGylmxx(tag=\"descriptor\", inputs={\"configs\": \"input.configs\"}),\n btf.ReduceTypedMatrix(tag=\"reduce\", inputs={\"X\": \"descriptor.X\", \"T\": \"descriptor.T\"}),\n btf.WhitenMatrix(tag=\"whiten\", inputs={\"X\": \"reduce.X\"}),\n btf.KernelDot(tag=\"kernel\", inputs={\"X\": \"whiten.X\"}),\n btf.KernelRidge(\n tag=\"predictor\", args={\"alpha\": None}, inputs={\"K\": \"kernel.K\", \"y\": \"input.y\"}\n ),\n ],\n hyper=GridHyper(\n Hyper(\n {\n \"predictor.alpha\": np.logspace(-7, +7, 15),\n }\n )\n ),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"predictor.y\"},\n )\n\n\ndef compile_soap(basic=False, **kwargs):\n if basic:\n hyper = GridHyper(\n Hyper({\"descriptor.normalize\": [False]}),\n Hyper({\"descriptor.mode\": [\"minimal\"]}),\n Hyper({\"descriptor.crossover\": [True]}),\n Hyper({\"reduce.reduce_op\": [\"sum\"]}),\n Hyper({\"reduce.normalize\": [True]}),\n Hyper({\"reduce.reduce_by_type\": [False]}),\n Hyper({\"whiten.centre\": [False]}),\n Hyper({\"whiten.scale\": [False]}),\n Hyper({\"predictor.power\": [2]}),\n )\n else:\n hyper = GridHyper(\n Hyper({\"descriptor.normalize\": [True]}),\n Hyper({\"descriptor.mode\": [\"minimal\", \"smart\", \"longrange\"]}),\n Hyper({\"descriptor.crossover\": [False, True]}),\n Hyper({\"reduce.reduce_op\": [\"mean\"]}), # + \"sum\"\n Hyper({\"reduce.normalize\": [True]}),\n Hyper({\"reduce.reduce_by_type\": [False]}), # + True\n Hyper({\"whiten.centre\": [False]}), # + True\n Hyper({\"whiten.scale\": [False]}), # + True\n Hyper({\"predictor.power\": [2]}),\n )\n models = []\n for hidx, updates in enumerate(hyper):\n model = make_soap_krr(tag=\"soap_krr_%02d\" % hidx)\n model.hyperUpdate(updates)\n models.append(model)\n return models\n\n\ndef compile_morgan_krr(**kwargs):\n return [\n btf.Module(\n tag=\"morgan_krr\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.MorganFP(\n tag=\"desc\",\n args={\"length\": 4096, \"radius\": 2, \"normalize\": True},\n inputs={\"configs\": \"input.configs\"},\n ),\n btf.KernelDot(tag=\"kern\", inputs={\"X\": \"desc.X\"}),\n btf.KernelRidge(\n args={\"alpha\": 1e-5, \"power\": 2}, inputs={\"K\": 
\"kern.K\", \"y\": \"input.y\"}\n ),\n ],\n hyper=GridHyper(\n Hyper(\n {\n \"KernelRidge.alpha\": np.logspace(-6, +1, 8),\n }\n ),\n Hyper({\"KernelRidge.power\": [2.0]}),\n ),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"KernelRidge.y\"},\n )\n ]\n\n\ndef compile_morgan(**kwargs):\n return [\n # Macro example\n # >>> Module(\n # >>> tag=\"morgan_krrx2\",\n # >>> transforms=[\n # >>> ExtXyzInput(tag=\"input\"),\n # >>> MorganKernel(\n # >>> tag=\"A\",\n # >>> args={\"x.fp_length\": 1024, \"x.fp_radius\": 2},\n # >>> inputs={\"x.configs\": \"input.configs\"}),\n # >>> MorganKernel(\n # >>> tag=\"B\",\n # >>> args={\"x.fp_length\": 2048, \"x.fp_radius\": 4},\n # >>> inputs={\"x.configs\": \"input.configs\"}),\n # >>> Add(\n # >>> args={\"coeffs\": [ 0.5, 0.5 ]},\n # >>> inputs={\"X\": [\"A/k.K\", \"B/k.K\"]}),\n # >>> KernelRidge(\n # >>> args={\"alpha\": 0.1, \"power\": 2},\n # >>> inputs={\"K\": \"Add.y\", \"y\": \"input.y\"})\n # >>> ],\n # >>> hyper=BayesianHyper(\n # >>> Hyper({ \"Add.coeffs\":\n # >>> list(map(lambda f: [ f, 1.-f ], np.linspace(0.25, 0.75, 3)))\n # >>> }),\n # >>> Hyper({ \"KernelRidge.alpha\":\n # >>> np.linspace(-3,+1, 5),\n # >>> }),\n # >>> n_iter=40,\n # >>> init_points=10,\n # >>> convert={\n # >>> \"KernelRidge.alpha\": lambda p: 10**p}),\n # >>> broadcast={ \"meta\": \"input.meta\" },\n # >>> outputs={ \"y\": \"KernelRidge.y\" },\n # >>> ),\n btf.Module(\n tag=\"morgan_krr_ext\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.MorganFP(\n tag=\"desc\",\n args={\"length\": 4096, \"radius\": 2},\n inputs={\"configs\": \"input.configs\"},\n ),\n btf.KernelDot(tag=\"kern\", inputs={\"X\": \"desc.X\"}),\n btf.KernelRidge(\n args={\"alpha\": 1e-5, \"power\": 2}, inputs={\"K\": \"kern.K\", \"y\": \"input.y\"}\n ),\n ],\n hyper=GridHyper(\n Hyper({\"desc.radius\": [1, 2, 3, 4]}),\n Hyper(\n {\n \"KernelRidge.alpha\": np.logspace(-5, +1, 7),\n }\n ),\n Hyper({\"KernelRidge.power\": [2.0]}),\n ),\n # >>> hyper=BayesianHyper(\n # >>> Hyper({ \"KernelRidge.alpha\": np.linspace(-3,+1, 5), }),\n # >>> Hyper({ \"KernelRidge.power\": [ 1., 4. 
] }),\n # >>> n_iter=40,\n # >>> init_points=10,\n # >>> convert={\n # >>> \"KernelRidge.alpha\": \"lambda p: 10**p\"\n # >>> }),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"KernelRidge.y\"},\n ),\n btf.Module(\n tag=\"morgan_ridge\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.MorganFP(args={\"length\": 2048}, inputs={\"configs\": \"input.configs\"}),\n btf.Ridge(inputs={\"X\": \"MorganFP.X\", \"y\": \"input.y\"}),\n ],\n hyper=BayesianHyper(\n Hyper({\"Ridge.alpha\": np.linspace(-2, 2, 5)}),\n convert={\"Ridge.alpha\": \"lambda p: 10**p\"},\n ),\n outputs={\"y\": \"Ridge.y\"},\n ),\n btf.Module(\n tag=\"morgan_gb\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.MorganFP(args={\"length\": 2048}, inputs={\"configs\": \"input.configs\"}),\n btf.GradientBoosting(inputs={\"X\": \"MorganFP.X\", \"y\": \"input.y\"}),\n ],\n hyper=GridHyper(Hyper({\"GradientBoosting.max_depth\": [1, 3, 5]})),\n outputs={\"y\": \"GradientBoosting.y\"},\n ),\n ]\n\n\ndef compile_gylm_match(**kwargs):\n return [\n btf.Module(\n tag=\"gylm_smooth_match\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.GylmAtomic(tag=\"desc\", inputs={\"configs\": \"input.configs\"}),\n btf.KernelSmoothMatch(inputs={\"X\": \"desc.X\"}),\n btf.KernelRidge(\n args={\"alpha\": 1e-5, \"power\": 2},\n inputs={\"K\": \"KernelSmoothMatch.K\", \"y\": \"input.y\"},\n ),\n ],\n hyper=GridHyper(\n Hyper(\n {\n \"KernelRidge.alpha\": np.logspace(-5, +1, 7),\n }\n ),\n Hyper({\"KernelRidge.power\": [2.0]}),\n ),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"KernelRidge.y\"},\n ),\n ]\n\n\ndef compile_gylm(**kwargs):\n return [\n btf.Module(\n tag=\"gylm\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.GylmAverage(tag=\"desc\", inputs={\"configs\": \"input.configs\"}),\n btf.KernelDot(inputs={\"X\": \"desc.X\"}),\n btf.KernelRidge(\n args={\"alpha\": 1e-5, \"power\": 2}, inputs={\"K\": \"KernelDot.K\", \"y\": \"input.y\"}\n ),\n ],\n hyper=BayesianHyper(\n Hyper(\n {\n \"KernelRidge.alpha\": np.linspace(-5, +1, 7),\n }\n ),\n Hyper({\"KernelRidge.power\": [1.0, 4.0]}),\n init_points=10,\n n_iter=30,\n convert={\"KernelRidge.alpha\": \"lambda p: 10**p\"},\n ),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"KernelRidge.y\"},\n ),\n ]\n\n\ndef compile_gylm_grid(**kwargs):\n return [\n btf.Module(\n tag=\"gylm_grid\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.GylmAverage(tag=\"desc\", inputs={\"configs\": \"input.configs\"}),\n btf.KernelDot(inputs={\"X\": \"desc.X\"}),\n btf.KernelRidge(\n args={\"alpha\": 1e-5, \"power\": 2}, inputs={\"K\": \"KernelDot.K\", \"y\": \"input.y\"}\n ),\n ],\n hyper=GridHyper(\n Hyper(\n {\n \"KernelRidge.alpha\": np.logspace(-5, +1, 7),\n }\n ),\n Hyper({\"KernelRidge.power\": [2.0]}),\n init_points=10,\n n_iter=30,\n convert={\"KernelRidge.alpha\": \"lambda p: 10**p\"},\n ),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"KernelRidge.y\"},\n ),\n ]\n\n\ndef register_all():\n return {\n \"dscribe\": compile_dscribe,\n \"dscribe_periodic\": compile_dscribe_periodic,\n \"ecfp\": compile_morgan,\n \"gylm\": compile_gylm,\n \"gylm_match\": compile_gylm_match,\n \"gylm_grid\": compile_gylm_grid,\n \"morgan_krr\": compile_morgan_krr,\n \"null\": compile_null,\n \"physchem\": compile_physchem,\n \"soap\": compile_soap,\n }\n"
] |
[
[
"numpy.logspace",
"numpy.linspace"
]
] |
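Illustrative aside (not part of the row above): the model definitions in this row express alpha search spaces in two equivalent ways, either np.logspace directly inside a GridHyper, or np.linspace over exponents that a BayesianHyper 'convert' lambda maps through 10**p. The equivalence, checked directly:

import numpy as np

alphas_grid = np.logspace(-5, +1, 7)                      # the GridHyper pattern
alphas_from_exponents = 10.0 ** np.linspace(-5, +1, 7)    # the BayesianHyper pattern
assert np.allclose(alphas_grid, alphas_from_exponents)
print(alphas_grid)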
catnlp/metaLSTM
|
[
"08b3086ebc558b936898022dd7eea7d726e6d491",
"f477f49d6435f0fbf30a848efc72b67fa34a3f9f"
] |
[
"NER/Module/crf.py",
"Modules/MetaRNNs.py"
] |
[
"# encoding:utf-8\n'''\n@Author: catnlp\n@Email: [email protected]\n@Time: 2018/5/2 15:02\n'''\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nSTART_TAG = -2\nSTOP_TAG = -1\n\ndef log_sum_exp(vec, m_size):\n _, idx = torch.max(vec, 1)\n max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size)\n return max_score.view(-1, m_size) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1, m_size)\n\nclass CRF(nn.Module):\n def __init__(self, tagset_size, gpu):\n super(CRF, self).__init__()\n print('---build batched CRF---')\n self.tagset_size = tagset_size\n self.gpu = gpu\n\n init_transitions = torch.zeros(self.tagset_size+2, self.tagset_size+2)\n init_transitions[:, START_TAG] = -1000.0\n init_transitions[STOP_TAG, :] = -1000.0\n if gpu:\n init_transitions = init_transitions.cuda()\n self.transitions = nn.Parameter(init_transitions)\n\n def _calculate_PZ(self, feats, mask):\n batch_size = feats.size(0)\n seq_len = feats.size(1)\n tag_size = feats.size(2)\n assert(tag_size == self.tagset_size+2)\n mask = mask.transpose(1, 0).contiguous()\n ins_num = seq_len * batch_size\n\n feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)\n scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)\n scores = scores.view(seq_len, batch_size, tag_size, tag_size)\n\n seq_iter = enumerate(scores)\n _, inivalues = seq_iter.__next__()\n\n partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size, 1)\n for idx, cur_values in seq_iter:\n cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)\n cur_partition = log_sum_exp(cur_values, tag_size)\n\n mask_idx = mask[idx, :].view(batch_size, 1).expand(batch_size, tag_size)\n masked_cur_partition = cur_partition.masked_select(mask_idx)\n mask_idx = mask_idx.contiguous().view(batch_size, tag_size, 1)\n\n partition.masked_scatter_(mask_idx, masked_cur_partition)\n cur_values = self.transitions.view(1, tag_size, tag_size).expand(batch_size, tag_size, tag_size) + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)\n cur_partition = log_sum_exp(cur_values, tag_size)\n final_partition = cur_partition[:, STOP_TAG]\n return final_partition.sum(), scores\n\n def viterbi_decode(self, feats, mask):\n batch_size = feats.size(0)\n seq_len = feats.size(1)\n tag_size = feats.size(2)\n assert(tag_size == self.tagset_size+2)\n\n length_mask = torch.sum(mask, dim=1).view(batch_size, 1).long()\n mask = mask.transpose(1, 0).contiguous()\n ins_num = seq_len * batch_size\n\n feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)\n scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)\n scores = scores.view(seq_len, batch_size, tag_size, tag_size)\n\n seq_iter = enumerate(scores)\n back_points = list()\n partition_history = list()\n mask = (1 - mask.long()).byte()\n _, inivalues = seq_iter.__next__()\n partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size)\n partition_history.append(partition)\n for idx, cur_values in seq_iter:\n cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)\n partition, cur_bp = torch.max(cur_values, 1)\n partition_history.append(partition)\n\n 
cur_bp.masked_fill_(mask[idx].view(batch_size, 1).expand(batch_size, tag_size), 0)\n back_points.append(cur_bp)\n partition_history = torch.cat(partition_history, 0)\n partition_history = partition_history.view(seq_len, batch_size, -1).transpose(1, 0).contiguous()\n last_position = length_mask.view(batch_size, 1, 1).expand(batch_size, 1, tag_size) - 1\n last_partition = torch.gather(partition_history, 1, last_position).view(batch_size, tag_size, 1)\n last_values = last_partition.expand(batch_size, tag_size, tag_size) + self.transitions.view(1, tag_size, tag_size).expand(batch_size, tag_size, tag_size)\n _, last_bp = torch.max(last_values, 1)\n pad_zero = autograd.Variable(torch.zeros(batch_size, tag_size)).long()\n if self.gpu:\n pad_zero = pad_zero.cuda()\n back_points.append(pad_zero)\n back_points = torch.cat(back_points).view(seq_len, batch_size, tag_size)\n\n pointer = last_bp[:, STOP_TAG]\n insert_last = pointer.contiguous().view(batch_size, 1, 1).expand(batch_size, 1, tag_size)\n back_points = back_points.transpose(1, 0).contiguous()\n back_points.scatter_(1, last_position, insert_last)\n back_points = back_points.transpose(1, 0).contiguous()\n\n decode_idx = autograd.Variable(torch.LongTensor(seq_len, batch_size))\n if self.gpu:\n decode_idx = decode_idx.cuda()\n decode_idx[-1] = pointer.data\n for idx in range(len(back_points)-2, -1, -1):\n pointer = torch.gather(back_points[idx], 1, pointer.contiguous().view(batch_size, 1))\n decode_idx[idx] = pointer.data\n path_score = None\n decode_idx = decode_idx.transpose(1, 0)\n return path_score, decode_idx\n\n def forward(self, feats, mask):\n path_score, best_path = self._viterbi_decode(feats, mask)\n return path_score, best_path\n\n def _score_sentence(self, scores, tags, mask):\n batch_size = scores.size(1)\n seq_len = scores.size(0)\n tag_size = scores.size(2)\n\n new_tags = autograd.Variable(torch.LongTensor(batch_size, seq_len))\n if self.gpu:\n new_tags = new_tags.cuda()\n for idx in range(seq_len):\n if idx == 0:\n new_tags[:, 0] = (tag_size - 2) * tag_size + tags[:, 0]\n else:\n new_tags[:, idx] = tags[:, idx-1] * tag_size + tags[:, idx]\n\n end_transition = self.transitions[:, STOP_TAG].contiguous().view(1, tag_size).expand(batch_size, tag_size)\n length_mask = torch.sum(mask, dim=1).view(batch_size, 1).long()\n end_ids = torch.gather(tags, 1, length_mask-1)\n\n end_energy = torch.gather(end_transition, 1, end_ids)\n\n new_tags = new_tags.transpose(1, 0).contiguous().view(seq_len, batch_size, 1)\n tg_energy = torch.gather(scores.view(seq_len, batch_size, -1), 2, new_tags).view(seq_len, batch_size)\n tg_energy = tg_energy.masked_select(mask.transpose(1, 0))\n\n gold_score = tg_energy.sum() + end_energy.sum()\n return gold_score\n\n def neg_log_likelihood_loss(self, feats, tags, mask):\n forward_score, scores = self._calculate_PZ(feats, mask)\n gold_score = self._score_sentence(scores, tags, mask)\n return forward_score - gold_score\n",
"# encoding:utf-8\n'''\n@Author: catnlp\n@Email: [email protected]\n@Time: 2018/4/25 21:19\n'''\nfrom Modules import MetaRNNCells\n\nimport torch\nfrom torch.nn import Module\nfrom torch.autograd import Variable\n\nclass MetaRNNBase(Module):\n def __init__(self, mode, input_size, hidden_size, hyper_hidden_size, hyper_embedding_size, num_layers, bias=True, bias_hyper=True, gpu=False, bidirectional=False):\n super(MetaRNNBase, self).__init__()\n self.mode = mode\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.hyper_hidden_size = hyper_hidden_size\n self.hyper_embedding_size = hyper_embedding_size\n self.num_layers = num_layers\n self.bias = bias\n self.bias_hyper = bias_hyper\n self.gpu = gpu\n self.bidirectional=bidirectional\n\n mode2cell = {'MetaRNN': MetaRNNCells.MetaRNNCell,\n 'MetaLSTM': MetaRNNCells.MetaLSTMCell}\n\n Cell = mode2cell[mode]\n\n kwargs = {'input_size': input_size,\n 'hidden_size': hidden_size,\n 'hyper_hidden_size': hyper_hidden_size,\n 'hyper_embedding_size': hyper_embedding_size,\n 'bias': bias,\n 'bias_hyper': bias_hyper}\n\n if self.bidirectional:\n self.cell0 = Cell(**kwargs)\n for i in range(1, num_layers):\n kwargs['input_size'] = hidden_size * 2\n cell = Cell(**kwargs)\n setattr(self, 'cell{}'.format(i), cell)\n\n kwargs['input_size'] = input_size\n self.cellb0 = Cell(**kwargs)\n for i in range(1, num_layers):\n kwargs['input_size'] = hidden_size * 2\n cell = Cell(**kwargs)\n setattr(self, 'cellb{}'.format(i), cell)\n else:\n self.cell0 = Cell(**kwargs)\n for i in range(1, num_layers):\n kwargs['input_size'] = hidden_size\n cell = Cell(**kwargs)\n setattr(self, 'cell{}'.format(i), cell)\n\n def _initial_states(self, inputSize):\n main_zeros = Variable(torch.zeros(inputSize, self.hidden_size))\n meta_zeros = Variable(torch.zeros(inputSize, self.hyper_hidden_size))\n if self.gpu:\n main_zeros = main_zeros.cuda()\n meta_zeros = meta_zeros.cuda()\n zeros = (main_zeros, meta_zeros)\n if self.mode == 'MetaLSTM':\n states = [((main_zeros, main_zeros), (meta_zeros, meta_zeros)), ] * self.num_layers\n else:\n states = [zeros] * self.num_layers\n return states\n\n def forward(self, input, length=None):\n states = self._initial_states(input.size(0))\n outputs = []\n time_steps = input.size(1)\n\n if length is None:\n length = Variable(torch.LongTensor([time_steps] * input.size(0)))\n if self.gpu:\n length = length.cuda()\n\n if self.bidirectional:\n states_b = self._initial_states(input.size(0))\n outputs_f = []\n outputs_b = []\n hx = None\n\n for num in range(self.num_layers):\n for t in range(time_steps):\n x = input[:, t, :]\n # main_h, main_c, meta_h, meta_c = getattr(self, 'cell{}'.format(num))(x, states[num])\n # mask_main_h = (t < length).float().unsqueeze(1).expand_as(main_h)\n # mask_main_c = (t < length).float().unsqueeze(1).expand_as(main_c)\n # mask_meta_h = (t < length).float().unsqueeze(1).expand_as(meta_h)\n # mask_meta_c = (t < length).float().unsqueeze(1).expand_as(meta_c)\n # main_h = main_h * mask_main_h + states[0][0][0] * (1 - mask_main_h)\n # main_c = main_c * mask_main_c + states[0][0][1] * (1 - mask_main_c)\n # meta_h = meta_h * mask_meta_h + states[0][1][0] * (1 - mask_meta_h)\n # meta_c = meta_c * mask_meta_c + states[0][1][1] * (1 - mask_meta_c)\n # states[num] = (main_h, meta_h)\n if self.mode.startswith('MetaLSTM'):\n (main_h, main_c), (meta_h, meta_c) = getattr(self, 'cell{}'.format(num))(x, states[num])\n mask_main_h = (t < length).float().unsqueeze(1).expand_as(main_h)\n mask_main_c = (t < 
length).float().unsqueeze(1).expand_as(main_c)\n mask_meta_h = (t < length).float().unsqueeze(1).expand_as(meta_h)\n mask_meta_c = (t < length).float().unsqueeze(1).expand_as(meta_c)\n main_h = main_h * mask_main_h + states[0][0][0] * (1 - mask_main_h)\n main_c = main_c * mask_main_c + states[0][0][1] * (1 - mask_main_c)\n meta_h = meta_h * mask_meta_h + states[0][1][0] * (1 - mask_meta_h)\n meta_c = meta_c * mask_meta_c + states[0][1][1] * (1 - mask_meta_c)\n states[num] = ((main_h, main_c),(meta_h, meta_c))\n outputs_f.append(main_h)\n else:\n main_h, meta_h = getattr(self, 'cell{}'.format(num))(x, states[num])\n mask_main_h = (t < length).float().unsqueeze(1).expand_as(main_h)\n mask_meta_h = (t < length).float().unsqueeze(1).expand_as(meta_h)\n main_h = main_h * mask_main_h + states[0][0] * (1 - mask_main_h)\n meta_h = meta_h * mask_meta_h + states[0][1] * (1 - mask_meta_h)\n states[num] = (main_h, meta_h)\n outputs_f.append(main_h)\n for t in range(time_steps)[::-1]:\n x = input[:, t, :]\n if self.mode.startswith('MetaLSTM'):\n (main_h, main_c), (meta_h, meta_c) = getattr(self, 'cell{}'.format(num))(x, states_b[num])\n mask_main_h = (t < length).float().unsqueeze(1).expand_as(main_h)\n mask_main_c = (t < length).float().unsqueeze(1).expand_as(main_c)\n mask_meta_h = (t < length).float().unsqueeze(1).expand_as(meta_h)\n mask_meta_c = (t < length).float().unsqueeze(1).expand_as(meta_c)\n main_h = main_h * mask_main_h + states_b[0][0][0] * (1 - mask_main_h)\n main_c = main_c * mask_main_c + states_b[0][0][1] * (1 - mask_main_c)\n meta_h = meta_h * mask_meta_h + states_b[0][1][0] * (1 - mask_meta_h)\n meta_c = meta_c * mask_meta_c + states_b[0][1][1] * (1 - mask_meta_c)\n states_b[num] = ((main_h, main_c),(meta_h, meta_c))\n outputs_b.append(main_h)\n else:\n main_h, meta_h = getattr(self, 'cell{}'.format(num))(x, states_b[num])\n mask_main_h = (t < length).float().unsqueeze(1).expand_as(main_h)\n mask_meta_h = (t < length).float().unsqueeze(1).expand_as(meta_h)\n main_h = main_h * mask_main_h + states_b[0][0] * (1 - mask_main_h)\n meta_h = meta_h * mask_meta_h + states_b[0][1] * (1 - mask_meta_h)\n states_b[num] = (main_h, meta_h)\n outputs_b.append(main_h)\n # main_h, meta_h = getattr(self, 'cell{}'.format(num))(x, states_b[num])\n # mask_main = (t < length).float().unsqueeze(1).expand_as(main_h)\n # mask_meta = (t < length).float().unsqueeze(1).expand_as(meta_h)\n # main_h = main_h * mask_main + states[0][0] * (1 - mask_main)\n # meta_h = meta_h * mask_meta + states[0][1] * (1 - mask_meta)\n # states_b[num] = (main_h, meta_h)\n # if self.mode.startswith('MetaLSTM'):\n # outputs_b.append(main_h[0])\n # else:\n # outputs_b.append(main_h)\n # hx = getattr(self, 'cellb{}'.format(num))(x, states_b[num])\n # mask = (t < length).float().unsqueeze(1).expand_as(hx)\n # hx = hx * mask + states[0] * (1 - mask)\n # states_b[num] = hx\n # if self.mode.startswith('MetaLSTM'):\n # outputs_b.append(hx[0][0])\n # else:\n # outputs_b.append(hx[0])\n outputs_b.reverse()\n input = torch.cat([torch.stack(outputs_f).transpose(0, 1), torch.stack(outputs_b).transpose(0, 1)], 2)\n outputs_f = []\n outputs_b = []\n # output = input, input[-1]\n else:\n # for t in range(time_steps):\n # x = input[:, t, :]\n # for num in range(self.num_layers):\n # hx = getattr(self, 'cell{}'.format(num))(x, states[num])\n # states[num] = hx\n # if self.mode.startswith('MetaLSTM'):\n # x = hx[0][0]\n # else:\n # x = hx[0]\n # outputs.append(hx[0])\n #\n # if self.mode.startswith('MetaLSTM'):\n # hs, cs = zip(*outputs)\n # h = 
torch.stack(hs).transpose(0, 1)\n # output = h, (outputs[-1][0], outputs[-1][1])\n # else:\n # output = torch.stack(outputs).transpose(0, 1), outputs[-1]\n outputs_f = []\n for num in range(self.num_layers):\n for t in range(time_steps):\n x = input[:, t, :]\n if self.mode.startswith('MetaLSTM'):\n (main_h, main_c), (meta_h, meta_c) = getattr(self, 'cell{}'.format(num))(x, states[num])\n mask_main_h = (t < length).float().unsqueeze(1).expand_as(main_h)\n mask_main_c = (t < length).float().unsqueeze(1).expand_as(main_c)\n mask_meta_h = (t < length).float().unsqueeze(1).expand_as(meta_h)\n mask_meta_c = (t < length).float().unsqueeze(1).expand_as(meta_c)\n main_h = main_h * mask_main_h + states[0][0][0] * (1 - mask_main_h)\n main_c = main_c * mask_main_c + states[0][0][1] * (1 - mask_main_c)\n meta_h = meta_h * mask_meta_h + states[0][1][0] * (1 - mask_meta_h)\n meta_c = meta_c * mask_meta_c + states[0][1][1] * (1 - mask_meta_c)\n states[num] = ((main_h, main_c),(meta_h, meta_c))\n outputs_f.append(main_h)\n else:\n main_h, meta_h = getattr(self, 'cell{}'.format(num))(x, states[num])\n mask_main_h = (t < length).float().unsqueeze(1).expand_as(main_h)\n mask_meta_h = (t < length).float().unsqueeze(1).expand_as(meta_h)\n main_h = main_h * mask_main_h + states[0][0] * (1 - mask_main_h)\n meta_h = meta_h * mask_meta_h + states[0][1] * (1 - mask_meta_h)\n states[num] = (main_h, meta_h)\n outputs_f.append(main_h)\n # main_h, meta_h = getattr(self, 'cell{}'.format(num))(x, states[num])\n # mask_main = (t < length).float().unsqueeze(1).expand_as(main_h)\n # mask_meta = (t < length).float().unsqueeze(1).expand_as(meta_h)\n # main_h = main_h * mask_main + states[0][0] * (1 - mask_main)\n # meta_h = meta_h * mask_meta + states[0][1] * (1 - mask_meta)\n # states[num] = (main_h, meta_h)\n # if self.mode.startswith('MetaLSTM'):\n # outputs_f.append(main_h[0])\n # else:\n # outputs_f.append(main_h)\n\n # hx = getattr(self, 'cell{}'.format(num))(x, states[num])\n # mask = (t < length).float().unsqueeze(1).expand_as(hx)\n # hx = hx * mask + states[0] * (1 - mask)\n # states[num] = hx\n # if self.mode.startswith('MetaLSTM'):\n # outputs_f.append(hx[0][0])\n # else:\n # outputs_f.append(hx[0])\n input = torch.stack(outputs_f[0]).transpose(0, 1)\n outputs_f = []\n\n output = input, input[-1]\n return output\n\nclass MetaRNN(MetaRNNBase):\n def __init__(self, *args, **kwargs):\n super(MetaRNN, self).__init__('MetaRNN', *args, **kwargs)\n\nclass MetaLSTM(MetaRNNBase):\n def __init__(self, *args, **kwargs):\n super(MetaLSTM, self).__init__('MetaLSTM', *args, **kwargs)"
] |
[
[
"torch.LongTensor",
"torch.nn.Parameter",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.sum",
"torch.gather"
],
[
"torch.stack",
"torch.zeros"
]
] |
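Illustrative aside (not part of the row above): the CRF code in this row relies on a max-shifted log-sum-exp over the tag dimension for numerical stability. A simplified two-dimensional version of that helper, checked against torch.logsumexp on a small random tensor:

import torch

def log_sum_exp_2d(vec):                            # vec: (batch, tag_size)
    max_score, _ = torch.max(vec, dim=1, keepdim=True)
    shifted = torch.log(torch.sum(torch.exp(vec - max_score), dim=1, keepdim=True))
    return (max_score + shifted).squeeze(1)

vec = torch.randn(4, 6)
assert torch.allclose(log_sum_exp_2d(vec), torch.logsumexp(vec, dim=1))
print(log_sum_exp_2d(vec))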
hjc3613/simpletransformers
|
[
"bce58639f3fa8f45f445b053b5aaae428c3c5429"
] |
[
"simpletransformers/classification/classification_model.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport logging\nimport math\nimport os\nimport random\nimport warnings\nfrom multiprocessing import cpu_count\n\nimport numpy as np\nfrom scipy.stats import mode, pearsonr\nfrom sklearn.metrics import (\n confusion_matrix,\n label_ranking_average_precision_score,\n matthews_corrcoef,\n mean_squared_error,\n)\nfrom tqdm.auto import tqdm, trange\n\nimport pandas as pd\nimport torch\nfrom simpletransformers.classification.classification_utils import InputExample, convert_examples_to_features\nfrom simpletransformers.classification.transformer_models.albert_model import AlbertForSequenceClassification\nfrom simpletransformers.classification.transformer_models.bert_model import BertForSequenceClassification\nfrom simpletransformers.classification.transformer_models.camembert_model import CamembertForSequenceClassification\nfrom simpletransformers.classification.transformer_models.distilbert_model import DistilBertForSequenceClassification\nfrom simpletransformers.classification.transformer_models.flaubert_model import FlaubertForSequenceClassification\nfrom simpletransformers.classification.transformer_models.roberta_model import RobertaForSequenceClassification\nfrom simpletransformers.classification.transformer_models.xlm_model import XLMForSequenceClassification\nfrom simpletransformers.classification.transformer_models.xlm_roberta_model import XLMRobertaForSequenceClassification\nfrom simpletransformers.classification.transformer_models.xlnet_model import XLNetForSequenceClassification\nfrom simpletransformers.config.global_args import global_args\nfrom simpletransformers.custom_models.models import ElectraForSequenceClassification\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n AlbertConfig,\n AlbertTokenizer,\n BertConfig,\n BertTokenizer,\n CamembertConfig,\n CamembertTokenizer,\n DistilBertConfig,\n DistilBertTokenizer,\n ElectraConfig,\n ElectraTokenizer,\n FlaubertConfig,\n FlaubertTokenizer,\n RobertaConfig,\n RobertaTokenizer,\n XLMConfig,\n XLMRobertaConfig,\n XLMRobertaTokenizer,\n XLMTokenizer,\n XLNetConfig,\n XLNetTokenizer,\n get_linear_schedule_with_warmup,\n)\n\ntry:\n import wandb\n\n wandb_available = True\nexcept ImportError:\n wandb_available = False\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClassificationModel:\n def __init__(\n self, model_type, model_name, num_labels=None, weight=None, args=None, use_cuda=True, cuda_device=-1, **kwargs,\n ):\n\n \"\"\"\n Initializes a ClassificationModel model.\n\n Args:\n model_type: The type of model (bert, xlnet, xlm, roberta, distilbert)\n model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.\n num_labels (optional): The number of labels or classes in the dataset.\n weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.\n args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.\n use_cuda (optional): Use GPU if available. 
Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.\n \"\"\" # noqa: ignore flake8\"\n\n MODEL_CLASSES = {\n \"bert\": (BertConfig, BertForSequenceClassification, BertTokenizer),\n \"xlnet\": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),\n \"xlm\": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),\n \"roberta\": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),\n \"albert\": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),\n \"camembert\": (CamembertConfig, CamembertForSequenceClassification, CamembertTokenizer),\n \"xlmroberta\": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),\n \"flaubert\": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),\n \"electra\": (ElectraConfig, ElectraForSequenceClassification, ElectraTokenizer),\n }\n\n if args and \"manual_seed\" in args:\n random.seed(args[\"manual_seed\"])\n np.random.seed(args[\"manual_seed\"])\n torch.manual_seed(args[\"manual_seed\"])\n if \"n_gpu\" in args and args[\"n_gpu\"] > 0:\n torch.cuda.manual_seed_all(args[\"manual_seed\"])\n\n self.args = {\n \"sliding_window\": False,\n \"tie_value\": 1,\n \"stride\": 0.8,\n \"regression\": False,\n }\n\n self.args.update(global_args)\n\n saved_model_args = self._load_model_args(model_name)\n if saved_model_args:\n self.args.update(saved_model_args)\n\n if args:\n self.args.update(args)\n\n config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]\n if num_labels:\n self.config = config_class.from_pretrained(model_name, num_labels=num_labels, **self.args[\"config\"])\n self.num_labels = num_labels\n else:\n self.config = config_class.from_pretrained(model_name, **self.args[\"config\"])\n self.num_labels = self.config.num_labels\n self.weight = weight\n\n if use_cuda:\n if torch.cuda.is_available():\n if cuda_device == -1:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(f\"cuda:{cuda_device}\")\n else:\n raise ValueError(\n \"'use_cuda' set to True when cuda is unavailable.\"\n \" Make sure CUDA is available or set use_cuda=False.\"\n )\n else:\n self.device = \"cpu\"\n\n if self.weight:\n self.model = model_class.from_pretrained(\n model_name, config=self.config, weight=torch.Tensor(self.weight).to(self.device), **kwargs,\n )\n else:\n self.model = model_class.from_pretrained(model_name, config=self.config, **kwargs)\n\n self.results = {}\n\n if not use_cuda:\n self.args[\"fp16\"] = False\n\n self.tokenizer = tokenizer_class.from_pretrained(\n model_name, do_lower_case=self.args[\"do_lower_case\"], **kwargs\n )\n\n self.args[\"model_name\"] = model_name\n self.args[\"model_type\"] = model_type\n\n if model_type in [\"camembert\", \"xlmroberta\"]:\n warnings.warn(\n f\"use_multiprocessing automatically disabled as {model_type}\"\n \" fails when using multiprocessing for feature conversion.\"\n )\n self.args[\"use_multiprocessing\"] = False\n\n if self.args[\"wandb_project\"] and not wandb_available:\n warnings.warn(\"wandb_project specified but wandb is not available. 
Wandb disabled.\")\n self.args[\"wandb_project\"] = None\n\n def train_model(\n self,\n train_df,\n multi_label=False,\n output_dir=None,\n show_running_loss=True,\n args=None,\n eval_df=None,\n verbose=True,\n **kwargs,\n ):\n \"\"\"\n Trains the model using 'train_df'\n\n Args:\n train_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,\n the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be trained on this Dataframe.\n output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.\n show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.\n args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.\n eval_df (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n None\n \"\"\" # noqa: ignore flake8\"\n\n if args:\n self.args.update(args)\n\n if self.args[\"silent\"]:\n show_running_loss = False\n\n if self.args[\"evaluate_during_training\"] and eval_df is None:\n raise ValueError(\n \"evaluate_during_training is enabled but eval_df is not specified.\"\n \" Pass eval_df to model.train_model() if using evaluate_during_training.\"\n )\n\n if not output_dir:\n output_dir = self.args[\"output_dir\"]\n\n if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args[\"overwrite_output_dir\"]:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty.\"\n \" Use --overwrite_output_dir to overcome.\".format(output_dir)\n )\n\n self._move_model_to_device()\n\n if \"text\" in train_df.columns and \"labels\" in train_df.columns:\n train_examples = [\n InputExample(i, text, None, label)\n for i, (text, label) in enumerate(zip(train_df[\"text\"], train_df[\"labels\"]))\n ]\n elif \"text_a\" in train_df.columns and \"text_b\" in train_df.columns:\n train_examples = [\n InputExample(i, text_a, text_b, label)\n for i, (text_a, text_b, label) in enumerate(\n zip(train_df[\"text_a\"], train_df[\"text_b\"], train_df[\"labels\"])\n )\n ]\n else:\n warnings.warn(\n \"Dataframe headers not specified. 
Falling back to using column 0 as text and column 1 as labels.\"\n )\n train_examples = [\n InputExample(i, text, None, label)\n for i, (text, label) in enumerate(zip(train_df.iloc[:, 0], train_df.iloc[:, 1]))\n ]\n\n train_dataset = self.load_and_cache_examples(train_examples, verbose=verbose)\n\n os.makedirs(output_dir, exist_ok=True)\n\n global_step, tr_loss = self.train(\n train_dataset,\n output_dir,\n multi_label=multi_label,\n show_running_loss=show_running_loss,\n eval_df=eval_df,\n verbose=verbose,\n **kwargs,\n )\n\n # model_to_save = self.model.module if hasattr(self.model, \"module\") else self.model\n # model_to_save.save_pretrained(output_dir)\n # self.tokenizer.save_pretrained(output_dir)\n # torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n self._save_model()\n\n if verbose:\n logger.info(\" Training of {} model complete. Saved to {}.\".format(self.args[\"model_type\"], output_dir))\n\n def train(\n self,\n train_dataset,\n output_dir,\n multi_label=False,\n show_running_loss=True,\n eval_df=None,\n verbose=True,\n **kwargs,\n ):\n \"\"\"\n Trains the model on train_dataset.\n\n Utility function to be used by the train_model() method. Not intended to be used directly.\n \"\"\"\n\n device = self.device\n model = self.model\n args = self.args\n\n tb_writer = SummaryWriter(logdir=args[\"tensorboard_dir\"])\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args[\"train_batch_size\"])\n\n t_total = len(train_dataloader) // args[\"gradient_accumulation_steps\"] * args[\"num_train_epochs\"]\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args[\"weight_decay\"],\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n\n warmup_steps = math.ceil(t_total * args[\"warmup_ratio\"])\n args[\"warmup_steps\"] = warmup_steps if args[\"warmup_steps\"] == 0 else args[\"warmup_steps\"]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args[\"learning_rate\"], eps=args[\"adam_epsilon\"])\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args[\"warmup_steps\"], num_training_steps=t_total\n )\n\n if args[\"fp16\"]:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=args[\"fp16_opt_level\"])\n\n if args[\"n_gpu\"] > 1:\n model = torch.nn.DataParallel(model)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args[\"num_train_epochs\"]), desc=\"Epoch\", disable=args[\"silent\"], mininterval=0)\n epoch_number = 0\n best_eval_metric = None\n early_stopping_counter = 0\n steps_trained_in_current_epoch = 0\n epochs_trained = 0\n\n if args[\"model_name\"] and os.path.exists(args[\"model_name\"]):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args[\"model_name\"].split(\"/\")[-1].split(\"-\")\n if len(checkpoint_suffix) > 2:\n checkpoint_suffix = checkpoint_suffix[1]\n else:\n checkpoint_suffix = checkpoint_suffix[-1]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args[\"gradient_accumulation_steps\"])\n 
steps_trained_in_current_epoch = global_step % (\n len(train_dataloader) // args[\"gradient_accumulation_steps\"]\n )\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the current epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n if args[\"evaluate_during_training\"]:\n training_progress_scores = self._create_training_progress_scores(multi_label, **kwargs)\n\n if args[\"wandb_project\"]:\n wandb.init(project=args[\"wandb_project\"], config={**args}, **args[\"wandb_kwargs\"])\n wandb.watch(self.model)\n\n model.train()\n for _ in train_iterator:\n if epochs_trained > 0:\n epochs_trained -= 1\n continue\n # epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\")\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Current iteration\", disable=args[\"silent\"])):\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n batch = tuple(t.to(device) for t in batch)\n\n inputs = self._get_inputs_dict(batch)\n outputs = model(**inputs)\n # model outputs are always tuple in pytorch-transformers (see doc)\n loss = outputs[0]\n\n if args[\"n_gpu\"] > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n current_loss = loss.item()\n\n if show_running_loss:\n print(\"\\rRunning loss: %f\" % loss, end=\"\")\n\n if args[\"gradient_accumulation_steps\"] > 1:\n loss = loss / args[\"gradient_accumulation_steps\"]\n\n if args[\"fp16\"]:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n # torch.nn.utils.clip_grad_norm_(\n # amp.master_params(optimizer), args[\"max_grad_norm\"]\n # )\n else:\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(\n # model.parameters(), args[\"max_grad_norm\"]\n # )\n\n tr_loss += loss.item()\n if (step + 1) % args[\"gradient_accumulation_steps\"] == 0:\n if args[\"fp16\"]:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args[\"max_grad_norm\"])\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args[\"max_grad_norm\"])\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args[\"logging_steps\"] > 0 and global_step % args[\"logging_steps\"] == 0:\n # Log metrics\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args[\"logging_steps\"], global_step)\n logging_loss = tr_loss\n if args[\"wandb_project\"]:\n wandb.log(\n {\n \"Training loss\": current_loss,\n \"lr\": scheduler.get_lr()[0],\n \"global_step\": global_step,\n }\n )\n\n if args[\"save_steps\"] > 0 and global_step % args[\"save_steps\"] == 0:\n # Save model checkpoint\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n self._save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args[\"evaluate_during_training\"] and (\n args[\"evaluate_during_training_steps\"] > 0\n and global_step % args[\"evaluate_during_training_steps\"] == 0\n ):\n # Only evaluate when single GPU otherwise metrics may not average well\n results, _, _ = self.eval_model(\n eval_df,\n verbose=verbose and args[\"evaluate_during_training_verbose\"],\n silent=True,\n **kwargs,\n )\n for key, value in results.items():\n 
tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n if args[\"save_eval_checkpoints\"]:\n self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(args[\"output_dir\"], \"training_progress_scores.csv\"), index=False,\n )\n\n if args[\"wandb_project\"]:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(\n args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results\n )\n if best_eval_metric and args[\"early_stopping_metric_minimize\"]:\n if (\n results[args[\"early_stopping_metric\"]] - best_eval_metric\n < args[\"early_stopping_delta\"]\n ):\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(\n args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results\n )\n early_stopping_counter = 0\n else:\n if args[\"use_early_stopping\"]:\n if early_stopping_counter < args[\"early_stopping_patience\"]:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args['early_stopping_metric']}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args['early_stopping_patience']}\")\n else:\n if verbose:\n logger.info(\n f\" Patience of {args['early_stopping_patience']} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n else:\n if (\n results[args[\"early_stopping_metric\"]] - best_eval_metric\n > args[\"early_stopping_delta\"]\n ):\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(\n args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results\n )\n early_stopping_counter = 0\n else:\n if args[\"use_early_stopping\"]:\n if early_stopping_counter < args[\"early_stopping_patience\"]:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args['early_stopping_metric']}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args['early_stopping_patience']}\")\n else:\n if verbose:\n logger.info(\n f\" Patience of {args['early_stopping_patience']} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n\n epoch_number += 1\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}-epoch-{}\".format(global_step, epoch_number))\n\n if args[\"save_model_every_epoch\"] or args[\"evaluate_during_training\"]:\n os.makedirs(output_dir_current, exist_ok=True)\n\n if args[\"save_model_every_epoch\"]:\n self._save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args[\"evaluate_during_training\"]:\n results, _, _ = self.eval_model(\n eval_df, verbose=verbose and args[\"evaluate_during_training_verbose\"], silent=True, **kwargs\n )\n\n self._save_model(output_dir_current, optimizer, scheduler, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key 
in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(os.path.join(args[\"output_dir\"], \"training_progress_scores.csv\"), index=False)\n\n if args[\"wandb_project\"]:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results)\n if best_eval_metric and args[\"early_stopping_metric_minimize\"]:\n if results[args[\"early_stopping_metric\"]] - best_eval_metric < args[\"early_stopping_delta\"]:\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args[\"use_early_stopping\"] and args[\"early_stopping_consider_epochs\"]:\n if early_stopping_counter < args[\"early_stopping_patience\"]:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args['early_stopping_metric']}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args['early_stopping_patience']}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args['early_stopping_patience']} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n else:\n if results[args[\"early_stopping_metric\"]] - best_eval_metric > args[\"early_stopping_delta\"]:\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args[\"use_early_stopping\"] and args[\"early_stopping_consider_epochs\"]:\n if early_stopping_counter < args[\"early_stopping_patience\"]:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args['early_stopping_metric']}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args['early_stopping_patience']}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args['early_stopping_patience']} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n\n return global_step, tr_loss / global_step\n\n def eval_model(self, eval_df, multi_label=False, output_dir=None, verbose=True, silent=False, **kwargs):\n \"\"\"\n Evaluates the model on eval_df. Saves results to output_dir.\n\n Args:\n eval_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,\n the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be evaluated on this Dataframe.\n output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.\n verbose: If verbose, results will be printed to the console on completion of evaluation.\n silent: If silent, tqdm progress bars will be hidden.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. 
The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n result: Dictionary containing evaluation results.\n model_outputs: List of model outputs for each row in eval_df\n wrong_preds: List of InputExample objects corresponding to each incorrect prediction by the model\n \"\"\" # noqa: ignore flake8\"\n\n if not output_dir:\n output_dir = self.args[\"output_dir\"]\n\n self._move_model_to_device()\n\n result, model_outputs, wrong_preds = self.evaluate(\n eval_df, output_dir, multi_label=multi_label, verbose=verbose, silent=silent, **kwargs\n )\n self.results.update(result)\n\n if verbose:\n logger.info(self.results)\n\n return result, model_outputs, wrong_preds\n\n def evaluate(self, eval_df, output_dir, multi_label=False, prefix=\"\", verbose=True, silent=False, **kwargs):\n \"\"\"\n Evaluates the model on eval_df.\n\n Utility function to be used by the eval_model() method. Not intended to be used directly.\n \"\"\"\n\n device = self.device\n model = self.model\n args = self.args\n eval_output_dir = output_dir\n\n results = {}\n\n if \"text\" in eval_df.columns and \"labels\" in eval_df.columns:\n eval_examples = [\n InputExample(i, text, None, label)\n for i, (text, label) in enumerate(zip(eval_df[\"text\"], eval_df[\"labels\"]))\n ]\n elif \"text_a\" in eval_df.columns and \"text_b\" in eval_df.columns:\n eval_examples = [\n InputExample(i, text_a, text_b, label)\n for i, (text_a, text_b, label) in enumerate(\n zip(eval_df[\"text_a\"], eval_df[\"text_b\"], eval_df[\"labels\"])\n )\n ]\n else:\n warnings.warn(\n \"Dataframe headers not specified. Falling back to using column 0 as text and column 1 as labels.\"\n )\n eval_examples = [\n InputExample(i, text, None, label)\n for i, (text, label) in enumerate(zip(eval_df.iloc[:, 0], eval_df.iloc[:, 1]))\n ]\n\n if args[\"sliding_window\"]:\n eval_dataset, window_counts = self.load_and_cache_examples(\n eval_examples, evaluate=True, verbose=verbose, silent=silent\n )\n else:\n eval_dataset = self.load_and_cache_examples(eval_examples, evaluate=True, verbose=verbose, silent=silent)\n os.makedirs(eval_output_dir, exist_ok=True)\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args[\"eval_batch_size\"])\n\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n model.eval()\n\n for batch in tqdm(eval_dataloader, disable=args[\"silent\"] or silent):\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch)\n\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n if multi_label:\n logits = logits.sigmoid()\n eval_loss += tmp_eval_loss.mean().item()\n\n nb_eval_steps += 1\n\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n\n if args[\"sliding_window\"]:\n count = 0\n window_ranges = []\n for n_windows in window_counts:\n window_ranges.append([count, count + n_windows])\n count += n_windows\n\n preds = [preds[window_range[0] : window_range[1]] for window_range in window_ranges]\n out_label_ids = [\n out_label_ids[i] for i in range(len(out_label_ids)) if i in [window[0] for window in window_ranges]\n ]\n\n model_outputs = preds\n\n preds = [np.argmax(pred, 
axis=1) for pred in preds]\n final_preds = []\n for pred_row in preds:\n mode_pred, counts = mode(pred_row)\n if len(counts) > 1 and counts[0] == counts[1]:\n final_preds.append(args[\"tie_value\"])\n else:\n final_preds.append(mode_pred[0])\n preds = np.array(final_preds)\n elif not multi_label and args[\"regression\"] is True:\n preds = np.squeeze(preds)\n model_outputs = preds\n else:\n model_outputs = preds\n\n if not multi_label:\n preds = np.argmax(preds, axis=1)\n\n result, wrong = self.compute_metrics(preds, out_label_ids, eval_examples, **kwargs)\n result[\"eval_loss\"] = eval_loss\n results.update(result)\n\n output_eval_file = os.path.join(eval_output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(result.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(result[key])))\n\n return results, model_outputs, wrong\n\n def load_and_cache_examples(\n self, examples, evaluate=False, no_cache=False, multi_label=False, verbose=True, silent=False\n ):\n \"\"\"\n Converts a list of InputExample objects to a TensorDataset containing InputFeatures. Caches the InputFeatures.\n\n Utility function for train() and eval() methods. Not intended to be used directly.\n \"\"\"\n\n process_count = self.args[\"process_count\"]\n\n tokenizer = self.tokenizer\n args = self.args\n\n if not no_cache:\n no_cache = args[\"no_cache\"]\n\n if not multi_label and args[\"regression\"]:\n output_mode = \"regression\"\n else:\n output_mode = \"classification\"\n\n os.makedirs(self.args[\"cache_dir\"], exist_ok=True)\n\n mode = \"dev\" if evaluate else \"train\"\n cached_features_file = os.path.join(\n args[\"cache_dir\"],\n \"cached_{}_{}_{}_{}_{}\".format(\n mode, args[\"model_type\"], args[\"max_seq_length\"], self.num_labels, len(examples),\n ),\n )\n\n if os.path.exists(cached_features_file) and (\n (not args[\"reprocess_input_data\"] and not no_cache)\n or (mode == \"dev\" and args[\"use_cached_eval_features\"] and not no_cache)\n ):\n features = torch.load(cached_features_file)\n if verbose:\n logger.info(f\" Features loaded from cache at {cached_features_file}\")\n else:\n if verbose:\n logger.info(f\" Converting to features started. Cache is not used.\")\n if args[\"sliding_window\"]:\n logger.info(\" Sliding window enabled\")\n features = convert_examples_to_features(\n examples,\n args[\"max_seq_length\"],\n tokenizer,\n output_mode,\n # XLNet has a CLS token at the end\n cls_token_at_end=bool(args[\"model_type\"] in [\"xlnet\"]),\n cls_token=tokenizer.cls_token,\n cls_token_segment_id=2 if args[\"model_type\"] in [\"xlnet\"] else 0,\n sep_token=tokenizer.sep_token,\n # RoBERTa uses an extra separator b/w pairs of sentences,\n # cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805\n sep_token_extra=bool(args[\"model_type\"] in [\"roberta\", \"camembert\", \"xlmroberta\"]),\n # PAD on the left for XLNet\n pad_on_left=bool(args[\"model_type\"] in [\"xlnet\"]),\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args[\"model_type\"] in [\"xlnet\"] else 0,\n process_count=process_count,\n multi_label=multi_label,\n silent=args[\"silent\"] or silent,\n use_multiprocessing=args[\"use_multiprocessing\"],\n sliding_window=args[\"sliding_window\"],\n flatten=not evaluate,\n stride=args[\"stride\"],\n )\n if verbose and args[\"sliding_window\"]:\n logger.info(f\" {len(features)} features created from {len(examples)} samples.\")\n\n if not no_cache:\n torch.save(features, cached_features_file)\n\n if args[\"sliding_window\"] and evaluate:\n window_counts = [len(sample) for sample in features]\n features = [feature for feature_set in features for feature in feature_set]\n\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\n if args[\"sliding_window\"] and evaluate:\n return dataset, window_counts\n else:\n return dataset\n\n def compute_metrics(self, preds, labels, eval_examples, multi_label=False, **kwargs):\n \"\"\"\n Computes the evaluation metrics for the model predictions.\n\n Args:\n preds: Model predictions\n labels: Ground truth labels\n eval_examples: List of examples on which evaluation was performed\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n result: Dictionary containing evaluation results. 
(Matthews correlation coefficient, tp, tn, fp, fn)\n wrong: List of InputExample objects corresponding to each incorrect prediction by the model\n \"\"\" # noqa: ignore flake8\"\n\n assert len(preds) == len(labels)\n\n extra_metrics = {}\n for metric, func in kwargs.items():\n extra_metrics[metric] = func(labels, preds)\n\n mismatched = labels != preds\n\n wrong = [i for (i, v) in zip(eval_examples, mismatched) if v.any()]\n\n if multi_label:\n label_ranking_score = label_ranking_average_precision_score(labels, preds)\n return {**{\"LRAP\": label_ranking_score}, **extra_metrics}, wrong\n elif self.args[\"regression\"]:\n return {**extra_metrics}, wrong\n\n mcc = matthews_corrcoef(labels, preds)\n\n if self.model.num_labels == 2:\n tn, fp, fn, tp = confusion_matrix(labels, preds, labels=[0, 1]).ravel()\n return (\n {**{\"mcc\": mcc, \"tp\": tp, \"tn\": tn, \"fp\": fp, \"fn\": fn}, **extra_metrics},\n wrong,\n )\n else:\n return {**{\"mcc\": mcc}, **extra_metrics}, wrong\n\n def predict(self, to_predict, multi_label=False):\n \"\"\"\n Performs predictions on a list of text.\n\n Args:\n to_predict: A python list of text (str) to be sent to the model for prediction.\n\n Returns:\n preds: A python list of the predictions (0 or 1) for each text.\n model_outputs: A python list of the raw model outputs for each text.\n \"\"\"\n\n device = self.device\n model = self.model\n args = self.args\n\n self._move_model_to_device()\n\n if multi_label:\n eval_examples = [\n InputExample(i, text, None, [0 for i in range(self.num_labels)]) for i, text in enumerate(to_predict)\n ]\n else:\n if isinstance(to_predict[0], list):\n eval_examples = [InputExample(i, text[0], text[1], 0) for i, text in enumerate(to_predict)]\n else:\n eval_examples = [InputExample(i, text, None, 0) for i, text in enumerate(to_predict)]\n if args[\"sliding_window\"]:\n eval_dataset, window_counts = self.load_and_cache_examples(eval_examples, evaluate=True, no_cache=True)\n else:\n eval_dataset = self.load_and_cache_examples(\n eval_examples, evaluate=True, multi_label=multi_label, no_cache=True\n )\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args[\"eval_batch_size\"])\n\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n\n if self.config.output_hidden_states:\n for batch in tqdm(eval_dataloader, disable=args[\"silent\"]):\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch)\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n embedding_outputs, layer_hidden_states = outputs[2][0], outputs[2][1:]\n\n if multi_label:\n logits = logits.sigmoid()\n\n eval_loss += tmp_eval_loss.mean().item()\n\n nb_eval_steps += 1\n\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n all_layer_hidden_states = [state.detach().cpu().numpy() for state in layer_hidden_states]\n all_embedding_outputs = embedding_outputs.detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n all_layer_hidden_states = np.append(\n [state.detach().cpu().numpy() for state in layer_hidden_states], axis=0\n )\n all_embedding_outputs = np.append(embedding_outputs.detach().cpu().numpy(), axis=0)\n else:\n for batch in tqdm(eval_dataloader, disable=args[\"silent\"]):\n model.eval()\n 
batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch)\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n if multi_label:\n logits = logits.sigmoid()\n\n eval_loss += tmp_eval_loss.mean().item()\n\n nb_eval_steps += 1\n\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n\n if args[\"sliding_window\"]:\n count = 0\n window_ranges = []\n for n_windows in window_counts:\n window_ranges.append([count, count + n_windows])\n count += n_windows\n\n preds = [preds[window_range[0] : window_range[1]] for window_range in window_ranges]\n\n model_outputs = preds\n\n preds = [np.argmax(pred, axis=1) for pred in preds]\n final_preds = []\n for pred_row in preds:\n mode_pred, counts = mode(pred_row)\n if len(counts) > 1 and counts[0] == counts[1]:\n final_preds.append(args[\"tie_value\"])\n else:\n final_preds.append(mode_pred[0])\n preds = np.array(final_preds)\n elif not multi_label and args[\"regression\"] is True:\n preds = np.squeeze(preds)\n model_outputs = preds\n else:\n model_outputs = preds\n if multi_label:\n if isinstance(args[\"threshold\"], list):\n threshold_values = args[\"threshold\"]\n preds = [\n [self._threshold(pred, threshold_values[i]) for i, pred in enumerate(example)]\n for example in preds\n ]\n else:\n preds = [[self._threshold(pred, args[\"threshold\"]) for pred in example] for example in preds]\n else:\n preds = np.argmax(preds, axis=1)\n\n if self.config.output_hidden_states:\n return preds, model_outputs, all_embedding_outputs, all_layer_hidden_states\n else:\n return preds, model_outputs\n\n def _threshold(self, x, threshold):\n if x >= threshold:\n return 1\n return 0\n\n def _move_model_to_device(self):\n self.model.to(self.device)\n\n def _get_inputs_dict(self, batch):\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n\n # XLM, DistilBERT and RoBERTa don't use segment_ids\n if self.args[\"model_type\"] != \"distilbert\":\n inputs[\"token_type_ids\"] = batch[2] if self.args[\"model_type\"] in [\"bert\", \"xlnet\", \"albert\"] else None\n\n return inputs\n\n def _get_last_metrics(self, metric_values):\n return {metric: values[-1] for metric, values in metric_values.items()}\n\n def _create_training_progress_scores(self, multi_label, **kwargs):\n extra_metrics = {key: [] for key in kwargs}\n if multi_label:\n training_progress_scores = {\n \"global_step\": [],\n \"LRAP\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n else:\n if self.model.num_labels == 2:\n training_progress_scores = {\n \"global_step\": [],\n \"tp\": [],\n \"tn\": [],\n \"fp\": [],\n \"fn\": [],\n \"mcc\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n elif self.model.num_labels == 1:\n training_progress_scores = {\n \"global_step\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n else:\n training_progress_scores = {\n \"global_step\": [],\n \"mcc\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n\n return training_progress_scores\n\n def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):\n if not output_dir:\n output_dir = self.args[\"output_dir\"]\n os.makedirs(output_dir, 
exist_ok=True)\n\n if model and not self.args[\"no_save\"]:\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n model_to_save.save_pretrained(output_dir)\n self.tokenizer.save_pretrained(output_dir)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n if optimizer and scheduler:\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n self._save_model_args(output_dir)\n\n if results:\n output_eval_file = os.path.join(output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n def _save_model_args(self, output_dir):\n os.makedirs(output_dir, exist_ok=True)\n with open(os.path.join(output_dir, \"model_args.json\"), \"w\") as f:\n json.dump(self.args, f)\n\n def _load_model_args(self, input_dir):\n model_args_file = os.path.join(input_dir, \"model_args.json\")\n if os.path.isfile(model_args_file):\n with open(model_args_file, \"r\") as f:\n model_args = json.load(f)\n return model_args\n"
] |
[
[
"torch.load",
"numpy.squeeze",
"sklearn.metrics.matthews_corrcoef",
"torch.utils.data.DataLoader",
"sklearn.metrics.confusion_matrix",
"pandas.DataFrame",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.device",
"torch.save",
"torch.utils.data.TensorDataset",
"torch.tensor",
"numpy.argmax",
"numpy.array",
"numpy.random.seed",
"torch.Tensor",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"scipy.stats.mode",
"torch.nn.DataParallel",
"sklearn.metrics.label_ranking_average_precision_score"
]
] |
NarendraPatwardhan/gym_venv
|
[
"9c7456cc64d416556f1d1d8eca7a72df0821cf00"
] |
[
"model.py"
] |
[
"import numpy as np\nimport mxnet as mx\nimport matplotlib.pyplot as plt\n\n#-----------------------------------------------------------------------------\n\nclass StateModel(mx.gluon.Block):\n def __init__(self,config):\n super(StateModel, self).__init__()\n self.config = config\n x = mx.nd.array(self.config['S0A'])\n y = mx.nd.array(self.config['S1'])\n self.dataset = mx.gluon.data.dataset.ArrayDataset(x,y)\n self.dataloader = mx.gluon.data.DataLoader(self.dataset,batch_size=self.config['batch_size'])\n with self.name_scope():\n self.state_transition = mx.gluon.nn.Sequential('state_transition_')\n with self.state_transition.name_scope():\n self.state_transition.add(mx.gluon.nn.Dense(10, activation='relu'))\n self.state_transition.add(mx.gluon.nn.Dense(20, activation='relu'))\n self.state_transition.add(mx.gluon.nn.Dense(10, activation='relu'))\n self.state_transition.add(mx.gluon.nn.Dense(self.config['S1'].shape[1]))\n\n def forward(self, x):\n return self.state_transition(x)\n\n def fit(self):\n self.collect_params().initialize(mx.init.Xavier(), ctx=mx.cpu())\n criterion = mx.gluon.loss.HuberLoss()\n optimizer = mx.gluon.Trainer(self.collect_params(), 'adam',{'learning_rate': self.config['learning_rate'],'wd': self.config['weight_decay']})\n errors = []\n for epoch in range(self.config['max_epochs']):\n running_loss = 0.0\n n_total = 0.0\n for data in self.dataloader:\n x, y = data\n with mx.autograd.record():\n output = self.forward(x)\n loss = criterion(output, y)\n loss.backward()\n optimizer.step(self.config['batch_size'])\n running_loss += mx.nd.sum(loss).asscalar()\n n_total += x.shape[0]\n errors.append(running_loss / n_total)\n if epoch%self.config['verbosity']==0:\n print('epoch [{}/{}], loss:{:.4f}'\n .format(epoch + 1, self.config['max_epochs'], running_loss / n_total))\n fig,ax = plt.subplots()\n ax.plot(range(len(errors)),np.array(errors))\n ax.set_title('State Modelling')\n ax.set_ylabel('Huber Loss')\n ax.set_xlabel('Epoch')\n fig.savefig('state_modelling')\n\n#-----------------------------------------------------------------------------\n\nclass RewardModel(mx.gluon.Block):\n def __init__(self,config):\n super(RewardModel, self).__init__()\n self.config = config\n x = mx.nd.array(self.config['S0AS1'])\n y = mx.nd.array(self.config['R'])\n self.dataset = mx.gluon.data.dataset.ArrayDataset(x,y)\n self.dataloader = mx.gluon.data.DataLoader(self.dataset,batch_size=self.config['batch_size'])\n with self.name_scope():\n self.reward_function = mx.gluon.nn.Sequential('reward_function_')\n with self.reward_function.name_scope():\n self.reward_function.add(mx.gluon.nn.Dense(10, activation='relu'))\n self.reward_function.add(mx.gluon.nn.Dense(20, activation='relu'))\n self.reward_function.add(mx.gluon.nn.Dense(10, activation='relu'))\n self.reward_function.add(mx.gluon.nn.Dense(1))\n\n def forward(self, x):\n return self.reward_function(x)\n\n def fit(self):\n self.collect_params().initialize(mx.init.Xavier(), ctx=mx.cpu())\n criterion = mx.gluon.loss.HuberLoss()\n optimizer = mx.gluon.Trainer(self.collect_params(), 'adam',{'learning_rate': self.config['learning_rate'],'wd': self.config['weight_decay']})\n errors = []\n for epoch in range(self.config['max_epochs']):\n running_loss = 0.0\n n_total = 0.0\n for data in self.dataloader:\n x, y = data\n with mx.autograd.record():\n output = self.forward(x)\n loss = criterion(output, y)\n loss.backward()\n optimizer.step(self.config['batch_size'])\n running_loss += mx.nd.sum(loss).asscalar()\n n_total += x.shape[0]\n 
errors.append(running_loss / n_total)\n if epoch%self.config['verbosity']==0:\n print('epoch [{}/{}], loss:{:.4f}'\n .format(epoch + 1, self.config['max_epochs'], running_loss / n_total))\n fig,ax = plt.subplots()\n ax.plot(range(len(errors)),np.array(errors))\n ax.set_title('Reward Modelling')\n ax.set_ylabel('Huber Loss')\n ax.set_xlabel('Epoch')\n fig.savefig('reward_modelling')\n\n#-----------------------------------------------------------------------------\n\nif __name__ == '__main__':\n x = np.random.randn(100,4)\n xt = np.random.randn(100,4)\n y = x[:,:3]\n yt = xt[:,:3]\n random_config = {\n 'max_epochs': 5000,\n 'batch_size': 64,\n 'learning_rate': 1e-3,\n 'weight_decay': 1e-5,\n 'verbosity': 25,\n 'S0A': x,\n 'S1': y\n }\n random_sm = StateModel(random_config)\n random_sm.fit()\n yp = random_sm(mx.nd.array(xt))\n print(abs(yp.asnumpy() - yt).sum())\n\n"
] |
[
[
"numpy.array",
"numpy.random.randn",
"matplotlib.pyplot.subplots"
]
] |
AlexanderDokuchaev/mmsegmentation
|
[
"0c443ee370cce6227661b802184072174c4e3f64"
] |
[
"mmseg/apis/ote/apis/segmentation/openvino_task.py"
] |
[
"# Copyright (C) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport logging\nimport json\nimport os\nimport tempfile\nfrom addict import Dict as ADDict\nfrom typing import Any, Dict, Tuple, List, Optional, Union\n\nimport cv2\nimport numpy as np\n\nfrom ote_sdk.utils.segmentation_utils import (create_hard_prediction_from_soft_prediction,\n create_annotation_from_segmentation_map)\nfrom ote_sdk.entities.datasets import DatasetEntity\nfrom ote_sdk.entities.annotation import AnnotationSceneEntity, AnnotationSceneKind\nfrom ote_sdk.entities.inference_parameters import InferenceParameters, default_progress_callback\nfrom ote_sdk.entities.label import LabelEntity\nfrom ote_sdk.entities.model import (\n ModelStatus,\n ModelEntity,\n ModelFormat,\n OptimizationMethod,\n ModelPrecision,\n)\nfrom ote_sdk.entities.optimization_parameters import OptimizationParameters\nfrom ote_sdk.entities.resultset import ResultSetEntity\nfrom ote_sdk.entities.task_environment import TaskEnvironment\nfrom ote_sdk.usecases.evaluation.metrics_helper import MetricsHelper\nfrom ote_sdk.usecases.exportable_code.inference import BaseOpenVINOInferencer\nfrom ote_sdk.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask\nfrom ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask\nfrom ote_sdk.usecases.tasks.interfaces.optimization_interface import IOptimizationTask, OptimizationType\n\nfrom compression.api import DataLoader\nfrom compression.engines.ie_engine import IEEngine\nfrom compression.graph import load_model, save_model\nfrom compression.graph.model_utils import compress_model_weights, get_nodes_by_type\nfrom compression.pipeline.initializer import create_pipeline\nfrom ote_sdk.serialization.label_mapper import label_schema_to_bytes\n\nfrom .configuration import OTESegmentationConfig\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_output(net, outputs, name):\n try:\n key = net.get_ov_name_for_tensor(name)\n assert key in outputs, f'\"{key}\" is not a valid output identifier'\n except KeyError:\n if name not in outputs:\n raise KeyError(f'Failed to identify output \"{name}\"')\n key = name\n\n return outputs[key]\n\n\nclass OpenVINOSegmentationInferencer(BaseOpenVINOInferencer):\n def __init__(\n self,\n hparams: OTESegmentationConfig,\n labels: List[LabelEntity],\n model_file: Union[str, bytes],\n weight_file: Union[str, bytes, None] = None,\n device: str = \"CPU\",\n num_requests: int = 1,\n ):\n \"\"\"\n Inferencer implementation for OTESegmentation using OpenVINO backend.\n\n :param hparams: Hyper parameters that the model should use.\n :param model_file: Path to model to load, `.xml`, `.bin` or `.onnx` file.\n :param device: Device to run inference on, such as CPU, GPU or MYRIAD. Defaults to \"CPU\".\n :param num_requests: Maximum number of requests that the inferencer can make.\n Good value is the number of available cores. 
Defaults to 1.\n \"\"\"\n\n super().__init__(model_file, weight_file, device, num_requests)\n\n self.labels = labels\n self.input_blob_name = 'input'\n self.n, self.c, self.h, self.w = self.net.input_info[self.input_blob_name].tensor_desc.dims\n self.keep_aspect_ratio_resize = False\n self.pad_value = 0\n self.soft_threshold = float(hparams.postprocessing.soft_threshold)\n self.blur_strength = int(hparams.postprocessing.blur_strength)\n\n @staticmethod\n def resize_image(image: np.ndarray, size: Tuple[int], keep_aspect_ratio: bool = False) -> np.ndarray:\n if not keep_aspect_ratio:\n resized_frame = cv2.resize(image, size)\n else:\n h, w = image.shape[:2]\n scale = min(size[1] / h, size[0] / w)\n resized_frame = cv2.resize(image, None, fx=scale, fy=scale)\n return resized_frame\n\n def pre_process(self, image: np.ndarray) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]:\n resized_image = self.resize_image(image, (self.w, self.h), self.keep_aspect_ratio_resize)\n meta = {'original_shape': image.shape,\n 'resized_shape': resized_image.shape}\n\n h, w = resized_image.shape[:2]\n if h != self.h or w != self.w:\n resized_image = np.pad(resized_image,\n ((0, self.h - h), (0, self.w - w), (0, 0)),\n mode='constant',\n constant_values=self.pad_value)\n\n resized_image = resized_image.transpose((2, 0, 1)) # Change data layout from HWC to CHW\n resized_image = resized_image.reshape((self.n, self.c, self.h, self.w))\n dict_inputs = {self.input_blob_name: resized_image}\n\n return dict_inputs, meta\n\n def post_process(self, prediction: Dict[str, np.ndarray], metadata: Dict[str, Any]) -> AnnotationSceneEntity:\n pred_class_maps = prediction['output']\n assert pred_class_maps.shape[0] == 1\n pred_class_map = pred_class_maps[0]\n\n soft_prediction = np.transpose(pred_class_map, axes=(1, 2, 0))\n\n hard_prediction = create_hard_prediction_from_soft_prediction(\n soft_prediction=soft_prediction,\n soft_threshold=self.soft_threshold,\n blur_strength=self.blur_strength\n )\n\n label_dictionary = {i + 1: self.labels[i] for i in range(len(self.labels))}\n annotations = create_annotation_from_segmentation_map(\n hard_prediction=hard_prediction,\n soft_prediction=soft_prediction,\n label_map=label_dictionary\n )\n\n return AnnotationSceneEntity(\n kind=AnnotationSceneKind.PREDICTION,\n annotations=annotations\n )\n\n def forward(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n return self.model.infer(inputs)\n\n\nclass OTEOpenVinoDataLoader(DataLoader):\n def __init__(self, dataset: DatasetEntity, inferencer: BaseOpenVINOInferencer):\n self.dataset = dataset\n self.inferencer = inferencer\n\n def __getitem__(self, index):\n image = self.dataset[index].numpy\n annotation = self.dataset[index].annotation_scene\n inputs, metadata = self.inferencer.pre_process(image)\n\n return (index, annotation), inputs, metadata\n\n def __len__(self):\n return len(self.dataset)\n\n\nclass OpenVINOSegmentationTask(IInferenceTask, IEvaluationTask, IOptimizationTask):\n def __init__(self,\n task_environment: TaskEnvironment):\n self.task_environment = task_environment\n self.model = self.task_environment.model\n self.inferencer = self.load_inferencer()\n\n template_file_path = task_environment.model_template.model_template_path\n self._base_dir = os.path.abspath(os.path.dirname(template_file_path))\n\n @property\n def hparams(self):\n return self.task_environment.get_hyper_parameters(OTESegmentationConfig)\n\n def load_inferencer(self) -> OpenVINOSegmentationInferencer:\n labels = 
self.task_environment.label_schema.get_labels(include_empty=False)\n return OpenVINOSegmentationInferencer(self.hparams,\n labels,\n self.model.get_data(\"openvino.xml\"),\n self.model.get_data(\"openvino.bin\"))\n\n def infer(self,\n dataset: DatasetEntity,\n inference_parameters: Optional[InferenceParameters] = None) -> DatasetEntity:\n update_progress_callback = default_progress_callback\n if inference_parameters is not None:\n update_progress_callback = inference_parameters.update_progress\n dataset_size = len(dataset)\n for i, dataset_item in enumerate(dataset, 1):\n predicted_scene = self.inferencer.predict(dataset_item.numpy)\n dataset_item.append_annotations(predicted_scene.annotations)\n update_progress_callback(int(i / dataset_size * 100))\n return dataset\n\n def evaluate(self,\n output_result_set: ResultSetEntity,\n evaluation_metric: Optional[str] = None):\n logger.info('Computing mDice')\n metrics = MetricsHelper.compute_dice_averaged_over_pixels(\n output_result_set\n )\n logger.info(f\"mDice after evaluation: {metrics.overall_dice.value}\")\n\n output_result_set.performance = metrics.get_performance()\n\n def optimize(self,\n optimization_type: OptimizationType,\n dataset: DatasetEntity,\n output_model: ModelEntity,\n optimization_parameters: Optional[OptimizationParameters]):\n\n if optimization_type is not OptimizationType.POT:\n raise ValueError(\"POT is the only supported optimization type for OpenVino models\")\n\n data_loader = OTEOpenVinoDataLoader(dataset, self.inferencer)\n\n with tempfile.TemporaryDirectory() as tempdir:\n xml_path = os.path.join(tempdir, \"model.xml\")\n bin_path = os.path.join(tempdir, \"model.bin\")\n with open(xml_path, \"wb\") as f:\n f.write(self.model.get_data(\"openvino.xml\"))\n with open(bin_path, \"wb\") as f:\n f.write(self.model.get_data(\"openvino.bin\"))\n\n model_config = ADDict({\n 'model_name': 'openvino_model',\n 'model': xml_path,\n 'weights': bin_path\n })\n\n model = load_model(model_config)\n\n if get_nodes_by_type(model, ['FakeQuantize']):\n logger.warning(\"Model is already optimized by POT\")\n output_model.model_status = ModelStatus.FAILED\n return\n\n engine_config = ADDict({\n 'device': 'CPU'\n })\n\n optimization_config_path = os.path.join(self._base_dir, 'pot_optimization_config.json')\n if os.path.exists(optimization_config_path):\n with open(optimization_config_path) as f_src:\n algorithms = ADDict(json.load(f_src))['algorithms']\n else:\n algorithms = [\n ADDict({\n 'name': 'DefaultQuantization',\n 'params': {\n 'target_device': 'ANY'\n }\n })\n ]\n for algo in algorithms:\n algo.params.stat_subset_size = self.hparams.pot_parameters.stat_subset_size\n algo.params.shuffle_data = True\n if 'Quantization' in algo['name']:\n algo.params.preset = self.hparams.pot_parameters.preset.name.lower()\n\n engine = IEEngine(config=engine_config, data_loader=data_loader, metric=None)\n\n pipeline = create_pipeline(algorithms, engine)\n\n compressed_model = pipeline.run(model)\n\n compress_model_weights(compressed_model)\n\n with tempfile.TemporaryDirectory() as tempdir:\n save_model(compressed_model, tempdir, model_name=\"model\")\n with open(os.path.join(tempdir, \"model.xml\"), \"rb\") as f:\n output_model.set_data(\"openvino.xml\", f.read())\n with open(os.path.join(tempdir, \"model.bin\"), \"rb\") as f:\n output_model.set_data(\"openvino.bin\", f.read())\n \n output_model.set_data(\"label_schema.json\", label_schema_to_bytes(self.task_environment.label_schema))\n\n # set model attributes for quantized model\n 
output_model.model_status = ModelStatus.SUCCESS\n output_model.model_format = ModelFormat.OPENVINO\n output_model.optimization_type = OptimizationType.POT\n output_model.optimization_methods = [OptimizationMethod.QUANTIZATION]\n output_model.precision = [ModelPrecision.INT8]\n\n self.model = output_model\n self.inferencer = self.load_inferencer()\n"
] |
[
[
"numpy.pad",
"numpy.transpose"
]
] |
USGS-WiM/Gage-Cam-Sensor-AI
|
[
"6e38517cbf90a82b6f679b8eee289cfdc12dd1b1"
] |
[
"sensor_AI/run_lite.py"
] |
[
"from tensorflow import keras\nimport numpy as np\nimport pidash\nimport os\n#import gc\n\nPATH = os.path.dirname(__file__)\n\n# This is a prototype implementation of the sensor AI deployment. \n#This is not final code and should not be reguarded as a best practices.\n\n\n\n# get_exposed() is a simple pixel count routine. It established the pixel count on the x and the y axis using simple n^2 logic loops\n\ndef get_exposed(y_hat):\n\timg = y_hat.ravel()\n\timg = img[2::3]\n\timg = np.resize(img, (256, 256))\n\th = []\n\tfor i, obj in enumerate(img):\n\t\tfor j in obj:\n\t\t\tif j:\n\t\t\t\th.append(i)\n\t\t\t\tbreak\n\tw=[]\n\tfor i, obj in enumerate(img.T):\n\t\tfor j in obj:\t\t\t\t\n\t\t\tif j:\n\t\t\t\tw.append(i)\n\t\t\t\tbreak\n\th = len(h)\n\tw = len(w)\n\t\n\treturn h, w\n\n\ndef execute(): #on_dek, meta, id):\n\t#gc.collect()\n\t#Load keras pretrained model from .h5 file\n\tmodel = keras.models.load_model(PATH + \"/model/UnetM-relu_output.h5\") \n\t# summarize model \n\tmodel.summary()\n\tpidash.dashboard()\n\t#get px height and px width from image\n\tpxH, pxW = run_on_dek(model)\n\toutputtxt = 'Height: '+ str(pxH) + ' px '+ ' H(p): ' + str((3.36 - (pxH/pxW) * .333)) + ' width: '+ str(pxW) + ' px'\n\ttext_file = open(\"complete.txt\", \"w\") \n\tn = text_file.write(outputtxt) \n\ttext_file.close()\n\tprint (outputtxt)\n\n\ndef run_on_dek(model):\n\t# Load img\n\timg = np.load(PATH + \"/on_dek/rdy.npy\")\n\tprint(\"Image loaded...\" + '\\n\\n' + \"Running model...\")\n\tpidash.dashboard()\n\tresult = model.predict(img)\n\tprint(\"\\n\\nModel ran successfully...\")\n\tresult = result >=.995\n\t#print (result)\n\tpx, w = get_exposed(result)\n\treturn px, w\n\n#execute()\n"
] |
[
[
"tensorflow.keras.models.load_model",
"numpy.resize",
"numpy.load"
]
] |
asplos2020/DRTest
|
[
"85c3c9b2a46cafa7184130f2596c5f9eb3b20bff"
] |
[
"attack_metrics/rgb.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.platform import flags\n\nsys.path.append(\"../\")\n\nfrom nmutant_model.model_operation import model_load\nfrom nmutant_util.utils_file import get_data_file, get_data_file_with_Gaussian\nfrom nmutant_util.utils_imgproc import preprocess_image_1\n\nFLAGS = flags.FLAGS\n\nua = [\"fgsm\", \"blackbox\"]\nta = [\"jsma\", 'cw']\n\ndef rgb(datasets, model, samples_path, radius, epoch=49):\n \"\"\"\n :param datasets\n :param model\n :param samples_path\n :return:\n \"\"\"\n # Object used to keep track of (and return) key accuracies\n tf.reset_default_graph()\n sess, preds, x, y, model, feed_dict = model_load(datasets, model, epoch=epoch)\n\n [image_list, image_files, real_labels, predicted_labels] = get_data_file_with_Gaussian(datasets, samples_path, radius)\n\n samples = np.asarray([preprocess_image_1(image.astype('float64')) for image in image_list])\n\n RGB_UA=0\n n_batches = int(np.ceil(samples.shape[0] / 256))\n for i in range(n_batches):\n start = i * 256\n end = np.minimum(len(samples), (i + 1) * 256)\n feed = {x: samples[start:end]}\n if feed_dict is not None:\n feed.update(feed_dict)\n probabilities = sess.run(preds, feed)\n #print(probabilities[1])\n for j in range(len(probabilities)): \n if np.argmax(probabilities[j])!=real_labels[start+j]:\n RGB_UA+=1\n\n result = RGB_UA / len(image_list)\n print('Robustness to Gaussian Blur %.4f' %(result))\n\n # Close TF session\n sess.close()\n\n return result\n\n\ndef main(argv=None):\n rgb(datasets = FLAGS.datasets,\n model=FLAGS.model,\n samples_path=FLAGS.samples,\n radius=FLAGS.radius)\n\nif __name__ == '__main__':\n flags.DEFINE_string('datasets', 'mnist', 'The target datasets.')\n flags.DEFINE_string('model', 'lenet1', 'The name of model')\n flags.DEFINE_string('samples', '../adv_result/mnist/lenet1_fgsm_test', 'The path to load samples.')\n flags.DEFINE_string('radius', '1', 'The Gaussion radius.')\n tf.app.run()\n"
] |
[
[
"numpy.ceil",
"tensorflow.reset_default_graph",
"numpy.argmax",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.app.run"
]
] |
Sinha-Raunak/gan-toolkit
|
[
"6d2d86833bb00833b2d9cd11a1a83476f44b65fd"
] |
[
"agant/models/pytorch/loss/NLL.py"
] |
[
"import torch\nimport numpy as np\nfrom torch.autograd import Variable\n\nclass loss_block:\n def __init__(self):\n super(loss_block, self).__init__()\n self.criterion = torch.nn.NLLLoss(size_average=False)\n cuda = True if torch.cuda.is_available() else False\n if cuda:\n self.criterion.cuda()\n def loss(self,input_vals,lables):\n return self.criterion(input_vals,lables)"
] |
[
[
"torch.nn.NLLLoss",
"torch.cuda.is_available"
]
] |
jake-is-ESD-protected/scipy
|
[
"d7283ff75c218c300f372b5fdd960b987c1709a1",
"d7283ff75c218c300f372b5fdd960b987c1709a1",
"d7283ff75c218c300f372b5fdd960b987c1709a1",
"d7283ff75c218c300f372b5fdd960b987c1709a1",
"d7283ff75c218c300f372b5fdd960b987c1709a1",
"d7283ff75c218c300f372b5fdd960b987c1709a1"
] |
[
"doc/source/tutorial/examples/optimize_global_1.py",
"scipy/special/utils/makenpz.py",
"scipy/linalg/interpolative.py",
"scipy/stats/_crosstab.py",
"scipy/linalg/_decomp.py",
"scipy/interpolate/_bsplines.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import optimize\n\n\ndef eggholder(x):\n return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47))))\n -x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))))\n\nbounds = [(-512, 512), (-512, 512)]\n\nx = np.arange(-512, 513)\ny = np.arange(-512, 513)\nxgrid, ygrid = np.meshgrid(x, y)\nxy = np.stack([xgrid, ygrid])\n\nresults = dict()\nresults['shgo'] = optimize.shgo(eggholder, bounds)\nresults['DA'] = optimize.dual_annealing(eggholder, bounds)\nresults['DE'] = optimize.differential_evolution(eggholder, bounds)\nresults['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=256, iters=5,\n sampling_method='sobol')\n\nfig = plt.figure(figsize=(4.5, 4.5))\nax = fig.add_subplot(111)\nim = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',\n cmap='gray')\nax.set_xlabel('x')\nax.set_ylabel('y')\n\ndef plot_point(res, marker='o', color=None):\n ax.plot(512+res.x[0], 512+res.x[1], marker=marker, color=color, ms=10)\n\nplot_point(results['DE'], color='c') # differential_evolution - cyan\nplot_point(results['DA'], color='w') # dual_annealing. - white\n\n# SHGO produces multiple minima, plot them all (with a smaller marker size)\nplot_point(results['shgo'], color='r', marker='+')\nplot_point(results['shgo_sobol'], color='r', marker='x')\nfor i in range(results['shgo_sobol'].xl.shape[0]):\n ax.plot(512 + results['shgo_sobol'].xl[i, 0],\n 512 + results['shgo_sobol'].xl[i, 1],\n 'ro', ms=2)\n\nax.set_xlim([-4, 514*2])\nax.set_ylim([-4, 514*2])\n\nfig.tight_layout()\nplt.show()\n\n\n",
"\"\"\"\npython makenpz.py DIRECTORY\n\nBuild a npz containing all data files in the directory.\n\n\"\"\"\n\nimport os\nimport numpy as np\nimport argparse\nfrom stat import ST_MTIME\n\n\ndef newer(source, target):\n \"\"\"\n Return true if 'source' exists and is more recently modified than\n 'target', or if 'source' exists and 'target' doesn't. Return false if\n both exist and 'target' is the same age or younger than 'source'.\n \"\"\"\n if not os.path.exists(source):\n raise ValueError(\"file '%s' does not exist\" % os.path.abspath(source))\n if not os.path.exists(target):\n return 1\n\n mtime1 = os.stat(source)[ST_MTIME]\n mtime2 = os.stat(target)[ST_MTIME]\n\n return mtime1 > mtime2\n\n\ndef main():\n p = argparse.ArgumentParser(usage=(__doc__ or '').strip())\n p.add_argument('--use-timestamp', action='store_true', default=False,\n help=\"don't rewrite npz file if it is newer than sources\")\n p.add_argument('dirname') # for Meson: 'boost' or 'gsl'\n p.add_argument(\"-o\", \"--outdir\", type=str,\n help=\"Relative path to the output directory\")\n args = p.parse_args()\n\n if not args.outdir:\n # We're dealing with a distutils build here, write in-place:\n inp = os.path.normpath(args.dirname)\n outp = inp + \".npz\"\n else:\n inp = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n '..', 'tests', 'data', args.dirname)\n outdir_abs = os.path.join(os.getcwd(), args.outdir)\n outp = os.path.join(outdir_abs, args.dirname + \".npz\")\n\n # Skip rebuilding if no sources\n if os.path.isfile(outp) and not os.path.isdir(inp):\n return\n\n # Find source files\n files = []\n for dirpath, dirnames, filenames in os.walk(inp):\n for fn in filenames:\n if fn.endswith('.txt'):\n key = dirpath[len(inp)+1:] + '-' + fn[:-4]\n key = key.strip('-')\n files.append((key, os.path.join(dirpath, fn)))\n\n # Check if changes required\n if args.use_timestamp and os.path.isfile(outp):\n try:\n old_data = np.load(outp)\n try:\n changed = set(old_data.keys()) != set(key for key, _ in files)\n finally:\n old_data.close()\n except OSError:\n # corrupted file\n changed = True\n\n changed = changed or any(newer(fn, outp) for key, fn in files)\n changed = changed or newer(__file__, outp)\n if not changed:\n return\n\n data = {}\n for key, fn in files:\n data[key] = np.loadtxt(fn)\n\n np.savez_compressed(outp, **data)\n\n\nif __name__ == \"__main__\":\n main()\n",
"#******************************************************************************\n# Copyright (C) 2013 Kenneth L. Ho\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer. Redistributions in binary\n# form must reproduce the above copyright notice, this list of conditions and\n# the following disclaimer in the documentation and/or other materials\n# provided with the distribution.\n#\n# None of the names of the copyright holders may be used to endorse or\n# promote products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#******************************************************************************\n\n# Python module for interfacing with `id_dist`.\n\nr\"\"\"\n======================================================================\nInterpolative matrix decomposition (:mod:`scipy.linalg.interpolative`)\n======================================================================\n\n.. moduleauthor:: Kenneth L. Ho <[email protected]>\n\n.. versionadded:: 0.13\n\n.. currentmodule:: scipy.linalg.interpolative\n\nAn interpolative decomposition (ID) of a matrix :math:`A \\in\n\\mathbb{C}^{m \\times n}` of rank :math:`k \\leq \\min \\{ m, n \\}` is a\nfactorization\n\n.. math::\n A \\Pi =\n \\begin{bmatrix}\n A \\Pi_{1} & A \\Pi_{2}\n \\end{bmatrix} =\n A \\Pi_{1}\n \\begin{bmatrix}\n I & T\n \\end{bmatrix},\n\nwhere :math:`\\Pi = [\\Pi_{1}, \\Pi_{2}]` is a permutation matrix with\n:math:`\\Pi_{1} \\in \\{ 0, 1 \\}^{n \\times k}`, i.e., :math:`A \\Pi_{2} =\nA \\Pi_{1} T`. This can equivalently be written as :math:`A = BP`,\nwhere :math:`B = A \\Pi_{1}` and :math:`P = [I, T] \\Pi^{\\mathsf{T}}`\nare the *skeleton* and *interpolation matrices*, respectively.\n\nIf :math:`A` does not have exact rank :math:`k`, then there exists an\napproximation in the form of an ID such that :math:`A = BP + E`, where\n:math:`\\| E \\| \\sim \\sigma_{k + 1}` is on the order of the :math:`(k +\n1)`-th largest singular value of :math:`A`. Note that :math:`\\sigma_{k\n+ 1}` is the best possible error for a rank-:math:`k` approximation\nand, in fact, is achieved by the singular value decomposition (SVD)\n:math:`A \\approx U S V^{*}`, where :math:`U \\in \\mathbb{C}^{m \\times\nk}` and :math:`V \\in \\mathbb{C}^{n \\times k}` have orthonormal columns\nand :math:`S = \\mathop{\\mathrm{diag}} (\\sigma_{i}) \\in \\mathbb{C}^{k\n\\times k}` is diagonal with nonnegative entries. 
The principal\nadvantages of using an ID over an SVD are that:\n\n- it is cheaper to construct;\n- it preserves the structure of :math:`A`; and\n- it is more efficient to compute with in light of the identity submatrix of :math:`P`.\n\nRoutines\n========\n\nMain functionality:\n\n.. autosummary::\n :toctree: generated/\n\n interp_decomp\n reconstruct_matrix_from_id\n reconstruct_interp_matrix\n reconstruct_skel_matrix\n id_to_svd\n svd\n estimate_spectral_norm\n estimate_spectral_norm_diff\n estimate_rank\n\nSupport functions:\n\n.. autosummary::\n :toctree: generated/\n\n seed\n rand\n\n\nReferences\n==========\n\nThis module uses the ID software package [1]_ by Martinsson, Rokhlin,\nShkolnisky, and Tygert, which is a Fortran library for computing IDs\nusing various algorithms, including the rank-revealing QR approach of\n[2]_ and the more recent randomized methods described in [3]_, [4]_,\nand [5]_. This module exposes its functionality in a way convenient\nfor Python users. Note that this module does not add any functionality\nbeyond that of organizing a simpler and more consistent interface.\n\nWe advise the user to consult also the `documentation for the ID package\n<http://tygert.com/id_doc.4.pdf>`_.\n\n.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. \"ID: a\n software package for low-rank approximation of matrices via interpolative\n decompositions, version 0.2.\" http://tygert.com/id_doc.4.pdf.\n\n.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. \"On the\n compression of low rank matrices.\" *SIAM J. Sci. Comput.* 26 (4): 1389--1404,\n 2005. :doi:`10.1137/030602678`.\n\n.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.\n Tygert. \"Randomized algorithms for the low-rank approximation of matrices.\"\n *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.\n :doi:`10.1073/pnas.0709640104`.\n\n.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. \"A randomized\n algorithm for the decomposition of matrices.\" *Appl. Comput. Harmon. Anal.* 30\n (1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`.\n\n.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. \"A fast\n randomized algorithm for the approximation of matrices.\" *Appl. Comput.\n Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.\n\n\nTutorial\n========\n\nInitializing\n------------\n\nThe first step is to import :mod:`scipy.linalg.interpolative` by issuing the\ncommand:\n\n>>> import scipy.linalg.interpolative as sli\n\nNow let's build a matrix. For this, we consider a Hilbert matrix, which is well\nknow to have low rank:\n\n>>> from scipy.linalg import hilbert\n>>> n = 1000\n>>> A = hilbert(n)\n\nWe can also do this explicitly via:\n\n>>> import numpy as np\n>>> n = 1000\n>>> A = np.empty((n, n), order='F')\n>>> for j in range(n):\n>>> for i in range(m):\n>>> A[i,j] = 1. / (i + j + 1)\n\nNote the use of the flag ``order='F'`` in :func:`numpy.empty`. This\ninstantiates the matrix in Fortran-contiguous order and is important for\navoiding data copying when passing to the backend.\n\nWe then define multiplication routines for the matrix by regarding it as a\n:class:`scipy.sparse.linalg.LinearOperator`:\n\n>>> from scipy.sparse.linalg import aslinearoperator\n>>> L = aslinearoperator(A)\n\nThis automatically sets up methods describing the action of the matrix and its\nadjoint on a vector.\n\nComputing an ID\n---------------\n\nWe have several choices of algorithm to compute an ID. These fall largely\naccording to two dichotomies:\n\n1. 
how the matrix is represented, i.e., via its entries or via its action on a\n   vector; and\n2. whether to approximate it to a fixed relative precision or to a fixed rank.\n\nWe step through each choice in turn below.\n\nIn all cases, the ID is represented by three parameters:\n\n1. a rank ``k``;\n2. an index array ``idx``; and\n3. interpolation coefficients ``proj``.\n\nThe ID is specified by the relation\n``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.\n\nFrom matrix entries\n...................\n\nWe first consider a matrix given in terms of its entries.\n\nTo compute an ID to a fixed precision, type:\n\n>>> k, idx, proj = sli.interp_decomp(A, eps)\n\nwhere ``eps < 1`` is the desired precision.\n\nTo compute an ID to a fixed rank, use:\n\n>>> idx, proj = sli.interp_decomp(A, k)\n\nwhere ``k >= 1`` is the desired rank.\n\nBoth algorithms use random sampling and are usually faster than the\ncorresponding older, deterministic algorithms, which can be accessed via the\ncommands:\n\n>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)\n\nand:\n\n>>> idx, proj = sli.interp_decomp(A, k, rand=False)\n\nrespectively.\n\nFrom matrix action\n..................\n\nNow consider a matrix given in terms of its action on a vector as a\n:class:`scipy.sparse.linalg.LinearOperator`.\n\nTo compute an ID to a fixed precision, type:\n\n>>> k, idx, proj = sli.interp_decomp(L, eps)\n\nTo compute an ID to a fixed rank, use:\n\n>>> idx, proj = sli.interp_decomp(L, k)\n\nThese algorithms are randomized.\n\nReconstructing an ID\n--------------------\n\nThe ID routines above do not output the skeleton and interpolation matrices\nexplicitly but instead return the relevant information in a more compact (and\nsometimes more useful) form. To build these matrices, write:\n\n>>> B = sli.reconstruct_skel_matrix(A, k, idx)\n\nfor the skeleton matrix and:\n\n>>> P = sli.reconstruct_interp_matrix(idx, proj)\n\nfor the interpolation matrix. The ID approximation can then be computed as:\n\n>>> C = np.dot(B, P)\n\nThis can also be constructed directly using:\n\n>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)\n\nwithout having to first compute ``P``.\n\nAlternatively, this can be done explicitly as well using:\n\n>>> B = A[:,idx[:k]]\n>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]\n>>> C = np.dot(B, P)\n\nComputing an SVD\n----------------\n\nAn ID can be converted to an SVD via the command:\n\n>>> U, S, V = sli.id_to_svd(B, idx, proj)\n\nThe SVD approximation is then:\n\n>>> C = np.dot(U, np.dot(np.diag(S), V.conj().T))\n\nThe SVD can also be computed \"fresh\" by combining both the ID and conversion\nsteps into one command. 
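Putting the steps above together, here is a compact end-to-end sketch (a minimal
illustration; the matrix size and the tolerance ``1e-8`` are arbitrary choices, not
recommendations):

>>> import numpy as np
>>> import scipy.linalg.interpolative as sli
>>> from scipy.linalg import hilbert
>>> A = hilbert(100)
>>> k, idx, proj = sli.interp_decomp(A, 1e-8)          # fixed-precision ID
>>> B = sli.reconstruct_skel_matrix(A, k, idx)         # skeleton matrix
>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)   # ID approximation of A
>>> U, S, V = sli.id_to_svd(B, idx, proj)              # convert the ID to an SVD
>>> err = np.linalg.norm(A - np.dot(U, np.dot(np.diag(S), V.conj().T)))

Here ``err`` should be on the order of the requested precision times the norm of
``A``.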
Following the various ID algorithms above, there are\ncorrespondingly various SVD algorithms that one can employ.\n\nFrom matrix entries\n...................\n\nWe consider first SVD algorithms for a matrix given in terms of its entries.\n\nTo compute an SVD to a fixed precision, type:\n\n>>> U, S, V = sli.svd(A, eps)\n\nTo compute an SVD to a fixed rank, use:\n\n>>> U, S, V = sli.svd(A, k)\n\nBoth algorithms use random sampling; for the deterministic versions, issue the\nkeyword ``rand=False`` as above.\n\nFrom matrix action\n..................\n\nNow consider a matrix given in terms of its action on a vector.\n\nTo compute an SVD to a fixed precision, type:\n\n>>> U, S, V = sli.svd(L, eps)\n\nTo compute an SVD to a fixed rank, use:\n\n>>> U, S, V = sli.svd(L, k)\n\nUtility routines\n----------------\n\nSeveral utility routines are also available.\n\nTo estimate the spectral norm of a matrix, use:\n\n>>> snorm = sli.estimate_spectral_norm(A)\n\nThis algorithm is based on the randomized power method and thus requires only\nmatrix-vector products. The number of iterations to take can be set using the\nkeyword ``its`` (default: ``its=20``). The matrix is interpreted as a\n:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it\nas a :class:`numpy.ndarray`, in which case it is trivially converted using\n:func:`scipy.sparse.linalg.aslinearoperator`.\n\nThe same algorithm can also estimate the spectral norm of the difference of two\nmatrices ``A1`` and ``A2`` as follows:\n\n>>> diff = sli.estimate_spectral_norm_diff(A1, A2)\n\nThis is often useful for checking the accuracy of a matrix approximation.\n\nSome routines in :mod:`scipy.linalg.interpolative` require estimating the rank\nof a matrix as well. This can be done with either:\n\n>>> k = sli.estimate_rank(A, eps)\n\nor:\n\n>>> k = sli.estimate_rank(L, eps)\n\ndepending on the representation. The parameter ``eps`` controls the definition\nof the numerical rank.\n\nFinally, the random number generation required for all randomized routines can\nbe controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed\nvalues to their original values, use:\n\n>>> sli.seed('default')\n\nTo specify the seed values, use:\n\n>>> sli.seed(s)\n\nwhere ``s`` must be an integer or array of 55 floats. 
If an integer, the array\nof floats is obtained by using ``numpy.random.rand`` with the given integer\nseed.\n\nTo simply generate some random numbers, type:\n\n>>> sli.rand(n)\n\nwhere ``n`` is the number of random numbers to generate.\n\nRemarks\n-------\n\nThe above functions all automatically detect the appropriate interface and work\nwith both real and complex data types, passing input arguments to the proper\nbackend routine.\n\n\"\"\"\n\nimport scipy.linalg._interpolative_backend as _backend\nimport numpy as np\nimport sys\n\n__all__ = [\n 'estimate_rank',\n 'estimate_spectral_norm',\n 'estimate_spectral_norm_diff',\n 'id_to_svd',\n 'interp_decomp',\n 'rand',\n 'reconstruct_interp_matrix',\n 'reconstruct_matrix_from_id',\n 'reconstruct_skel_matrix',\n 'seed',\n 'svd',\n]\n\n_DTYPE_ERROR = ValueError(\"invalid input dtype (input must be float64 or complex128)\")\n_TYPE_ERROR = TypeError(\"invalid input type (must be array or LinearOperator)\")\n_32BIT_ERROR = ValueError(\"interpolative decomposition on 32-bit systems \"\n \"with complex128 is buggy\")\n_IS_32BIT = (sys.maxsize < 2**32)\n\n\ndef _is_real(A):\n try:\n if A.dtype == np.complex128:\n return False\n elif A.dtype == np.float64:\n return True\n else:\n raise _DTYPE_ERROR\n except AttributeError as e:\n raise _TYPE_ERROR from e\n\n\ndef seed(seed=None):\n \"\"\"\n Seed the internal random number generator used in this ID package.\n\n The generator is a lagged Fibonacci method with 55-element internal state.\n\n Parameters\n ----------\n seed : int, sequence, 'default', optional\n If 'default', the random seed is reset to a default value.\n\n If `seed` is a sequence containing 55 floating-point numbers\n in range [0,1], these are used to set the internal state of\n the generator.\n\n If the value is an integer, the internal state is obtained\n from `numpy.random.RandomState` (MT19937) with the integer\n used as the initial seed.\n\n If `seed` is omitted (None), ``numpy.random.rand`` is used to\n initialize the generator.\n\n \"\"\"\n # For details, see :func:`_backend.id_srand`, :func:`_backend.id_srandi`,\n # and :func:`_backend.id_srando`.\n\n if isinstance(seed, str) and seed == 'default':\n _backend.id_srando()\n elif hasattr(seed, '__len__'):\n state = np.asfortranarray(seed, dtype=float)\n if state.shape != (55,):\n raise ValueError(\"invalid input size\")\n elif state.min() < 0 or state.max() > 1:\n raise ValueError(\"values not in range [0,1]\")\n _backend.id_srandi(state)\n elif seed is None:\n _backend.id_srandi(np.random.rand(55))\n else:\n rnd = np.random.RandomState(seed)\n _backend.id_srandi(rnd.rand(55))\n\n\ndef rand(*shape):\n \"\"\"\n Generate standard uniform pseudorandom numbers via a very efficient lagged\n Fibonacci method.\n\n This routine is used for all random number generation in this package and\n can affect ID and SVD results.\n\n Parameters\n ----------\n *shape\n Shape of output array\n\n \"\"\"\n # For details, see :func:`_backend.id_srand`, and :func:`_backend.id_srando`.\n return _backend.id_srand(np.prod(shape)).reshape(shape)\n\n\ndef interp_decomp(A, eps_or_k, rand=True):\n \"\"\"\n Compute ID of a matrix.\n\n An ID of a matrix `A` is a factorization defined by a rank `k`, a column\n index array `idx`, and interpolation coefficients `proj` such that::\n\n numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]\n\n The original matrix can then be reconstructed as::\n\n numpy.hstack([A[:,idx[:k]],\n numpy.dot(A[:,idx[:k]], proj)]\n )[:,numpy.argsort(idx)]\n\n or via the routine 
:func:`reconstruct_matrix_from_id`. This can\n equivalently be written as::\n\n numpy.dot(A[:,idx[:k]],\n numpy.hstack([numpy.eye(k), proj])\n )[:,np.argsort(idx)]\n\n in terms of the skeleton and interpolation matrices::\n\n B = A[:,idx[:k]]\n\n and::\n\n P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]\n\n respectively. See also :func:`reconstruct_interp_matrix` and\n :func:`reconstruct_skel_matrix`.\n\n The ID can be computed to any relative precision or rank (depending on the\n value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then\n this function has the output signature::\n\n k, idx, proj = interp_decomp(A, eps_or_k)\n\n Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output\n signature is::\n\n idx, proj = interp_decomp(A, eps_or_k)\n\n .. This function automatically detects the form of the input parameters\n and passes them to the appropriate backend. For details, see\n :func:`_backend.iddp_id`, :func:`_backend.iddp_aid`,\n :func:`_backend.iddp_rid`, :func:`_backend.iddr_id`,\n :func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`,\n :func:`_backend.idzp_id`, :func:`_backend.idzp_aid`,\n :func:`_backend.idzp_rid`, :func:`_backend.idzr_id`,\n :func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`\n Matrix to be factored\n eps_or_k : float or int\n Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of\n approximation.\n rand : bool, optional\n Whether to use random sampling if `A` is of type :class:`numpy.ndarray`\n (randomized algorithms are always used if `A` is of type\n :class:`scipy.sparse.linalg.LinearOperator`).\n\n Returns\n -------\n k : int\n Rank required to achieve specified relative precision if\n `eps_or_k < 1`.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n \"\"\"\n from scipy.sparse.linalg import LinearOperator\n\n real = _is_real(A)\n\n if isinstance(A, np.ndarray):\n if eps_or_k < 1:\n eps = eps_or_k\n if rand:\n if real:\n k, idx, proj = _backend.iddp_aid(eps, A)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n k, idx, proj = _backend.idzp_aid(eps, A)\n else:\n if real:\n k, idx, proj = _backend.iddp_id(eps, A)\n else:\n k, idx, proj = _backend.idzp_id(eps, A)\n return k, idx - 1, proj\n else:\n k = int(eps_or_k)\n if rand:\n if real:\n idx, proj = _backend.iddr_aid(A, k)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n idx, proj = _backend.idzr_aid(A, k)\n else:\n if real:\n idx, proj = _backend.iddr_id(A, k)\n else:\n idx, proj = _backend.idzr_id(A, k)\n return idx - 1, proj\n elif isinstance(A, LinearOperator):\n m, n = A.shape\n matveca = A.rmatvec\n if eps_or_k < 1:\n eps = eps_or_k\n if real:\n k, idx, proj = _backend.iddp_rid(eps, m, n, matveca)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n k, idx, proj = _backend.idzp_rid(eps, m, n, matveca)\n return k, idx - 1, proj\n else:\n k = int(eps_or_k)\n if real:\n idx, proj = _backend.iddr_rid(m, n, matveca, k)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n idx, proj = _backend.idzr_rid(m, n, matveca, k)\n return idx - 1, proj\n else:\n raise _TYPE_ERROR\n\n\ndef reconstruct_matrix_from_id(B, idx, proj):\n \"\"\"\n Reconstruct matrix from its ID.\n\n A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`\n and `proj`, respectively, can be reconstructed as::\n\n numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]\n\n See also 
:func:`reconstruct_interp_matrix` and\n :func:`reconstruct_skel_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_reconid` and\n :func:`_backend.idz_reconid`.\n\n Parameters\n ----------\n B : :class:`numpy.ndarray`\n Skeleton matrix.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Reconstructed matrix.\n \"\"\"\n if _is_real(B):\n return _backend.idd_reconid(B, idx + 1, proj)\n else:\n return _backend.idz_reconid(B, idx + 1, proj)\n\n\ndef reconstruct_interp_matrix(idx, proj):\n \"\"\"\n Reconstruct interpolation matrix from ID.\n\n The interpolation matrix can be reconstructed from the ID indices and\n coefficients `idx` and `proj`, respectively, as::\n\n P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]\n\n The original matrix can then be reconstructed from its skeleton matrix `B`\n via::\n\n numpy.dot(B, P)\n\n See also :func:`reconstruct_matrix_from_id` and\n :func:`reconstruct_skel_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_reconint` and\n :func:`_backend.idz_reconint`.\n\n Parameters\n ----------\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Interpolation matrix.\n \"\"\"\n if _is_real(proj):\n return _backend.idd_reconint(idx + 1, proj)\n else:\n return _backend.idz_reconint(idx + 1, proj)\n\n\ndef reconstruct_skel_matrix(A, k, idx):\n \"\"\"\n Reconstruct skeleton matrix from ID.\n\n The skeleton matrix can be reconstructed from the original matrix `A` and its\n ID rank and indices `k` and `idx`, respectively, as::\n\n B = A[:,idx[:k]]\n\n The original matrix can then be reconstructed via::\n\n numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]\n\n See also :func:`reconstruct_matrix_from_id` and\n :func:`reconstruct_interp_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_copycols` and\n :func:`_backend.idz_copycols`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray`\n Original matrix.\n k : int\n Rank of ID.\n idx : :class:`numpy.ndarray`\n Column index array.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Skeleton matrix.\n \"\"\"\n if _is_real(A):\n return _backend.idd_copycols(A, k, idx + 1)\n else:\n return _backend.idz_copycols(A, k, idx + 1)\n\n\ndef id_to_svd(B, idx, proj):\n \"\"\"\n Convert ID to SVD.\n\n The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and\n coefficients `idx` and `proj`, respectively, is::\n\n U, S, V = id_to_svd(B, idx, proj)\n A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))\n\n See also :func:`svd`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. 
For details, see :func:`_backend.idd_id2svd` and\n :func:`_backend.idz_id2svd`.\n\n Parameters\n ----------\n B : :class:`numpy.ndarray`\n Skeleton matrix.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n U : :class:`numpy.ndarray`\n Left singular vectors.\n S : :class:`numpy.ndarray`\n Singular values.\n V : :class:`numpy.ndarray`\n Right singular vectors.\n \"\"\"\n if _is_real(B):\n U, V, S = _backend.idd_id2svd(B, idx + 1, proj)\n else:\n U, V, S = _backend.idz_id2svd(B, idx + 1, proj)\n return U, S, V\n\n\ndef estimate_spectral_norm(A, its=20):\n \"\"\"\n Estimate spectral norm of a matrix by the randomized power method.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_snorm` and\n :func:`_backend.idz_snorm`.\n\n Parameters\n ----------\n A : :class:`scipy.sparse.linalg.LinearOperator`\n Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the\n `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n its : int, optional\n Number of power method iterations.\n\n Returns\n -------\n float\n Spectral norm estimate.\n \"\"\"\n from scipy.sparse.linalg import aslinearoperator\n A = aslinearoperator(A)\n m, n = A.shape\n matvec = lambda x: A. matvec(x)\n matveca = lambda x: A.rmatvec(x)\n if _is_real(A):\n return _backend.idd_snorm(m, n, matveca, matvec, its=its)\n else:\n return _backend.idz_snorm(m, n, matveca, matvec, its=its)\n\n\ndef estimate_spectral_norm_diff(A, B, its=20):\n \"\"\"\n Estimate spectral norm of the difference of two matrices by the randomized\n power method.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and\n :func:`_backend.idz_diffsnorm`.\n\n Parameters\n ----------\n A : :class:`scipy.sparse.linalg.LinearOperator`\n First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the\n `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n B : :class:`scipy.sparse.linalg.LinearOperator`\n Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with\n the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n its : int, optional\n Number of power method iterations.\n\n Returns\n -------\n float\n Spectral norm estimate of matrix difference.\n \"\"\"\n from scipy.sparse.linalg import aslinearoperator\n A = aslinearoperator(A)\n B = aslinearoperator(B)\n m, n = A.shape\n matvec1 = lambda x: A. matvec(x)\n matveca1 = lambda x: A.rmatvec(x)\n matvec2 = lambda x: B. matvec(x)\n matveca2 = lambda x: B.rmatvec(x)\n if _is_real(A):\n return _backend.idd_diffsnorm(\n m, n, matveca1, matveca2, matvec1, matvec2, its=its)\n else:\n return _backend.idz_diffsnorm(\n m, n, matveca1, matveca2, matvec1, matvec2, its=its)\n\n\ndef svd(A, eps_or_k, rand=True):\n \"\"\"\n Compute SVD of a matrix via an ID.\n\n An SVD of a matrix `A` is a factorization::\n\n A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))\n\n where `U` and `V` have orthonormal columns and `S` is nonnegative.\n\n The SVD can be computed to any relative precision or rank (depending on the\n value of `eps_or_k`).\n\n See also :func:`interp_decomp` and :func:`id_to_svd`.\n\n .. This function automatically detects the form of the input parameters and\n passes them to the appropriate backend. 
For details, see\n :func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`,\n :func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`,\n :func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`,\n :func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`,\n :func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`,\n :func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`\n Matrix to be factored, given as either a :class:`numpy.ndarray` or a\n :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and\n `rmatvec` methods (to apply the matrix and its adjoint).\n eps_or_k : float or int\n Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of\n approximation.\n rand : bool, optional\n Whether to use random sampling if `A` is of type :class:`numpy.ndarray`\n (randomized algorithms are always used if `A` is of type\n :class:`scipy.sparse.linalg.LinearOperator`).\n\n Returns\n -------\n U : :class:`numpy.ndarray`\n Left singular vectors.\n S : :class:`numpy.ndarray`\n Singular values.\n V : :class:`numpy.ndarray`\n Right singular vectors.\n \"\"\"\n from scipy.sparse.linalg import LinearOperator\n\n real = _is_real(A)\n\n if isinstance(A, np.ndarray):\n if eps_or_k < 1:\n eps = eps_or_k\n if rand:\n if real:\n U, V, S = _backend.iddp_asvd(eps, A)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n U, V, S = _backend.idzp_asvd(eps, A)\n else:\n if real:\n U, V, S = _backend.iddp_svd(eps, A)\n else:\n U, V, S = _backend.idzp_svd(eps, A)\n else:\n k = int(eps_or_k)\n if k > min(A.shape):\n raise ValueError(\"Approximation rank %s exceeds min(A.shape) = \"\n \" %s \" % (k, min(A.shape)))\n if rand:\n if real:\n U, V, S = _backend.iddr_asvd(A, k)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n U, V, S = _backend.idzr_asvd(A, k)\n else:\n if real:\n U, V, S = _backend.iddr_svd(A, k)\n else:\n U, V, S = _backend.idzr_svd(A, k)\n elif isinstance(A, LinearOperator):\n m, n = A.shape\n matvec = lambda x: A.matvec(x)\n matveca = lambda x: A.rmatvec(x)\n if eps_or_k < 1:\n eps = eps_or_k\n if real:\n U, V, S = _backend.iddp_rsvd(eps, m, n, matveca, matvec)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n U, V, S = _backend.idzp_rsvd(eps, m, n, matveca, matvec)\n else:\n k = int(eps_or_k)\n if real:\n U, V, S = _backend.iddr_rsvd(m, n, matveca, matvec, k)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n U, V, S = _backend.idzr_rsvd(m, n, matveca, matvec, k)\n else:\n raise _TYPE_ERROR\n return U, S, V\n\n\ndef estimate_rank(A, eps):\n \"\"\"\n Estimate matrix rank to a specified relative precision using randomized\n methods.\n\n The matrix `A` can be given as either a :class:`numpy.ndarray` or a\n :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used\n for each case. If `A` is of type :class:`numpy.ndarray`, then the output\n rank is typically about 8 higher than the actual numerical rank.\n\n .. This function automatically detects the form of the input parameters and\n passes them to the appropriate backend. 
For details,\n see :func:`_backend.idd_estrank`, :func:`_backend.idd_findrank`,\n :func:`_backend.idz_estrank`, and :func:`_backend.idz_findrank`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`\n Matrix whose rank is to be estimated, given as either a\n :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`\n with the `rmatvec` method (to apply the matrix adjoint).\n eps : float\n Relative error for numerical rank definition.\n\n Returns\n -------\n int\n Estimated matrix rank.\n \"\"\"\n from scipy.sparse.linalg import LinearOperator\n\n real = _is_real(A)\n\n if isinstance(A, np.ndarray):\n if real:\n rank = _backend.idd_estrank(eps, A)\n else:\n rank = _backend.idz_estrank(eps, A)\n if rank == 0:\n # special return value for nearly full rank\n rank = min(A.shape)\n return rank\n elif isinstance(A, LinearOperator):\n m, n = A.shape\n matveca = A.rmatvec\n if real:\n return _backend.idd_findrank(eps, m, n, matveca)\n else:\n return _backend.idz_findrank(eps, m, n, matveca)\n else:\n raise _TYPE_ERROR\n",
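As a usage sketch of the operator-based routines defined above (a minimal example
under illustrative choices of matrix size, precision, and rank; only public functions
of this module and scipy.sparse.linalg.aslinearoperator are used):

import numpy as np
import scipy.linalg.interpolative as sli
from scipy.linalg import hilbert
from scipy.sparse.linalg import aslinearoperator

A = hilbert(200)
L = aslinearoperator(A)             # expose A only through matvec/rmatvec

k = sli.estimate_rank(L, 1e-10)     # numerical rank at relative precision 1e-10
idx, proj = sli.interp_decomp(L, k)               # fixed-rank, randomized ID
B = sli.reconstruct_skel_matrix(A, k, idx)        # the k skeleton columns of A
C = sli.reconstruct_matrix_from_id(B, idx, proj)  # ID approximation of A

# The spectral-norm error of the approximation should be comparable to the
# (k+1)-th singular value of A.
err = sli.estimate_spectral_norm_diff(A, C)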
"import numpy as np\nfrom scipy.sparse import coo_matrix\n\n\ndef crosstab(*args, levels=None, sparse=False):\n \"\"\"\n Return table of counts for each possible unique combination in ``*args``.\n\n When ``len(args) > 1``, the array computed by this function is\n often referred to as a *contingency table* [1]_.\n\n The arguments must be sequences with the same length. The second return\n value, `count`, is an integer array with ``len(args)`` dimensions. If\n `levels` is None, the shape of `count` is ``(n0, n1, ...)``, where ``nk``\n is the number of unique elements in ``args[k]``.\n\n Parameters\n ----------\n *args : sequences\n A sequence of sequences whose unique aligned elements are to be\n counted. The sequences in args must all be the same length.\n levels : sequence, optional\n If `levels` is given, it must be a sequence that is the same length as\n `args`. Each element in `levels` is either a sequence or None. If it\n is a sequence, it gives the values in the corresponding sequence in\n `args` that are to be counted. If any value in the sequences in `args`\n does not occur in the corresponding sequence in `levels`, that value\n is ignored and not counted in the returned array `count`. The default\n value of `levels` for ``args[i]`` is ``np.unique(args[i])``\n sparse : bool, optional\n If True, return a sparse matrix. The matrix will be an instance of\n the `scipy.sparse.coo_matrix` class. Because SciPy's sparse matrices\n must be 2-d, only two input sequences are allowed when `sparse` is\n True. Default is False.\n\n Returns\n -------\n elements : tuple of numpy.ndarrays.\n Tuple of length ``len(args)`` containing the arrays of elements that\n are counted in `count`. These can be interpreted as the labels of\n the corresponding dimensions of `count`.\n If `levels` was given, then if ``levels[i]`` is not None,\n ``elements[i]`` will hold the values given in ``levels[i]``.\n count : numpy.ndarray or scipy.sparse.coo_matrix\n Counts of the unique elements in ``zip(*args)``, stored in an array.\n Also known as a *contingency table* when ``len(args) > 1``.\n\n See Also\n --------\n numpy.unique\n\n Notes\n -----\n .. versionadded:: 1.7.0\n\n References\n ----------\n .. [1] \"Contingency table\", http://en.wikipedia.org/wiki/Contingency_table\n\n Examples\n --------\n >>> from scipy.stats.contingency import crosstab\n\n Given the lists `a` and `x`, create a contingency table that counts the\n frequencies of the corresponding pairs.\n\n >>> a = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']\n >>> x = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']\n >>> (avals, xvals), count = crosstab(a, x)\n >>> avals\n array(['A', 'B'], dtype='<U1')\n >>> xvals\n array(['X', 'Y', 'Z'], dtype='<U1')\n >>> count\n array([[2, 3, 0],\n [1, 0, 4]])\n\n So `('A', 'X')` occurs twice, `('A', 'Y')` occurs three times, etc.\n\n Higher dimensional contingency tables can be created.\n\n >>> p = [0, 0, 0, 0, 1, 1, 1, 0, 0, 1]\n >>> (avals, xvals, pvals), count = crosstab(a, x, p)\n >>> count\n array([[[2, 0],\n [2, 1],\n [0, 0]],\n [[1, 0],\n [0, 0],\n [1, 3]]])\n >>> count.shape\n (2, 3, 2)\n\n The values to be counted can be set by using the `levels` argument.\n It allows the elements of interest in each input sequence to be\n given explicitly instead finding the unique elements of the sequence.\n\n For example, suppose one of the arguments is an array containing the\n answers to a survey question, with integer values 1 to 4. 
Even if the\n value 1 does not occur in the data, we want an entry for it in the table.\n\n >>> q1 = [2, 3, 3, 2, 4, 4, 2, 3, 4, 4, 4, 3, 3, 3, 4] # 1 does not occur.\n >>> q2 = [4, 4, 2, 2, 2, 4, 1, 1, 2, 2, 4, 2, 2, 2, 4] # 3 does not occur.\n >>> options = [1, 2, 3, 4]\n >>> vals, count = crosstab(q1, q2, levels=(options, options))\n >>> count\n array([[0, 0, 0, 0],\n [1, 1, 0, 1],\n [1, 4, 0, 1],\n [0, 3, 0, 3]])\n\n If `levels` is given, but an element of `levels` is None, the unique values\n of the corresponding argument are used. For example,\n\n >>> vals, count = crosstab(q1, q2, levels=(None, options))\n >>> vals\n [array([2, 3, 4]), [1, 2, 3, 4]]\n >>> count\n array([[1, 1, 0, 1],\n [1, 4, 0, 1],\n [0, 3, 0, 3]])\n\n If we want to ignore the pairs where 4 occurs in ``q2``, we can\n give just the values [1, 2] to `levels`, and the 4 will be ignored:\n\n >>> vals, count = crosstab(q1, q2, levels=(None, [1, 2]))\n >>> vals\n [array([2, 3, 4]), [1, 2]]\n >>> count\n array([[1, 1],\n [1, 4],\n [0, 3]])\n\n Finally, let's repeat the first example, but return a sparse matrix:\n\n >>> (avals, xvals), count = crosstab(a, x, sparse=True)\n >>> count\n <2x3 sparse matrix of type '<class 'numpy.int64'>'\n with 4 stored elements in COOrdinate format>\n >>> count.A\n array([[2, 3, 0],\n [1, 0, 4]])\n\n \"\"\"\n nargs = len(args)\n if nargs == 0:\n raise TypeError(\"At least one input sequence is required.\")\n\n len0 = len(args[0])\n if not all(len(a) == len0 for a in args[1:]):\n raise ValueError(\"All input sequences must have the same length.\")\n\n if sparse and nargs != 2:\n raise ValueError(\"When `sparse` is True, only two input sequences \"\n \"are allowed.\")\n\n if levels is None:\n # Call np.unique with return_inverse=True on each argument.\n actual_levels, indices = zip(*[np.unique(a, return_inverse=True)\n for a in args])\n else:\n # `levels` is not None...\n if len(levels) != nargs:\n raise ValueError('len(levels) must equal the number of input '\n 'sequences')\n\n args = [np.asarray(arg) for arg in args]\n mask = np.zeros((nargs, len0), dtype=np.bool_)\n inv = np.zeros((nargs, len0), dtype=np.intp)\n actual_levels = []\n for k, (levels_list, arg) in enumerate(zip(levels, args)):\n if levels_list is None:\n levels_list, inv[k, :] = np.unique(arg, return_inverse=True)\n mask[k, :] = True\n else:\n q = arg == np.asarray(levels_list).reshape(-1, 1)\n mask[k, :] = np.any(q, axis=0)\n qnz = q.T.nonzero()\n inv[k, qnz[0]] = qnz[1]\n actual_levels.append(levels_list)\n\n mask_all = mask.all(axis=0)\n indices = tuple(inv[:, mask_all])\n\n if sparse:\n count = coo_matrix((np.ones(len(indices[0]), dtype=int),\n (indices[0], indices[1])))\n count.sum_duplicates()\n else:\n shape = [len(u) for u in actual_levels]\n count = np.zeros(shape, dtype=int)\n np.add.at(count, indices, 1)\n\n return actual_levels, count\n",
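For reference, a condensed sketch of the counting idea used in the default
``levels=None`` path above: ``np.unique(..., return_inverse=True)`` maps each value
to its level index, and ``np.add.at`` accumulates one count per aligned index tuple.

>>> import numpy as np
>>> a = ['A', 'B', 'A', 'A', 'B']
>>> x = ['X', 'X', 'Y', 'Y', 'Z']
>>> avals, ai = np.unique(a, return_inverse=True)
>>> xvals, xi = np.unique(x, return_inverse=True)
>>> count = np.zeros((len(avals), len(xvals)), dtype=int)
>>> np.add.at(count, (ai, xi), 1)
>>> count
array([[1, 2, 0],
       [1, 0, 1]])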
"# -*- coding: utf-8 -*-\n#\n# Author: Pearu Peterson, March 2002\n#\n# additions by Travis Oliphant, March 2002\n# additions by Eric Jones, June 2002\n# additions by Johannes Loehnert, June 2006\n# additions by Bart Vandereycken, June 2006\n# additions by Andrew D Straw, May 2007\n# additions by Tiziano Zito, November 2008\n#\n# April 2010: Functions for LU, QR, SVD, Schur, and Cholesky decompositions\n# were moved to their own files. Still in this file are functions for\n# eigenstuff and for the Hessenberg form.\n\n__all__ = ['eig', 'eigvals', 'eigh', 'eigvalsh',\n 'eig_banded', 'eigvals_banded',\n 'eigh_tridiagonal', 'eigvalsh_tridiagonal', 'hessenberg', 'cdf2rdf']\n\nimport numpy\nfrom numpy import (array, isfinite, inexact, nonzero, iscomplexobj, cast,\n flatnonzero, conj, asarray, argsort, empty,\n iscomplex, zeros, einsum, eye, inf)\n# Local imports\nfrom scipy._lib._util import _asarray_validated\nfrom ._misc import LinAlgError, _datacopied, norm\nfrom .lapack import get_lapack_funcs, _compute_lwork\n\n\n_I = cast['F'](1j)\n\n\ndef _make_complex_eigvecs(w, vin, dtype):\n \"\"\"\n Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output\n \"\"\"\n # - see LAPACK man page DGGEV at ALPHAI\n v = numpy.array(vin, dtype=dtype)\n m = (w.imag > 0)\n m[:-1] |= (w.imag[1:] < 0) # workaround for LAPACK bug, cf. ticket #709\n for i in flatnonzero(m):\n v.imag[:, i] = vin[:, i+1]\n conj(v[:, i], v[:, i+1])\n return v\n\n\ndef _make_eigvals(alpha, beta, homogeneous_eigvals):\n if homogeneous_eigvals:\n if beta is None:\n return numpy.vstack((alpha, numpy.ones_like(alpha)))\n else:\n return numpy.vstack((alpha, beta))\n else:\n if beta is None:\n return alpha\n else:\n w = numpy.empty_like(alpha)\n alpha_zero = (alpha == 0)\n beta_zero = (beta == 0)\n beta_nonzero = ~beta_zero\n w[beta_nonzero] = alpha[beta_nonzero]/beta[beta_nonzero]\n # Use numpy.inf for complex values too since\n # 1/numpy.inf = 0, i.e., it correctly behaves as projective\n # infinity.\n w[~alpha_zero & beta_zero] = numpy.inf\n if numpy.all(alpha.imag == 0):\n w[alpha_zero & beta_zero] = numpy.nan\n else:\n w[alpha_zero & beta_zero] = complex(numpy.nan, numpy.nan)\n return w\n\n\ndef _geneig(a1, b1, left, right, overwrite_a, overwrite_b,\n homogeneous_eigvals):\n ggev, = get_lapack_funcs(('ggev',), (a1, b1))\n cvl, cvr = left, right\n res = ggev(a1, b1, lwork=-1)\n lwork = res[-2][0].real.astype(numpy.int_)\n if ggev.typecode in 'cz':\n alpha, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork,\n overwrite_a, overwrite_b)\n w = _make_eigvals(alpha, beta, homogeneous_eigvals)\n else:\n alphar, alphai, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr,\n lwork, overwrite_a,\n overwrite_b)\n alpha = alphar + _I * alphai\n w = _make_eigvals(alpha, beta, homogeneous_eigvals)\n _check_info(info, 'generalized eig algorithm (ggev)')\n\n only_real = numpy.all(w.imag == 0.0)\n if not (ggev.typecode in 'cz' or only_real):\n t = w.dtype.char\n if left:\n vl = _make_complex_eigvecs(w, vl, t)\n if right:\n vr = _make_complex_eigvecs(w, vr, t)\n\n # the eigenvectors returned by the lapack function are NOT normalized\n for i in range(vr.shape[0]):\n if right:\n vr[:, i] /= norm(vr[:, i])\n if left:\n vl[:, i] /= norm(vl[:, i])\n\n if not (left or right):\n return w\n if left:\n if right:\n return w, vl, vr\n return w, vl\n return w, vr\n\n\ndef eig(a, b=None, left=False, right=True, overwrite_a=False,\n overwrite_b=False, check_finite=True, homogeneous_eigvals=False):\n \"\"\"\n Solve an ordinary or generalized eigenvalue 
problem of a square matrix.\n\n Find eigenvalues w and right or left eigenvectors of a general matrix::\n\n a vr[:,i] = w[i] b vr[:,i]\n a.H vl[:,i] = w[i].conj() b.H vl[:,i]\n\n where ``.H`` is the Hermitian conjugation.\n\n Parameters\n ----------\n a : (M, M) array_like\n A complex or real matrix whose eigenvalues and eigenvectors\n will be computed.\n b : (M, M) array_like, optional\n Right-hand side matrix in a generalized eigenvalue problem.\n Default is None, identity matrix is assumed.\n left : bool, optional\n Whether to calculate and return left eigenvectors. Default is False.\n right : bool, optional\n Whether to calculate and return right eigenvectors. Default is True.\n overwrite_a : bool, optional\n Whether to overwrite `a`; may improve performance. Default is False.\n overwrite_b : bool, optional\n Whether to overwrite `b`; may improve performance. Default is False.\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n homogeneous_eigvals : bool, optional\n If True, return the eigenvalues in homogeneous coordinates.\n In this case ``w`` is a (2, M) array so that::\n\n w[1,i] a vr[:,i] = w[0,i] b vr[:,i]\n\n Default is False.\n\n Returns\n -------\n w : (M,) or (2, M) double or complex ndarray\n The eigenvalues, each repeated according to its\n multiplicity. The shape is (M,) unless\n ``homogeneous_eigvals=True``.\n vl : (M, M) double or complex ndarray\n The normalized left eigenvector corresponding to the eigenvalue\n ``w[i]`` is the column vl[:,i]. Only returned if ``left=True``.\n vr : (M, M) double or complex ndarray\n The normalized right eigenvector corresponding to the eigenvalue\n ``w[i]`` is the column ``vr[:,i]``. Only returned if ``right=True``.\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge.\n\n See Also\n --------\n eigvals : eigenvalues of general arrays\n eigh : Eigenvalues and right eigenvectors for symmetric/Hermitian arrays.\n eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian\n band matrices\n eigh_tridiagonal : eigenvalues and right eiegenvectors for\n symmetric/Hermitian tridiagonal matrices\n\n Examples\n --------\n >>> from scipy import linalg\n >>> a = np.array([[0., -1.], [1., 0.]])\n >>> linalg.eigvals(a)\n array([0.+1.j, 0.-1.j])\n\n >>> b = np.array([[0., 1.], [1., 1.]])\n >>> linalg.eigvals(a, b)\n array([ 1.+0.j, -1.+0.j])\n\n >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]])\n >>> linalg.eigvals(a, homogeneous_eigvals=True)\n array([[3.+0.j, 8.+0.j, 7.+0.j],\n [1.+0.j, 1.+0.j, 1.+0.j]])\n\n >>> a = np.array([[0., -1.], [1., 0.]])\n >>> linalg.eigvals(a) == linalg.eig(a)[0]\n array([ True, True])\n >>> linalg.eig(a, left=True, right=False)[1] # normalized left eigenvector\n array([[-0.70710678+0.j , -0.70710678-0.j ],\n [-0. +0.70710678j, -0. -0.70710678j]])\n >>> linalg.eig(a, left=False, right=True)[1] # normalized right eigenvector\n array([[0.70710678+0.j , 0.70710678-0.j ],\n [0. -0.70710678j, 0. 
+0.70710678j]])\n\n\n\n \"\"\"\n a1 = _asarray_validated(a, check_finite=check_finite)\n if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:\n raise ValueError('expected square matrix')\n overwrite_a = overwrite_a or (_datacopied(a1, a))\n if b is not None:\n b1 = _asarray_validated(b, check_finite=check_finite)\n overwrite_b = overwrite_b or _datacopied(b1, b)\n if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:\n raise ValueError('expected square matrix')\n if b1.shape != a1.shape:\n raise ValueError('a and b must have the same shape')\n return _geneig(a1, b1, left, right, overwrite_a, overwrite_b,\n homogeneous_eigvals)\n\n geev, geev_lwork = get_lapack_funcs(('geev', 'geev_lwork'), (a1,))\n compute_vl, compute_vr = left, right\n\n lwork = _compute_lwork(geev_lwork, a1.shape[0],\n compute_vl=compute_vl,\n compute_vr=compute_vr)\n\n if geev.typecode in 'cz':\n w, vl, vr, info = geev(a1, lwork=lwork,\n compute_vl=compute_vl,\n compute_vr=compute_vr,\n overwrite_a=overwrite_a)\n w = _make_eigvals(w, None, homogeneous_eigvals)\n else:\n wr, wi, vl, vr, info = geev(a1, lwork=lwork,\n compute_vl=compute_vl,\n compute_vr=compute_vr,\n overwrite_a=overwrite_a)\n t = {'f': 'F', 'd': 'D'}[wr.dtype.char]\n w = wr + _I * wi\n w = _make_eigvals(w, None, homogeneous_eigvals)\n\n _check_info(info, 'eig algorithm (geev)',\n positive='did not converge (only eigenvalues '\n 'with order >= %d have converged)')\n\n only_real = numpy.all(w.imag == 0.0)\n if not (geev.typecode in 'cz' or only_real):\n t = w.dtype.char\n if left:\n vl = _make_complex_eigvecs(w, vl, t)\n if right:\n vr = _make_complex_eigvecs(w, vr, t)\n if not (left or right):\n return w\n if left:\n if right:\n return w, vl, vr\n return w, vl\n return w, vr\n\n\ndef eigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False,\n overwrite_b=False, turbo=True, eigvals=None, type=1,\n check_finite=True, subset_by_index=None, subset_by_value=None,\n driver=None):\n \"\"\"\n Solve a standard or generalized eigenvalue problem for a complex\n Hermitian or real symmetric matrix.\n\n Find eigenvalues array ``w`` and optionally eigenvectors array ``v`` of\n array ``a``, where ``b`` is positive definite such that for every\n eigenvalue λ (i-th entry of w) and its eigenvector ``vi`` (i-th column of\n ``v``) satisfies::\n\n a @ vi = λ * b @ vi\n vi.conj().T @ a @ vi = λ\n vi.conj().T @ b @ vi = 1\n\n In the standard problem, ``b`` is assumed to be the identity matrix.\n\n Parameters\n ----------\n a : (M, M) array_like\n A complex Hermitian or real symmetric matrix whose eigenvalues and\n eigenvectors will be computed.\n b : (M, M) array_like, optional\n A complex Hermitian or real symmetric definite positive matrix in.\n If omitted, identity matrix is assumed.\n lower : bool, optional\n Whether the pertinent array data is taken from the lower or upper\n triangle of ``a`` and, if applicable, ``b``. (Default: lower)\n eigvals_only : bool, optional\n Whether to calculate only eigenvalues and no eigenvectors.\n (Default: both are calculated)\n subset_by_index : iterable, optional\n If provided, this two-element iterable defines the start and the end\n indices of the desired eigenvalues (ascending order and 0-indexed).\n To return only the second smallest to fifth smallest eigenvalues,\n ``[1, 4]`` is used. ``[n-3, n-1]`` returns the largest three. Only\n available with \"evr\", \"evx\", and \"gvx\" drivers. 
The entries are\n directly converted to integers via ``int()``.\n subset_by_value : iterable, optional\n If provided, this two-element iterable defines the half-open interval\n ``(a, b]`` that, if any, only the eigenvalues between these values\n are returned. Only available with \"evr\", \"evx\", and \"gvx\" drivers. Use\n ``np.inf`` for the unconstrained ends.\n driver : str, optional\n Defines which LAPACK driver should be used. Valid options are \"ev\",\n \"evd\", \"evr\", \"evx\" for standard problems and \"gv\", \"gvd\", \"gvx\" for\n generalized (where b is not None) problems. See the Notes section.\n The default for standard problems is \"evr\". For generalized problems,\n \"gvd\" is used for full set, and \"gvx\" for subset requested cases.\n type : int, optional\n For the generalized problems, this keyword specifies the problem type\n to be solved for ``w`` and ``v`` (only takes 1, 2, 3 as possible\n inputs)::\n\n 1 => a @ v = w @ b @ v\n 2 => a @ b @ v = w @ v\n 3 => b @ a @ v = w @ v\n\n This keyword is ignored for standard problems.\n overwrite_a : bool, optional\n Whether to overwrite data in ``a`` (may improve performance). Default\n is False.\n overwrite_b : bool, optional\n Whether to overwrite data in ``b`` (may improve performance). Default\n is False.\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n turbo : bool, optional\n *Deprecated since v1.5.0, use ``driver=gvd`` keyword instead*.\n Use divide and conquer algorithm (faster but expensive in memory, only\n for generalized eigenvalue problem and if full set of eigenvalues are\n requested.). Has no significant effect if eigenvectors are not\n requested.\n eigvals : tuple (lo, hi), optional\n *Deprecated since v1.5.0, use ``subset_by_index`` keyword instead*.\n Indexes of the smallest and largest (in ascending order) eigenvalues\n and corresponding eigenvectors to be returned: 0 <= lo <= hi <= M-1.\n If omitted, all eigenvalues and eigenvectors are returned.\n\n Returns\n -------\n w : (N,) ndarray\n The N (1<=N<=M) selected eigenvalues, in ascending order, each\n repeated according to its multiplicity.\n v : (M, N) ndarray\n (if ``eigvals_only == False``)\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge, an error occurred, or\n b matrix is not definite positive. Note that if input matrices are\n not symmetric or Hermitian, no error will be reported but results will\n be wrong.\n\n See Also\n --------\n eigvalsh : eigenvalues of symmetric or Hermitian arrays\n eig : eigenvalues and right eigenvectors for non-symmetric arrays\n eigh_tridiagonal : eigenvalues and right eiegenvectors for\n symmetric/Hermitian tridiagonal matrices\n\n Notes\n -----\n This function does not check the input array for being Hermitian/symmetric\n in order to allow for representing arrays with only their upper/lower\n triangular parts. 
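    For instance (an illustrative sketch of that point), with the default
    ``lower=True`` only the lower triangle is referenced, so the upper triangle
    may be left unset:

    >>> import numpy as np
    >>> from scipy.linalg import eigh
    >>> A = np.array([[6., 3., 1.], [3., 0., 5.], [1., 5., 6.]])
    >>> w_full = eigh(A, eigvals_only=True)
    >>> w_tril = eigh(np.tril(A), eigvals_only=True)  # upper triangle is zeros
    >>> np.allclose(w_full, w_tril)
    True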
Also, note that even though not taken into account,\n finiteness check applies to the whole array and unaffected by \"lower\"\n keyword.\n\n This function uses LAPACK drivers for computations in all possible keyword\n combinations, prefixed with ``sy`` if arrays are real and ``he`` if\n complex, e.g., a float array with \"evr\" driver is solved via\n \"syevr\", complex arrays with \"gvx\" driver problem is solved via \"hegvx\"\n etc.\n\n As a brief summary, the slowest and the most robust driver is the\n classical ``<sy/he>ev`` which uses symmetric QR. ``<sy/he>evr`` is seen as\n the optimal choice for the most general cases. However, there are certain\n occasions that ``<sy/he>evd`` computes faster at the expense of more\n memory usage. ``<sy/he>evx``, while still being faster than ``<sy/he>ev``,\n often performs worse than the rest except when very few eigenvalues are\n requested for large arrays though there is still no performance guarantee.\n\n\n For the generalized problem, normalization with respect to the given\n type argument::\n\n type 1 and 3 : v.conj().T @ a @ v = w\n type 2 : inv(v).conj().T @ a @ inv(v) = w\n\n type 1 or 2 : v.conj().T @ b @ v = I\n type 3 : v.conj().T @ inv(b) @ v = I\n\n\n Examples\n --------\n >>> from scipy.linalg import eigh\n >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])\n >>> w, v = eigh(A)\n >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))\n True\n\n Request only the eigenvalues\n\n >>> w = eigh(A, eigvals_only=True)\n\n Request eigenvalues that are less than 10.\n\n >>> A = np.array([[34, -4, -10, -7, 2],\n ... [-4, 7, 2, 12, 0],\n ... [-10, 2, 44, 2, -19],\n ... [-7, 12, 2, 79, -34],\n ... [2, 0, -19, -34, 29]])\n >>> eigh(A, eigvals_only=True, subset_by_value=[-np.inf, 10])\n array([6.69199443e-07, 9.11938152e+00])\n\n Request the largest second eigenvalue and its eigenvector\n\n >>> w, v = eigh(A, subset_by_index=[1, 1])\n >>> w\n array([9.11938152])\n >>> v.shape # only a single column is returned\n (5, 1)\n\n \"\"\"\n # set lower\n uplo = 'L' if lower else 'U'\n # Set job for Fortran routines\n _job = 'N' if eigvals_only else 'V'\n\n drv_str = [None, \"ev\", \"evd\", \"evr\", \"evx\", \"gv\", \"gvd\", \"gvx\"]\n if driver not in drv_str:\n raise ValueError('\"{}\" is unknown. 
Possible values are \"None\", \"{}\".'\n ''.format(driver, '\", \"'.join(drv_str[1:])))\n\n a1 = _asarray_validated(a, check_finite=check_finite)\n if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:\n raise ValueError('expected square \"a\" matrix')\n overwrite_a = overwrite_a or (_datacopied(a1, a))\n cplx = True if iscomplexobj(a1) else False\n n = a1.shape[0]\n drv_args = {'overwrite_a': overwrite_a}\n\n if b is not None:\n b1 = _asarray_validated(b, check_finite=check_finite)\n overwrite_b = overwrite_b or _datacopied(b1, b)\n if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:\n raise ValueError('expected square \"b\" matrix')\n\n if b1.shape != a1.shape:\n raise ValueError(\"wrong b dimensions {}, should \"\n \"be {}\".format(b1.shape, a1.shape))\n\n if type not in [1, 2, 3]:\n raise ValueError('\"type\" keyword only accepts 1, 2, and 3.')\n\n cplx = True if iscomplexobj(b1) else (cplx or False)\n drv_args.update({'overwrite_b': overwrite_b, 'itype': type})\n\n # backwards-compatibility handling\n subset_by_index = subset_by_index if (eigvals is None) else eigvals\n\n subset = (subset_by_index is not None) or (subset_by_value is not None)\n\n # Both subsets can't be given\n if subset_by_index and subset_by_value:\n raise ValueError('Either index or value subset can be requested.')\n\n # Take turbo into account if all conditions are met otherwise ignore\n if turbo and b is not None:\n driver = 'gvx' if subset else 'gvd'\n\n # Check indices if given\n if subset_by_index:\n lo, hi = [int(x) for x in subset_by_index]\n if not (0 <= lo <= hi < n):\n raise ValueError('Requested eigenvalue indices are not valid. '\n 'Valid range is [0, {}] and start <= end, but '\n 'start={}, end={} is given'.format(n-1, lo, hi))\n # fortran is 1-indexed\n drv_args.update({'range': 'I', 'il': lo + 1, 'iu': hi + 1})\n\n if subset_by_value:\n lo, hi = subset_by_value\n if not (-inf <= lo < hi <= inf):\n raise ValueError('Requested eigenvalue bounds are not valid. 
'\n 'Valid range is (-inf, inf) and low < high, but '\n 'low={}, high={} is given'.format(lo, hi))\n\n drv_args.update({'range': 'V', 'vl': lo, 'vu': hi})\n\n # fix prefix for lapack routines\n pfx = 'he' if cplx else 'sy'\n\n # decide on the driver if not given\n # first early exit on incompatible choice\n if driver:\n if b is None and (driver in [\"gv\", \"gvd\", \"gvx\"]):\n raise ValueError('{} requires input b array to be supplied '\n 'for generalized eigenvalue problems.'\n ''.format(driver))\n if (b is not None) and (driver in ['ev', 'evd', 'evr', 'evx']):\n raise ValueError('\"{}\" does not accept input b array '\n 'for standard eigenvalue problems.'\n ''.format(driver))\n if subset and (driver in [\"ev\", \"evd\", \"gv\", \"gvd\"]):\n raise ValueError('\"{}\" cannot compute subsets of eigenvalues'\n ''.format(driver))\n\n # Default driver is evr and gvd\n else:\n driver = \"evr\" if b is None else (\"gvx\" if subset else \"gvd\")\n\n lwork_spec = {\n 'syevd': ['lwork', 'liwork'],\n 'syevr': ['lwork', 'liwork'],\n 'heevd': ['lwork', 'liwork', 'lrwork'],\n 'heevr': ['lwork', 'lrwork', 'liwork'],\n }\n\n if b is None: # Standard problem\n drv, drvlw = get_lapack_funcs((pfx + driver, pfx+driver+'_lwork'),\n [a1])\n clw_args = {'n': n, 'lower': lower}\n if driver == 'evd':\n clw_args.update({'compute_v': 0 if _job == \"N\" else 1})\n\n lw = _compute_lwork(drvlw, **clw_args)\n # Multiple lwork vars\n if isinstance(lw, tuple):\n lwork_args = dict(zip(lwork_spec[pfx+driver], lw))\n else:\n lwork_args = {'lwork': lw}\n\n drv_args.update({'lower': lower, 'compute_v': 0 if _job == \"N\" else 1})\n w, v, *other_args, info = drv(a=a1, **drv_args, **lwork_args)\n\n else: # Generalized problem\n # 'gvd' doesn't have lwork query\n if driver == \"gvd\":\n drv = get_lapack_funcs(pfx + \"gvd\", [a1, b1])\n lwork_args = {}\n else:\n drv, drvlw = get_lapack_funcs((pfx + driver, pfx+driver+'_lwork'),\n [a1, b1])\n # generalized drivers use uplo instead of lower\n lw = _compute_lwork(drvlw, n, uplo=uplo)\n lwork_args = {'lwork': lw}\n\n drv_args.update({'uplo': uplo, 'jobz': _job})\n\n w, v, *other_args, info = drv(a=a1, b=b1, **drv_args, **lwork_args)\n\n # m is always the first extra argument\n w = w[:other_args[0]] if subset else w\n v = v[:, :other_args[0]] if (subset and not eigvals_only) else v\n\n # Check if we had a successful exit\n if info == 0:\n if eigvals_only:\n return w\n else:\n return w, v\n else:\n if info < -1:\n raise LinAlgError('Illegal value in argument {} of internal {}'\n ''.format(-info, drv.typecode + pfx + driver))\n elif info > n:\n raise LinAlgError('The leading minor of order {} of B is not '\n 'positive definite. 
The factorization of B '\n 'could not be completed and no eigenvalues '\n 'or eigenvectors were computed.'.format(info-n))\n else:\n drv_err = {'ev': 'The algorithm failed to converge; {} '\n 'off-diagonal elements of an intermediate '\n 'tridiagonal form did not converge to zero.',\n 'evx': '{} eigenvectors failed to converge.',\n 'evd': 'The algorithm failed to compute an eigenvalue '\n 'while working on the submatrix lying in rows '\n 'and columns {0}/{1} through mod({0},{1}).',\n 'evr': 'Internal Error.'\n }\n if driver in ['ev', 'gv']:\n msg = drv_err['ev'].format(info)\n elif driver in ['evx', 'gvx']:\n msg = drv_err['evx'].format(info)\n elif driver in ['evd', 'gvd']:\n if eigvals_only:\n msg = drv_err['ev'].format(info)\n else:\n msg = drv_err['evd'].format(info, n+1)\n else:\n msg = drv_err['evr']\n\n raise LinAlgError(msg)\n\n\n_conv_dict = {0: 0, 1: 1, 2: 2,\n 'all': 0, 'value': 1, 'index': 2,\n 'a': 0, 'v': 1, 'i': 2}\n\n\ndef _check_select(select, select_range, max_ev, max_len):\n \"\"\"Check that select is valid, convert to Fortran style.\"\"\"\n if isinstance(select, str):\n select = select.lower()\n try:\n select = _conv_dict[select]\n except KeyError as e:\n raise ValueError('invalid argument for select') from e\n vl, vu = 0., 1.\n il = iu = 1\n if select != 0: # (non-all)\n sr = asarray(select_range)\n if sr.ndim != 1 or sr.size != 2 or sr[1] < sr[0]:\n raise ValueError('select_range must be a 2-element array-like '\n 'in nondecreasing order')\n if select == 1: # (value)\n vl, vu = sr\n if max_ev == 0:\n max_ev = max_len\n else: # 2 (index)\n if sr.dtype.char.lower() not in 'hilqp':\n raise ValueError('when using select=\"i\", select_range must '\n 'contain integers, got dtype %s (%s)'\n % (sr.dtype, sr.dtype.char))\n # translate Python (0 ... N-1) into Fortran (1 ... N) with + 1\n il, iu = sr + 1\n if min(il, iu) < 1 or max(il, iu) > max_len:\n raise ValueError('select_range out of bounds')\n max_ev = iu - il + 1\n return select, vl, vu, il, iu, max_ev\n\n\ndef eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False,\n select='a', select_range=None, max_ev=0, check_finite=True):\n \"\"\"\n Solve real symmetric or complex Hermitian band matrix eigenvalue problem.\n\n Find eigenvalues w and optionally right eigenvectors v of a::\n\n a v[:,i] = w[i] v[:,i]\n v.H v = identity\n\n The matrix a is stored in a_band either in lower diagonal or upper\n diagonal ordered form:\n\n a_band[u + i - j, j] == a[i,j] (if upper form; i <= j)\n a_band[ i - j, j] == a[i,j] (if lower form; i >= j)\n\n where u is the number of bands above the diagonal.\n\n Example of a_band (shape of a is (6,6), u=2)::\n\n upper form:\n * * a02 a13 a24 a35\n * a01 a12 a23 a34 a45\n a00 a11 a22 a33 a44 a55\n\n lower form:\n a00 a11 a22 a33 a44 a55\n a10 a21 a32 a43 a54 *\n a20 a31 a42 a53 * *\n\n Cells marked with * are not used.\n\n Parameters\n ----------\n a_band : (u+1, M) array_like\n The bands of the M by M matrix a.\n lower : bool, optional\n Is the matrix in the lower form. 
(Default is upper form)\n eigvals_only : bool, optional\n Compute only the eigenvalues and no eigenvectors.\n (Default: calculate also eigenvectors)\n overwrite_a_band : bool, optional\n Discard data in a_band (may enhance performance)\n select : {'a', 'v', 'i'}, optional\n Which eigenvalues to calculate\n\n ====== ========================================\n select calculated\n ====== ========================================\n 'a' All eigenvalues\n 'v' Eigenvalues in the interval (min, max]\n 'i' Eigenvalues with indices min <= i <= max\n ====== ========================================\n select_range : (min, max), optional\n Range of selected eigenvalues\n max_ev : int, optional\n For select=='v', maximum number of eigenvalues expected.\n For other values of select, has no meaning.\n\n In doubt, leave this parameter untouched.\n\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n w : (M,) ndarray\n The eigenvalues, in ascending order, each repeated according to its\n multiplicity.\n v : (M, M) float or complex ndarray\n The normalized eigenvector corresponding to the eigenvalue w[i] is\n the column v[:,i].\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge.\n\n See Also\n --------\n eigvals_banded : eigenvalues for symmetric/Hermitian band matrices\n eig : eigenvalues and right eigenvectors of general arrays.\n eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays\n eigh_tridiagonal : eigenvalues and right eigenvectors for\n symmetric/Hermitian tridiagonal matrices\n\n Examples\n --------\n >>> from scipy.linalg import eig_banded\n >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]])\n >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]])\n >>> w, v = eig_banded(Ab, lower=True)\n >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))\n True\n >>> w = eig_banded(Ab, lower=True, eigvals_only=True)\n >>> w\n array([-4.26200532, -2.22987175, 3.95222349, 12.53965359])\n\n Request only the eigenvalues between ``[-3, 4]``\n\n >>> w, v = eig_banded(Ab, lower=True, select='v', select_range=[-3, 4])\n >>> w\n array([-2.22987175, 3.95222349])\n\n \"\"\"\n if eigvals_only or overwrite_a_band:\n a1 = _asarray_validated(a_band, check_finite=check_finite)\n overwrite_a_band = overwrite_a_band or (_datacopied(a1, a_band))\n else:\n a1 = array(a_band)\n if issubclass(a1.dtype.type, inexact) and not isfinite(a1).all():\n raise ValueError(\"array must not contain infs or NaNs\")\n overwrite_a_band = 1\n\n if len(a1.shape) != 2:\n raise ValueError('expected a 2-D array')\n select, vl, vu, il, iu, max_ev = _check_select(\n select, select_range, max_ev, a1.shape[1])\n del select_range\n if select == 0:\n if a1.dtype.char in 'GFD':\n # FIXME: implement this somewhen, for now go with builtin values\n # FIXME: calc optimal lwork by calling ?hbevd(lwork=-1)\n # or by using calc_lwork.f ???\n # lwork = calc_lwork.hbevd(bevd.typecode, a1.shape[0], lower)\n internal_name = 'hbevd'\n else: # a1.dtype.char in 'fd':\n # FIXME: implement this somewhen, for now go with builtin values\n # see above\n # lwork = calc_lwork.sbevd(bevd.typecode, a1.shape[0], lower)\n internal_name = 'sbevd'\n bevd, = get_lapack_funcs((internal_name,), (a1,))\n w, v, info = bevd(a1, compute_v=not eigvals_only,\n lower=lower, 
overwrite_ab=overwrite_a_band)\n else: # select in [1, 2]\n if eigvals_only:\n max_ev = 1\n # calculate optimal abstol for dsbevx (see manpage)\n if a1.dtype.char in 'fF': # single precision\n lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='f'),))\n else:\n lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='d'),))\n abstol = 2 * lamch('s')\n if a1.dtype.char in 'GFD':\n internal_name = 'hbevx'\n else: # a1.dtype.char in 'gfd'\n internal_name = 'sbevx'\n bevx, = get_lapack_funcs((internal_name,), (a1,))\n w, v, m, ifail, info = bevx(\n a1, vl, vu, il, iu, compute_v=not eigvals_only, mmax=max_ev,\n range=select, lower=lower, overwrite_ab=overwrite_a_band,\n abstol=abstol)\n # crop off w and v\n w = w[:m]\n if not eigvals_only:\n v = v[:, :m]\n _check_info(info, internal_name)\n\n if eigvals_only:\n return w\n return w, v\n\n\ndef eigvals(a, b=None, overwrite_a=False, check_finite=True,\n homogeneous_eigvals=False):\n \"\"\"\n Compute eigenvalues from an ordinary or generalized eigenvalue problem.\n\n Find eigenvalues of a general matrix::\n\n a vr[:,i] = w[i] b vr[:,i]\n\n Parameters\n ----------\n a : (M, M) array_like\n A complex or real matrix whose eigenvalues and eigenvectors\n will be computed.\n b : (M, M) array_like, optional\n Right-hand side matrix in a generalized eigenvalue problem.\n If omitted, identity matrix is assumed.\n overwrite_a : bool, optional\n Whether to overwrite data in a (may improve performance)\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities\n or NaNs.\n homogeneous_eigvals : bool, optional\n If True, return the eigenvalues in homogeneous coordinates.\n In this case ``w`` is a (2, M) array so that::\n\n w[1,i] a vr[:,i] = w[0,i] b vr[:,i]\n\n Default is False.\n\n Returns\n -------\n w : (M,) or (2, M) double or complex ndarray\n The eigenvalues, each repeated according to its multiplicity\n but not in any specific order. 
The shape is (M,) unless\n ``homogeneous_eigvals=True``.\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge\n\n See Also\n --------\n eig : eigenvalues and right eigenvectors of general arrays.\n eigvalsh : eigenvalues of symmetric or Hermitian arrays\n eigvals_banded : eigenvalues for symmetric/Hermitian band matrices\n eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal\n matrices\n\n Examples\n --------\n >>> from scipy import linalg\n >>> a = np.array([[0., -1.], [1., 0.]])\n >>> linalg.eigvals(a)\n array([0.+1.j, 0.-1.j])\n\n >>> b = np.array([[0., 1.], [1., 1.]])\n >>> linalg.eigvals(a, b)\n array([ 1.+0.j, -1.+0.j])\n\n >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]])\n >>> linalg.eigvals(a, homogeneous_eigvals=True)\n array([[3.+0.j, 8.+0.j, 7.+0.j],\n [1.+0.j, 1.+0.j, 1.+0.j]])\n\n \"\"\"\n return eig(a, b=b, left=0, right=0, overwrite_a=overwrite_a,\n check_finite=check_finite,\n homogeneous_eigvals=homogeneous_eigvals)\n\n\ndef eigvalsh(a, b=None, lower=True, overwrite_a=False,\n overwrite_b=False, turbo=True, eigvals=None, type=1,\n check_finite=True, subset_by_index=None, subset_by_value=None,\n driver=None):\n \"\"\"\n Solves a standard or generalized eigenvalue problem for a complex\n Hermitian or real symmetric matrix.\n\n Find eigenvalues array ``w`` of array ``a``, where ``b`` is positive\n definite such that for every eigenvalue λ (i-th entry of w) and its\n eigenvector vi (i-th column of v) satisfies::\n\n a @ vi = λ * b @ vi\n vi.conj().T @ a @ vi = λ\n vi.conj().T @ b @ vi = 1\n\n In the standard problem, b is assumed to be the identity matrix.\n\n Parameters\n ----------\n a : (M, M) array_like\n A complex Hermitian or real symmetric matrix whose eigenvalues will\n be computed.\n b : (M, M) array_like, optional\n A complex Hermitian or real symmetric definite positive matrix in.\n If omitted, identity matrix is assumed.\n lower : bool, optional\n Whether the pertinent array data is taken from the lower or upper\n triangle of ``a`` and, if applicable, ``b``. (Default: lower)\n overwrite_a : bool, optional\n Whether to overwrite data in ``a`` (may improve performance). Default\n is False.\n overwrite_b : bool, optional\n Whether to overwrite data in ``b`` (may improve performance). Default\n is False.\n type : int, optional\n For the generalized problems, this keyword specifies the problem type\n to be solved for ``w`` and ``v`` (only takes 1, 2, 3 as possible\n inputs)::\n\n 1 => a @ v = w @ b @ v\n 2 => a @ b @ v = w @ v\n 3 => b @ a @ v = w @ v\n\n This keyword is ignored for standard problems.\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n subset_by_index : iterable, optional\n If provided, this two-element iterable defines the start and the end\n indices of the desired eigenvalues (ascending order and 0-indexed).\n To return only the second smallest to fifth smallest eigenvalues,\n ``[1, 4]`` is used. ``[n-3, n-1]`` returns the largest three. Only\n available with \"evr\", \"evx\", and \"gvx\" drivers. The entries are\n directly converted to integers via ``int()``.\n subset_by_value : iterable, optional\n If provided, this two-element iterable defines the half-open interval\n ``(a, b]`` that, if any, only the eigenvalues between these values\n are returned. 
Only available with \"evr\", \"evx\", and \"gvx\" drivers. Use\n ``np.inf`` for the unconstrained ends.\n driver : str, optional\n Defines which LAPACK driver should be used. Valid options are \"ev\",\n \"evd\", \"evr\", \"evx\" for standard problems and \"gv\", \"gvd\", \"gvx\" for\n generalized (where b is not None) problems. See the Notes section of\n `scipy.linalg.eigh`.\n turbo : bool, optional\n *Deprecated by ``driver=gvd`` option*. Has no significant effect for\n eigenvalue computations since no eigenvectors are requested.\n\n .. deprecated:: 1.5.0\n\n eigvals : tuple (lo, hi), optional\n *Deprecated by ``subset_by_index`` keyword*. Indexes of the smallest\n and largest (in ascending order) eigenvalues and corresponding\n eigenvectors to be returned: 0 <= lo <= hi <= M-1. If omitted, all\n eigenvalues and eigenvectors are returned.\n\n .. deprecated:: 1.5.0\n\n Returns\n -------\n w : (N,) ndarray\n The ``N`` (``1<=N<=M``) selected eigenvalues, in ascending order, each\n repeated according to its multiplicity.\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge, an error occurred, or\n b matrix is not definite positive. Note that if input matrices are\n not symmetric or Hermitian, no error will be reported but results will\n be wrong.\n\n See Also\n --------\n eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays\n eigvals : eigenvalues of general arrays\n eigvals_banded : eigenvalues for symmetric/Hermitian band matrices\n eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal\n matrices\n\n Notes\n -----\n This function does not check the input array for being Hermitian/symmetric\n in order to allow for representing arrays with only their upper/lower\n triangular parts.\n\n This function serves as a one-liner shorthand for `scipy.linalg.eigh` with\n the option ``eigvals_only=True`` to get the eigenvalues and not the\n eigenvectors. Here it is kept as a legacy convenience. 
It might be\n beneficial to use the main function to have full control and to be a bit\n more pythonic.\n\n Examples\n --------\n For more examples see `scipy.linalg.eigh`.\n\n >>> from scipy.linalg import eigvalsh\n >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])\n >>> w = eigvalsh(A)\n >>> w\n array([-3.74637491, -0.76263923, 6.08502336, 12.42399079])\n\n \"\"\"\n return eigh(a, b=b, lower=lower, eigvals_only=True,\n overwrite_a=overwrite_a, overwrite_b=overwrite_b,\n turbo=turbo, eigvals=eigvals, type=type,\n check_finite=check_finite, subset_by_index=subset_by_index,\n subset_by_value=subset_by_value, driver=driver)\n\n\ndef eigvals_banded(a_band, lower=False, overwrite_a_band=False,\n select='a', select_range=None, check_finite=True):\n \"\"\"\n Solve real symmetric or complex Hermitian band matrix eigenvalue problem.\n\n Find eigenvalues w of a::\n\n a v[:,i] = w[i] v[:,i]\n v.H v = identity\n\n The matrix a is stored in a_band either in lower diagonal or upper\n diagonal ordered form:\n\n a_band[u + i - j, j] == a[i,j] (if upper form; i <= j)\n a_band[ i - j, j] == a[i,j] (if lower form; i >= j)\n\n where u is the number of bands above the diagonal.\n\n Example of a_band (shape of a is (6,6), u=2)::\n\n upper form:\n * * a02 a13 a24 a35\n * a01 a12 a23 a34 a45\n a00 a11 a22 a33 a44 a55\n\n lower form:\n a00 a11 a22 a33 a44 a55\n a10 a21 a32 a43 a54 *\n a20 a31 a42 a53 * *\n\n Cells marked with * are not used.\n\n Parameters\n ----------\n a_band : (u+1, M) array_like\n The bands of the M by M matrix a.\n lower : bool, optional\n Is the matrix in the lower form. (Default is upper form)\n overwrite_a_band : bool, optional\n Discard data in a_band (may enhance performance)\n select : {'a', 'v', 'i'}, optional\n Which eigenvalues to calculate\n\n ====== ========================================\n select calculated\n ====== ========================================\n 'a' All eigenvalues\n 'v' Eigenvalues in the interval (min, max]\n 'i' Eigenvalues with indices min <= i <= max\n ====== ========================================\n select_range : (min, max), optional\n Range of selected eigenvalues\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n w : (M,) ndarray\n The eigenvalues, in ascending order, each repeated according to its\n multiplicity.\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge.\n\n See Also\n --------\n eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian\n band matrices\n eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal\n matrices\n eigvals : eigenvalues of general arrays\n eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays\n eig : eigenvalues and right eigenvectors for non-symmetric arrays\n\n Examples\n --------\n >>> from scipy.linalg import eigvals_banded\n >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]])\n >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]])\n >>> w = eigvals_banded(Ab, lower=True)\n >>> w\n array([-4.26200532, -2.22987175, 3.95222349, 12.53965359])\n \"\"\"\n return eig_banded(a_band, lower=lower, eigvals_only=1,\n overwrite_a_band=overwrite_a_band, select=select,\n select_range=select_range, check_finite=check_finite)\n\n\ndef eigvalsh_tridiagonal(d, e, select='a', 
select_range=None,\n check_finite=True, tol=0., lapack_driver='auto'):\n \"\"\"\n Solve eigenvalue problem for a real symmetric tridiagonal matrix.\n\n Find eigenvalues `w` of ``a``::\n\n a v[:,i] = w[i] v[:,i]\n v.H v = identity\n\n For a real symmetric matrix ``a`` with diagonal elements `d` and\n off-diagonal elements `e`.\n\n Parameters\n ----------\n d : ndarray, shape (ndim,)\n The diagonal elements of the array.\n e : ndarray, shape (ndim-1,)\n The off-diagonal elements of the array.\n select : {'a', 'v', 'i'}, optional\n Which eigenvalues to calculate\n\n ====== ========================================\n select calculated\n ====== ========================================\n 'a' All eigenvalues\n 'v' Eigenvalues in the interval (min, max]\n 'i' Eigenvalues with indices min <= i <= max\n ====== ========================================\n select_range : (min, max), optional\n Range of selected eigenvalues\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n tol : float\n The absolute tolerance to which each eigenvalue is required\n (only used when ``lapack_driver='stebz'``).\n An eigenvalue (or cluster) is considered to have converged if it\n lies in an interval of this width. If <= 0. (default),\n the value ``eps*|a|`` is used where eps is the machine precision,\n and ``|a|`` is the 1-norm of the matrix ``a``.\n lapack_driver : str\n LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf',\n or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'``\n and 'stebz' otherwise. 'sterf' and 'stev' can only be used when\n ``select='a'``.\n\n Returns\n -------\n w : (M,) ndarray\n The eigenvalues, in ascending order, each repeated according to its\n multiplicity.\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge.\n\n See Also\n --------\n eigh_tridiagonal : eigenvalues and right eiegenvectors for\n symmetric/Hermitian tridiagonal matrices\n\n Examples\n --------\n >>> from scipy.linalg import eigvalsh_tridiagonal, eigvalsh\n >>> d = 3*np.ones(4)\n >>> e = -1*np.ones(3)\n >>> w = eigvalsh_tridiagonal(d, e)\n >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)\n >>> w2 = eigvalsh(A) # Verify with other eigenvalue routines\n >>> np.allclose(w - w2, np.zeros(4))\n True\n \"\"\"\n return eigh_tridiagonal(\n d, e, eigvals_only=True, select=select, select_range=select_range,\n check_finite=check_finite, tol=tol, lapack_driver=lapack_driver)\n\n\ndef eigh_tridiagonal(d, e, eigvals_only=False, select='a', select_range=None,\n check_finite=True, tol=0., lapack_driver='auto'):\n \"\"\"\n Solve eigenvalue problem for a real symmetric tridiagonal matrix.\n\n Find eigenvalues `w` and optionally right eigenvectors `v` of ``a``::\n\n a v[:,i] = w[i] v[:,i]\n v.H v = identity\n\n For a real symmetric matrix ``a`` with diagonal elements `d` and\n off-diagonal elements `e`.\n\n Parameters\n ----------\n d : ndarray, shape (ndim,)\n The diagonal elements of the array.\n e : ndarray, shape (ndim-1,)\n The off-diagonal elements of the array.\n select : {'a', 'v', 'i'}, optional\n Which eigenvalues to calculate\n\n ====== ========================================\n select calculated\n ====== ========================================\n 'a' All eigenvalues\n 'v' Eigenvalues in the interval (min, max]\n 'i' Eigenvalues with indices min <= i <= max\n ====== 
========================================\n select_range : (min, max), optional\n Range of selected eigenvalues\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n tol : float\n The absolute tolerance to which each eigenvalue is required\n (only used when 'stebz' is the `lapack_driver`).\n An eigenvalue (or cluster) is considered to have converged if it\n lies in an interval of this width. If <= 0. (default),\n the value ``eps*|a|`` is used where eps is the machine precision,\n and ``|a|`` is the 1-norm of the matrix ``a``.\n lapack_driver : str\n LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf',\n or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'``\n and 'stebz' otherwise. When 'stebz' is used to find the eigenvalues and\n ``eigvals_only=False``, then a second LAPACK call (to ``?STEIN``) is\n used to find the corresponding eigenvectors. 'sterf' can only be\n used when ``eigvals_only=True`` and ``select='a'``. 'stev' can only\n be used when ``select='a'``.\n\n Returns\n -------\n w : (M,) ndarray\n The eigenvalues, in ascending order, each repeated according to its\n multiplicity.\n v : (M, M) ndarray\n The normalized eigenvector corresponding to the eigenvalue ``w[i]`` is\n the column ``v[:,i]``.\n\n Raises\n ------\n LinAlgError\n If eigenvalue computation does not converge.\n\n See Also\n --------\n eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal\n matrices\n eig : eigenvalues and right eigenvectors for non-symmetric arrays\n eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays\n eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian\n band matrices\n\n Notes\n -----\n This function makes use of LAPACK ``S/DSTEMR`` routines.\n\n Examples\n --------\n >>> from scipy.linalg import eigh_tridiagonal\n >>> d = 3*np.ones(4)\n >>> e = -1*np.ones(3)\n >>> w, v = eigh_tridiagonal(d, e)\n >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)\n >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))\n True\n \"\"\"\n d = _asarray_validated(d, check_finite=check_finite)\n e = _asarray_validated(e, check_finite=check_finite)\n for check in (d, e):\n if check.ndim != 1:\n raise ValueError('expected a 1-D array')\n if check.dtype.char in 'GFD': # complex\n raise TypeError('Only real arrays currently supported')\n if d.size != e.size + 1:\n raise ValueError('d (%s) must have one more element than e (%s)'\n % (d.size, e.size))\n select, vl, vu, il, iu, _ = _check_select(\n select, select_range, 0, d.size)\n if not isinstance(lapack_driver, str):\n raise TypeError('lapack_driver must be str')\n drivers = ('auto', 'stemr', 'sterf', 'stebz', 'stev')\n if lapack_driver not in drivers:\n raise ValueError('lapack_driver must be one of %s, got %s'\n % (drivers, lapack_driver))\n if lapack_driver == 'auto':\n lapack_driver = 'stemr' if select == 0 else 'stebz'\n func, = get_lapack_funcs((lapack_driver,), (d, e))\n compute_v = not eigvals_only\n if lapack_driver == 'sterf':\n if select != 0:\n raise ValueError('sterf can only be used when select == \"a\"')\n if not eigvals_only:\n raise ValueError('sterf can only be used when eigvals_only is '\n 'True')\n w, info = func(d, e)\n m = len(w)\n elif lapack_driver == 'stev':\n if select != 0:\n raise ValueError('stev can only be used when select == \"a\"')\n w, v, info = func(d, 
e, compute_v=compute_v)\n m = len(w)\n elif lapack_driver == 'stebz':\n tol = float(tol)\n internal_name = 'stebz'\n stebz, = get_lapack_funcs((internal_name,), (d, e))\n # If getting eigenvectors, needs to be block-ordered (B) instead of\n # matrix-ordered (E), and we will reorder later\n order = 'E' if eigvals_only else 'B'\n m, w, iblock, isplit, info = stebz(d, e, select, vl, vu, il, iu, tol,\n order)\n else: # 'stemr'\n # ?STEMR annoyingly requires size N instead of N-1\n e_ = empty(e.size+1, e.dtype)\n e_[:-1] = e\n stemr_lwork, = get_lapack_funcs(('stemr_lwork',), (d, e))\n lwork, liwork, info = stemr_lwork(d, e_, select, vl, vu, il, iu,\n compute_v=compute_v)\n _check_info(info, 'stemr_lwork')\n m, w, v, info = func(d, e_, select, vl, vu, il, iu,\n compute_v=compute_v, lwork=lwork, liwork=liwork)\n _check_info(info, lapack_driver + ' (eigh_tridiagonal)')\n w = w[:m]\n if eigvals_only:\n return w\n else:\n # Do we still need to compute the eigenvalues?\n if lapack_driver == 'stebz':\n func, = get_lapack_funcs(('stein',), (d, e))\n v, info = func(d, e, w, iblock, isplit)\n _check_info(info, 'stein (eigh_tridiagonal)',\n positive='%d eigenvectors failed to converge')\n # Convert block-order to matrix-order\n order = argsort(w)\n w, v = w[order], v[:, order]\n else:\n v = v[:, :m]\n return w, v\n\n\ndef _check_info(info, driver, positive='did not converge (LAPACK info=%d)'):\n \"\"\"Check info return value.\"\"\"\n if info < 0:\n raise ValueError('illegal value in argument %d of internal %s'\n % (-info, driver))\n if info > 0 and positive:\n raise LinAlgError((\"%s \" + positive) % (driver, info,))\n\n\ndef hessenberg(a, calc_q=False, overwrite_a=False, check_finite=True):\n \"\"\"\n Compute Hessenberg form of a matrix.\n\n The Hessenberg decomposition is::\n\n A = Q H Q^H\n\n where `Q` is unitary/orthogonal and `H` has only zero elements below\n the first sub-diagonal.\n\n Parameters\n ----------\n a : (M, M) array_like\n Matrix to bring into Hessenberg form.\n calc_q : bool, optional\n Whether to compute the transformation matrix. Default is False.\n overwrite_a : bool, optional\n Whether to overwrite `a`; may improve performance.\n Default is False.\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n H : (M, M) ndarray\n Hessenberg form of `a`.\n Q : (M, M) ndarray\n Unitary/orthogonal similarity transformation matrix ``A = Q H Q^H``.\n Only returned if ``calc_q=True``.\n\n Examples\n --------\n >>> from scipy.linalg import hessenberg\n >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])\n >>> H, Q = hessenberg(A, calc_q=True)\n >>> H\n array([[ 2. , -11.65843866, 1.42005301, 0.25349066],\n [ -9.94987437, 14.53535354, -5.31022304, 2.43081618],\n [ 0. , -1.83299243, 0.38969961, -0.51527034],\n [ 0. , 0. 
, -3.83189513, 1.07494686]])\n >>> np.allclose(Q @ H @ Q.conj().T - A, np.zeros((4, 4)))\n True\n \"\"\"\n a1 = _asarray_validated(a, check_finite=check_finite)\n if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):\n raise ValueError('expected square matrix')\n overwrite_a = overwrite_a or (_datacopied(a1, a))\n\n # if 2x2 or smaller: already in Hessenberg\n if a1.shape[0] <= 2:\n if calc_q:\n return a1, eye(a1.shape[0])\n return a1\n\n gehrd, gebal, gehrd_lwork = get_lapack_funcs(('gehrd', 'gebal',\n 'gehrd_lwork'), (a1,))\n ba, lo, hi, pivscale, info = gebal(a1, permute=0, overwrite_a=overwrite_a)\n _check_info(info, 'gebal (hessenberg)', positive=False)\n n = len(a1)\n\n lwork = _compute_lwork(gehrd_lwork, ba.shape[0], lo=lo, hi=hi)\n\n hq, tau, info = gehrd(ba, lo=lo, hi=hi, lwork=lwork, overwrite_a=1)\n _check_info(info, 'gehrd (hessenberg)', positive=False)\n h = numpy.triu(hq, -1)\n if not calc_q:\n return h\n\n # use orghr/unghr to compute q\n orghr, orghr_lwork = get_lapack_funcs(('orghr', 'orghr_lwork'), (a1,))\n lwork = _compute_lwork(orghr_lwork, n, lo=lo, hi=hi)\n\n q, info = orghr(a=hq, tau=tau, lo=lo, hi=hi, lwork=lwork, overwrite_a=1)\n _check_info(info, 'orghr (hessenberg)', positive=False)\n return h, q\n\n\ndef cdf2rdf(w, v):\n \"\"\"\n Converts complex eigenvalues ``w`` and eigenvectors ``v`` to real\n eigenvalues in a block diagonal form ``wr`` and the associated real\n eigenvectors ``vr``, such that::\n\n vr @ wr = X @ vr\n\n continues to hold, where ``X`` is the original array for which ``w`` and\n ``v`` are the eigenvalues and eigenvectors.\n\n .. versionadded:: 1.1.0\n\n Parameters\n ----------\n w : (..., M) array_like\n Complex or real eigenvalues, an array or stack of arrays\n\n Conjugate pairs must not be interleaved, else the wrong result\n will be produced. So ``[1+1j, 1, 1-1j]`` will give a correct result,\n but ``[1+1j, 2+1j, 1-1j, 2-1j]`` will not.\n\n v : (..., M, M) array_like\n Complex or real eigenvectors, a square array or stack of square arrays.\n\n Returns\n -------\n wr : (..., M, M) ndarray\n Real diagonal block form of eigenvalues\n vr : (..., M, M) ndarray\n Real eigenvectors associated with ``wr``\n\n See Also\n --------\n eig : Eigenvalues and right eigenvectors for non-symmetric arrays\n rsf2csf : Convert real Schur form to complex Schur form\n\n Notes\n -----\n ``w``, ``v`` must be the eigenstructure for some *real* matrix ``X``.\n For example, obtained by ``w, v = scipy.linalg.eig(X)`` or\n ``w, v = numpy.linalg.eig(X)`` in which case ``X`` can also represent\n stacked arrays.\n\n .. versionadded:: 1.1.0\n\n Examples\n --------\n >>> X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])\n >>> X\n array([[ 1, 2, 3],\n [ 0, 4, 5],\n [ 0, -5, 4]])\n\n >>> from scipy import linalg\n >>> w, v = linalg.eig(X)\n >>> w\n array([ 1.+0.j, 4.+5.j, 4.-5.j])\n >>> v\n array([[ 1.00000+0.j , -0.01906-0.40016j, -0.01906+0.40016j],\n [ 0.00000+0.j , 0.00000-0.64788j, 0.00000+0.64788j],\n [ 0.00000+0.j , 0.64788+0.j , 0.64788-0.j ]])\n\n >>> wr, vr = linalg.cdf2rdf(w, v)\n >>> wr\n array([[ 1., 0., 0.],\n [ 0., 4., 5.],\n [ 0., -5., 4.]])\n >>> vr\n array([[ 1. , 0.40016, -0.01906],\n [ 0. , 0.64788, 0. ],\n [ 0. , 0. , 0.64788]])\n\n >>> vr @ wr\n array([[ 1. , 1.69593, 1.9246 ],\n [ 0. , 2.59153, 3.23942],\n [ 0. , -3.23942, 2.59153]])\n >>> X @ vr\n array([[ 1. , 1.69593, 1.9246 ],\n [ 0. , 2.59153, 3.23942],\n [ 0. 
, -3.23942, 2.59153]])\n \"\"\"\n w, v = _asarray_validated(w), _asarray_validated(v)\n\n # check dimensions\n if w.ndim < 1:\n raise ValueError('expected w to be at least 1D')\n if v.ndim < 2:\n raise ValueError('expected v to be at least 2D')\n if v.ndim != w.ndim + 1:\n raise ValueError('expected eigenvectors array to have exactly one '\n 'dimension more than eigenvalues array')\n\n # check shapes\n n = w.shape[-1]\n M = w.shape[:-1]\n if v.shape[-2] != v.shape[-1]:\n raise ValueError('expected v to be a square matrix or stacked square '\n 'matrices: v.shape[-2] = v.shape[-1]')\n if v.shape[-1] != n:\n raise ValueError('expected the same number of eigenvalues as '\n 'eigenvectors')\n\n # get indices for each first pair of complex eigenvalues\n complex_mask = iscomplex(w)\n n_complex = complex_mask.sum(axis=-1)\n\n # check if all complex eigenvalues have conjugate pairs\n if not (n_complex % 2 == 0).all():\n raise ValueError('expected complex-conjugate pairs of eigenvalues')\n\n # find complex indices\n idx = nonzero(complex_mask)\n idx_stack = idx[:-1]\n idx_elem = idx[-1]\n\n # filter them to conjugate indices, assuming pairs are not interleaved\n j = idx_elem[0::2]\n k = idx_elem[1::2]\n stack_ind = ()\n for i in idx_stack:\n # should never happen, assuming nonzero orders by the last axis\n assert (i[0::2] == i[1::2]).all(),\\\n \"Conjugate pair spanned different arrays!\"\n stack_ind += (i[0::2],)\n\n # all eigenvalues to diagonal form\n wr = zeros(M + (n, n), dtype=w.real.dtype)\n di = range(n)\n wr[..., di, di] = w.real\n\n # complex eigenvalues to real block diagonal form\n wr[stack_ind + (j, k)] = w[stack_ind + (j,)].imag\n wr[stack_ind + (k, j)] = w[stack_ind + (k,)].imag\n\n # compute real eigenvectors associated with real block diagonal eigenvalues\n u = zeros(M + (n, n), dtype=numpy.cdouble)\n u[..., di, di] = 1.0\n u[stack_ind + (j, j)] = 0.5j\n u[stack_ind + (j, k)] = 0.5\n u[stack_ind + (k, j)] = -0.5j\n u[stack_ind + (k, k)] = 0.5\n\n # multipy matrices v and u (equivalent to v @ u)\n vr = einsum('...ij,...jk->...ik', v, u).real\n\n return wr, vr\n",
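The entry above reproduces SciPy's symmetric/banded/tridiagonal eigenvalue helpers (``eig_banded``, ``eigvalsh``, ``eigvalsh_tridiagonal``, ``hessenberg``, ``cdf2rdf``). The following is a minimal consistency sketch of my own, not part of the vendored SciPy file, showing how those public routines relate to one another; it assumes only that ``numpy`` and ``scipy.linalg`` are importable with their documented signatures.

# Hedged usage sketch (added for illustration, not part of the corpus entry above):
# cross-check the banded, tridiagonal, and dense symmetric eigensolvers, then
# verify the Hessenberg and cdf2rdf decompositions on small matrices.
import numpy as np
from scipy.linalg import (
    eig, eigvalsh, eig_banded, eigvalsh_tridiagonal, hessenberg, cdf2rdf,
)

rng = np.random.default_rng(0)

# Symmetric tridiagonal matrix: diagonal d, off-diagonal e.
n = 6
d = rng.standard_normal(n)
e = rng.standard_normal(n - 1)
A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)

# 1) Dedicated tridiagonal solver vs. the dense symmetric solver.
w_tri = eigvalsh_tridiagonal(d, e)
assert np.allclose(w_tri, eigvalsh(A))

# 2) Same matrix in lower banded storage (one band below the diagonal).
Ab = np.zeros((2, n))
Ab[0] = d          # main diagonal
Ab[1, :-1] = e     # subdiagonal; last slot in lower form is unused
w_band, v_band = eig_banded(Ab, lower=True)
assert np.allclose(w_band, w_tri)
assert np.allclose(A @ v_band, v_band @ np.diag(w_band))

# 3) Hessenberg decomposition of a general matrix: A2 = Q H Q^H,
#    with H zero below the first subdiagonal.
A2 = rng.standard_normal((5, 5))
H, Q = hessenberg(A2, calc_q=True)
assert np.allclose(Q @ H @ Q.conj().T, A2)
assert np.allclose(np.tril(H, -2), 0.0)

# 4) Complex-conjugate eigenpairs of a real matrix converted to a real
#    block-diagonal form: X @ vr == vr @ wr.
X = np.array([[1., 2., 3.], [0., 4., 5.], [0., -5., 4.]])
w, v = eig(X)
wr, vr = cdf2rdf(w, v)
assert np.allclose(X @ vr, vr @ wr)
print("all eigenvalue-routine checks passed")

The checks deliberately use only ascending-order eigenvalue output (the documented behaviour of the symmetric drivers), so the arrays can be compared directly without sorting.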
"import operator\n\nimport numpy as np\nfrom numpy.core.multiarray import normalize_axis_index\nfrom scipy.linalg import (get_lapack_funcs, LinAlgError,\n cholesky_banded, cho_solve_banded,\n solve, solve_banded)\nfrom . import _bspl\nfrom . import _fitpack_impl\nfrom . import _fitpack as _dierckx\nfrom scipy._lib._util import prod\nfrom scipy.special import poch\nfrom scipy.sparse import csr_array\nfrom itertools import combinations\n\n__all__ = [\"BSpline\", \"make_interp_spline\", \"make_lsq_spline\"]\n\n\ndef _get_dtype(dtype):\n \"\"\"Return np.complex128 for complex dtypes, np.float64 otherwise.\"\"\"\n if np.issubdtype(dtype, np.complexfloating):\n return np.complex_\n else:\n return np.float_\n\n\ndef _as_float_array(x, check_finite=False):\n \"\"\"Convert the input into a C contiguous float array.\n\n NB: Upcasts half- and single-precision floats to double precision.\n \"\"\"\n x = np.ascontiguousarray(x)\n dtyp = _get_dtype(x.dtype)\n x = x.astype(dtyp, copy=False)\n if check_finite and not np.isfinite(x).all():\n raise ValueError(\"Array must not contain infs or nans.\")\n return x\n\n\ndef _dual_poly(j, k, t, y):\n \"\"\"\n Dual polynomial of the B-spline B_{j,k,t} -\n polynomial which is associated with B_{j,k,t}:\n $p_{j,k}(y) = (y - t_{j+1})(y - t_{j+2})...(y - t_{j+k})$\n \"\"\"\n if k == 0:\n return 1\n return np.prod([(y - t[j + i]) for i in range(1, k + 1)])\n\n\ndef _diff_dual_poly(j, k, y, d, t):\n \"\"\"\n d-th derivative of the dual polynomial $p_{j,k}(y)$\n \"\"\"\n if d == 0:\n return _dual_poly(j, k, t, y)\n if d == k:\n return poch(1, k)\n comb = list(combinations(range(j + 1, j + k + 1), d))\n res = 0\n for i in range(len(comb) * len(comb[0])):\n res += np.prod([(y - t[j + p]) for p in range(1, k + 1)\n if (j + p) not in comb[i//d]])\n return res\n\n\nclass BSpline:\n r\"\"\"Univariate spline in the B-spline basis.\n\n .. math::\n\n S(x) = \\sum_{j=0}^{n-1} c_j B_{j, k; t}(x)\n\n where :math:`B_{j, k; t}` are B-spline basis functions of degree `k`\n and knots `t`.\n\n Parameters\n ----------\n t : ndarray, shape (n+k+1,)\n knots\n c : ndarray, shape (>=n, ...)\n spline coefficients\n k : int\n B-spline degree\n extrapolate : bool or 'periodic', optional\n whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,\n or to return nans.\n If True, extrapolates the first and last polynomial pieces of b-spline\n functions active on the base interval.\n If 'periodic', periodic extrapolation is used.\n Default is True.\n axis : int, optional\n Interpolation axis. Default is zero.\n\n Attributes\n ----------\n t : ndarray\n knot vector\n c : ndarray\n spline coefficients\n k : int\n spline degree\n extrapolate : bool\n If True, extrapolates the first and last polynomial pieces of b-spline\n functions active on the base interval.\n axis : int\n Interpolation axis.\n tck : tuple\n A read-only equivalent of ``(self.t, self.c, self.k)``\n\n Methods\n -------\n __call__\n basis_element\n derivative\n antiderivative\n integrate\n construct_fast\n design_matrix\n from_power_basis\n\n Notes\n -----\n B-spline basis elements are defined via\n\n .. math::\n\n B_{i, 0}(x) = 1, \\textrm{if $t_i \\le x < t_{i+1}$, otherwise $0$,}\n\n B_{i, k}(x) = \\frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)\n + \\frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)\n\n **Implementation details**\n\n - At least ``k+1`` coefficients are required for a spline of degree `k`,\n so that ``n >= k+1``. 
Additional coefficients, ``c[j]`` with\n ``j > n``, are ignored.\n\n - B-spline basis elements of degree `k` form a partition of unity on the\n *base interval*, ``t[k] <= x <= t[n]``.\n\n\n Examples\n --------\n\n Translating the recursive definition of B-splines into Python code, we have:\n\n >>> def B(x, k, i, t):\n ... if k == 0:\n ... return 1.0 if t[i] <= x < t[i+1] else 0.0\n ... if t[i+k] == t[i]:\n ... c1 = 0.0\n ... else:\n ... c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)\n ... if t[i+k+1] == t[i+1]:\n ... c2 = 0.0\n ... else:\n ... c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)\n ... return c1 + c2\n\n >>> def bspline(x, t, c, k):\n ... n = len(t) - k - 1\n ... assert (n >= k+1) and (len(c) >= n)\n ... return sum(c[i] * B(x, k, i, t) for i in range(n))\n\n Note that this is an inefficient (if straightforward) way to\n evaluate B-splines --- this spline class does it in an equivalent,\n but much more efficient way.\n\n Here we construct a quadratic spline function on the base interval\n ``2 <= x <= 4`` and compare with the naive way of evaluating the spline:\n\n >>> from scipy.interpolate import BSpline\n >>> k = 2\n >>> t = [0, 1, 2, 3, 4, 5, 6]\n >>> c = [-1, 2, 0, -1]\n >>> spl = BSpline(t, c, k)\n >>> spl(2.5)\n array(1.375)\n >>> bspline(2.5, t, c, k)\n 1.375\n\n Note that outside of the base interval results differ. This is because\n `BSpline` extrapolates the first and last polynomial pieces of B-spline\n functions active on the base interval.\n\n >>> import matplotlib.pyplot as plt\n >>> fig, ax = plt.subplots()\n >>> xx = np.linspace(1.5, 4.5, 50)\n >>> ax.plot(xx, [bspline(x, t, c ,k) for x in xx], 'r-', lw=3, label='naive')\n >>> ax.plot(xx, spl(xx), 'b-', lw=4, alpha=0.7, label='BSpline')\n >>> ax.grid(True)\n >>> ax.legend(loc='best')\n >>> plt.show()\n\n\n References\n ----------\n .. [1] Tom Lyche and Knut Morken, Spline methods,\n http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/\n .. 
[2] Carl de Boor, A practical guide to splines, Springer, 2001.\n\n \"\"\"\n def __init__(self, t, c, k, extrapolate=True, axis=0):\n super().__init__()\n\n self.k = operator.index(k)\n self.c = np.asarray(c)\n self.t = np.ascontiguousarray(t, dtype=np.float64)\n\n if extrapolate == 'periodic':\n self.extrapolate = extrapolate\n else:\n self.extrapolate = bool(extrapolate)\n\n n = self.t.shape[0] - self.k - 1\n\n axis = normalize_axis_index(axis, self.c.ndim)\n\n # Note that the normalized axis is stored in the object.\n self.axis = axis\n if axis != 0:\n # roll the interpolation axis to be the first one in self.c\n # More specifically, the target shape for self.c is (n, ...),\n # and axis !=0 means that we have c.shape (..., n, ...)\n # ^\n # axis\n self.c = np.moveaxis(self.c, axis, 0)\n\n if k < 0:\n raise ValueError(\"Spline order cannot be negative.\")\n if self.t.ndim != 1:\n raise ValueError(\"Knot vector must be one-dimensional.\")\n if n < self.k + 1:\n raise ValueError(\"Need at least %d knots for degree %d\" %\n (2*k + 2, k))\n if (np.diff(self.t) < 0).any():\n raise ValueError(\"Knots must be in a non-decreasing order.\")\n if len(np.unique(self.t[k:n+1])) < 2:\n raise ValueError(\"Need at least two internal knots.\")\n if not np.isfinite(self.t).all():\n raise ValueError(\"Knots should not have nans or infs.\")\n if self.c.ndim < 1:\n raise ValueError(\"Coefficients must be at least 1-dimensional.\")\n if self.c.shape[0] < n:\n raise ValueError(\"Knots, coefficients and degree are inconsistent.\")\n\n dt = _get_dtype(self.c.dtype)\n self.c = np.ascontiguousarray(self.c, dtype=dt)\n\n @classmethod\n def construct_fast(cls, t, c, k, extrapolate=True, axis=0):\n \"\"\"Construct a spline without making checks.\n\n Accepts same parameters as the regular constructor. Input arrays\n `t` and `c` must of correct shape and dtype.\n \"\"\"\n self = object.__new__(cls)\n self.t, self.c, self.k = t, c, k\n self.extrapolate = extrapolate\n self.axis = axis\n return self\n\n @property\n def tck(self):\n \"\"\"Equivalent to ``(self.t, self.c, self.k)`` (read-only).\n \"\"\"\n return self.t, self.c, self.k\n\n @classmethod\n def basis_element(cls, t, extrapolate=True):\n \"\"\"Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.\n\n Parameters\n ----------\n t : ndarray, shape (k+2,)\n internal knots\n extrapolate : bool or 'periodic', optional\n whether to extrapolate beyond the base interval, ``t[0] .. t[k+1]``,\n or to return nans.\n If 'periodic', periodic extrapolation is used.\n Default is True.\n\n Returns\n -------\n basis_element : callable\n A callable representing a B-spline basis element for the knot\n vector `t`.\n\n Notes\n -----\n The degree of the B-spline, `k`, is inferred from the length of `t` as\n ``len(t)-2``. The knot vector is constructed by appending and prepending\n ``k+1`` elements to internal knots `t`.\n\n Examples\n --------\n\n Construct a cubic B-spline:\n\n >>> from scipy.interpolate import BSpline\n >>> b = BSpline.basis_element([0, 1, 2, 3, 4])\n >>> k = b.k\n >>> b.t[k:-k]\n array([ 0., 1., 2., 3., 4.])\n >>> k\n 3\n\n Construct a quadratic B-spline on ``[0, 1, 1, 2]``, and compare\n to its explicit form:\n\n >>> t = [-1, 0, 1, 1, 2]\n >>> b = BSpline.basis_element(t[1:])\n >>> def f(x):\n ... return np.where(x < 1, x*x, (2. 
- x)**2)\n\n >>> import matplotlib.pyplot as plt\n >>> fig, ax = plt.subplots()\n >>> x = np.linspace(0, 2, 51)\n >>> ax.plot(x, b(x), 'g', lw=3)\n >>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4)\n >>> ax.grid(True)\n >>> plt.show()\n\n \"\"\"\n k = len(t) - 2\n t = _as_float_array(t)\n t = np.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k]\n c = np.zeros_like(t)\n c[k] = 1.\n return cls.construct_fast(t, c, k, extrapolate)\n\n @classmethod\n def design_matrix(cls, x, t, k):\n \"\"\"\n Returns a design matrix as a CSR format sparse array.\n\n Parameters\n ----------\n x : array_like, shape (n,)\n Points to evaluate the spline at.\n t : array_like, shape (nt,)\n Sorted 1D array of knots.\n k : int\n B-spline degree.\n\n Returns\n -------\n design_matrix : `csr_array` object\n Sparse matrix in CSR format where in each row all the basis\n elements are evaluated at the certain point (first row - x[0],\n ..., last row - x[-1]).\n\n Examples\n --------\n Construct a design matrix for a B-spline\n\n >>> from scipy.interpolate import make_interp_spline, BSpline\n >>> x = np.linspace(0, np.pi * 2, 4)\n >>> y = np.sin(x)\n >>> k = 3\n >>> bspl = make_interp_spline(x, y, k=k)\n >>> design_matrix = bspl.design_matrix(x, bspl.t, k)\n >>> design_matrix.toarray()\n [[1. , 0. , 0. , 0. ],\n [0.2962963 , 0.44444444, 0.22222222, 0.03703704],\n [0.03703704, 0.22222222, 0.44444444, 0.2962963 ],\n [0. , 0. , 0. , 1. ]]\n\n Construct a design matrix for some vector of knots\n\n >>> k = 2\n >>> t = [-1, 0, 1, 2, 3, 4, 5, 6]\n >>> x = [1, 2, 3, 4]\n >>> design_matrix = BSpline.design_matrix(x, t, k).toarray()\n >>> design_matrix\n [[0.5, 0.5, 0. , 0. , 0. ],\n [0. , 0.5, 0.5, 0. , 0. ],\n [0. , 0. , 0.5, 0.5, 0. ],\n [0. , 0. , 0. , 0.5, 0.5]]\n\n This result is equivalent to the one created in the sparse format\n\n >>> c = np.eye(len(t) - k - 1)\n >>> design_matrix_gh = BSpline(t, c, k)(x)\n >>> np.allclose(design_matrix, design_matrix_gh, atol=1e-14)\n True\n\n Notes\n -----\n .. versionadded:: 1.8.0\n\n In each row of the design matrix all the basis elements are evaluated\n at the certain point (first row - x[0], ..., last row - x[-1]).\n\n `nt` is a length of the vector of knots: as far as there are\n `nt - k - 1` basis elements, `nt` should be not less than `2 * k + 2`\n to have at least `k + 1` basis element.\n\n Out of bounds `x` raises a ValueError.\n \"\"\"\n x = _as_float_array(x, True)\n t = _as_float_array(t, True)\n\n if t.ndim != 1 or np.any(t[1:] < t[:-1]):\n raise ValueError(f\"Expect t to be a 1-D sorted array_like, but \"\n f\"got t={t}.\")\n # There are `nt - k - 1` basis elements in a BSpline built on the\n # vector of knots with length `nt`, so to have at least `k + 1` basis\n # element we need to have at least `2 * k + 2` elements in the vector\n # of knots.\n if len(t) < 2 * k + 2:\n raise ValueError(f\"Length t is not enough for k={k}.\")\n # Checks from `find_interval` function\n if (min(x) < t[k]) or (max(x) > t[t.shape[0] - k - 1]):\n raise ValueError(f'Out of bounds w/ x = {x}.')\n\n n, nt = x.shape[0], t.shape[0]\n data, idx = _bspl._make_design_matrix(x, t, k)\n return csr_array((data, idx), (n, nt - k - 1))\n\n def __call__(self, x, nu=0, extrapolate=None):\n \"\"\"\n Evaluate a spline function.\n\n Parameters\n ----------\n x : array_like\n points to evaluate the spline at.\n nu : int, optional\n derivative to evaluate (default is 0).\n extrapolate : bool or 'periodic', optional\n whether to extrapolate based on the first and last intervals\n or return nans. 
If 'periodic', periodic extrapolation is used.\n Default is `self.extrapolate`.\n\n Returns\n -------\n y : array_like\n Shape is determined by replacing the interpolation axis\n in the coefficient array with the shape of `x`.\n\n \"\"\"\n if extrapolate is None:\n extrapolate = self.extrapolate\n x = np.asarray(x)\n x_shape, x_ndim = x.shape, x.ndim\n x = np.ascontiguousarray(x.ravel(), dtype=np.float_)\n\n # With periodic extrapolation we map x to the segment\n # [self.t[k], self.t[n]].\n if extrapolate == 'periodic':\n n = self.t.size - self.k - 1\n x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] -\n self.t[self.k])\n extrapolate = False\n\n out = np.empty((len(x), prod(self.c.shape[1:])), dtype=self.c.dtype)\n self._ensure_c_contiguous()\n self._evaluate(x, nu, extrapolate, out)\n out = out.reshape(x_shape + self.c.shape[1:])\n if self.axis != 0:\n # transpose to move the calculated values to the interpolation axis\n l = list(range(out.ndim))\n l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]\n out = out.transpose(l)\n return out\n\n def _evaluate(self, xp, nu, extrapolate, out):\n _bspl.evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1),\n self.k, xp, nu, extrapolate, out)\n\n def _ensure_c_contiguous(self):\n \"\"\"\n c and t may be modified by the user. The Cython code expects\n that they are C contiguous.\n\n \"\"\"\n if not self.t.flags.c_contiguous:\n self.t = self.t.copy()\n if not self.c.flags.c_contiguous:\n self.c = self.c.copy()\n\n def derivative(self, nu=1):\n \"\"\"Return a B-spline representing the derivative.\n\n Parameters\n ----------\n nu : int, optional\n Derivative order.\n Default is 1.\n\n Returns\n -------\n b : BSpline object\n A new instance representing the derivative.\n\n See Also\n --------\n splder, splantider\n\n \"\"\"\n c = self.c\n # pad the c array if needed\n ct = len(self.t) - len(c)\n if ct > 0:\n c = np.r_[c, np.zeros((ct,) + c.shape[1:])]\n tck = _fitpack_impl.splder((self.t, c, self.k), nu)\n return self.construct_fast(*tck, extrapolate=self.extrapolate,\n axis=self.axis)\n\n def antiderivative(self, nu=1):\n \"\"\"Return a B-spline representing the antiderivative.\n\n Parameters\n ----------\n nu : int, optional\n Antiderivative order. Default is 1.\n\n Returns\n -------\n b : BSpline object\n A new instance representing the antiderivative.\n\n Notes\n -----\n If antiderivative is computed and ``self.extrapolate='periodic'``,\n it will be set to False for the returned instance. This is done because\n the antiderivative is no longer periodic and its correct evaluation\n outside of the initially given x interval is difficult.\n\n See Also\n --------\n splder, splantider\n\n \"\"\"\n c = self.c\n # pad the c array if needed\n ct = len(self.t) - len(c)\n if ct > 0:\n c = np.r_[c, np.zeros((ct,) + c.shape[1:])]\n tck = _fitpack_impl.splantider((self.t, c, self.k), nu)\n\n if self.extrapolate == 'periodic':\n extrapolate = False\n else:\n extrapolate = self.extrapolate\n\n return self.construct_fast(*tck, extrapolate=extrapolate,\n axis=self.axis)\n\n def integrate(self, a, b, extrapolate=None):\n \"\"\"Compute a definite integral of the spline.\n\n Parameters\n ----------\n a : float\n Lower limit of integration.\n b : float\n Upper limit of integration.\n extrapolate : bool or 'periodic', optional\n whether to extrapolate beyond the base interval,\n ``t[k] .. t[-k-1]``, or take the spline to be zero outside of the\n base interval. 
If 'periodic', periodic extrapolation is used.\n If None (default), use `self.extrapolate`.\n\n Returns\n -------\n I : array_like\n Definite integral of the spline over the interval ``[a, b]``.\n\n Examples\n --------\n Construct the linear spline ``x if x < 1 else 2 - x`` on the base\n interval :math:`[0, 2]`, and integrate it\n\n >>> from scipy.interpolate import BSpline\n >>> b = BSpline.basis_element([0, 1, 2])\n >>> b.integrate(0, 1)\n array(0.5)\n\n If the integration limits are outside of the base interval, the result\n is controlled by the `extrapolate` parameter\n\n >>> b.integrate(-1, 1)\n array(0.0)\n >>> b.integrate(-1, 1, extrapolate=False)\n array(0.5)\n\n >>> import matplotlib.pyplot as plt\n >>> fig, ax = plt.subplots()\n >>> ax.grid(True)\n >>> ax.axvline(0, c='r', lw=5, alpha=0.5) # base interval\n >>> ax.axvline(2, c='r', lw=5, alpha=0.5)\n >>> xx = [-1, 1, 2]\n >>> ax.plot(xx, b(xx))\n >>> plt.show()\n\n \"\"\"\n if extrapolate is None:\n extrapolate = self.extrapolate\n\n # Prepare self.t and self.c.\n self._ensure_c_contiguous()\n\n # Swap integration bounds if needed.\n sign = 1\n if b < a:\n a, b = b, a\n sign = -1\n n = self.t.size - self.k - 1\n\n if extrapolate != \"periodic\" and not extrapolate:\n # Shrink the integration interval, if needed.\n a = max(a, self.t[self.k])\n b = min(b, self.t[n])\n\n if self.c.ndim == 1:\n # Fast path: use FITPACK's routine\n # (cf _fitpack_impl.splint).\n t, c, k = self.tck\n integral, wrk = _dierckx._splint(t, c, k, a, b)\n return integral * sign\n\n out = np.empty((2, prod(self.c.shape[1:])), dtype=self.c.dtype)\n\n # Compute the antiderivative.\n c = self.c\n ct = len(self.t) - len(c)\n if ct > 0:\n c = np.r_[c, np.zeros((ct,) + c.shape[1:])]\n ta, ca, ka = _fitpack_impl.splantider((self.t, c, self.k), 1)\n\n if extrapolate == 'periodic':\n # Split the integral into the part over period (can be several\n # of them) and the remaining part.\n\n ts, te = self.t[self.k], self.t[n]\n period = te - ts\n interval = b - a\n n_periods, left = divmod(interval, period)\n\n if n_periods > 0:\n # Evaluate the difference of antiderivatives.\n x = np.asarray([ts, te], dtype=np.float_)\n _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),\n ka, x, 0, False, out)\n integral = out[1] - out[0]\n integral *= n_periods\n else:\n integral = np.zeros((1, prod(self.c.shape[1:])),\n dtype=self.c.dtype)\n\n # Map a to [ts, te], b is always a + left.\n a = ts + (a - ts) % period\n b = a + left\n\n # If b <= te then we need to integrate over [a, b], otherwise\n # over [a, te] and from xs to what is remained.\n if b <= te:\n x = np.asarray([a, b], dtype=np.float_)\n _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),\n ka, x, 0, False, out)\n integral += out[1] - out[0]\n else:\n x = np.asarray([a, te], dtype=np.float_)\n _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),\n ka, x, 0, False, out)\n integral += out[1] - out[0]\n\n x = np.asarray([ts, ts + b - te], dtype=np.float_)\n _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),\n ka, x, 0, False, out)\n integral += out[1] - out[0]\n else:\n # Evaluate the difference of antiderivatives.\n x = np.asarray([a, b], dtype=np.float_)\n _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),\n ka, x, 0, extrapolate, out)\n integral = out[1] - out[0]\n\n integral *= sign\n return integral.reshape(ca.shape[1:])\n\n @classmethod\n def from_power_basis(cls, pp, bc_type='not-a-knot'):\n r\"\"\"\n Construct a polynomial in the B-spline basis\n from a piecewise polynomial in the power basis.\n\n For 
now, accepts ``CubicSpline`` instances only.\n\n Parameters\n ----------\n pp : CubicSpline\n A piecewise polynomial in the power basis, as created\n by ``CubicSpline``\n bc_type : string, optional\n Boundary condition type as in ``CubicSpline``: one of the\n ``not-a-knot``, ``natural``, ``clamped``, or ``periodic``.\n Necessary for construction an instance of ``BSpline`` class.\n Default is ``not-a-knot``.\n\n Returns\n -------\n b : BSpline object\n A new instance representing the initial polynomial\n in the B-spline basis.\n\n Notes\n -----\n .. versionadded:: 1.8.0\n\n Accepts only ``CubicSpline`` instances for now.\n\n The algorithm follows from differentiation\n the Marsden's identity [1]: each of coefficients of spline\n interpolation function in the B-spline basis is computed as follows:\n\n .. math::\n\n c_j = \\sum_{m=0}^{k} \\frac{(k-m)!}{k!}\n c_{m,i} (-1)^{k-m} D^m p_{j,k}(x_i)\n\n :math:`c_{m, i}` - a coefficient of CubicSpline,\n :math:`D^m p_{j, k}(x_i)` - an m-th defivative of a dual polynomial\n in :math:`x_i`.\n\n ``k`` always equals 3 for now.\n\n First ``n - 2`` coefficients are computed in :math:`x_i = x_j`, e.g.\n\n .. math::\n\n c_1 = \\sum_{m=0}^{k} \\frac{(k-1)!}{k!} c_{m,1} D^m p_{j,3}(x_1)\n\n Last ``nod + 2`` coefficients are computed in ``x[-2]``,\n ``nod`` - number of derivatives at the ends.\n\n For example, consider :math:`x = [0, 1, 2, 3, 4]`,\n :math:`y = [1, 1, 1, 1, 1]` and bc_type = ``natural``\n\n The coefficients of CubicSpline in the power basis:\n\n :math:`[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]`\n\n The knot vector: :math:`t = [0, 0, 0, 0, 1, 2, 3, 4, 4, 4, 4]`\n\n In this case\n\n .. math::\n\n c_j = \\frac{0!}{k!} c_{3, i} k! = c_{3, i} = 1,~j = 0, ..., 6\n\n References\n ----------\n .. [1] Tom Lyche and Knut Morken, Spline Methods, 2005, Section 3.1.2\n\n \"\"\"\n from ._cubic import CubicSpline\n if not isinstance(pp, CubicSpline):\n raise NotImplementedError(\"Only CubicSpline objects are accepted\"\n \"for now. Got %s instead.\" % type(pp))\n x = pp.x\n coef = pp.c\n k = pp.c.shape[0] - 1\n n = x.shape[0]\n\n if bc_type == 'not-a-knot':\n t = _not_a_knot(x, k)\n elif bc_type == 'natural' or bc_type == 'clamped':\n t = _augknt(x, k)\n elif bc_type == 'periodic':\n t = _periodic_knots(x, k)\n else:\n raise TypeError('Unknown boundary condition: %s' % bc_type)\n\n nod = t.shape[0] - (n + k + 1) # number of derivatives at the ends\n c = np.zeros(n + nod, dtype=pp.c.dtype)\n for m in range(k + 1):\n for i in range(n - 2):\n c[i] += poch(k + 1, -m) * coef[m, i]\\\n * np.power(-1, k - m)\\\n * _diff_dual_poly(i, k, x[i], m, t)\n for j in range(n - 2, n + nod):\n c[j] += poch(k + 1, -m) * coef[m, n - 2]\\\n * np.power(-1, k - m)\\\n * _diff_dual_poly(j, k, x[n - 2], m, t)\n return cls.construct_fast(t, c, k, pp.extrapolate, pp.axis)\n\n\n#################################\n# Interpolating spline helpers #\n#################################\n\ndef _not_a_knot(x, k):\n \"\"\"Given data x, construct the knot vector w/ not-a-knot BC.\n cf de Boor, XIII(12).\"\"\"\n x = np.asarray(x)\n if k % 2 != 1:\n raise ValueError(\"Odd degree for now only. 
Got %s.\" % k)\n\n m = (k - 1) // 2\n t = x[m+1:-m-1]\n t = np.r_[(x[0],)*(k+1), t, (x[-1],)*(k+1)]\n return t\n\n\ndef _augknt(x, k):\n \"\"\"Construct a knot vector appropriate for the order-k interpolation.\"\"\"\n return np.r_[(x[0],)*k, x, (x[-1],)*k]\n\n\ndef _convert_string_aliases(deriv, target_shape):\n if isinstance(deriv, str):\n if deriv == \"clamped\":\n deriv = [(1, np.zeros(target_shape))]\n elif deriv == \"natural\":\n deriv = [(2, np.zeros(target_shape))]\n else:\n raise ValueError(\"Unknown boundary condition : %s\" % deriv)\n return deriv\n\n\ndef _process_deriv_spec(deriv):\n if deriv is not None:\n try:\n ords, vals = zip(*deriv)\n except TypeError as e:\n msg = (\"Derivatives, `bc_type`, should be specified as a pair of \"\n \"iterables of pairs of (order, value).\")\n raise ValueError(msg) from e\n else:\n ords, vals = [], []\n return np.atleast_1d(ords, vals)\n\ndef _woodbury_algorithm(A, ur, ll, b, k):\n '''\n Solve a cyclic banded linear system with upper right\n and lower blocks of size ``(k-1) / 2`` using\n the Woodbury formula\n \n Parameters\n ----------\n A : 2-D array, shape(k, n)\n Matrix of diagonals of original matrix(see \n ``solve_banded`` documentation).\n ur : 2-D array, shape(bs, bs)\n Upper right block matrix.\n ll : 2-D array, shape(bs, bs)\n Lower left block matrix.\n b : 1-D array, shape(n,)\n Vector of constant terms of the system of linear equations.\n k : int\n B-spline degree.\n \n Returns\n -------\n c : 1-D array, shape(n,)\n Solution of the original system of linear equations.\n \n Notes\n -----\n This algorithm works only for systems with banded matrix A plus\n a correction term U @ V.T, where the matrix U @ V.T gives upper right\n and lower left block of A\n The system is solved with the following steps:\n 1. New systems of linear equations are constructed:\n A @ z_i = u_i,\n u_i - columnn vector of U,\n i = 1, ..., k - 1\n 2. Matrix Z is formed from vectors z_i:\n Z = [ z_1 | z_2 | ... | z_{k - 1} ]\n 3. Matrix H = (1 + V.T @ Z)^{-1}\n 4. The system A' @ y = b is solved\n 5. x = y - Z @ (H @ V.T @ y)\n Also, ``n`` should be greater than ``k``, otherwise corner block\n elements will intersect with diagonals.\n\n Examples\n --------\n Consider the case of n = 8, k = 5 (size of blocks - 2 x 2).\n The matrix of a system: U: V:\n x x x * * a b a b 0 0 0 0 1 0\n x x x x * * c 0 c 0 0 0 0 0 1\n x x x x x * * 0 0 0 0 0 0 0 0\n * x x x x x * 0 0 0 0 0 0 0 0\n * * x x x x x 0 0 0 0 0 0 0 0\n d * * x x x x 0 0 d 0 1 0 0 0\n e f * * x x x 0 0 e f 0 1 0 0\n\n References\n ----------\n .. [1] William H. Press, Saul A. Teukolsky, William T. Vetterling\n and Brian P. 
Flannery, Numerical Recipes, 2007, Section 2.7.3\n\n '''\n k_mod = k - k % 2\n bs = int((k - 1) / 2) + (k + 1) % 2\n\n n = A.shape[1] + 1\n U = np.zeros((n - 1, k_mod))\n VT = np.zeros((k_mod, n - 1)) # V transpose\n\n # upper right block \n U[:bs, :bs] = ur\n VT[np.arange(bs), np.arange(bs) - bs] = 1\n\n # lower left block \n U[-bs:, -bs:] = ll\n VT[np.arange(bs) - bs, np.arange(bs)] = 1\n \n Z = solve_banded((bs, bs), A, U)\n\n H = solve(np.identity(k_mod) + VT @ Z, np.identity(k_mod))\n\n y = solve_banded((bs, bs), A, b)\n c = y - Z @ (H @ (VT @ y))\n\n return c\n\ndef _periodic_knots(x, k):\n '''\n returns vector of nodes on circle\n '''\n xc = np.copy(x)\n n = len(xc)\n if k % 2 == 0:\n dx = np.diff(xc)\n xc[1: -1] -= dx[:-1] / 2 \n dx = np.diff(xc)\n t = np.zeros(n + 2 * k)\n t[k: -k] = xc\n for i in range(0, k):\n # filling first `k` elements in descending order\n t[k - i - 1] = t[k - i] - dx[-(i % (n - 1)) - 1]\n # filling last `k` elements in ascending order\n t[-k + i] = t[-k + i - 1] + dx[i % (n - 1)]\n return t\n\n\ndef _make_interp_per_full_matr(x, y, t, k):\n '''\n Returns a solution of a system for B-spline interpolation with periodic\n boundary conditions. First ``k - 1`` rows of matrix are condtions of\n periodicity (continuity of ``k - 1`` derivatives at the boundary points).\n Last ``n`` rows are interpolation conditions.\n RHS is ``k - 1`` zeros and ``n`` ordinates in this case.\n\n Parameters\n ----------\n x : 1-D array, shape (n,)\n Values of x - coordinate of a given set of points.\n y : 1-D array, shape (n,)\n Values of y - coordinate of a given set of points.\n t : 1-D array, shape(n+2*k,)\n Vector of knots.\n k : int\n The maximum degree of spline\n\n Returns\n -------\n c : 1-D array, shape (n+k-1,)\n B-spline coefficients\n\n Notes\n -----\n ``t`` is supposed to be taken on circle.\n\n '''\n\n x, y, t = map(np.asarray, (x, y, t))\n\n n = x.size\n # LHS: the collocation matrix + derivatives at edges\n matr = np.zeros((n + k - 1, n + k - 1))\n\n # derivatives at x[0] and x[-1]:\n for i in range(k - 1):\n bb = _bspl.evaluate_all_bspl(t, k, x[0], k, nu=i + 1)\n matr[i, : k + 1] += bb\n bb = _bspl.evaluate_all_bspl(t, k, x[-1], n + k - 1, nu=i + 1)[:-1]\n matr[i, -k:] -= bb\n \n # collocation matrix\n for i in range(n):\n xval = x[i]\n # find interval\n if xval == t[k]:\n left = k\n else:\n left = np.searchsorted(t, xval) - 1\n\n # fill a row\n bb = _bspl.evaluate_all_bspl(t, k, xval, left)\n matr[i + k - 1, left-k:left+1] = bb\n \n # RHS\n b = np.r_[[0] * (k - 1), y]\n\n c = solve(matr, b)\n return c\n\ndef _make_periodic_spline(x, y, t, k, axis):\n '''\n Compute the (coefficients of) interpolating B-spline with periodic\n boundary conditions.\n\n Parameters\n ----------\n x : array_like, shape (n,)\n Abscissas.\n y : array_like, shape (n,)\n Ordinates.\n k : int\n B-spline degree.\n t : array_like, shape (n + 2 * k,).\n Knots taken on a circle, ``k`` on the left and ``k`` on the right\n of the vector ``x``.\n\n Returns\n -------\n b : a BSpline object of the degree ``k`` and with knots ``t``.\n\n Notes\n -----\n The original system is formed by ``n + k - 1`` equations where the first\n ``k - 1`` of them stand for the ``k - 1`` derivatives continuity on the\n edges while the other equations correspond to an interpolating case\n (matching all the input points). Due to a special form of knot vector, it\n can be proved that in the original system the first and last ``k``\n coefficients of a spline function are the same, respectively. 
It follows\n from the fact that all ``k - 1`` derivatives are equal term by term at ends\n and that the matrix of the original system of linear equations is\n non-degenerate. So, we can reduce the number of equations to ``n - 1``\n (first ``k - 1`` equations could be reduced). Another trick of this\n implementation is cyclic shift of values of B-splines due to equality of\n ``k`` unknown coefficients. With this we can receive matrix of the system\n with upper right and lower left blocks, and ``k`` diagonals. It allows\n to use Woodbury formula to optimize the computations.\n\n '''\n n = y.shape[0]\n\n extradim = prod(y.shape[1:])\n y_new = y.reshape(n, extradim)\n c = np.zeros((n + k - 1, extradim))\n\n # n <= k case is solved with full matrix\n if n <= k:\n for i in range(extradim):\n c[:, i] = _make_interp_per_full_matr(x, y_new[:, i], t, k)\n c = np.ascontiguousarray(c.reshape((n + k - 1,) + y.shape[1:]))\n return BSpline.construct_fast(t, c, k, extrapolate='periodic', axis=axis)\n\n nt = len(t) - k - 1\n\n # size of block elements\n kul = int(k / 2)\n \n # kl = ku = k\n ab = np.zeros((3 * k + 1, nt), dtype=np.float_, order='F')\n\n # upper right and lower left blocks\n ur = np.zeros((kul, kul))\n ll = np.zeros_like(ur)\n \n # `offset` is made to shift all the non-zero elements to the end of the\n # matrix\n _bspl._colloc(x, t, k, ab, offset=k)\n \n # remove zeros before the matrix\n ab = ab[-k - (k + 1) % 2:, :]\n \n # The least elements in rows (except repetitions) are diagonals\n # of block matrices. Upper right matrix is an upper triangular\n # matrix while lower left is a lower triangular one.\n for i in range(kul):\n ur += np.diag(ab[-i - 1, i: kul], k=i)\n ll += np.diag(ab[i, -kul - (k % 2): n - 1 + 2 * kul - i], k=-i)\n\n # remove elements that occur in the last point\n # (first and last points are equivalent)\n A = ab[:, kul: -k + kul]\n\n for i in range(extradim):\n cc = _woodbury_algorithm(A, ur, ll, y_new[:, i][:-1], k)\n c[:, i] = np.concatenate((cc[-kul:], cc, cc[:kul + k % 2]))\n c = np.ascontiguousarray(c.reshape((n + k - 1,) + y.shape[1:]))\n return BSpline.construct_fast(t, c, k, extrapolate='periodic', axis=axis)\n\ndef make_interp_spline(x, y, k=3, t=None, bc_type=None, axis=0,\n check_finite=True):\n \"\"\"Compute the (coefficients of) interpolating B-spline.\n\n Parameters\n ----------\n x : array_like, shape (n,)\n Abscissas.\n y : array_like, shape (n, ...)\n Ordinates.\n k : int, optional\n B-spline degree. Default is cubic, k=3.\n t : array_like, shape (nt + k + 1,), optional.\n Knots.\n The number of knots needs to agree with the number of datapoints and\n the number of derivatives at the edges. Specifically, ``nt - n`` must\n equal ``len(deriv_l) + len(deriv_r)``.\n bc_type : 2-tuple or None\n Boundary conditions.\n Default is None, which means choosing the boundary conditions\n automatically. Otherwise, it must be a length-two tuple where the first\n element sets the boundary conditions at ``x[0]`` and the second\n element sets the boundary conditions at ``x[-1]``. Each of these must\n be an iterable of pairs ``(order, value)`` which gives the values of\n derivatives of specified orders at the given edge of the interpolation\n interval.\n Alternatively, the following string aliases are recognized:\n\n * ``\"clamped\"``: The first derivatives at the ends are zero. This is\n equivalent to ``bc_type=([(1, 0.0)], [(1, 0.0)])``.\n * ``\"natural\"``: The second derivatives at ends are zero. 
This is\n equivalent to ``bc_type=([(2, 0.0)], [(2, 0.0)])``.\n * ``\"not-a-knot\"`` (default): The first and second segments are the\n same polynomial. This is equivalent to having ``bc_type=None``.\n * ``\"periodic\"``: The values and the first ``k-1`` derivatives at the\n ends are equivalent.\n\n axis : int, optional\n Interpolation axis. Default is 0.\n check_finite : bool, optional\n Whether to check that the input arrays contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n Default is True.\n\n Returns\n -------\n b : a BSpline object of the degree ``k`` and with knots ``t``.\n\n Examples\n --------\n\n Use cubic interpolation on Chebyshev nodes:\n\n >>> def cheb_nodes(N):\n ... jj = 2.*np.arange(N) + 1\n ... x = np.cos(np.pi * jj / 2 / N)[::-1]\n ... return x\n\n >>> x = cheb_nodes(20)\n >>> y = np.sqrt(1 - x**2)\n\n >>> from scipy.interpolate import BSpline, make_interp_spline\n >>> b = make_interp_spline(x, y)\n >>> np.allclose(b(x), y)\n True\n\n Note that the default is a cubic spline with a not-a-knot boundary condition\n\n >>> b.k\n 3\n\n Here we use a 'natural' spline, with zero 2nd derivatives at edges:\n\n >>> l, r = [(2, 0.0)], [(2, 0.0)]\n >>> b_n = make_interp_spline(x, y, bc_type=(l, r)) # or, bc_type=\"natural\"\n >>> np.allclose(b_n(x), y)\n True\n >>> x0, x1 = x[0], x[-1]\n >>> np.allclose([b_n(x0, 2), b_n(x1, 2)], [0, 0])\n True\n\n Interpolation of parametric curves is also supported. As an example, we\n compute a discretization of a snail curve in polar coordinates\n\n >>> phi = np.linspace(0, 2.*np.pi, 40)\n >>> r = 0.3 + np.cos(phi)\n >>> x, y = r*np.cos(phi), r*np.sin(phi) # convert to Cartesian coordinates\n\n Build an interpolating curve, parameterizing it by the angle\n\n >>> from scipy.interpolate import make_interp_spline\n >>> spl = make_interp_spline(phi, np.c_[x, y])\n\n Evaluate the interpolant on a finer grid (note that we transpose the result\n to unpack it into a pair of x- and y-arrays)\n\n >>> phi_new = np.linspace(0, 2.*np.pi, 100)\n >>> x_new, y_new = spl(phi_new).T\n\n Plot the result\n\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(x, y, 'o')\n >>> plt.plot(x_new, y_new, '-')\n >>> plt.show()\n\n Build a B-spline curve with 2 dimensional y\n \n >>> x = np.linspace(0, 2*np.pi, 10)\n >>> y = np.array([np.sin(x), np.cos(x)])\n\n Periodic condition is satisfied because y coordinates of points on the ends\n are equivalent\n\n >>> ax = plt.axes(projection='3d')\n >>> xx = np.linspace(0, 2*np.pi, 100)\n >>> bspl = make_interp_spline(x, y, k=5, bc_type='periodic', axis=1)\n >>> ax.plot3D(xx, *bspl(xx))\n >>> ax.scatter3D(x, *y, color='red')\n >>> plt.show()\n\n See Also\n --------\n BSpline : base class representing the B-spline objects\n CubicSpline : a cubic spline in the polynomial basis\n make_lsq_spline : a similar factory function for spline fitting\n UnivariateSpline : a wrapper over FITPACK spline fitting routines\n splrep : a wrapper over FITPACK spline fitting routines\n\n \"\"\"\n # convert string aliases for the boundary conditions\n if bc_type is None or bc_type == 'not-a-knot' or bc_type == 'periodic':\n deriv_l, deriv_r = None, None\n elif isinstance(bc_type, str):\n deriv_l, deriv_r = bc_type, bc_type\n else:\n try:\n deriv_l, deriv_r = bc_type\n except TypeError as e:\n raise ValueError(\"Unknown boundary condition: %s\" % bc_type) from e\n\n y = np.asarray(y)\n\n axis = normalize_axis_index(axis, y.ndim)\n\n x 
= _as_float_array(x, check_finite)\n y = _as_float_array(y, check_finite)\n\n y = np.moveaxis(y, axis, 0) # now internally interp axis is zero\n\n if bc_type == 'periodic' and not np.allclose(y[0], y[-1], atol=1e-15):\n raise ValueError(\"First and last points does not match while \"\n \"periodic case expected\")\n\n # special-case k=0 right away\n if k == 0:\n if any(_ is not None for _ in (t, deriv_l, deriv_r)):\n raise ValueError(\"Too much info for k=0: t and bc_type can only \"\n \"be None.\")\n t = np.r_[x, x[-1]]\n c = np.asarray(y)\n c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))\n return BSpline.construct_fast(t, c, k, axis=axis)\n\n # special-case k=1 (e.g., Lyche and Morken, Eq.(2.16))\n if k == 1 and t is None:\n if not (deriv_l is None and deriv_r is None):\n raise ValueError(\"Too much info for k=1: bc_type can only be None.\")\n t = np.r_[x[0], x, x[-1]]\n c = np.asarray(y)\n c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))\n return BSpline.construct_fast(t, c, k, axis=axis)\n\n k = operator.index(k)\n\n if bc_type == 'periodic' and t is not None:\n raise NotImplementedError(\"For periodic case t is constructed \"\n \"automatically and can not be passed manually\")\n\n # come up with a sensible knot vector, if needed\n if t is None:\n if deriv_l is None and deriv_r is None:\n if bc_type == 'periodic':\n t = _periodic_knots(x, k)\n elif k == 2:\n # OK, it's a bit ad hoc: Greville sites + omit\n # 2nd and 2nd-to-last points, a la not-a-knot\n t = (x[1:] + x[:-1]) / 2.\n t = np.r_[(x[0],)*(k+1),\n t[1:-1],\n (x[-1],)*(k+1)]\n else:\n t = _not_a_knot(x, k)\n else:\n t = _augknt(x, k)\n\n t = _as_float_array(t, check_finite)\n\n if x.ndim != 1 or np.any(x[1:] < x[:-1]):\n raise ValueError(\"Expect x to be a 1-D sorted array_like.\")\n if np.any(x[1:] == x[:-1]):\n raise ValueError(\"Expect x to not have duplicates\")\n if k < 0:\n raise ValueError(\"Expect non-negative k.\")\n if t.ndim != 1 or np.any(t[1:] < t[:-1]):\n raise ValueError(\"Expect t to be a 1-D sorted array_like.\")\n if x.size != y.shape[0]:\n raise ValueError('Shapes of x {} and y {} are incompatible'\n .format(x.shape, y.shape))\n if t.size < x.size + k + 1:\n raise ValueError('Got %d knots, need at least %d.' %\n (t.size, x.size + k + 1))\n if (x[0] < t[k]) or (x[-1] > t[-k]):\n raise ValueError('Out of bounds w/ x = %s.' 
% x)\n\n if bc_type == 'periodic':\n return _make_periodic_spline(x, y, t, k, axis)\n\n # Here : deriv_l, r = [(nu, value), ...]\n deriv_l = _convert_string_aliases(deriv_l, y.shape[1:])\n deriv_l_ords, deriv_l_vals = _process_deriv_spec(deriv_l)\n nleft = deriv_l_ords.shape[0]\n\n deriv_r = _convert_string_aliases(deriv_r, y.shape[1:])\n deriv_r_ords, deriv_r_vals = _process_deriv_spec(deriv_r)\n nright = deriv_r_ords.shape[0]\n\n # have `n` conditions for `nt` coefficients; need nt-n derivatives\n n = x.size\n nt = t.size - k - 1\n\n if nt - n != nleft + nright:\n raise ValueError(\"The number of derivatives at boundaries does not \"\n \"match: expected %s, got %s+%s\" % (nt-n, nleft, nright))\n\n # set up the LHS: the collocation matrix + derivatives at boundaries\n kl = ku = k\n ab = np.zeros((2*kl + ku + 1, nt), dtype=np.float_, order='F')\n _bspl._colloc(x, t, k, ab, offset=nleft)\n if nleft > 0:\n _bspl._handle_lhs_derivatives(t, k, x[0], ab, kl, ku, deriv_l_ords)\n if nright > 0:\n _bspl._handle_lhs_derivatives(t, k, x[-1], ab, kl, ku, deriv_r_ords,\n offset=nt-nright)\n\n # set up the RHS: values to interpolate (+ derivative values, if any)\n extradim = prod(y.shape[1:])\n rhs = np.empty((nt, extradim), dtype=y.dtype)\n if nleft > 0:\n rhs[:nleft] = deriv_l_vals.reshape(-1, extradim)\n rhs[nleft:nt - nright] = y.reshape(-1, extradim)\n if nright > 0:\n rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim)\n\n # solve Ab @ x = rhs; this is the relevant part of linalg.solve_banded\n if check_finite:\n ab, rhs = map(np.asarray_chkfinite, (ab, rhs))\n gbsv, = get_lapack_funcs(('gbsv',), (ab, rhs))\n lu, piv, c, info = gbsv(kl, ku, ab, rhs,\n overwrite_ab=True, overwrite_b=True)\n\n if info > 0:\n raise LinAlgError(\"Collocation matix is singular.\")\n elif info < 0:\n raise ValueError('illegal value in %d-th argument of internal gbsv' % -info)\n\n c = np.ascontiguousarray(c.reshape((nt,) + y.shape[1:]))\n return BSpline.construct_fast(t, c, k, axis=axis)\n\n\ndef make_lsq_spline(x, y, t, k=3, w=None, axis=0, check_finite=True):\n r\"\"\"Compute the (coefficients of) an LSQ B-spline.\n\n The result is a linear combination\n\n .. math::\n\n S(x) = \\sum_j c_j B_j(x; t)\n\n of the B-spline basis elements, :math:`B_j(x; t)`, which minimizes\n\n .. math::\n\n \\sum_{j} \\left( w_j \\times (S(x_j) - y_j) \\right)^2\n\n Parameters\n ----------\n x : array_like, shape (m,)\n Abscissas.\n y : array_like, shape (m, ...)\n Ordinates.\n t : array_like, shape (n + k + 1,).\n Knots.\n Knots and data points must satisfy Schoenberg-Whitney conditions.\n k : int, optional\n B-spline degree. Default is cubic, k=3.\n w : array_like, shape (n,), optional\n Weights for spline fitting. Must be positive. If ``None``,\n then weights are all equal.\n Default is ``None``.\n axis : int, optional\n Interpolation axis. 
Default is zero.\n check_finite : bool, optional\n Whether to check that the input arrays contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n Default is True.\n\n Returns\n -------\n b : a BSpline object of the degree `k` with knots `t`.\n\n Notes\n -----\n The number of data points must be larger than the spline degree `k`.\n\n Knots `t` must satisfy the Schoenberg-Whitney conditions,\n i.e., there must be a subset of data points ``x[j]`` such that\n ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.\n\n Examples\n --------\n Generate some noisy data:\n\n >>> rng = np.random.default_rng()\n >>> x = np.linspace(-3, 3, 50)\n >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)\n\n Now fit a smoothing cubic spline with a pre-defined internal knots.\n Here we make the knot vector (k+1)-regular by adding boundary knots:\n\n >>> from scipy.interpolate import make_lsq_spline, BSpline\n >>> t = [-1, 0, 1]\n >>> k = 3\n >>> t = np.r_[(x[0],)*(k+1),\n ... t,\n ... (x[-1],)*(k+1)]\n >>> spl = make_lsq_spline(x, y, t, k)\n\n For comparison, we also construct an interpolating spline for the same\n set of data:\n\n >>> from scipy.interpolate import make_interp_spline\n >>> spl_i = make_interp_spline(x, y)\n\n Plot both:\n\n >>> import matplotlib.pyplot as plt\n >>> xs = np.linspace(-3, 3, 100)\n >>> plt.plot(x, y, 'ro', ms=5)\n >>> plt.plot(xs, spl(xs), 'g-', lw=3, label='LSQ spline')\n >>> plt.plot(xs, spl_i(xs), 'b-', lw=3, alpha=0.7, label='interp spline')\n >>> plt.legend(loc='best')\n >>> plt.show()\n\n **NaN handling**: If the input arrays contain ``nan`` values, the result is\n not useful since the underlying spline fitting routines cannot deal with\n ``nan``. A workaround is to use zero weights for not-a-number data points:\n\n >>> y[8] = np.nan\n >>> w = np.isnan(y)\n >>> y[w] = 0.\n >>> tck = make_lsq_spline(x, y, t, w=~w)\n\n Notice the need to replace a ``nan`` by a numerical value (precise value\n does not matter as long as the corresponding weight is zero.)\n\n See Also\n --------\n BSpline : base class representing the B-spline objects\n make_interp_spline : a similar factory function for interpolating splines\n LSQUnivariateSpline : a FITPACK-based spline fitting routine\n splrep : a FITPACK-based fitting routine\n\n \"\"\"\n x = _as_float_array(x, check_finite)\n y = _as_float_array(y, check_finite)\n t = _as_float_array(t, check_finite)\n if w is not None:\n w = _as_float_array(w, check_finite)\n else:\n w = np.ones_like(x)\n k = operator.index(k)\n\n axis = normalize_axis_index(axis, y.ndim)\n\n y = np.moveaxis(y, axis, 0) # now internally interp axis is zero\n\n if x.ndim != 1 or np.any(x[1:] - x[:-1] <= 0):\n raise ValueError(\"Expect x to be a 1-D sorted array_like.\")\n if x.shape[0] < k+1:\n raise ValueError(\"Need more x points.\")\n if k < 0:\n raise ValueError(\"Expect non-negative k.\")\n if t.ndim != 1 or np.any(t[1:] - t[:-1] < 0):\n raise ValueError(\"Expect t to be a 1-D sorted array_like.\")\n if x.size != y.shape[0]:\n raise ValueError('Shapes of x {} and y {} are incompatible'\n .format(x.shape, y.shape))\n if k > 0 and np.any((x < t[k]) | (x > t[-k])):\n raise ValueError('Out of bounds w/ x = %s.' 
% x)\n if x.size != w.size:\n raise ValueError('Shapes of x {} and w {} are incompatible'\n .format(x.shape, w.shape))\n\n # number of coefficients\n n = t.size - k - 1\n\n # construct A.T @ A and rhs with A the collocation matrix, and\n # rhs = A.T @ y for solving the LSQ problem ``A.T @ A @ c = A.T @ y``\n lower = True\n extradim = prod(y.shape[1:])\n ab = np.zeros((k+1, n), dtype=np.float_, order='F')\n rhs = np.zeros((n, extradim), dtype=y.dtype, order='F')\n _bspl._norm_eq_lsq(x, t, k,\n y.reshape(-1, extradim),\n w,\n ab, rhs)\n rhs = rhs.reshape((n,) + y.shape[1:])\n\n # have observation matrix & rhs, can solve the LSQ problem\n cho_decomp = cholesky_banded(ab, overwrite_ab=True, lower=lower,\n check_finite=check_finite)\n c = cho_solve_banded((cho_decomp, lower), rhs, overwrite_b=True,\n check_finite=check_finite)\n\n c = np.ascontiguousarray(c)\n return BSpline.construct_fast(t, c, k, axis=axis)\n"
] |
[
[
"scipy.optimize.shgo",
"scipy.optimize.differential_evolution",
"numpy.arange",
"numpy.stack",
"scipy.optimize.dual_annealing",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.load",
"numpy.savez_compressed",
"numpy.loadtxt"
],
[
"scipy.linalg._interpolative_backend.iddp_id",
"scipy.linalg._interpolative_backend.idz_diffsnorm",
"scipy.linalg._interpolative_backend.idd_reconid",
"scipy.linalg._interpolative_backend.idzr_svd",
"scipy.linalg._interpolative_backend.idz_copycols",
"scipy.linalg._interpolative_backend.idzr_aid",
"scipy.linalg._interpolative_backend.idz_reconint",
"scipy.linalg._interpolative_backend.idz_reconid",
"scipy.linalg._interpolative_backend.id_srando",
"scipy.linalg._interpolative_backend.iddr_rsvd",
"scipy.linalg._interpolative_backend.idzp_asvd",
"scipy.linalg._interpolative_backend.idd_reconint",
"scipy.linalg._interpolative_backend.idd_diffsnorm",
"scipy.linalg._interpolative_backend.iddr_aid",
"scipy.linalg._interpolative_backend.idd_copycols",
"scipy.sparse.linalg.aslinearoperator",
"scipy.linalg._interpolative_backend.idzp_aid",
"scipy.linalg._interpolative_backend.idd_estrank",
"scipy.linalg._interpolative_backend.idzp_id",
"scipy.linalg._interpolative_backend.idzp_rid",
"scipy.linalg._interpolative_backend.iddp_asvd",
"scipy.linalg._interpolative_backend.idzr_asvd",
"scipy.linalg._interpolative_backend.idzp_rsvd",
"scipy.linalg._interpolative_backend.idz_findrank",
"scipy.linalg._interpolative_backend.idzp_svd",
"scipy.linalg._interpolative_backend.iddr_asvd",
"scipy.linalg._interpolative_backend.iddp_rsvd",
"numpy.asfortranarray",
"scipy.linalg._interpolative_backend.idzr_id",
"numpy.random.rand",
"scipy.linalg._interpolative_backend.idz_id2svd",
"numpy.random.RandomState",
"scipy.linalg._interpolative_backend.iddp_aid",
"scipy.linalg._interpolative_backend.iddr_svd",
"scipy.linalg._interpolative_backend.id_srandi",
"scipy.linalg._interpolative_backend.idzr_rsvd",
"scipy.linalg._interpolative_backend.idd_id2svd",
"scipy.linalg._interpolative_backend.iddr_rid",
"scipy.linalg._interpolative_backend.idz_estrank",
"scipy.linalg._interpolative_backend.idz_snorm",
"scipy.linalg._interpolative_backend.iddp_svd",
"scipy.linalg._interpolative_backend.idd_snorm",
"scipy.linalg._interpolative_backend.idzr_rid",
"numpy.prod",
"scipy.linalg._interpolative_backend.idd_findrank",
"scipy.linalg._interpolative_backend.iddr_id",
"scipy.linalg._interpolative_backend.iddp_rid"
],
[
"numpy.add.at",
"numpy.unique",
"numpy.asarray",
"numpy.any",
"numpy.zeros"
],
[
"numpy.einsum",
"numpy.asarray",
"numpy.vstack",
"scipy._lib._util._asarray_validated",
"numpy.all",
"numpy.iscomplexobj",
"numpy.ones_like",
"numpy.empty_like",
"numpy.eye",
"numpy.flatnonzero",
"numpy.triu",
"numpy.zeros",
"numpy.nonzero",
"numpy.iscomplex",
"numpy.argsort",
"numpy.array",
"numpy.conj",
"numpy.isfinite",
"numpy.empty"
],
[
"numpy.diag",
"scipy.linalg.LinAlgError",
"numpy.asarray",
"numpy.issubdtype",
"scipy.linalg.cholesky_banded",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.core.multiarray.normalize_axis_index",
"numpy.any",
"numpy.moveaxis",
"numpy.searchsorted",
"scipy.special.poch",
"numpy.ones_like",
"numpy.allclose",
"numpy.unique",
"numpy.arange",
"numpy.atleast_1d",
"numpy.copy",
"numpy.diff",
"scipy.linalg.cho_solve_banded",
"scipy.linalg.solve",
"numpy.zeros",
"scipy.linalg.solve_banded",
"numpy.power",
"numpy.ascontiguousarray",
"numpy.identity",
"scipy._lib._util.prod",
"scipy.linalg.get_lapack_funcs",
"numpy.isfinite",
"scipy.sparse.csr_array",
"numpy.empty"
]
] |
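The code cell above documents `make_interp_spline` and `make_lsq_spline` at length. As a quick cross-check of the listed `scipy.interpolate` APIs, here is a minimal, self-contained sketch contrasting an interpolating spline with an LSQ fit on noisy data; the data and the coarse knot vector are illustrative choices echoing the docstring examples, not values taken from any repository.

```python
# Sketch: interpolating vs. least-squares B-spline fits with SciPy.
import numpy as np
from scipy.interpolate import make_interp_spline, make_lsq_spline

rng = np.random.default_rng(0)
x = np.linspace(-3, 3, 50)
y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)

# Cubic interpolating spline (passes through every data point).
spl_i = make_interp_spline(x, y, k=3)

# LSQ spline on a coarse (k+1)-regular knot vector (smooths the noise).
k = 3
t = np.r_[(x[0],) * (k + 1), [-1, 0, 1], (x[-1],) * (k + 1)]
spl_lsq = make_lsq_spline(x, y, t, k)

xs = np.linspace(-3, 3, 7)
print(np.round(spl_i(xs), 3))    # reproduces the noisy samples at data sites
print(np.round(spl_lsq(xs), 3))  # smoothed trend
```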
rexdivakar/Telegram-Notify
|
[
"7d4f317548e6c1fa14db1c636c328aac02224dc9"
] |
[
"temp.py"
] |
[
"import ssl\nfrom notifly import tf_notifier\nimport tensorflow as tf\nfrom dotenv import load_dotenv\nimport os\n\n\nload_dotenv()\n\nssl._create_default_https_context = ssl._create_unverified_context\ntoken = os.getenv('TOKEN')\nnotifier = tf_notifier.TfNotifier(token=token, platform='discord')\n\n\nclass TestCallback(tf.keras.callbacks.Callback):\n\n @notifier.notify_on_epoch_begin(epoch_interval=1, graph_interval=10)\n def on_epoch_begin(self, epoch, logs=None):\n pass\n\n @notifier.notify_on_epoch_end(epoch_interval=1, graph_interval=10)\n def on_epoch_end(self, epoch, logs=None):\n pass\n\n @notifier.notify_on_train_begin()\n def on_train_begin(self, logs=None):\n pass\n\n @notifier.notify_on_train_end()\n def on_train_end(self, logs=None):\n pass\n\n\nfashion_mnist = tf.keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(5, activation='relu'),\n tf.keras.layers.Dense(10)\n])\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.fit(train_images, train_labels, epochs=5, callbacks=[TestCallback()])\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n\nprint('\\nTest accuracy:', test_acc)\n"
] |
[
[
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Flatten"
]
] |
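The `temp.py` cell above wires notifly decorators into `tf.keras` callback hooks. As a hedged sketch of just the underlying callback pattern (without notifly; the model size and epoch count are illustrative assumptions), a plain Keras `Callback` that reports metrics at the end of each epoch looks like this:

```python
# Sketch of the tf.keras callback hook used by the code cell above.
import tensorflow as tf

class EpochLogger(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        print(f"epoch {epoch}: " + ", ".join(f"{k}={v:.4f}" for k, v in logs.items()))

(x_train, y_train), _ = tf.keras.datasets.fashion_mnist.load_data()
x_train = x_train / 255.0  # scale pixels to [0, 1]

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(5, activation="relu"),
    tf.keras.layers.Dense(10),
])
model.compile(optimizer="adam",
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])
model.fit(x_train, y_train, epochs=1, callbacks=[EpochLogger()])
```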
realfolkcode/PyTorch-VAE
|
[
"6abff8c2483e04bbec936bcd1cf20f8f2705266d"
] |
[
"models/vanilla_vae.py"
] |
[
"import torch\nfrom models import BaseVAE\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom .types_ import *\n\n\nclass VanillaVAE(BaseVAE):\n\n\n def __init__(self,\n in_channels: int,\n latent_dim: int,\n hidden_dims: List = None,\n **kwargs) -> None:\n super(VanillaVAE, self).__init__()\n\n self.latent_dim = latent_dim\n self.in_channels = in_channels\n\n modules = []\n if hidden_dims is None:\n hidden_dims = [32, 64, 128, 256, 512]\n\n # Build Encoder\n for h_dim in hidden_dims:\n modules.append(\n nn.Sequential(\n nn.Linear(in_channels, h_dim),\n nn.Tanh())\n )\n in_channels = h_dim\n\n self.encoder = nn.Sequential(*modules)\n self.fc_mu = nn.Linear(hidden_dims[-1], latent_dim) \n self.fc_var = nn.Linear(hidden_dims[-1], latent_dim)\n\n\n # Build Decoder\n modules = []\n\n self.decoder = nn.Sequential(\n nn.Linear(latent_dim, hidden_dims[-1]),\n nn.Tanh()\n )\n\n self.final_layer = nn.Sequential(nn.Linear(hidden_dims[-1], self.in_channels),\n nn.Sigmoid())\n\n def encode(self, input: Tensor) -> List[Tensor]:\n \"\"\"\n Encodes the input by passing through the encoder network\n and returns the latent codes.\n :param input: (Tensor) Input tensor to encoder [N x C x H x W]\n :return: (Tensor) List of latent codes\n \"\"\"\n input = input.view(input.shape[0], -1)\n result = self.encoder(input)\n result = torch.flatten(result, start_dim=1)\n\n # Split the result into mu and var components\n # of the latent Gaussian distribution\n mu = self.fc_mu(result)\n log_var = self.fc_var(result)\n\n return [mu, log_var]\n\n def decode(self, z: Tensor) -> Tensor:\n \"\"\"\n Maps the given latent codes\n onto the image space.\n :param z: (Tensor) [B x D]\n :return: (Tensor) [B x C x H x W]\n \"\"\"\n \n result = self.decoder(z)\n result = self.final_layer(result)\n result = result.view(result.shape[0], 28, 28)\n return result\n\n def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:\n \"\"\"\n Reparameterization trick to sample from N(mu, var) from\n N(0,1).\n :param mu: (Tensor) Mean of the latent Gaussian [B x D]\n :param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]\n :return: (Tensor) [B x D]\n \"\"\"\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps * std + mu\n\n def forward(self, input: Tensor, **kwargs) -> List[Tensor]:\n mu, log_var = self.encode(input)\n z = self.reparameterize(mu, log_var)\n return [self.decode(z), input, mu, log_var]\n\n def loss_function(self,\n *args,\n **kwargs) -> dict:\n \"\"\"\n Computes the VAE loss function.\n KL(N(\\mu, \\sigma), N(0, 1)) = \\log \\frac{1}{\\sigma} + \\frac{\\sigma^2 + \\mu^2}{2} - \\frac{1}{2}\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n recons = args[0]\n input = args[1]\n mu = args[2]\n log_var = args[3]\n kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss =F.mse_loss(recons, input.view(input.shape[0], 28, 28))\n\n\n kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)\n\n loss = recons_loss + kld_weight * kld_loss\n return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':-kld_loss}\n\n def sample(self,\n num_samples:int,\n current_device: int, **kwargs) -> Tensor:\n \"\"\"\n Samples from the latent space and return the corresponding\n image space map.\n :param num_samples: (Int) Number of samples\n :param current_device: (Int) Device to run the model\n :return: (Tensor)\n \"\"\"\n z = torch.randn(num_samples,\n self.latent_dim)\n\n z = z.to(current_device)\n\n samples = 
self.decode(z)\n return samples\n\n def generate(self, x: Tensor, **kwargs) -> Tensor:\n \"\"\"\n Given an input image x, returns the reconstructed image\n :param x: (Tensor) [B x C x H x W]\n :return: (Tensor) [B x C x H x W]\n \"\"\"\n\n return self.forward(x)[0]\n"
] |
[
[
"torch.randn_like",
"torch.nn.Sequential",
"torch.randn",
"torch.nn.Tanh",
"torch.exp",
"torch.nn.Linear",
"torch.nn.Sigmoid",
"torch.flatten"
]
] |
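The `VanillaVAE` above combines the reparameterization trick with the closed-form KL divergence to a standard normal. A minimal sketch of those two pieces in isolation (tensor shapes are illustrative assumptions, not the model's):

```python
# Sketch: reparameterization trick and KL(N(mu, sigma^2) || N(0, I)).
import torch

def reparameterize(mu, log_var):
    std = torch.exp(0.5 * log_var)  # log-variance -> standard deviation
    eps = torch.randn_like(std)     # noise from N(0, I)
    return mu + eps * std           # differentiable sample from N(mu, sigma^2)

def kl_to_standard_normal(mu, log_var):
    # Sum over latent dimensions, average over the batch.
    return torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1))

mu = torch.zeros(4, 8, requires_grad=True)
log_var = torch.zeros(4, 8, requires_grad=True)
z = reparameterize(mu, log_var)
print(z.shape, kl_to_standard_normal(mu, log_var).item())
```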
kantharajucn/job_seniority_prediction
|
[
"cad9147ffddab1c5ead878c2f9d9e48199dc0da9"
] |
[
"src/dataset.py"
] |
[
"import torch\nfrom sklearn.preprocessing import LabelEncoder\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass JobsDataset(Dataset):\n def __init__(self, X, y, tokenizer, max_len=512):\n self.len = len(X)\n self.data = X\n self.y = y\n self.tokenizer = tokenizer\n self.max_len = max_len\n self._label_encode()\n\n def _label_encode(self):\n self.label_encoder = LabelEncoder()\n self.y = self.label_encoder.fit_transform(self.y)\n\n def __getitem__(self, index):\n title = str(self.data.title[index])\n title = \" \".join(title.split())\n description = str(self.data.description[index])\n description = \" \".join(description.split())\n inputs = self.tokenizer.encode_plus(\n text=title,\n text_pair=description,\n add_special_tokens=True,\n max_length=self.max_len,\n padding='max_length',\n return_token_type_ids=True,\n truncation=True\n )\n ids = inputs['input_ids']\n mask = inputs['attention_mask']\n\n return {\n 'ids': torch.tensor(ids, dtype=torch.long),\n 'mask': torch.tensor(mask, dtype=torch.long),\n 'targets': torch.tensor(self.y[index], dtype=torch.long)\n }\n\n def __len__(self):\n return self.len\n\n\ndef get_data_loader(X_train, X_valid, y_train, y_valid, tokenizer, batch_size=16, num_workers=1):\n training_set = JobsDataset(X_train, y_train, tokenizer, max_len=512)\n validation_set = JobsDataset(X_valid, y_valid, tokenizer, max_len=512)\n train_params = {'batch_size': batch_size,\n 'shuffle': True,\n 'num_workers': num_workers\n }\n\n test_params = {'batch_size': batch_size,\n 'shuffle': True,\n 'num_workers': num_workers\n }\n\n training_loader = DataLoader(training_set, **train_params)\n validation_loader = DataLoader(validation_set, **test_params)\n return training_loader, validation_loader\n"
] |
[
[
"sklearn.preprocessing.LabelEncoder",
"torch.utils.data.DataLoader",
"torch.tensor"
]
] |
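`JobsDataset.__getitem__` above relies on `tokenizer.encode_plus` producing `input_ids` and `attention_mask` for a title/description pair. A minimal sketch of that single step, assuming a Hugging Face tokenizer (the checkpoint name and the sample strings are illustrative choices, not taken from the repository):

```python
# Sketch: title/description pair encoding as done inside JobsDataset.__getitem__.
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
inputs = tokenizer.encode_plus(
    text="Senior Data Scientist",
    text_pair="Build, train and deploy machine learning models.",
    add_special_tokens=True,
    max_length=32,
    padding="max_length",
    return_token_type_ids=True,
    truncation=True,
)
ids = torch.tensor(inputs["input_ids"], dtype=torch.long)
mask = torch.tensor(inputs["attention_mask"], dtype=torch.long)
print(ids.shape, int(mask.sum()))  # fixed-length ids, count of non-padding tokens
```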
MATHplus-Young-Academy/P3-Morph-Scoring
|
[
"0e2ba66cf28e30525b22706cc50d23b9de09a58a"
] |
[
"morphomatics_med/manifold/Bezierfold.py"
] |
[
"################################################################################\n# #\n# This file is part of the Morphomatics library #\n# see https://github.com/morphomatics/morphomatics #\n# #\n# Copyright (C) 2021 Zuse Institute Berlin #\n# #\n# Morphomatics is distributed under the terms of the ZIB Academic License. #\n# see $MORPHOMATICS/LICENSE #\n# #\n################################################################################\n\nimport numpy as np\n\nimport scipy.integrate as integrate\n\nfrom . import Manifold\n\nfrom joblib import Parallel, delayed\n\nimport time\n\nimport copy\n\nfrom ..stats.RiemannianRegression import RiemannianRegression\nfrom ..stats import ExponentialBarycenter\nfrom ..geom.BezierSpline import BezierSpline\n\n\nclass Bezierfold(Manifold):\n \"\"\"Manifold of Bézier curves (of fixed degree)\n\n Only for single-segment curves for now.\n \"\"\"\n\n def __init__(self, M: Manifold, degree):\n \"\"\"\n\n :arg M: base manifold in which the curves lie\n :arg degree: degree of the Bézier curves\n \"\"\"\n assert M is not None\n\n self._M = M\n\n self._degree = degree\n\n name = 'Manifold of Bézier curves of degree {d} through '.format(d=degree)+M.__str__\n K = np.sum(self._degree) - 1\n dimension = K * M.dim\n point_shape = [K, M.point_shape]\n super().__init__(name, dimension, point_shape)\n\n @property\n def typicaldist(self):\n return\n\n def inner(self, bet, X, Y):\n \"\"\"Functional-based metric\n Vector fields must be given as functions.\n\n :arg bet: Bézier curve in M\n :arg X: generalized Jacobi Field along B\n :arg Y: generalized Jacobi Field along B\n :return: inner product of X and Y at B\n \"\"\"\n assert bet.degrees == self._degree\n # TODO\n return\n\n def norm(self, bet, X):\n \"\"\"Norm of tangent vectors induced by the functional-based metric\n\n :arg bet: Bézier curve in M\n :arg X: generalized Jacobi Field along B\n :return: norm of X\n \"\"\"\n assert bet.degrees() == self._degree\n\n return np.sqrt(self.inner(bet, X, X))\n\n def proj(self, X, H):\n # TODO\n return\n\n egrad2rgrad = proj\n\n def ehess2rhess(self, p, G, H, X):\n \"\"\"Converts the Euclidean gradient G and Hessian H of a function at\n a point p along a tangent vector X to the Riemannian Hessian\n along X on the manifold.\n \"\"\"\n return\n\n def retr(self, R, X):\n # TODO\n return self.exp(R, X)\n\n def exp(self, R, X):\n # TODO\n return\n\n def log(self, R, Q):\n # TODO\n return\n\n def geopoint(self, R, Q, t):\n # TODO\n return\n\n def discgeodesic(self, alp, bet, n=5, eps=1e-10, nsteps=30, verbosity=1):\n \"\"\"Discrete shortest path through space of Bézier curves of same degree\n\n :param alp: Bézier curve in manifold M\n :param bet: Bézier curve in manifold M\n :param n: create discrete n-geodesic\n :param eps: iteration stops when the difference in energy between the new and old iterate drops below eps\n :param nsteps : maximal number of steps\n :param verbosity: 0 (no text) or 1 (print information on convergence)\n :return: control points of the Bézier curves along the shortest path\n \"\"\"\n\n assert alp.degrees == self._degree and bet.degrees == self._degree\n\n def init_disc_curve(alp, bet, n):\n \"\"\"Initialize discrete curve by aligning control points along geodesics\n \"\"\"\n\n # initial discrete curve\n m = np.array(alp.control_points[0].shape)\n m[0] = self._degree + 1\n H = [alp]\n # logs between corresponding control points\n X = np.zeros(m)\n for j in range(self._degree + 1):\n X[j] = self._M.connec.log(alp.control_points[0][j], 
bet.control_points[0][j])\n # initialize control points along geodesics\n for i in range(1, n):\n P = np.zeros(m)\n for j in range(self._degree + 1):\n P[j] = self._M.connec.exp(alp.control_points[0][j], i / n * X[j])\n\n H.append(BezierSpline(self._M, [P]))\n\n H.append(bet)\n\n return H\n\n # initialize path\n H = init_disc_curve(alp, bet, n)\n\n Eold = self.disc_path_energy(H)\n Enew = self.disc_path_energy(H)\n step = 0\n # optimize path\n while (np.abs(Enew - Eold) > eps and step < nsteps) or step == 0:\n step += 1\n Eold = Enew\n H_old = copy.deepcopy(H)\n\n for i in range(1, n):\n t = np.linspace(0, 1, num=self._degree + 1)\n double_t = np.concatenate((t, t))\n\n h1 = H[i - 1].eval(t)\n h2 = H[i + 1].eval(t)\n Y = np.concatenate((h1, h2))\n\n regression = RiemannianRegression(self._M, Y, double_t, self._degree, verbosity=11*verbosity)\n\n H[i] = regression.trend\n\n Enew = self.disc_path_energy(H)\n\n # check whether energy has increased\n if Enew > Eold:\n print('Stopped computing discrete geodesic because the energy increased in step '+str(step)+'.')\n return H_old\n\n if np.isnan(Enew):\n # repeat\n H = H_old\n print('Had to repeat because of Nan-value.')\n else:\n if verbosity:\n print('Disc-Geo-Step', step, 'Energy:', Enew, 'Enew - Eold:', Enew - Eold)\n\n return H\n\n def loc_dist(self, alp: BezierSpline, bet: BezierSpline, t=np.array([0, 1 / 2, 1])):\n \"\"\" Evaluate distance between two Bézier curves in M at several points\n\n :param alp: Bézier curve\n :param bet: Bézier curve\n :param t: vector with elements in [0,1]\n\n :return: vector with point-wise distances\n \"\"\"\n a_val = alp.eval(t)\n b_val = bet.eval(t)\n d_M = []\n for i in range(len(t)):\n d_M.append(self._M.metric.dist(a_val[i], b_val[i]))\n return np.array(d_M), t\n\n def disc_path_energy(self, H):\n \"\"\"Discrete path energy\n\n :param H: discrete path given as ordered list of Bézier curves of the same degree\n :return: energy of H\n \"\"\"\n # test ¨regression-conform¨ distance\n t = np.linspace(0, 1, num=self._degree + 1)\n d = 0\n for i in range(len(H) - 1):\n dh, _ = self.loc_dist(H[i], H[i + 1], t)\n d += np.sum(dh**2, axis=0)\n\n return d\n\n def rand(self):\n # TODO\n return\n\n def randvec(self, X):\n # TODO\n return\n\n def zerovec(self):\n # TODO\n return\n\n def transp(self, R, Q, X):\n # TODO\n return\n\n def pairmean(self, alp, bet):\n # TODO\n return\n\n def dist(self, alp, bet, l=5):\n \"\"\"Approximate the distance between two Bézier splines\n\n :param alp: Bézier spline\n :param bet: Bézier spline\n :param l: work with discrete l-geodesic\n :return: length of l-geodesic between alp and bet (approximation of the distance)\n \"\"\"\n\n Gam = self.discgeodesic(alp, bet, n=l)\n\n d = 0\n for i in range(len(Gam) - 1):\n y, t = self.loc_dist(Gam[i], Gam[i + 1])\n d += integrate.simps(y, t)\n\n return d\n\n def mean(self, B, n=3, delta=1e-5, min_stepsize=1e-10, nsteps=20, eps=1e-5, n_stepsGeo=10, verbosity=1):\n \"\"\"Discrete mean of a set of Bézier curves\n\n :param B: list of Bézier curves\n :param n: use discrete n-geodesics\n :param delta: iteration stops when the difference in energy between the new and old iterate drops below eps\n :param min_stepsize: iteration stops when the step length is smaller than the given value\n :param nsteps: maximal number of iterations\n :param eps: see eps in discgeodesic\n :param n_stepsGeo: maximal number of iterations when computating discrete geodesics\n :param verbosity: 0 (no text) or 1 (print information on convergence)\n :return: mean curve\n 
\"\"\"\n begin_mean = time.time()\n\n # get shape of array of control points\n m = np.array(B[0].control_points[0].shape)\n m[0] = self._degree + 1\n\n def legs(meaniterate):\n \"\"\" Construct legs of polygonal spider, i.e., discrete n-geodesics between mean iterate and input curves\n \"\"\"\n return Parallel(n_jobs=-1, prefer='threads', require='sharedmem')(delayed(self.discgeodesic)\n (meaniterate, b, n=n, eps=eps,\n nsteps=n_stepsGeo, verbosity=0)\n for b in B)\n\n def loss(FF):\n G = 0\n for HH in FF:\n G += self.disc_path_energy(HH)\n return G\n\n # initialize i-th control point of the mean as the mean of the i-th control points of the data\n C = ExponentialBarycenter\n P = np.zeros(m)\n for i in range(self._degree + 1):\n D = []\n for bet in B:\n D.append(bet.control_points[0][i])\n\n P[i] = C.compute(self._M, D)\n\n # initial guess\n bet_mean = B[0]\n\n # initial legs\n F = legs(bet_mean)\n # initialize stopping parameters\n Eold = 10\n Enew = 1\n stepsize = 10\n step = 0\n while np.abs(Enew - Eold) > delta and stepsize > min_stepsize and step <= nsteps:\n step += 1\n Eold = Enew\n F_old = F\n old_mean = BezierSpline(self._M, bet_mean.control_points)\n\n # new data for regression\n t = np.linspace(0, 1, num=self._degree + 1)\n Y = []\n\n for H in F:\n Y.append(H[1].eval(t))\n\n # make regression w.r.t. mean values -> faster\n mean_Y = np.zeros_like(Y[0])\n for i in range(len(mean_Y)):\n dat = []\n for j in range(len(Y)):\n # take value of each curve at time t_i\n dat.append(Y[j][i])\n\n mean_Y[i] = C.compute(self._M, dat)\n\n if verbosity == 2:\n print('Step '+str(step)+': Updating the mean...')\n\n regression = RiemannianRegression(self._M, mean_Y, t, self._degree, verbosity=2)\n bet_mean = regression.trend\n\n # update discrete paths\n if verbosity == 2:\n print('Step '+str(step)+': Updating discrete paths...')\n start = time.time()\n F = legs(bet_mean)\n\n if verbosity == 2:\n end = time.time()\n print('...took ' + \"{:.2f}\".format(end - start) + ' seconds to update the legs.')\n\n print('Evaluating energy...')\n Enew = loss(F)\n\n # check for divergence\n if Enew > Eold:\n print('Stopped because the energy increased.')\n finish_mean = time.time()\n print('Computing the mean took ' + \"{:.2f}\".format(finish_mean - begin_mean) + ' seconds.')\n return old_mean, F_old\n\n # compute step size\n step_size = 0\n for i, p in enumerate(bet_mean.control_points[0]):\n step_size += self._M.metric.dist(p, old_mean.control_points[0][i]) ** 2\n stepsize = np.sqrt(step_size)\n\n if verbosity > 0:\n print('Mean-Comp-Step', step, 'Energy:', Enew, 'Enew - Eold:', Enew - Eold)\n\n finish_mean = time.time()\n print('Computing the mean took ' + \"{:.2f}\".format(finish_mean - begin_mean) + '.')\n\n return bet_mean, F\n\n def gram(self, B, B_mean=None, F=None, n=5, delta=1e-5, min_stepSize=1e-10, nsteps=20, eps=1e-5, n_stepsGeo=10,\n verbosity=2):\n \"\"\"Approximates the Gram matrix for a curve data set\n\n :param B: list of Bézier splines\n :param B_mean: mean of curves in B\n :param F: discrete spider, i.e, discrete paths from mean to data\n :param n: see mean method\n :param delta: see mean method\n :param min_stepSize: see mean method\n :param nsteps: see mean method\n :param eps: see mean method\n :param n_stepsGeo: see mean method\n :param verbosity: see mean method\n :return G: Gram matrix\n :return bet_mean: mean curve of data curves\n :return F: discrete geodesics from mean to data curves\n \"\"\"\n\n if B_mean is None:\n B_mean, F = self.mean(B, n=n, delta=delta, 
min_stepsize=min_stepSize, nsteps=nsteps, eps=eps,\n n_stepsGeo=n_stepsGeo, verbosity=verbosity)\n\n if verbosity == 2:\n print('Computing Gram matrix...')\n\n n = len(F)\n G = np.zeros((n, n))\n for i, si in enumerate(F):\n for j, sj in enumerate(F[i:], start=i):\n G[i, j] = n ** 2 / (2 * n) * (self.dist(B_mean, si[1], l=1) ** 2 + self.dist(B_mean, sj[1], l=1) ** 2\n - self.dist(si[1], sj[1], l=1) ** 2)\n G[j, i] = G[i, j]\n\n return G, B_mean, F\n\n def projToGeodesic(self, R, Q, P, max_iter=10):\n # TODO\n return\n"
] |
[
[
"numpy.sqrt",
"numpy.linspace",
"numpy.abs",
"numpy.isnan",
"numpy.concatenate",
"numpy.zeros_like",
"scipy.integrate.simps",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
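`Bezierfold.disc_path_energy` above sums squared pointwise distances between consecutive curves of a discrete path. The following is a Euclidean analogue of that idea in plain NumPy (explicitly not the morphomatics API), useful only to illustrate the energy being minimized along a discrete geodesic:

```python
# Euclidean analogue of a discrete path energy: sum of squared pointwise
# distances between consecutive sampled curves along a path.
import numpy as np

def disc_path_energy_euclidean(H):
    # H: array of shape (n_curves, n_samples, dim)
    return sum(np.sum(np.linalg.norm(H[i + 1] - H[i], axis=-1) ** 2)
               for i in range(len(H) - 1))

rng = np.random.default_rng(0)
start = rng.normal(size=(4, 2))       # one sampled "curve"
end = start + 1.0                     # another, shifted
# linear interpolation stands in for the discrete geodesic between them
H = np.stack([(1 - s) * start + s * end for s in np.linspace(0, 1, 6)])
print(disc_path_energy_euclidean(H))
```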
jyericlin/VBC
|
[
"cc34169e4f4ece500ad8c33ab69378f0a700a73e"
] |
[
"src/learners/q_learner_6h_vs_8z_vbc.py"
] |
[
"import copy\nfrom components.episode_buffer import EpisodeBatch\nfrom modules.mixers.vdn import VDNMixer\nfrom modules.mixers.qmix import QMixer\nimport torch as th\nimport numpy as np\nfrom torch.optim import RMSprop\n\n# learning for 6h_vs_8z scenario\nclass QLearner_6h_vs_8z:\n def __init__(self, mac, scheme, logger, args):\n self.args = args\n self.mac = mac\n self.logger = logger\n self.regularization_const = self.args.normalization_const\n self.params = list(mac.parameters())\n\n self.last_target_update_episode = 0\n\n self.mixer = None\n if args.mixer is not None:\n if args.mixer == \"vdn\":\n self.mixer = VDNMixer()\n elif args.mixer == \"qmix\":\n self.mixer = QMixer(args)\n else:\n raise ValueError(\"Mixer {} not recognised.\".format(args.mixer))\n self.params += list(self.mixer.parameters())\n self.target_mixer = copy.deepcopy(self.mixer)\n\n self.params += list(self.mac.env_blender.parameters())\n self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)\n self.target_mac = copy.deepcopy(mac)\n\n self.log_stats_t = -self.args.learner_log_interval - 1\n\n def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):\n # Get the relevant quantities\n rewards = batch[\"reward\"][:, :-1]\n actions = batch[\"actions\"][:, :-1]\n terminated = batch[\"terminated\"][:, :-1].float()\n mask = batch[\"filled\"][:, :-1].float()\n mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])\n avail_actions = batch[\"avail_actions\"]\n\n # Calculate estimated Q-Values\n mac_out = []\n difference_out = []\n difference_out1 = [] \n self.mac.init_hidden(batch.batch_size)\n for t in range(batch.max_seq_length):\n agent_local_outputs, hidden_states = self.mac.forward(batch, t=t) \n dummy0 = self.mac.env_blender(hidden_states[:,0,:].view(32,-1)) \n dummy1 = self.mac.env_blender(hidden_states[:,1,:].view(32,-1)) \n dummy2 = self.mac.env_blender(hidden_states[:,2,:].view(32,-1)) \n dummy3 = self.mac.env_blender(hidden_states[:,3,:].view(32,-1)) \n dummy4 = self.mac.env_blender(hidden_states[:,4,:].view(32,-1))\n dummy5 = self.mac.env_blender(hidden_states[:,5,:].view(32,-1)) \n \n agent0 = (dummy1 + dummy2 + dummy3 + dummy4 + dummy5)/5.0\n agent1 = (dummy0 + dummy2 + dummy3 + dummy4 + dummy5)/5.0\n agent2 = (dummy0 + dummy1 + dummy3 + dummy4 + dummy5)/5.0\n agent3 = (dummy0 + dummy1 + dummy2 + dummy4 + dummy5)/5.0\n agent4 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy5)/5.0\n agent5 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy4)/5.0\n agent_global_outputs =th.cat((agent0.view((32,1,14)),agent1.view((32,1,14)),agent2.view((32,1,14)),agent3.view((32,1,14)),agent4.view((32,1,14)),agent5.view((32,1,14))),1) \n agent_outs = agent_local_outputs + agent_global_outputs\n difference = agent_global_outputs \n mac_out.append(agent_outs)\n difference_out.append(difference)\n \n mac_out = th.stack(mac_out, dim=1) # Concat over time\n difference_out = th.stack(difference_out, dim=1) # Concat over time\n difference_out = th.std(difference_out,dim = 3).sum()\n # Pick the Q-Values for the actions taken by each agent\n chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim\n avg_difference = (difference_out.sum())/((agent_outs.shape[0]*agent_outs.shape[1]*agent_outs.shape[2]*batch.max_seq_length))\n # Calculate the Q-Values necessary for the target\n target_mac_out = []\n \n self.target_mac.init_hidden(batch.batch_size)\n for t in range(batch.max_seq_length):\n target_agent_local_outputs, target_hidden_states = 
self.target_mac.forward(batch, t=t)\n \n dummy0 = self.mac.env_blender(target_hidden_states[:,0,:].view(32,-1)) \n dummy1 = self.mac.env_blender(target_hidden_states[:,1,:].view(32,-1)) \n dummy2 = self.mac.env_blender(target_hidden_states[:,2,:].view(32,-1)) \n dummy3 = self.mac.env_blender(target_hidden_states[:,3,:].view(32,-1)) \n dummy4 = self.mac.env_blender(target_hidden_states[:,4,:].view(32,-1))\n dummy5 = self.mac.env_blender(target_hidden_states[:,5,:].view(32,-1)) \n \n target_agent0 = (dummy1 + dummy2 + dummy3 + dummy4 + dummy5)/5.0\n target_agent1 = (dummy0 + dummy2 + dummy3 + dummy4 + dummy5)/5.0\n target_agent2 = (dummy0 + dummy1 + dummy3 + dummy4 + dummy5)/5.0\n target_agent3 = (dummy0 + dummy1 + dummy2 + dummy4 + dummy5)/5.0\n target_agent4 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy5)/5.0\n target_agent5 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy4)/5.0 \n \n target_agent_global_outputs = th.cat((target_agent0.view((32,1,14)),target_agent1.view((32,1,14)),target_agent2.view((32,1,14)),target_agent3.view((32,1,14)),target_agent4.view((32,1,14)),target_agent5.view((32,1,14))),1)\n target_agent_outs = target_agent_local_outputs + target_agent_global_outputs\n target_mac_out.append(target_agent_outs)\n \n # We don't need the first timesteps Q-Value estimate for calculating targets\n target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time\n\n # Mask out unavailable actions\n target_mac_out[avail_actions[:, 1:] == 0] = -9999999\n\n # Max over target Q-Values\n if self.args.double_q:\n # Get actions that maximise live Q (for double q-learning)\n mac_out[avail_actions == 0] = -9999999\n cur_max_actions = mac_out[:, 1:].max(dim=3, keepdim=True)[1]\n target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)\n else:\n target_max_qvals = target_mac_out.max(dim=3)[0]\n\n # Mix\n if self.mixer is not None:\n chosen_action_qvals = self.mixer(chosen_action_qvals, batch[\"state\"][:, :-1])\n target_max_qvals = self.target_mixer(target_max_qvals, batch[\"state\"][:, 1:])\n\n # Calculate 1-step Q-Learning targets\n targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals\n\n # Td-error\n td_error = (chosen_action_qvals - targets.detach())\n\n mask = mask.expand_as(td_error)\n\n # 0-out the targets that came from padded data\n masked_td_error = td_error * mask\n\n # Normal L2 loss, take mean over actual data\n loss = (masked_td_error ** 2).sum() / mask.sum() + self.args.normalization_const * avg_difference\n\n # Optimise\n self.optimiser.zero_grad()\n loss.backward()\n grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)\n self.optimiser.step()\n\n if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:\n self._update_targets()\n self.last_target_update_episode = episode_num\n\n if t_env - self.log_stats_t >= self.args.learner_log_interval:\n self.logger.log_stat(\"loss\", loss.item(), t_env)\n self.logger.log_stat(\"grad_norm\", grad_norm, t_env)\n mask_elems = mask.sum().item()\n self.logger.log_stat(\"td_error_abs\", (masked_td_error.abs().sum().item()/mask_elems), t_env)\n self.logger.log_stat(\"q_taken_mean\", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)\n self.logger.log_stat(\"target_mean\", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)\n self.log_stats_t = t_env\n\n def _update_targets(self):\n self.target_mac.load_state(self.mac)\n if self.mixer is not None:\n 
self.target_mixer.load_state_dict(self.mixer.state_dict())\n self.logger.console_logger.info(\"Updated target network\")\n\n def cuda(self):\n self.mac.cuda()\n self.target_mac.cuda()\n if self.mixer is not None:\n self.mixer.cuda()\n self.target_mixer.cuda()\n\n def save_models(self, path):\n self.mac.save_models(path)\n if self.mixer is not None:\n th.save(self.mixer.state_dict(), \"{}/mixer.th\".format(path))\n th.save(self.optimiser.state_dict(), \"{}/opt.th\".format(path))\n\n def load_models(self, path):\n self.mac.load_models(path)\n # Not quite right but I don't want to save target networks\n self.target_mac.load_models(path)\n if self.mixer is not None:\n self.mixer.load_state_dict(th.load(\"{}/mixer.th\".format(path), map_location=lambda storage, loc: storage))\n self.optimiser.load_state_dict(th.load(\"{}/opt.th\".format(path), map_location=lambda storage, loc: storage))\n\n\n\n"
] |
[
[
"torch.optim.RMSprop",
"torch.nn.utils.clip_grad_norm_",
"torch.std",
"torch.gather",
"torch.stack"
]
] |
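`QLearner_6h_vs_8z.train` above builds double-Q targets and a TD loss with `torch.gather` and `torch.stack`. A minimal sketch of the target construction with simplified (batch, time, action) shapes, as an illustration of the mechanism rather than the repo's exact per-agent computation:

```python
# Sketch: 1-step double-Q targets and TD error with torch.gather.
import torch as th

B, T, A = 2, 5, 14                       # batch, timesteps, actions (illustrative)
mac_out = th.randn(B, T, A)              # live-network Q-values
target_out = th.randn(B, T, A)           # target-network Q-values
actions = th.randint(A, (B, T - 1, 1))
rewards = th.randn(B, T - 1)
terminated = th.zeros(B, T - 1)
gamma = 0.99

chosen_q = th.gather(mac_out[:, :-1], dim=2, index=actions).squeeze(2)
# Double Q-learning: argmax from the live network, value from the target network.
cur_max_actions = mac_out[:, 1:].max(dim=2, keepdim=True)[1]
target_max_q = th.gather(target_out[:, 1:], 2, cur_max_actions).squeeze(2)
targets = rewards + gamma * (1 - terminated) * target_max_q
td_error = chosen_q - targets.detach()
print((td_error ** 2).mean().item())
```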
choderalab/fragmenter_examples
|
[
"01d63aea340e91f8cbb3a21253a906a0c3c66da3",
"01d63aea340e91f8cbb3a21253a906a0c3c66da3",
"01d63aea340e91f8cbb3a21253a906a0c3c66da3",
"01d63aea340e91f8cbb3a21253a906a0c3c66da3"
] |
[
"wbo-manuscript-figures/proof_of_concept/generate_figures_coverage.py",
"combinatorial_fragmentation/fragment_bond_orders/compute_oe_wbo_parent.py",
"wbo-manuscript-figures/figure-12/Tedizolid_phosphate_0/generate_figure.py",
"wbo-manuscript-figures/figure-12/pemetrexed_0/generate_figure.py"
] |
[
"import json\nimport seaborn as sbn\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.colors as mcolors\nimport pandas as pd\nimport arch.bootstrap\nimport math\nimport qcfractal.interface as ptl\nfrom fragmenter.utils import HARTREE_2_KJMOL\nfrom fragmenter import chemi\nfrom openeye import oedepict, oechem, oegraphsim\nfrom openforcefield.topology import Molecule, Topology\nfrom openforcefield.typing.engines.smirnoff import ForceField\nimport pickle\n\ndef checkTorsion(smiles, torsion_indices, ff_name):\n \"\"\"\n Take mollist and check if the molecules in a list match a specific torsion id\n\n Parameters\n ----------\n molList : List of objects\n List of oemols with datatags generated in genData function\n\n Returns\n -------\n molList : list of objects\n List of oemol objects that have a datatag \"IDMatch\" that contain the torsion id\n involved in the QCA torsion drive\n \"\"\"\n\n matches = []\n count = 0\n mols = []\n #tid=''\n #molecule = Molecule.from_mapped_smiles(smiles)\n print(smiles)\n from openeye import oechem\n # create a new molecule\n #mol = oechem.OEGraphMol()\n # convert the SMILES string into a molecule\n #oechem.OESmilesToMol(mol,smiles)\n #molecule = Molecule.from_smiles(smiles)\n #molecule=Molecule.from_openeye(mol)\n\n molecule = Molecule.from_mapped_smiles(smiles)\n topology = Topology.from_molecules(molecule)\n # Let's label using the Parsley force field\n forcefield = ForceField(ff_name, allow_cosmetic_attributes=True)\n # Run the molecule labeling\n molecule_force_list = forcefield.label_molecules(topology)\n params = []\n indices=[]\n # Print out a formatted description of the torsion parameters applied to this molecule\n for mol_idx, mol_forces in enumerate(molecule_force_list):\n # print(f'Forces for molecule {mol_idx}')\n for force_tag, force_dict in mol_forces.items():\n if force_tag == \"ProperTorsions\":\n for (atom_indices, parameter) in force_dict.items():\n params.append(parameter.id)\n indices.append(atom_indices)\n #torsion_indices=tuple(torsion_indices)\n #print(type(torsion_indices))\n print(torsion_indices)\n #print(type(atom_indices))\n print(atom_indices)\n if atom_indices == torsion_indices or tuple(\n reversed(atom_indices)\n ) == torsion_indices:\n #mol.SetData(\"IDMatch\", parameter.id)\n tid=parameter.id\n print(params)\n print(indices)\n return tid\n\n\nclient = ptl.FractalClient()\n# from the TorsionDriveDataset collection picking up given datasetName\nds = client.get_collection(\"TorsionDriveDataset\", 'OpenFF Substituted Phenyl Set 1')\n\ndef testQuery(smiles):\n #print(ds.get_entry(smiles))\n #print(dir(ds.get_entry(smiles)))\n dih=ds.get_entry(smiles).dict()['td_keywords']['dihedrals'][0]\n print(dih)\n mapped_smiles = ds.get_entry(smiles).attributes[\"canonical_isomeric_explicit_hydrogen_mapped_smiles\"]\n #print(mapped_smiles)\n return mapped_smiles, dih\n\n\ndef biphenyl(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n for key, item in data.items():\n testQuery(key)\nbiphenyl('biphenyls_set_input.json')\n\n\ncolor_keys= ['maroon', 'brown', 'indianred', 'red', 'coral','orange', 'gold', 'darkkhaki','yellowgreen','limegreen',\n 'mediumseagreen', 'teal', 'steelblue', 'cornflowerblue', 'royalblue', 'darkblue',\n 'mediumblue', 'slateblue', 'blueviolet', 'purple','mediumvioletred', 'deeppink', 'hotpink',\n 'palevioletred', 'pink', 'lightpink']\n\n\n\ncolor_keys2=['darkblue',\n 'mediumblue', 'slateblue', 'blueviolet', 'purple','mediumvioletred', 'deeppink', 
'hotpink',\n 'cornflowerblue', 'pink', 'lightpink']\n\ncolor_keys2=['teal', 'hotpink', 'purple', 'gold', 'orange', 'slateblue', 'darkkhaki', 'lightpink', 'purple', 'hotpink']\n\nfgroup_symbols_colors = {\n #'phenoxide': 'C[O-]',\n 'dimethylamino': (r'$\\mathrm{\\mathsf{N(Me)_2}}$', color_keys[0]),\n 'methylamino': (r'$\\mathrm{\\mathsf{NHMe}}$', color_keys[1]),\n 'amino': (r'$\\mathrm{\\mathsf{NH_2}}$', color_keys[2]),\n 'ethylamino': (r'$\\mathrm{\\mathsf{NHEt}}$', color_keys[3]),\n 'propylamino': (r'$\\mathrm{\\mathsf{NH(C_3H_7)}}$', color_keys[4]),\n 'hydroxy': (r'$\\mathrm{\\mathsf{OH}}$', color_keys[5]),\n 'methoxy': (r'$\\mathrm{\\mathsf{OMe}}$', color_keys[6]),\n 'ethoxy': (r'$\\mathrm{\\mathsf{OEt}}$', color_keys[7]),\n 'dimethylurea': (r'$\\mathrm{\\mathsf{NHCON(Me)_2}}$', color_keys[8]),\n 'urea': (r'$\\mathrm{\\mathsf{NHCONHMe}}$', color_keys[9]),\n 'phenylurea': (r'$\\mathrm{\\mathsf{NHCONH_2}}$', color_keys[10]),\n 'ethylamide': (r'$\\mathrm{\\mathsf{NHCOEt}}$', color_keys[11]),\n 'amide': (r'$\\mathrm{\\mathsf{NHCOMe}}$', color_keys[12]),\n 'fluoro': (r'$\\mathrm{\\mathsf{F}}$', color_keys[13]),\n 'chloro': (r'$\\mathrm{\\mathsf{Cl}}$', color_keys[14]),\n 'cyano': (r'$\\mathrm{\\mathsf{CN}}$', color_keys[15]),\n 'methyl': (r'$\\mathrm{\\mathsf{Me}}$', color_keys[16]),\n 'bromo': (r'$\\mathrm{\\mathsf{Br}}$', color_keys[17]),\n 'carbamate': (r'$\\mathrm{\\mathsf{OCONH_2}}$', color_keys[18]),\n 'benzoicacid': (r'$\\mathrm{\\mathsf{COOH}}$', color_keys[19]),\n 'iodo': (r'$\\mathrm{\\mathsf{I}}$', color_keys[20]),\n 'ethoxycarbonyl': (r'$\\mathrm{\\mathsf{COOEt}}$', color_keys[21]),\n 'trimethylamonium': (r'$\\mathrm{\\mathsf{N(Me)_3^+}}$', color_keys[22]),\n 'trifluoromethyl': (r'$\\mathrm{\\mathsf{CF_3}}$', color_keys[23]),\n 'nitro': (r'$\\mathrm{\\mathsf{NO_2}}$', color_keys[24])\n}\n\n\n\n# Generate joy plot\nfgroup_wbos = {}\nfor fgroup in fgroup_symbols_colors:\n if fgroup not in fgroup_wbos:\n fgroup_wbos[fgroup] = []\n with open('../../phenyl_benchmark/data/{}_R1_wbos.json'.format(fgroup), 'r') as f:\n wbos = json.load(f)\n for w in wbos:\n fgroup_wbos[fgroup].append(w[0])\n\ncolors = mcolors.CSS4_COLORS\n\nfig, axes = plt.subplots(len(fgroup_wbos))\nfor i, fgroup in enumerate(fgroup_wbos):\n ax = plt.subplot(len(fgroup_wbos), 1, i+1)\n ax.spines['left'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.patch.set_facecolor('none')\n sbn.kdeplot(fgroup_wbos[fgroup], shade=True, alpha=0.6,\n color=colors[fgroup_symbols_colors[fgroup][1]])\n sbn.kdeplot(fgroup_wbos[fgroup], shade=False, color='black', lw=0.8)\n plt.xlim(0.70, 1.4)\n plt.yticks([])\n ax.yaxis.set_label_coords(-0.05, 0)\n plt.ylabel(fgroup_symbols_colors[fgroup][0], rotation=0, size=10,\n color=colors[fgroup_symbols_colors[fgroup][1]])\n if i == len(fgroup_wbos)-1:\n plt.xlabel('AM1 ELF10 Wiberg bond order', fontsize=14)\n plt.xticks(fontsize=14)\n else:\n plt.xticks([])\n\noverlap=1.0\nh_pad = 5 + (- 5*(1 + overlap))\nfig.tight_layout(h_pad=h_pad)\nplt.savefig('figures/wbo_dist_joy_plot.pdf')\n\n\n# See if there is a correlation with Hammet sigma parameters. 
Values were taken from\n# doi:10.1021/cr00002a004\nsubs = ['H','dimethylamino', 'methylamino', 'amino', 'ethylamino', 'hydroxy', 'methoxy', 'phenylurea', 'amide',\n 'fluoro', 'chloro','cyano', 'methyl', 'bromo', 'benzoicacid', 'ethoxycarbonyl', 'trifluoromethyl', 'nitro']\nsigma_m = [0.0, -0.16, -0.21, -0.16, -0.24, 0.12, 0.12, -0.02, 0.21, 0.34, 0.37, 0.56, -0.07, 0.39, 0.37, 0.37, 0.43, 0.71]\nsigma_p = [0.0, -0.83, -0.70, -0.66, -0.61, -0.37, -0.27, -0.24, 0.0, 0.06, 0.23, 0.66, -0.17, 0.45, 0.45, 0.45, 0.54, 0.78]\nwbo_cooh_meta = [0.96, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.96, 0.96, 0.95, 0.95, 0.95, 0.96, 0.95, 0.96, 0.95, 0.95, 95]\nwbo_cooh_para = [0.96, 0.97, 0.97, 0.97, 0.97, 0.96, 0.96, 0.97, 0.97, 0.96, 0.96, 0.96, 0.96, 0.96, 0.95, 0.95, 0.95, 95]\nwbo_r_meta = [0.96, 1.07, 1.08, 1.12, 1.08, 1.06, 1.04, 1.02, 1.02, 1.02, 1.0, 1.0, 1.0, 0.99, 0.96, 0.93, 0.91, 0.85]\nwbo_r_para = [0.96, 1.11, 1.10, 1.12, 1.14, 1.08, 1.05, 1.04, 1.03, 1.03, 1.01, 1.0, 1.0, 0.99, 0.95, 0.93, 0.91, 0.85]\n\nhammet_sigmas = {'substituent':subs, 'sigma_p': sigma_p, 'sigma_m': sigma_m, 'wbo_cooh_meta': wbo_cooh_meta,\n 'wbo_cooh_para': wbo_cooh_para,'wbo_r_meta': wbo_r_meta, 'wbo_r_para': wbo_r_para}\ndf = pd.DataFrame(hammet_sigmas)\n\n# plot correlation\nmarkersize=9\nfontsize=8\nfor sigma in ('m', 'p'):\n fig, ax = plt.subplots()\n for row in df.iterrows():\n if sigma == 'm':\n x = row[1].wbo_r_meta\n y = row[1].sigma_m\n if sigma == 'p':\n x = row[1].wbo_r_para\n y = row[1].sigma_p\n if row[1].substituent == 'H':\n plt.plot(x, y, '.', color='black', markersize=markersize, label='H')\n plt.annotate('H', (x, y),\n textcoords='offset points', xytext=(3, 2), color='black', fontsize=fontsize)\n continue\n plt.plot(x, y, '.', markersize=markersize, color=fgroup_symbols_colors[row[1].substituent][1],\n label=fgroup_symbols_colors[row[1].substituent][0])\n plt.annotate(fgroup_symbols_colors[row[1].substituent][0], (x, y),\n textcoords='offset points', xytext=(3, 2), color= fgroup_symbols_colors[row[1].substituent][1], fontsize=fontsize)\n\n plt.xlim(0.83, 1.16)\n plt.ylim(-0.86, 0.85)\n plt.ylabel(r'$\\sigma_{}$'.format(sigma), fontsize=14)\n plt.xlabel('AM1 ELF10 Wiberg Bond Order', fontsize=14);\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n if sigma == 'm':\n r_value = df.corr().sigma_m.wbo_r_meta\n if sigma == 'p':\n r_value = df.corr().sigma_p.wbo_r_para\n #print(r_value)\n textstr = r'$\\rho =%.2f$' % (r_value)\n props = dict(boxstyle='square', facecolor='white', alpha=0.5)\n ax.text(0.75, 0.95, textstr, transform=ax.transAxes, fontsize=14,\n verticalalignment='top', bbox=props)\n plt.tight_layout()\n fig.savefig('figures/hammett_sigma_{}.pdf'.format(sigma))\n\n\n# Generate torsion barrier height vs ELF10 AM1 WBO plot\nwith open('../../phenyl_benchmark/data/qcarchive_torsiondrives.json', 'r') as f:\n fgroups_td = json.load(f)\n\n# Generate 2 plots. 
One for good lines and one for lines that have issues\nplot_1 = ['dimethylamino', 'methylamino', 'ethylamino', 'propylamino', 'hydroxy', 'methoxy', 'phenylurea', 'benzoicacid', 'nitro']\nplot_2 = ['amino', 'ethoxy', 'dimethylurea', 'urea', 'ethylamide', 'amide', 'carbamate', 'ethoxycarbonyl']\nsymbols = ['o', 'P', '^', '*', 's', 'p', 'X', 'd', 'H', '>']\n\nboth_plots=plot_1 + plot_2\n\ndef r_value_ci(am1_wbos, max_energies):\n return stats.linregress(am1_wbos, max_energies)[2]**2\n\nfontsize = 14\nfig, ax = plt.subplots()\ncolors = []\nr_values = []\nfor i, fgroup in enumerate(plot_1):\n if fgroup not in fgroups_td:\n print(fgroup)\n continue\n energies = fgroups_td[fgroup]['energy']\n am1_wbos = fgroups_td[fgroup]['elf10_am1_wbo']\n max_energies = [max(energy) for energy in energies]\n slope, intercept, r_value, p_value, std_err = stats.linregress(am1_wbos, max_energies)\n r_ci = arch.bootstrap.IIDBootstrap(np.asarray(am1_wbos), np.asarray(max_energies)).conf_int(r_value_ci, 1000, method='percentile')\n #print(r_ci)\n fgroups_td[fgroup]['stats'] = [slope, std_err, r_value**2, r_ci[0][0], r_ci[1][0]]\n plt.plot(np.unique(am1_wbos), np.poly1d([slope, intercept])(np.unique(am1_wbos)), fgroup_symbols_colors[fgroup][1])\n plt.scatter(x=am1_wbos, y=max_energies, color=fgroup_symbols_colors[fgroup][1], marker=symbols[i], label=fgroup_symbols_colors[fgroup][0])\n colors.append(fgroup_symbols_colors[fgroup][1])\n r_values.append([r_value**2, r_ci[0][0], r_ci[1][0]])\n\nl = ax.legend(bbox_to_anchor=(1, 1), fontsize=fontsize)\nfor i, text in enumerate(l.get_texts()):\n text.set_color(colors[i])\n\nplt.xlabel('AM1 ELF10 Wiberg bond order', fontsize=fontsize)\nplt.ylabel('Torsion barrier height (kJ/mol)', fontsize=fontsize)\nplt.xlim(0.8, 1.3)\nplt.ylim(0, 50)\nplt.xticks(fontsize=fontsize)\nplt.yticks(fontsize=fontsize)\nplt.tight_layout()\nplt.savefig('figures/energy_vs_wbo_1.pdf')\n\ncolors = []\nig, ax = plt.subplots()\ntig_dict={'TIG1':[[],[]], 'TIG2':[[],[]], 'TIG3':[[],[]], 'TIG4':[[],[]], 'TIG5':[[],[]], 'TIG6':[[],[]], 'TIG7':[[],[]], 'TIG8':[[],[]], 'TIG9':[[],[]], 'TIG10':[[],[]]}\nmolDict={}\n\"\"\"\nfor i, fgroup in enumerate(both_plots):\n if fgroup not in fgroups_td:\n continue\n print(i)\n print(fgroup)\n energies = fgroups_td[fgroup]['energy']\n am1_wbos = fgroups_td[fgroup]['elf10_am1_wbo']\n max_energies = [max(energy) for energy in energies]\n molcount=0\n torsions=[]\n for i, smiles in enumerate(fgroups_td[fgroup]['indices']):\n molDict[smiles]=[am1_wbos[i], max_energies[i]]\n molcount+=1\n #testQuery(smiles)\n #with open('../../phenyl_benchmark/data/{}_td_job_indices.json'.format(fgroup), 'r') as f:\n #/Users/jessica/Documents/Grad_research/fragmenter_data/wbo-manuscript-figures/proof_of_concept/data/data\n with open('data/data/{}_td_job_indices.json'.format(fgroup), 'r') as f:\n indices = json.load(f)\n for m in indices:\n if m[0] == smiles:\n molDict[smiles].extend([m[1], m[4]])\n for sm, dd in molDict.items():\n print(dd)\n smiles, dih=testQuery(sm)\n ff='tig_proof_of_concept_1.3.0.offxml'\n tid = checkTorsion(smiles, dih, ff)\n torsions.append(tid)\n tig_dict[tid][0].append(dd[0])\n tig_dict[tid][1].append(dd[1])\n print(molcount)\n print(tig_dict)\n print(torsions)\n print(len(torsions))\n with open('biphenyl_data.pickle', 'rb') as handle:\n b = pickle.load(handle)\n for key, item in b.items():\n smiles, dih=testQuery(key)\n tid = checkTorsion(smiles, item[2], ff)\n tig_dict[tid][0].append(item[0])\n tig_dict[tid][1].append(item[1])\n\n import pickle\n with 
open(\"wbotb.pkl\", \"wb\") as f:\n pickle.dump(tig_dict, f)\n\"\"\"\ndef makeCovPlot(filename):\n with open(filename, \"rb\") as f:\n plotdata = pickle.load(f)\n #print(plotdata)\n count=0\n colors=[]\n tid_td={}\n for key, data in plotdata.items():\n am1_wbos=data[0]\n max_energies=data[1]\n if am1_wbos==[]:\n continue\n #print(am1_wbos)\n #print(max_energies)\n\n slope, intercept, r_value, p_value, std_err = stats.linregress(am1_wbos, max_energies)\n r_ci = arch.bootstrap.IIDBootstrap(np.asarray(am1_wbos), np.asarray(max_energies)).conf_int(r_value_ci, 10000, method='percentile')\n #print(r_ci)\n fgroups_td[fgroup]['stats'] = [slope, std_err, r_value**2, r_ci[0][0], r_ci[1][0]]\n tid_td[key] = [slope, std_err, r_value**2, r_ci[0][0], r_ci[1][0]]\n plt.plot(np.unique(am1_wbos), np.poly1d([slope, intercept])(np.unique(am1_wbos)), color_keys2[count])\n plt.scatter(x=am1_wbos, y=max_energies, color=color_keys2[count], marker=symbols[count], label=key)\n colors.append(color_keys2[count])\n count+=1\n #store statistics from the td vs wbo plot for table generation\n with open(\"table_data.pkl\", \"wb\") as f:\n pickle.dump(tid_td, f)\n\n l = ax.legend(bbox_to_anchor=(1, 1), fontsize=fontsize)\n for i, text in enumerate(l.get_texts()):\n text.set_color(colors[i])\n\n plt.xlabel('AM1 ELF10 Wiberg bond order', fontsize=fontsize)\n plt.ylabel('Torsion barrier height (kJ/mol)', fontsize=fontsize)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n #plt.xlim(0.8, 1.5)\n #plt.ylim(0, 100)\n plt.tight_layout()\n plt.savefig('energy_vs_wbo_full_newcolors.pdf')\n\nmakeCovPlot('wbotb.pkl')\n\n\n# generate table\nstats_table = {'Parameter': [], 'smarts':[], 'slope': [],'standard error': [], 'r^2': [], 'CI_1': [], 'CI_2': []}\n#[slope, std_err, r_value**2, r_ci[0][0], r_ci[1][0]]\n\nwith open('table_data.pkl', 'rb') as f:\n tabledata = pickle.load(f)\nsmartsDict={\n 'TIG1':'[*:1]~[#6X3:2]-[#6X3:3]~[*:4]',\n 'TIG2':'[*:1]~[#6X3:2]-[#6X3$(*=[#8,#16,#7]):3]~[*:4]',\n 'TIG3':'[*:1]~[#6X3:2]-[#6X3:3](-[#8H1])=[#8X1:4]',\n 'TIG4':'[*:1]~[#7X3:2]-!@[#6X3:3]~@[#6:4]',\n 'TIG5':'[#6X3:1]~[#7X3:2]-!@[#6X3:3]~@[#6:4]',\n 'TIG6':'[#6X3$(*~[#6]):1]~[#7X3:2]-!@[#6X3:3]~@[#6:4]',\n 'TIG7':'[#6X4:1]~[#7X3:2]-!@[#6X3:3]~@[#6:4]',\n 'TIG8':'[#8X1:1]~[#7X3:2]~[#6X3:3]~[*:4]',\n 'TIG9':'[*:1]~[#6X3:2]-[#8X2:3]-[*:4]',\n 'TIG10':'[*:1]~[#6X3:2]-[#8X2:3]-[#1:4]'\n }\nfor key, item in tabledata.items():\n stats_table['Parameter'].append(key)\n stats_table['smarts'].append(smartsDict[key])\n stats_table['slope'].append(round(item[0],2))\n stats_table['standard error'].append(round(item[1],2))\n stats_table['r^2'].append(round(item[2],2))\n stats_table['CI_1'].append(round(item[3], 2))\n stats_table['CI_2'].append(round(item[4], 2))\nlatex_table = pd.DataFrame(stats_table).to_latex(index=False)\nwith open('figures/stats_tid.tex', 'w') as f:\n f.write(latex_table)\n\n",
"import fragmenter\nimport json\nimport cmiles\nfrom openeye import oechem, oequacpac\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\nimport seaborn as sbn\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\"calculate OE WBO\")\n parser.add_argument('-n', '--name', type=str, help='Molecule name')\n args = parser.parse_args()\n name = args.name\n\n with open('../filter/filtered_kinase_inhibitors.json', 'r') as f:\n kinase_inhibitors = json.load(f)\n kinase_inhibitors[name] = kinase_inhibitors[name]\n\n mapped_smiles = kinase_inhibitors[name]['canonical_isomeric_explicit_hydrogen_mapped_smiles']\n oemol = cmiles.utils.load_molecule(mapped_smiles, toolkit='openeye')\n charged = fragmenter.chemi.get_charges(oemol, keep_confs=-1)\n\n oe_wbo_full = {}\n for bond in charged.GetBonds():\n bond_key = (bond.GetBgn().GetMapIdx(), bond.GetEnd().GetMapIdx())\n oe_wbo_full[bond_key] = {'ensamble': bond.GetData('WibergBondOrder')}\n oe_wbo_full[bond_key]['individual_conf'] = []\n\n for i, conf in enumerate(charged.GetConfs()):\n mol_copy = oechem.OEMol(conf)\n # Get WBO\n if oequacpac.OEAssignPartialCharges(mol_copy, oequacpac.OECharges_AM1BCCSym):\n for bond in mol_copy.GetBonds():\n bond_key = (bond.GetBgn().GetMapIdx(), bond.GetEnd().GetMapIdx())\n try:\n oe_wbo_full[bond_key]['individual_conf'].append(bond.GetData('WibergBondOrder'))\n except KeyError:\n if 0 in bond_key:\n oe_wbo_full[bond_key] = {'individual_conf': [bond.GetData('WibergBondOrder')]}\n else:\n reverse_key = tuple(reversed(bond_key))\n oe_wbo_full[reverse_key]['individual_conf'].append(bond.GetData('WibergBondOrder'))\n else:\n print('AM1BCC charging failed for {}, {}'.format(str(i), i))\n\n # serialize and save\n serialized = {}\n for bond in oe_wbo_full:\n key = fragmenter.workflow_api.serialize_key(bond)\n serialized[key] = oe_wbo_full[bond]\n # save file\n with open('{}/{}_parent_oe_wbo.json'.format(name, name), 'w') as f:\n json.dump(serialized, f, indent=2, sort_keys=True)\n\n # replot to include a red distribution for parent\n # load others, deserialize and replot\n with open('{}/{}_oe_wbo_by_bond.json'.format(name, name), 'r') as f:\n by_bond = json.load(f)\n frag_with_bond = {}\n for bond in by_bond:\n key = bond.split('[')[-1].split(']')[0].split(',')\n key = (int(key[0]), int(key[1]))\n frag_with_bond[key] = by_bond[bond]\n # add parent\n for bond in frag_with_bond:\n try:\n full = oe_wbo_full[bond]\n except KeyError:\n key = (bond[-1], bond[0])\n full = oe_wbo_full[key]\n frag_with_bond[bond]['parent'] = full\n # serialize and save\n\n serialized_with_parent = {}\n for bond in frag_with_bond:\n key =fragmenter. 
workflow_api.serialize_key(bond)\n serialized_with_parent[key] = frag_with_bond[bond]\n with open('{}/{}_oe_wbo_by_bond_with_parent.json'.format(name, name), 'w') as f:\n json.dump(serialized_with_parent, f, indent=2, sort_keys=True)\n\n # sort fragments by wbo\n sorted_frags = {}\n for b in frag_with_bond:\n list_1 = []\n list_2 = []\n for frag in frag_with_bond[b]:\n list_1.append(frag)\n list_2.append(frag_with_bond[b][frag]['ensamble'])\n sorted_frags[b] = [x for _,x in sorted(zip(list_2, list_1))]\n\n rot_bonds = list(frag_with_bond.keys())\n\n # plot results on one pdf page\n with PdfPages('{}/{}_fragment_bond_orders_with_parent.pdf'.format(name, name)) as pdf:\n for b in rot_bonds:\n #b = rot_bonds[3]\n n = len(frag_with_bond[b])\n\n fig, axes = plt.subplots(n, 1)\n fig.dpi = 400\n x_min = 3\n x_max = 0\n for f in frag_with_bond[b]:\n wbo = frag_with_bond[b][f]['individual_conf']\n if min(wbo) < x_min:\n x_min = min(wbo)\n if max(wbo) > x_max:\n x_max = max(wbo)\n\n for i, frag in enumerate(sorted_frags[b]):\n wbo = frag_with_bond[b][frag]['individual_conf']\n\n wbo_s = frag_with_bond[b][frag]['ensamble']\n ax = plt.subplot(n, 1, i+1)\n ax.spines['left'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.patch.set_facecolor('none')\n if frag == 'parent':\n sbn.kdeplot(wbo, shade= True, color='red', alpha=0.8)\n else:\n sbn.kdeplot(wbo, shade= True, alpha=0.8)\n sbn.distplot(wbo, hist=False, rug=True, kde=False, color='black')\n sbn.kdeplot(wbo, lw=1, color='black')\n plt.axvline(x=wbo_s, ymin=0, ymax=1, color='black', linewidth=0.5)\n\n plt.xlim(x_min-0.05, x_max+0.05)\n plt.yticks([])\n ax.yaxis.set_label_coords(-0.05, 0)\n plt.ylabel(i, rotation=0, size=8)\n if i != n-1:\n plt.xticks([])\n else:\n plt.xlabel('Bond order')\n if i == 0:\n plt.title('bond {}'.format(b))\n overlap=0.5\n h_pad = 5 + (- 5*(1 + overlap))\n fig.tight_layout(h_pad=h_pad)\n pdf.savefig(bbox_inches='tight')\n plt.close()\n\n for b in frag_with_bond:\n try:\n wbo = oe_wbo_full[b]['ensamble']\n except KeyError:\n wbo = oe_wbo_full[(b[-1], b[0])]['ensamble']\n fragmenter.chemi.highlight_bond_by_map_idx(mapped_smiles, [b], wbo=wbo, fname='{}/parent_bond_{}_{}.png'.format(name, b[0], b[1]))\n\n\n\n\n",
"import fragmenter\nimport json\nfrom openeye import oechem, oequacpac, oedepict, oegraphsim\nimport matplotlib.pyplot as plt\nimport glob\nimport seaborn as sbn\nimport oenotebook as onb\nimport cmiles\nimport itertools\nimport numpy as np\n\ndef get_bond(mol, bond_tuple):\n a1 = mol.GetAtom(oechem.OEHasMapIdx(bond_tuple[0]))\n a2 = mol.GetAtom(oechem.OEHasMapIdx(bond_tuple[1]))\n if not a1 or not a2:\n print('no atoms')\n return False\n bond = mol.GetBond(a1, a2)\n if not bond:\n print('no bond')\n return False\n return bond\n\ndef visualize_mols(smiles, fname, rows, cols, bond_idx, wbos, colors, align_to=0):\n \"\"\"\n Visualize molecules with highlighted bond and labeled with WBO\n Parameters\n ----------\n smiles : list of SMILES to visualize.\n bond atoms should have map indices\n fname : str\n filename\n rows : int\n cols : int\n bond_idx : tuple of atom maps of bond to highlight.\n wbos : list of floats\n colors : list of hex values for colors\n align_to: int, optional, default 0\n index for which molecule to align to. If zero, will align to first molecules in SMILES list\n\n \"\"\"\n itf = oechem.OEInterface()\n\n ropts = oedepict.OEReportOptions(rows, cols)\n ropts.SetHeaderHeight(25)\n ropts.SetFooterHeight(25)\n ropts.SetCellGap(2)\n ropts.SetPageMargins(10)\n report = oedepict.OEReport(ropts)\n\n cellwidth, cellheight = report.GetCellWidth(), report.GetCellHeight()\n opts = oedepict.OE2DMolDisplayOptions(cellwidth, cellheight, oedepict.OEScale_AutoScale)\n oedepict.OESetup2DMolDisplayOptions(opts, itf)\n\n # align to chosen molecule\n ref_mol = oechem.OEGraphMol()\n oechem.OESmilesToMol(ref_mol, smiles[align_to])\n oedepict.OEPrepareDepiction(ref_mol)\n\n mols = []\n minscale = float(\"inf\")\n for s in smiles:\n mol = oechem.OEMol()\n oechem.OESmilesToMol(mol, s)\n mols.append(mol)\n oedepict.OEPrepareDepiction(mol, False, True)\n minscale = min(minscale, oedepict.OEGetMoleculeScale(mol, opts))\n print(minscale)\n\n print(minscale)\n opts.SetScale(minscale)\n for i, mol in enumerate(mols):\n\n cell = report.NewCell()\n oedepict.OEPrepareDepiction(mol, False, True)\n bond = get_bond(mol, bond_idx)\n atom_bond_set = oechem.OEAtomBondSet()\n atom_bond_set.AddAtoms([bond.GetBgn(), bond.GetEnd()])\n atom_bond_set.AddBond(bond)\n\n hstyle = oedepict.OEHighlightStyle_BallAndStick\n if i == 3:\n hcolor = oechem.OERed\n else:\n hcolor = oechem.OEColor(*colors[i])\n\n overlaps = oegraphsim.OEGetFPOverlap(ref_mol, mol, oegraphsim.OEGetFPType(oegraphsim.OEFPType_Tree))\n oedepict.OEPrepareMultiAlignedDepiction(mol, ref_mol, overlaps)\n\n disp = oedepict.OE2DMolDisplay(mol, opts)\n oedepict.OEAddHighlighting(disp, hcolor, hstyle, atom_bond_set)\n\n bond_label = oedepict.OEHighlightLabel(\"{:.2f}\".format((wbos[i])), hcolor)\n oedepict.OEAddLabel(disp, bond_label, atom_bond_set)\n oedepict.OERenderMolecule(cell, disp)\n # oedepict.OEDrawCurvedBorder(cell, oedepict.OELightGreyPen, 10.0)\n\n return (oedepict.OEWriteReport(fname, report))\n\ndef rbg_to_int(rbg, alpha):\n \"\"\"\n Convert rbg color to ints for openeye\n Parameters\n ----------\n rbg : list\n rbg\n alpha : int\n\n Returns\n -------\n list of ints\n\n \"\"\"\n rbg[-1] = int(rbg[-1]*alpha)\n colors = [int(round(i*255)) for i in rbg[:-1]]\n colors.append(int(rbg[-1]))\n return colors\n\nwith open('Tedizolid_phosphate_0_wbo_dists.json', 'r') as f:\n results = json.load(f)\nresults = results['[9, 23]']\nwith open('Tedizolid_phosphate_0_pfizer_wbo_dists.json', 'r') as f:\n pfizer_results = 
json.load(f)\n\nsbn.kdeplot(results['parent']['wbo_dist'], shade=True)\nsbn.distplot(results['parent']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[0])\nsbn.distplot(results['parent']['wbo_dist'], hist=False, color=sbn.color_palette()[0])\n\nsbn.kdeplot(results['0.05_path_length_False_None']['wbo_dist'], shade=True)\nsbn.distplot(results['0.05_path_length_False_None']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[1])\nsbn.distplot(results['0.05_path_length_False_None']['wbo_dist'], hist=False, color=sbn.color_palette()[1])\n\nsbn.kdeplot(results['0.1_path_length_False_None']['wbo_dist'], shade=True)\nsbn.distplot(results['0.1_path_length_False_None']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[2])\nsbn.distplot(results['0.1_path_length_False_None']['wbo_dist'], hist=False, color=sbn.color_palette()[2])\n\n\nsbn.kdeplot(pfizer_results['[9, 23]']['wbo_dist'], shade=True)\nsbn.distplot(pfizer_results['[9, 23]']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[3])\nsbn.distplot(pfizer_results['[9, 23]']['wbo_dist'], hist=False, color=sbn.color_palette()[3])\nplt.xticks(fontsize=14)\nplt.yticks([])\nplt.xlabel('Wiberg Bond Order', fontsize=14)\nplt.tight_layout()\nplt.savefig('wbo_dists.pdf')\n\ncolors = [rbg_to_int(list(i), alpha=255) for i in sbn.color_palette()[:3]]\nwbos = [results['parent']['elf10_wbo'], results['0.05_path_length_False_None']['elf10_wbo'],\n results['0.1_path_length_False_None']['elf10_wbo'], pfizer_results['[9, 23]']['elf10_wbo']]\nfrags = [results['parent']['frag'], results['0.05_path_length_False_None']['frag'],\n results['0.1_path_length_False_None']['frag'], pfizer_results['[9, 23]']['frag']]\nvisualize_mols(frags, cols=2, rows=2, bond_idx=(9, 23), colors=colors, wbos=wbos, fname='fragments.pdf', align_to=0)",
"import fragmenter\nimport json\nfrom openeye import oechem, oequacpac, oedepict, oegraphsim\nimport matplotlib.pyplot as plt\nimport glob\nimport seaborn as sbn\nimport oenotebook as onb\nimport cmiles\nimport itertools\nimport numpy as np\n\ndef get_bond(mol, bond_tuple):\n a1 = mol.GetAtom(oechem.OEHasMapIdx(bond_tuple[0]))\n a2 = mol.GetAtom(oechem.OEHasMapIdx(bond_tuple[1]))\n if not a1 or not a2:\n print('no atoms')\n return False\n bond = mol.GetBond(a1, a2)\n if not bond:\n print('no bond')\n return False\n return bond\n\ndef visualize_mols(smiles, fname, rows, cols, bond_idx, wbos, colors, align_to=0):\n \"\"\"\n Visualize molecules with highlighted bond and labeled with WBO\n Parameters\n ----------\n smiles : list of SMILES to visualize.\n bond atoms should have map indices\n fname : str\n filename\n rows : int\n cols : int\n bond_idx : tuple of atom maps of bond to highlight.\n wbos : list of floats\n colors : list of hex values for colors\n align_to: int, optional, default 0\n index for which molecule to align to. If zero, will align to first molecules in SMILES list\n\n \"\"\"\n itf = oechem.OEInterface()\n\n ropts = oedepict.OEReportOptions(rows, cols)\n ropts.SetHeaderHeight(25)\n ropts.SetFooterHeight(25)\n ropts.SetCellGap(2)\n ropts.SetPageMargins(10)\n report = oedepict.OEReport(ropts)\n\n cellwidth, cellheight = report.GetCellWidth(), report.GetCellHeight()\n opts = oedepict.OE2DMolDisplayOptions(cellwidth, cellheight, oedepict.OEScale_AutoScale)\n oedepict.OESetup2DMolDisplayOptions(opts, itf)\n\n # align to chosen molecule\n ref_mol = oechem.OEGraphMol()\n oechem.OESmilesToMol(ref_mol, smiles[align_to])\n oedepict.OEPrepareDepiction(ref_mol)\n\n mols = []\n minscale = float(\"inf\")\n for s in smiles:\n mol = oechem.OEMol()\n oechem.OESmilesToMol(mol, s)\n mols.append(mol)\n oedepict.OEPrepareDepiction(mol, False, True)\n minscale = min(minscale, oedepict.OEGetMoleculeScale(mol, opts))\n print(minscale)\n\n print(minscale)\n opts.SetScale(minscale)\n for i, mol in enumerate(mols):\n\n cell = report.NewCell()\n oedepict.OEPrepareDepiction(mol, False, True)\n bond = get_bond(mol, bond_idx)\n atom_bond_set = oechem.OEAtomBondSet()\n atom_bond_set.AddAtoms([bond.GetBgn(), bond.GetEnd()])\n atom_bond_set.AddBond(bond)\n\n hstyle = oedepict.OEHighlightStyle_BallAndStick\n if i == 3:\n hcolor = oechem.OERed\n else:\n hcolor = oechem.OEColor(*colors[i])\n\n overlaps = oegraphsim.OEGetFPOverlap(ref_mol, mol, oegraphsim.OEGetFPType(oegraphsim.OEFPType_Tree))\n oedepict.OEPrepareMultiAlignedDepiction(mol, ref_mol, overlaps)\n\n disp = oedepict.OE2DMolDisplay(mol, opts)\n oedepict.OEAddHighlighting(disp, hcolor, hstyle, atom_bond_set)\n\n bond_label = oedepict.OEHighlightLabel(\"{:.2f}\".format((wbos[i])), hcolor)\n oedepict.OEAddLabel(disp, bond_label, atom_bond_set)\n oedepict.OERenderMolecule(cell, disp)\n # oedepict.OEDrawCurvedBorder(cell, oedepict.OELightGreyPen, 10.0)\n\n return (oedepict.OEWriteReport(fname, report))\n\ndef rbg_to_int(rbg, alpha):\n \"\"\"\n Convert rbg color to ints for openeye\n Parameters\n ----------\n rbg : list\n rbg\n alpha : int\n\n Returns\n -------\n list of ints\n\n \"\"\"\n rbg[-1] = int(rbg[-1]*alpha)\n colors = [int(round(i*255)) for i in rbg[:-1]]\n colors.append(int(rbg[-1]))\n return colors\nname = 'Pemetrexed_0'\nbond = '[7, 13]'\nwith open('{}_wbo_dists.json'.format(name), 'r') as f:\n results = json.load(f)\nresults = results[bond]\nwith open('{}_pfizer_wbo_dists.json'.format(name), 'r') as f:\n pfizer_results = 
json.load(f)\n\nsbn.kdeplot(results['parent']['wbo_dist'], shade=True)\nsbn.distplot(results['parent']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[0])\nsbn.distplot(results['parent']['wbo_dist'], hist=False, color=sbn.color_palette()[0])\n\nsbn.kdeplot(results['0.01_path_length_False_None']['wbo_dist'], shade=True)\nsbn.distplot(results['0.01_path_length_False_None']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[1])\nsbn.distplot(results['0.01_path_length_False_None']['wbo_dist'], hist=False, color=sbn.color_palette()[1])\n\nsbn.kdeplot(results['0.03_path_length_False_None']['wbo_dist'], shade=True)\nsbn.distplot(results['0.03_path_length_False_None']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[2])\nsbn.distplot(results['0.03_path_length_False_None']['wbo_dist'], hist=False, color=sbn.color_palette()[2])\n\nsbn.kdeplot(results['0.1_path_length_False_None']['wbo_dist'], shade=True)\nsbn.distplot(results['0.1_path_length_False_None']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[3])\nsbn.distplot(results['0.1_path_length_False_None']['wbo_dist'], hist=False, color=sbn.color_palette()[3])\n\nsbn.kdeplot(pfizer_results[bond]['wbo_dist'], shade=True)\nsbn.distplot(pfizer_results[bond]['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[4])\nsbn.distplot(pfizer_results[bond]['wbo_dist'], hist=False, color=sbn.color_palette()[4])\nplt.xticks(fontsize=14)\nplt.yticks([])\nplt.xlabel('Wiberg Bond Order', fontsize=14)\nplt.tight_layout()\nplt.savefig('wbo_dists.pdf')\n\ncolors = [rbg_to_int(list(i), alpha=255) for i in sbn.color_palette()]\nwbos = [results['parent']['elf10_wbo'], results['0.01_path_length_False_None']['elf10_wbo'], results['0.03_path_length_False_None']['elf10_wbo'],\n results['0.1_path_length_False_None']['elf10_wbo'], pfizer_results[bond]['elf10_wbo']]\nfrags = [results['parent']['frag'], results['0.01_path_length_False_None']['frag'], results['0.03_path_length_False_None']['frag'],\n results['0.1_path_length_False_None']['frag'], pfizer_results[bond]['frag']]\ndes_bond = fragmenter.utils.deserialize_bond(bond)\nvisualize_mols(frags, cols=2, rows=2, bond_idx=des_bond, colors=colors, wbos=wbos, fname='fragments.pdf', align_to=0)"
] |
[
[
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"numpy.poly1d",
"matplotlib.pyplot.scatter",
"numpy.unique",
"numpy.asarray",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"scipy.stats.linregress",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks"
],
[
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks"
]
] |
bennyrowland/suspect
|
[
"c09ab0a5013c5a199218214cdd791659243d7e41",
"c09ab0a5013c5a199218214cdd791659243d7e41"
] |
[
"suspect/processing/water_suppression.py",
"suspect/io/rda.py"
] |
[
"import numpy\n\nimport suspect.basis\n\n\ndef hsvd(data, rank, L=None):\n if L is None:\n L = data.np // 2\n # start by building the Hankel matrix\n hankel_matrix = numpy.zeros((L, data.np - L), \"complex\")\n for i in range(int(data.np - L)):\n hankel_matrix[:, i] = data[i:(i + L)]\n\n # perform the singular value decomposition\n U, s, V = numpy.linalg.svd(numpy.matrix(hankel_matrix))\n V = V.H # numpy returns the Hermitian conjugate of V\n\n # truncate the matrices to the given rank\n U_K = U[:, :rank]\n V_K = V[:, :rank]\n s_K = numpy.matrix(numpy.diag(s[:rank]))\n\n # because of the structure of the Hankel matrix, each row of U_K is the\n # result of multiplying the previous row by the delta t propagator matrix\n # Z' (a similar result holds for V as well). This gives us U_Kb * Z' = U_Kt\n # where U_Kb is U_K without the bottom row and U_Kt is U_K without the top\n # row.\n U_Kt = U_K[1:, :]\n U_Kb = U_K[:-1, :]\n # this gives us a set of linear equations which can be solved to find Z'.\n # Because of the noise in the system we solve with least-squares\n Zp = numpy.linalg.inv(U_Kb.H * U_Kb) * U_Kb.H * U_Kt\n\n # in the right basis, Zp is just the diagonal matrix describing the\n # evolution of each frequency component, by diagonalising the matrix we can\n # find that basis and get the z = exp((-damping + j*2pi * f) * dt) terms\n\n # alternatively we can just get the eigenvalues instead\n val, vec = numpy.linalg.eig(Zp)\n\n # the magnitude gives the damping and the angle gives the frequency\n damping_coeffs = numpy.zeros(rank)\n frequency_coeffs = numpy.zeros(rank)\n for i in range(rank):\n damping_coeffs[i] = - numpy.log(abs(val[i])) / data.dt\n frequency_coeffs[i] = numpy.angle(val[i]) / (data.dt * 2 * numpy.pi)\n\n # TODO in theory we can calculate the magnitude of each signal from the\n # RHS decomposition, linalg.inv(vec) * (S_K * V_K.H)[:, 0]\n\n # a simpler but more expensive way is to construct a basis set from the\n # known damping and frequency components and fit to the original data to\n # get the amplitudes and phase data\n X = numpy.zeros((data.np, rank), \"complex\")\n # TODO this should use the singlet fitting module to make the basis\n for i in range(rank):\n X[:, i] = suspect.basis.lorentzian(data.time_axis(),\n frequency_coeffs[i],\n 0,\n damping_coeffs[i] / numpy.pi) * data.np\n\n # we use the linear non-iterative least squares again\n U2, s2, V2 = numpy.linalg.svd(numpy.matrix(X), full_matrices=False)\n s2_inv = numpy.diag(1 / s2)\n beta = V2.H * s2_inv * U2.H * numpy.matrix(numpy.reshape(data, (data.np, 1)))\n\n components = []\n for i in range(rank):\n components.append({\n \"amplitude\": float(abs(beta[i])),\n \"phase\": float(numpy.angle(beta[i])),\n \"fwhm\": damping_coeffs[i] / numpy.pi,\n \"frequency\": frequency_coeffs[i]\n })\n\n return components\n\n\ndef construct_fid(components, time_axis):\n fid = numpy.zeros_like(time_axis, 'complex')\n for i in range(len(components)):\n lorentzian = suspect.basis.lorentzian(time_axis,\n components[i][\"frequency\"],\n components[i][\"phase\"],\n components[i][\"fwhm\"])\n fid += components[i][\"amplitude\"] * lorentzian * len(time_axis)\n return fid\n",
"from suspect import MRSData, transformation_matrix\n\nimport numpy\nimport struct\nimport re\n\n# The RDA format consists of a large number of key value pairs followed by raw\n# data. The values need to be cast into different datatypes depending on the\n# key, this dictionary stores a mapping of key to datatype.\n\nrda_types = {\n \"floats\": [\"PatientWeight\", \"TR\", \"TE\", \"TM\", \"TI\", \"DwellTime\", \"NumberOfAverages\",\n \"MRFrequency\", \"MagneticFieldStrength\", \"FlipAngle\", \"SliceThickness\",\n \"FoVHeight\", \"FoVWidth\", \"PercentOfRectFoV\", \"PixelSpacingRow\",\n \"PixelSpacingCol\", \"VOIPositionSag\", \"VOIPositionCor\",\n \"VOIPositionTra\", \"VOIThickness\", \"VOIPhaseFOV\", \"VOIReadoutFOV\",\n \"VOIReadoutVOV\", \"VOINormalSag\", \"VOINormalCor\", \"VOINormalTra\",\n \"VOIRotationInPlane\", \"FoV3D\", \"PixelSpacing3D\"],\n \"integers\": [\"SeriesNumber\", \"InstanceNumber\", \"AcquisitionNumber\", \"NumOfPhaseEncodingSteps\",\n \"NumberOfRows\", \"NumberOfColumns\", \"VectorSize\", \"EchoNumber\",\n \"NumberOf3DParts\", \"HammingFilterWidth\", \"NumberOfEchoes\"],\n \"strings\": [\"PatientID\", \"PatientName\", \"StudyDescription\", \"PatientBirthDate\",\n \"StudyDate\", \"StudyTime\", \"PatientAge\", \"SeriesDate\", \"SeriesTime\",\n \"SeriesDescription\", \"ProtocolName\", \"PatientPosition\", \"ModelName\",\n \"StationName\", \"InstitutionName\", \"DeviceSerialNumber\", \"InstanceDate\",\n \"InstanceTime\", \"InstanceComments\", \"SequenceName\", \"SequenceDescription\",\n \"Nucleus\", \"TransmitCoil\", \"PatientSex\", \"HammingFilter\", \"FrequenceCorrection\"],\n \"float_arrays\": [\"PositionVector\", \"RowVector\", \"ColumnVector\"],\n \"integer_arrays\": [\"CSIMatrixSize\", \"CSIMatrixSizeOfScan\", \"CSIGridShift\"],\n \"string_arrays\": [\"SoftwareVersion\"],\n \"dictionaries\": [\"TransmitRefAmplitude\"]\n}\n\n\ndef load_rda(filename):\n header_dict = {}\n with open(filename, 'rb') as fin:\n header_line = fin.readline().strip()\n if header_line != b\">>> Begin of header <<<\":\n raise Exception(\"Error reading file {} as a .rda\".format(filename))\n header_line = fin.readline().strip().decode('windows-1252')\n while header_line != \">>> End of header <<<\":\n key, value = map(str.strip, header_line.split(\":\", 1))\n if key in rda_types[\"strings\"]:\n header_dict[key] = value\n elif key in rda_types[\"integers\"]:\n header_dict[key] = int(value)\n elif key in rda_types[\"floats\"]:\n header_dict[key] = float(value)\n elif \"[\" in key and \"]\" in key:\n # could be a dict or a list\n key, index = re.split(\"\\]|\\[\", key)[0:2]\n if key in rda_types[\"dictionaries\"]:\n if key not in header_dict:\n header_dict[key] = {}\n header_dict[key][index] = value\n else:\n # not a dictionary, must be a list\n if key in rda_types[\"float_arrays\"]:\n value = float(value)\n elif key in rda_types[\"integer_arrays\"]:\n value = int(value)\n index = int(index)\n # make sure there is a list in the header_dict, with enough entries\n if not key in header_dict:\n header_dict[key] = []\n while len(header_dict[key]) <= index:\n header_dict[key].append(0)\n header_dict[key][index] = value\n header_line = fin.readline().strip().decode('windows-1252')\n # now we can read the data\n data = fin.read()\n\n # the shape of the data in slice, column, row, time format\n data_shape = header_dict[\"CSIMatrixSize\"][::-1]\n data_shape.append(header_dict[\"VectorSize\"])\n data_shape = numpy.array(data_shape)\n data_size = numpy.prod(data_shape) * 16 # each data point is a complex 
double, 16 bytes\n if data_size != len(data):\n raise ValueError(\"Error reading file {}: expected {} bytes of data, got {}\".format(filename, data_size, len(data)))\n\n # unpack the data into complex numbers\n data_as_floats = struct.unpack(\"<{}d\".format(numpy.prod(data_shape) * 2), data)\n float_iter = iter(data_as_floats)\n complex_iter = (complex(r, i) for r, i in zip(float_iter, float_iter))\n complex_data = numpy.fromiter(complex_iter, \"complex64\", int(numpy.prod(data_shape)))\n complex_data = numpy.reshape(complex_data, data_shape).squeeze()\n\n # some .rda files have a misnamed field, correct this here\n if \"VOIReadoutFOV\" not in header_dict:\n if \"VOIReadoutVOV\" in header_dict:\n header_dict[\"VOIReadoutFOV\"] = header_dict.pop(\"VOIReadoutVOV\")\n\n # combine positional elements in the header\n voi_size = (header_dict[\"VOIReadoutFOV\"],\n header_dict[\"VOIPhaseFOV\"],\n header_dict[\"VOIThickness\"])\n voi_center = (header_dict[\"VOIPositionSag\"],\n header_dict[\"VOIPositionCor\"],\n header_dict[\"VOIPositionTra\"])\n voxel_size = (header_dict[\"PixelSpacingCol\"],\n header_dict[\"PixelSpacingRow\"],\n header_dict[\"PixelSpacing3D\"])\n\n x_vector = numpy.array(header_dict[\"RowVector\"])\n y_vector = numpy.array(header_dict[\"ColumnVector\"])\n\n to_scanner = transformation_matrix(x_vector, y_vector, numpy.array(voi_center), voxel_size)\n\n # put useful components from the header in the metadata\n metadata = {\n \"voi_size\": voi_size,\n \"position\": voi_center,\n \"voxel_size\": voxel_size,\n \"protocol\": header_dict[\"ProtocolName\"],\n \"to_scanner\": to_scanner,\n \"from_scanner\": numpy.linalg.inv(to_scanner)\n }\n\n return MRSData(complex_data,\n header_dict[\"DwellTime\"] * 1e-6,\n header_dict[\"MRFrequency\"],\n te=header_dict[\"TE\"],\n transform=to_scanner,\n metadata=metadata)\n"
] |
[
[
"numpy.diag",
"numpy.matrix",
"numpy.linalg.inv",
"numpy.linalg.eig",
"numpy.reshape",
"numpy.zeros_like",
"numpy.angle",
"numpy.zeros"
],
[
"numpy.linalg.inv",
"numpy.array",
"numpy.reshape",
"numpy.prod"
]
] |
yanndupis/tf-encrypted
|
[
"cfaea3ba87520f73979ed4e4f397eba3beb0a535",
"cfaea3ba87520f73979ed4e4f397eba3beb0a535"
] |
[
"examples/deprecated/inputs.py",
"examples/federated-learning/run.py"
] |
[
"import sys\n\nimport numpy as np\nimport tensorflow as tf\nimport tf_encrypted as tfe\n\nconfig = tfe.get_config()\n\nif len(sys.argv) > 1:\n\n #\n # assume we're running as a server\n #\n\n player_name = str(sys.argv[1])\n\n server = config.server(player_name)\n server.start()\n server.join()\n\nelse:\n\n #\n # assume we're running as master\n #\n\n def provide_weights() -> tf.Tensor:\n raw_w = np.array([5, 5, 5, 5]).reshape((2, 2))\n w = tf.constant(raw_w)\n tf.print(w, [w])\n return w\n\n def provide_input() -> tf.Tensor:\n x = tf.constant([1, 2, 3, 4], shape=(2, 2), dtype=tf.float32)\n tf.print(x, [x])\n return x\n\n def receive_output(prediction):\n\n tf.print([], [prediction], summarize=4)\n return []\n\n with tfe.protocol.Pond() as prot:\n\n # treat weights as private\n w = prot.define_private_input('model-provider', provide_weights)\n\n # load input for prediction\n x = prot.define_private_input('input-provider', provide_input)\n\n # compute prediction\n y = prot.matmul(x, w)\n\n # send output\n prediction_op = prot.define_output('input-provider', y, receive_output)\n\n with tfe.Session() as sess:\n sess.run(tf.global_variables_initializer(), tag='init')\n\n for _ in range(5):\n sess.run(prediction_op, tag='prediction')\n",
"import sys\n\nimport tensorflow as tf\nimport tf_encrypted as tfe\n\nfrom convert import decode\n\nif len(sys.argv) > 1:\n # config file was specified\n config_file = sys.argv[1]\n config = tfe.RemoteConfig.load(config_file)\n tfe.set_config(config)\n tfe.set_protocol(tfe.protocol.Pond())\n\nsession_target = sys.argv[2] if len(sys.argv) > 2 else None\n\n\nclass ModelOwner:\n\n LEARNING_RATE = 0.1\n ITERATIONS = 60000 // 30\n\n def __init__(self, player_name):\n self.player_name = player_name\n\n with tf.device(tfe.get_config().get_player(player_name).device_name):\n self._initialize_weights()\n\n def _initialize_weights(self):\n with tf.name_scope('parameters'):\n self.w0 = tf.Variable(tf.random_normal([28 * 28, 512]))\n self.b0 = tf.Variable(tf.zeros([512]))\n self.w1 = tf.Variable(tf.random_normal([512, 10]))\n self.b1 = tf.Variable(tf.zeros([10]))\n\n def _build_model(self, x, y):\n w0 = self.w0.read_value()\n b0 = self.b0.read_value()\n w1 = self.w1.read_value()\n b1 = self.b1.read_value()\n params = (w0, b0, w1, b1)\n\n layer0 = tf.matmul(x, w0) + b0\n layer1 = tf.nn.sigmoid(layer0)\n layer2 = tf.matmul(layer1, w1) + b1\n predictions = layer2\n\n loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=predictions, labels=y))\n grads = tf.gradients(ys=loss, xs=params)\n return predictions, loss, grads\n\n def build_training_model(self, x, y):\n \"\"\"\n This method will be called once by all data owners\n to create a local gradient computation on their machine.\n \"\"\"\n _, _, grads = self._build_model(x, y)\n return grads\n\n def _build_validation_model(self, x, y):\n predictions, loss, _ = self._build_model(x, y)\n most_likely = tf.argmax(predictions, axis=1)\n return most_likely, loss\n\n def _build_data_pipeline(self):\n\n def normalize(image, label):\n image = tf.cast(image, tf.float32) / 255.0\n return image, label\n\n dataset = tf.data.TFRecordDataset([\"./data/train.tfrecord\"])\n dataset = dataset.map(decode)\n dataset = dataset.map(normalize)\n dataset = dataset.batch(50)\n dataset = dataset.take(1) # keep validating on the same items\n dataset = dataset.repeat()\n\n iterator = dataset.make_one_shot_iterator()\n return iterator.get_next()\n\n def update_model(self, *grads):\n params = [self.w0, self.b0, self.w1, self.b1]\n grads = [tf.cast(grad, tf.float32) for grad in grads]\n with tf.name_scope('update'):\n update_op = tf.group(*[\n param.assign(param - grad * self.LEARNING_RATE)\n for param, grad in zip(params, grads)\n ])\n # return update_op\n\n with tf.name_scope('validate'):\n x, y = self._build_data_pipeline()\n y_hat, loss = self._build_validation_model(x, y)\n\n with tf.control_dependencies([update_op]):\n return tf.print('expect', loss, y, y_hat, summarize=50)\n\n\nclass DataOwner:\n\n BATCH_SIZE = 30\n\n def __init__(self, player_name, build_training_model):\n self.player_name = player_name\n self._build_training_model = build_training_model\n\n def _build_data_pipeline(self):\n\n def normalize(image, label):\n image = tf.cast(image, tf.float32) / 255.0\n return image, label\n\n dataset = tf.data.TFRecordDataset([\"./data/train.tfrecord\"])\n dataset = dataset.map(decode)\n dataset = dataset.map(normalize)\n dataset = dataset.repeat()\n dataset = dataset.batch(self.BATCH_SIZE)\n\n iterator = dataset.make_one_shot_iterator()\n return iterator.get_next()\n\n def compute_gradient(self):\n\n with tf.name_scope('data_loading'):\n x, y = self._build_data_pipeline()\n\n with tf.name_scope('gradient_computation'):\n grads = self._build_training_model(x, 
y)\n\n return grads\n\n\nmodel_owner = ModelOwner('model-owner')\ndata_owners = [\n DataOwner('data-owner-0', model_owner.build_training_model),\n DataOwner('data-owner-1', model_owner.build_training_model),\n DataOwner('data-owner-2', model_owner.build_training_model),\n]\n\nmodel_grads = zip(*(\n tfe.define_private_input(data_owner.player_name, data_owner.compute_gradient)\n for data_owner in data_owners\n))\n\nwith tf.name_scope('secure_aggregation'):\n aggregated_model_grads = [\n tfe.add_n(grads) / len(grads)\n for grads in model_grads\n ]\n\niteration_op = tfe.define_output(model_owner.player_name, aggregated_model_grads, model_owner.update_model)\n\nwith tfe.Session(target=session_target) as sess:\n sess.run(tf.global_variables_initializer(), tag='init')\n\n for i in range(model_owner.ITERATIONS):\n if i % 100 == 0:\n print(\"Iteration {}\".format(i))\n sess.run(iteration_op, tag='iteration')\n else:\n sess.run(iteration_op)\n"
] |
[
[
"tensorflow.global_variables_initializer",
"tensorflow.print",
"numpy.array",
"tensorflow.constant"
],
[
"tensorflow.matmul",
"tensorflow.nn.sigmoid",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.zeros",
"tensorflow.control_dependencies",
"tensorflow.data.TFRecordDataset",
"tensorflow.cast",
"tensorflow.gradients",
"tensorflow.global_variables_initializer",
"tensorflow.print",
"tensorflow.name_scope",
"tensorflow.argmax",
"tensorflow.random_normal"
]
] |
suvarnak/datasets
|
[
"682b5adee6c36e9867f397076080ec23d9616dcc",
"682b5adee6c36e9867f397076080ec23d9616dcc"
] |
[
"tensorflow_datasets/core/download/download_manager.py",
"tensorflow_datasets/image/celebahq.py"
] |
[
"# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Download manager interface.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport uuid\n\nfrom absl import logging\nimport promise\nimport six\nimport tensorflow as tf\n\nfrom tensorflow_datasets.core import api_utils\nfrom tensorflow_datasets.core import utils\nfrom tensorflow_datasets.core.download import downloader\nfrom tensorflow_datasets.core.download import extractor\nfrom tensorflow_datasets.core.download import resource as resource_lib\nfrom tensorflow_datasets.core.download import util\n\n\nclass NonMatchingChecksumError(Exception):\n \"\"\"The downloaded file doesn't have expected checksum.\"\"\"\n\n def __init__(self, url, tmp_path):\n msg = 'Artifact %s, downloaded to %s, has wrong checksum.' % (url, tmp_path)\n Exception.__init__(self, msg)\n\n\nclass DownloadConfig(object):\n \"\"\"Configuration for `tfds.core.DatasetBuilder.download_and_prepare`.\"\"\"\n\n def __init__(self,\n extract_dir=None,\n manual_dir=None,\n download_mode=None,\n compute_stats=None,\n max_examples_per_split=None):\n \"\"\"Constructs a `DownloadConfig`.\n\n Args:\n extract_dir: `str`, directory where extracted files are stored.\n Defaults to \"<download_dir>/extracted\".\n manual_dir: `str`, read-only directory where manually downloaded/extracted\n data is stored. Defaults to\n \"<download_dir>/manual\".\n download_mode: `tfds.GenerateMode`, how to deal with downloads or data\n that already exists. Defaults to `REUSE_DATASET_IF_EXISTS`, which will\n reuse both downloads and data if it already exists.\n compute_stats: `tfds.download.ComputeStats`, whether to compute\n statistics over the generated data. Defaults to `AUTO`.\n max_examples_per_split: `int`, optional max number of examples to write\n into each split.\n \"\"\"\n self.extract_dir = extract_dir\n self.manual_dir = manual_dir\n self.download_mode = util.GenerateMode(\n download_mode or util.GenerateMode.REUSE_DATASET_IF_EXISTS)\n self.compute_stats = util.ComputeStatsMode(\n compute_stats or util.ComputeStatsMode.AUTO)\n self.max_examples_per_split = max_examples_per_split\n\n\nclass DownloadManager(object):\n \"\"\"Manages the download and extraction of files, as well as caching.\n\n Downloaded files are cached under `download_dir`. The file name of downloaded\n files follows pattern \"${sanitized_url}${content_checksum}.${ext}\". 
Eg:\n 'cs.toronto.edu_kriz_cifar-100-pythonJDF[...]I.tar.gz'.\n\n While a file is being downloaded, it is placed into a directory following a\n similar but different pattern:\n \"%{sanitized_url}${url_checksum}.tmp.${uuid}\".\n\n When a file is downloaded, a \"%{fname}s.INFO.json\" file is created next to it.\n This INFO file contains the following information:\n {\"dataset_names\": [\"name1\", \"name2\"],\n \"urls\": [\"http://url.of/downloaded_file\"]}\n\n Extracted files/dirs are stored under `extract_dir`. The file name or\n directory name is the same as the original name, prefixed with the extraction\n method. E.g.\n \"${extract_dir}/TAR_GZ.cs.toronto.edu_kriz_cifar-100-pythonJDF[...]I.tar.gz\".\n\n The function members accept either plain value, or values wrapped into list\n or dict. Giving a data structure will parallelize the downloads.\n\n Example of usage:\n\n ```\n # Sequential download: str -> str\n train_dir = dl_manager.download_and_extract('https://abc.org/train.tar.gz')\n test_dir = dl_manager.download_and_extract('https://abc.org/test.tar.gz')\n\n # Parallel download: list -> list\n image_files = dl_manager.download(\n ['https://a.org/1.jpg', 'https://a.org/2.jpg', ...])\n\n # Parallel download: dict -> dict\n data_dirs = dl_manager.download_and_extract({\n 'train': 'https://abc.org/train.zip',\n 'test': 'https://abc.org/test.zip',\n })\n data_dirs['train']\n data_dirs['test']\n ```\n\n For more customization on the download/extraction (ex: passwords, output_name,\n ...), you can pass a `tfds.download.Resource` as argument.\n \"\"\"\n\n @api_utils.disallow_positional_args\n def __init__(self,\n download_dir,\n extract_dir=None,\n manual_dir=None,\n dataset_name=None,\n checksums=None,\n force_download=False,\n force_extraction=False):\n \"\"\"Download manager constructor.\n\n Args:\n download_dir: `str`, path to directory where downloads are stored.\n extract_dir: `str`, path to directory where artifacts are extracted.\n manual_dir: `str`, path to manually downloaded/extracted data directory.\n dataset_name: `str`, name of dataset this instance will be used for. If\n provided, downloads will contain which datasets they were used for.\n checksums: `dict<str url, str sha256>`, url to sha256 of resource.\n Only URLs present are checked.\n If empty, checksum of (already) downloaded files is computed and can\n then be retrieved using `recorded_download_checksums` property.\n force_download: `bool`, default to False. If True, always [re]download.\n force_extraction: `bool`, default to False. 
If True, always [re]extract.\n \"\"\"\n self._dataset_name = dataset_name\n self._checksums = checksums or {}\n self._record_checksum_size = not checksums\n self._recorded_download_checksums = {}\n self._download_sizes = {}\n self._download_dir = os.path.expanduser(download_dir)\n self._extract_dir = os.path.expanduser(\n extract_dir or os.path.join(download_dir, 'extracted'))\n self._manual_dir = manual_dir and os.path.expanduser(manual_dir)\n tf.io.gfile.makedirs(self._download_dir)\n tf.io.gfile.makedirs(self._extract_dir)\n self._force_download = force_download\n self._force_extraction = force_extraction\n self._extractor = extractor.get_extractor()\n self._downloader = downloader.get_downloader()\n\n @property\n def recorded_download_checksums(self):\n \"\"\"Returns checksums for downloaded urls.\"\"\"\n return dict(self._recorded_download_checksums)\n\n @property\n def download_sizes(self):\n \"\"\"Returns sizes (in bytes) for downloaded urls.\"\"\"\n return dict(self._download_sizes)\n\n def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size):\n \"\"\"Store dled file to definitive place, write INFO file, return path.\"\"\"\n fnames = tf.io.gfile.listdir(tmp_dir_path)\n if len(fnames) > 1:\n raise AssertionError('More than one file in %s.' % tmp_dir_path)\n original_fname = fnames[0]\n tmp_path = os.path.join(tmp_dir_path, original_fname)\n if self._record_checksum_size:\n resource.sha256 = sha256\n self._download_sizes[resource.url] = dl_size\n self._recorded_download_checksums[resource.url] = sha256\n elif self._checksums[resource.url] != sha256:\n raise NonMatchingChecksumError(resource.url, tmp_path)\n resource.write_info_file(self._dataset_name, original_fname)\n # Unconditionally overwrite because either file doesn't exist or\n # FORCE_DOWNLOAD=true\n tf.io.gfile.rename(tmp_path, resource.path, overwrite=True)\n tf.io.gfile.rmtree(tmp_dir_path)\n return resource.path\n\n # synchronize and memoize decorators ensure same resource will only be\n # processed once, even if passed twice to download_manager.\n @util.build_synchronize_decorator()\n @utils.memoize()\n def _download(self, resource):\n \"\"\"Download resource, returns Promise->path to downloaded file.\"\"\"\n if isinstance(resource, six.string_types):\n resource = resource_lib.Resource(url=resource)\n resource.sha256 = self._checksums.get(resource.url, None)\n if not resource.path:\n resource.path = os.path.join(self._download_dir, resource.fname)\n if (not self._force_download and resource.sha256 and\n resource.exists_locally()):\n logging.info('URL %s already downloaded: reusing %s.', resource.url,\n resource.path)\n self._recorded_download_checksums[resource.url] = resource.sha256\n self._download_sizes[resource.url] = (\n tf.io.gfile.stat(resource.path).length)\n return promise.Promise.resolve(resource.path)\n # There is a slight difference between downloader and extractor here:\n # the extractor manages its own temp directory, while the DownloadManager\n # manages the temp directory of downloader.\n tmp_dir_path = '%s.tmp.%s' % (resource.path, uuid.uuid4().hex)\n tf.io.gfile.makedirs(tmp_dir_path)\n logging.info('Downloading %s into %s...', resource.url, tmp_dir_path)\n\n def callback(val):\n checksum, dl_size = val\n return self._handle_download_result(resource, tmp_dir_path, checksum,\n dl_size)\n return self._downloader.download(resource, tmp_dir_path).then(callback)\n\n @util.build_synchronize_decorator()\n @utils.memoize()\n def _extract(self, resource):\n \"\"\"Extract a single archive, 
returns Promise->path to extraction result.\"\"\"\n if isinstance(resource, six.string_types):\n resource = resource_lib.Resource(path=resource)\n if resource.extract_method == resource_lib.ExtractMethod.NO_EXTRACT:\n logging.info(\n 'Skipping extraction for %s (method=NO_EXTRACT).', resource.path)\n return promise.Promise.resolve(resource.path)\n extract_path = os.path.join(self._extract_dir, resource.extract_fname)\n if not self._force_extraction and tf.io.gfile.exists(extract_path):\n logging.info('Reusing extraction of %s at %s.', resource.path,\n extract_path)\n return promise.Promise.resolve(extract_path)\n return self._extractor.extract(resource, extract_path)\n\n @util.build_synchronize_decorator()\n @utils.memoize()\n def _download_extract(self, resource):\n \"\"\"Download-extract `Resource` or url, returns Promise->path.\"\"\"\n if isinstance(resource, six.string_types):\n resource = resource_lib.Resource(url=resource)\n def callback(path):\n resource.path = path\n return self._extract(resource)\n return self._download(resource).then(callback)\n\n def download_kaggle_data(self, competition_name):\n \"\"\"Download data for a given Kaggle competition.\"\"\"\n with self._downloader.tqdm():\n kaggle_downloader = self._downloader.kaggle_downloader(competition_name)\n urls = kaggle_downloader.competition_urls\n files = kaggle_downloader.competition_files\n return _map_promise(self._download,\n dict((f, u) for (f, u) in zip(files, urls)))\n\n def download(self, url_or_urls):\n \"\"\"Download given url(s).\n\n Args:\n url_or_urls: url or `list`/`dict` of urls to download and extract. Each\n url can be a `str` or `tfds.download.Resource`.\n\n Returns:\n downloaded_path(s): `str`, The downloaded paths matching the given input\n url_or_urls.\n \"\"\"\n # Add progress bar to follow the download state\n with self._downloader.tqdm():\n return _map_promise(self._download, url_or_urls)\n\n def iter_archive(self, resource):\n \"\"\"Returns iterator over files within archive.\n\n **Important Note**: caller should read files as they are yielded.\n Reading out of order is slow.\n\n Args:\n resource: path to archive or `tfds.download.Resource`.\n\n Returns:\n Generator yielding tuple (path_within_archive, file_obj).\n \"\"\"\n if isinstance(resource, six.string_types):\n resource = resource_lib.Resource(path=resource)\n return extractor.iter_archive(resource.path, resource.extract_method)\n\n def extract(self, path_or_paths):\n \"\"\"Extract given path(s).\n\n Args:\n path_or_paths: path or `list`/`dict` of path of file to extract. Each\n path can be a `str` or `tfds.download.Resource`.\n\n If not explicitly specified in `Resource`, the extraction method is deduced\n from downloaded file name.\n\n Returns:\n extracted_path(s): `str`, The extracted paths matching the given input\n path_or_paths.\n \"\"\"\n # Add progress bar to follow the download state\n with self._extractor.tqdm():\n return _map_promise(self._extract, path_or_paths)\n\n def download_and_extract(self, url_or_urls):\n \"\"\"Download and extract given url_or_urls.\n\n Is roughly equivalent to:\n\n ```\n extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))\n ```\n\n Args:\n url_or_urls: url or `list`/`dict` of urls to download and extract. 
Each\n url can be a `str` or `tfds.download.Resource`.\n\n If not explicitly specified in `Resource`, the extraction method will\n automatically be deduced from downloaded file name.\n\n Returns:\n extracted_path(s): `str`, extracted paths of given URL(s).\n \"\"\"\n # Add progress bar to follow the download state\n with self._downloader.tqdm():\n with self._extractor.tqdm():\n return _map_promise(self._download_extract, url_or_urls)\n\n @property\n def manual_dir(self):\n \"\"\"Returns the directory containing the manually extracted data.\"\"\"\n if not tf.io.gfile.exists(self._manual_dir):\n raise AssertionError(\n 'Manual directory {} does not exist. Create it and download/extract '\n 'dataset artifacts in there.'.format(self._manual_dir))\n return self._manual_dir\n\n\n# ============================================================================\n# In Python 2.X, threading.Condition.wait() cannot be interrupted by SIGINT,\n# unless it's given a timeout. Here we artificially give a long timeout to\n# allow ctrl+C.\n# This code should be deleted once python2 is no longer supported.\nif sys.version_info[0] > 2:\n\n def _wait_on_promise(p):\n return p.get()\n\nelse:\n\n def _wait_on_promise(p):\n while True:\n result = p.get(sys.maxint) # pylint: disable=g-deprecated-member-used\n if p.is_fulfilled:\n return result\n\n# ============================================================================\n\n\ndef _map_promise(map_fn, all_inputs):\n \"\"\"Map the function into each element and resolve the promise.\"\"\"\n all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function\n res = utils.map_nested(_wait_on_promise, all_promises)\n return res\n",
"# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Celeba-HQ dataset.\"\"\"\nimport os\n\nimport tensorflow as tf\nfrom tensorflow_datasets.core import api_utils\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = \"\"\"\\\n@article{DBLP:journals/corr/abs-1710-10196,\n author = {Tero Karras and\n Timo Aila and\n Samuli Laine and\n Jaakko Lehtinen},\n title = {Progressive Growing of GANs for Improved Quality, Stability, and Variation},\n journal = {CoRR},\n volume = {abs/1710.10196},\n year = {2017},\n url = {http://arxiv.org/abs/1710.10196},\n archivePrefix = {arXiv},\n eprint = {1710.10196},\n timestamp = {Mon, 13 Aug 2018 16:46:42 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/abs-1710-10196},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\"\"\"\n\n_DESCRIPTION = \"\"\"\\\nHigh-quality version of the CELEBA\ndataset, consisting of 30000 images in 1024 x 1024 resolution.\n\nWARNING: This dataset currently requires you to prepare images on your own.\n\"\"\"\n\n\nclass CelebaHQConfig(tfds.core.BuilderConfig):\n \"\"\"BuilderConfig for CelebaHQ.\"\"\"\n\n @api_utils.disallow_positional_args\n def __init__(self, resolution, **kwargs):\n \"\"\"BuilderConfig for SQUAD.\n\n Args:\n resolution: Resolution of the image. 
Values supported: powers of 2 up to\n 1024.\n **kwargs: keyword arguments forwarded to super.\n \"\"\"\n super(CelebaHQConfig, self).__init__(\n name=\"%d\" % resolution,\n description=(\"CelebaHQ images in %d x %d resolution\" %\n (resolution, resolution)),\n **kwargs)\n self.resolution = resolution\n self.file_name = \"data%dx%d.tar\" % (resolution, resolution)\n\n\nclass CelebAHq(tfds.core.GeneratorBasedBuilder):\n \"\"\"Celeba_HQ Dataset.\"\"\"\n\n VERSION = tfds.core.Version(\"0.1.0\")\n\n BUILDER_CONFIGS = [\n CelebaHQConfig(resolution=1024, version=\"0.1.0\"),\n CelebaHQConfig(resolution=512, version=\"0.1.0\"),\n CelebaHQConfig(resolution=256, version=\"0.1.0\"),\n CelebaHQConfig(resolution=128, version=\"0.1.0\"),\n CelebaHQConfig(resolution=64, version=\"0.1.0\"),\n CelebaHQConfig(resolution=32, version=\"0.1.0\"),\n CelebaHQConfig(resolution=16, version=\"0.1.0\"),\n CelebaHQConfig(resolution=8, version=\"0.1.0\"),\n CelebaHQConfig(resolution=4, version=\"0.1.0\"),\n CelebaHQConfig(resolution=2, version=\"0.1.0\"),\n CelebaHQConfig(resolution=1, version=\"0.1.0\"),\n ]\n\n def _info(self):\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n \"image\":\n tfds.features.Image(\n shape=(self.builder_config.resolution,\n self.builder_config.resolution, 3),\n encoding_format=\"png\"),\n \"image/filename\":\n tfds.features.Text(),\n },),\n urls=[\"https://github.com/tkarras/progressive_growing_of_gans\"],\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n image_tar_file = os.path.join(dl_manager.manual_dir,\n self.builder_config.file_name)\n if not tf.io.gfile.exists(image_tar_file):\n # The current celebahq generation code depends on a concrete version of\n # pillow library and cannot be easily ported into tfds.\n msg = \"You must download the dataset files manually and place them in: \"\n msg += dl_manager.manual_dir\n msg += \" as .tar files. See testing/test_data/fake_examples/celeb_a_hq \"\n raise AssertionError(msg)\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n num_shards=50,\n gen_kwargs={\"archive\": dl_manager.iter_archive(image_tar_file)},\n )\n ]\n\n def _generate_examples(self, archive):\n for fname, fobj in archive:\n yield {\"image\": fobj, \"image/filename\": fname}\n"
] |
[
[
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.stat",
"tensorflow.io.gfile.makedirs",
"tensorflow.io.gfile.listdir",
"tensorflow.io.gfile.rename",
"tensorflow.io.gfile.rmtree"
],
[
"tensorflow.io.gfile.exists"
]
] |
alisiahkoohi/devito
|
[
"f535a44dff12de2837eb6e3217a65ffb2d371cb8"
] |
[
"tests/test_derivatives.py"
] |
[
"import numpy as np\nimport pytest\nfrom sympy import simplify, diff, cos, sin, Float\n\nfrom devito import (Grid, Function, TimeFunction, Eq, Operator, NODE,\n ConditionalDimension, left, right, centered, div, grad)\nfrom devito.finite_differences import Derivative, Differentiable\nfrom devito.finite_differences.differentiable import EvalDiffDerivative\nfrom devito.symbolics import indexify, retrieve_indexed\n\n_PRECISION = 9\n\n\ndef x(grid):\n return grid.dimensions[0]\n\n\ndef y(grid):\n return grid.dimensions[1]\n\n\ndef z(grid):\n return grid.dimensions[2]\n\n\ndef t(grid):\n return grid.stepping_dim\n\n\nclass TestFD(object):\n \"\"\"\n Class for finite difference testing.\n Tests the accuracy w.r.t polynomials.\n Test that the shortcut produce the same answer as the FD functions.\n \"\"\"\n\n def setup_method(self):\n self.shape = (20, 20, 20)\n self.grid = Grid(self.shape)\n\n def test_diff(self):\n \"\"\"Test that expr.diff returns an object of type devito.Derivative.\"\"\"\n u = Function(name='u', grid=self.grid)\n du = u.diff(x(self.grid))\n assert isinstance(du, Derivative)\n\n @pytest.mark.parametrize('so', [2, 3, 4, 5])\n def test_fd_indices(self, so):\n \"\"\"\n Test that shifted derivative have Integer offset after indexification.\n \"\"\"\n grid = Grid((10,))\n x = grid.dimensions[0]\n x0 = x + .5 * x.spacing\n u = Function(name=\"u\", grid=grid, space_order=so)\n dx = indexify(u.dx(x0=x0).evaluate)\n for f in retrieve_indexed(dx):\n assert len(f.indices[0].atoms(Float)) == 0\n\n @pytest.mark.parametrize('SymbolType, dim', [\n (Function, x), (Function, y),\n (TimeFunction, x), (TimeFunction, y), (TimeFunction, t),\n ])\n def test_stencil_derivative(self, SymbolType, dim):\n \"\"\"Test symbolic behaviour when expanding stencil derivatives\"\"\"\n i = dim(self.grid)\n u = SymbolType(name='u', grid=self.grid)\n u.data[:] = 66.6\n di = u.diff(i)\n dii = u.diff(i, i)\n # Check for sympy Derivative objects\n assert(isinstance(di, Derivative) and isinstance(dii, Derivative))\n s_di = di.as_finite_difference([i - i.spacing, i])\n s_dii = dii.as_finite_difference([i - i.spacing, i, i + i.spacing])\n # Check stencil length of first and second derivatives\n assert(len(s_di.args) == 2 and len(s_dii.args) == 3)\n u_di = s_di.args[0].args[1]\n u_dii = s_di.args[0].args[1]\n # Ensure that devito meta-data survived symbolic transformation\n assert(u_di.grid.shape == self.shape and u_dii.grid.shape == self.shape)\n assert(u_di.shape == u.shape and u_dii.shape == u.shape)\n assert(np.allclose(u_di.data, 66.6))\n assert(np.allclose(u_dii.data, 66.6))\n\n @pytest.mark.parametrize('SymbolType, derivative, dim, expected', [\n (Function, ['dx2'], 3, 'Derivative(u(x, y, z), (x, 2))'),\n (Function, ['dx2dy'], 3, 'Derivative(u(x, y, z), (x, 2), y)'),\n (Function, ['dx2dydz'], 3, 'Derivative(u(x, y, z), (x, 2), y, z)'),\n (Function, ['dx2', 'dy'], 3, 'Derivative(Derivative(u(x, y, z), (x, 2)), y)'),\n (Function, ['dx2dy', 'dz2'], 3,\n 'Derivative(Derivative(u(x, y, z), (x, 2), y), (z, 2))'),\n (TimeFunction, ['dx2'], 3, 'Derivative(u(t, x, y, z), (x, 2))'),\n (TimeFunction, ['dx2dy'], 3, 'Derivative(u(t, x, y, z), (x, 2), y)'),\n (TimeFunction, ['dx2', 'dy'], 3,\n 'Derivative(Derivative(u(t, x, y, z), (x, 2)), y)'),\n (TimeFunction, ['dx', 'dy', 'dx2', 'dz', 'dydz'], 3,\n 'Derivative(Derivative(Derivative(Derivative(Derivative(u(t, x, y, z), x), y),' +\n ' (x, 2)), z), y, z)')\n ])\n def test_unevaluation(self, SymbolType, derivative, dim, expected):\n u = SymbolType(name='u', grid=self.grid, 
time_order=2, space_order=2)\n expr = getattr(u, derivative[0])\n for d in derivative[1:]:\n expr = getattr(expr, d)\n assert(expr.__str__() == expected)\n # Make sure the FD evaluation executes\n expr.evaluate\n\n @pytest.mark.parametrize('expr,expected', [\n ('u.dx + u.dy', 'Derivative(u, x) + Derivative(u, y)'),\n ('u.dxdy', 'Derivative(u, x, y)'),\n ('u.laplace',\n 'Derivative(u, (x, 2)) + Derivative(u, (y, 2)) + Derivative(u, (z, 2))'),\n ('(u.dx + u.dy).dx', 'Derivative(Derivative(u, x) + Derivative(u, y), x)'),\n ('((u.dx + u.dy).dx + u.dxdy).dx',\n 'Derivative(Derivative(Derivative(u, x) + Derivative(u, y), x) +' +\n ' Derivative(u, x, y), x)'),\n ('(u**4).dx', 'Derivative(u**4, x)'),\n ('(u/4).dx', 'Derivative(u/4, x)'),\n ('((u.dx + v.dy).dx * v.dx).dy.dz',\n 'Derivative(Derivative(Derivative(Derivative(u, x) + Derivative(v, y), x) *' +\n ' Derivative(v, x), y), z)')\n ])\n def test_arithmetic(self, expr, expected):\n x, y, z = self.grid.dimensions\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n v = Function(name='v', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n expected = eval(expected)\n assert expr == expected\n\n @pytest.mark.parametrize('expr, rules', [\n ('u.dx + u.dy', '{u.indices[0]: 1, u.indices[1]: 0}'),\n ('u.dxdy - u.dxdz', '{u.indices[0]: u.indices[0] + u.indices[0].spacing,' +\n 'u.indices[1]: 0, u.indices[2]: u.indices[1]}'),\n ('u.dx2dy + u.dz ', '{u.indices[0]: u.indices[0] + u.indices[0].spacing,' +\n 'u.indices[2]: u.indices[2] - 10}'),\n ])\n def test_derivative_eval_at(self, expr, rules):\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n rules = eval(rules)\n assert expr.evaluate.xreplace(rules) == expr.xreplace(rules).evaluate\n\n @pytest.mark.parametrize('expr, rules', [\n ('u.dx', '{u.indices[0]: 1}'),\n ('u.dy', '{u.indices[1]: u.indices[2] - 7}'),\n ('u.dz', '{u.indices[2]: u.indices[0] + u.indices[1].spacing}'),\n ])\n def test_derivative_eval_at_expr(self, expr, rules):\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n rules = eval(rules)\n assert expr.evaluate.xreplace(rules) == expr.xreplace(rules).evaluate\n assert expr.expr == expr.xreplace(rules).expr\n\n @pytest.mark.parametrize('expr, composite_rules', [\n ('u.dx', '[{u.indices[0]: 1}, {1: 4}]'),\n ])\n def test_derivative_eval_at_composite(self, expr, composite_rules):\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n evaluated_expr = expr.evaluate\n composite_rules = eval(composite_rules)\n for mapper in composite_rules:\n evaluated_expr = evaluated_expr.xreplace(mapper)\n expr = expr.xreplace(mapper)\n assert evaluated_expr == expr.evaluate\n\n @pytest.mark.parametrize('SymbolType, derivative, dim', [\n (Function, 'dx2', 3), (Function, 'dy2', 3),\n (TimeFunction, 'dx2', 3), (TimeFunction, 'dy2', 3), (TimeFunction, 'dt', 2)\n ])\n def test_preformed_derivatives(self, SymbolType, derivative, dim):\n \"\"\"Test the stencil expressions provided by devito objects\"\"\"\n u = SymbolType(name='u', grid=self.grid, time_order=2, space_order=2)\n expr = getattr(u, derivative)\n assert(len(expr.evaluate.args) == dim)\n\n @pytest.mark.parametrize('derivative, dim', [\n ('dx', x), ('dy', y), ('dz', z)\n ])\n @pytest.mark.parametrize('order', [1, 2, 4, 6, 8, 10, 12, 14, 16])\n def test_derivatives_space(self, derivative, dim, order):\n \"\"\"Test first derivative expressions against native sympy\"\"\"\n 
dim = dim(self.grid)\n u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)\n expr = getattr(u, derivative).evaluate\n # Establish native sympy derivative expression\n width = int(order / 2)\n if order <= 2:\n indices = [dim, dim + dim.spacing]\n else:\n indices = [(dim + i * dim.spacing) for i in range(-width, width + 1)]\n\n s_expr = u.diff(dim).as_finite_difference(indices).evalf(_PRECISION)\n assert(simplify(expr - s_expr) == 0) # Symbolic equality\n assert type(expr) == EvalDiffDerivative\n expr1 = s_expr.func(*expr.args)\n assert(expr1 == s_expr) # Exact equality\n\n @pytest.mark.parametrize('derivative, dim', [\n ('dx2', x), ('dy2', y), ('dz2', z)\n ])\n @pytest.mark.parametrize('order', [2, 4, 6, 8, 10, 12, 14, 16])\n def test_second_derivatives_space(self, derivative, dim, order):\n \"\"\"\n Test second derivative expressions against native sympy.\n \"\"\"\n dim = dim(self.grid)\n u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)\n expr = getattr(u, derivative).evaluate\n # Establish native sympy derivative expression\n width = int(order / 2)\n indices = [(dim + i * dim.spacing) for i in range(-width, width + 1)]\n s_expr = u.diff(dim, dim).as_finite_difference(indices).evalf(_PRECISION)\n assert(simplify(expr - s_expr) == 0) # Symbolic equality\n assert type(expr) == EvalDiffDerivative\n expr1 = s_expr.func(*expr.args)\n assert(expr1 == s_expr) # Exact equality\n\n @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])\n # Only test x and t as y and z are the same as x\n @pytest.mark.parametrize('derivative', ['dx', 'dxl', 'dxr', 'dx2'])\n def test_fd_space(self, derivative, space_order):\n \"\"\"\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p.\n \"\"\"\n # dummy axis dimension\n nx = 100\n xx = np.linspace(-1, 1, nx)\n dx = xx[1] - xx[0]\n # Symbolic data\n grid = Grid(shape=(nx,), dtype=np.float32)\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=space_order)\n du = Function(name=\"du\", grid=grid, space_order=space_order)\n # Define polynomial with exact fd\n coeffs = np.ones((space_order,), dtype=np.float32)\n polynome = sum([coeffs[i]*x**i for i in range(0, space_order)])\n polyvalues = np.array([polynome.subs(x, xi) for xi in xx], np.float32)\n # Fill original data with the polynomial values\n u.data[:] = polyvalues\n # True derivative of the polynome\n Dpolynome = diff(diff(polynome)) if derivative == 'dx2' else diff(polynome)\n Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx], np.float32)\n # FD derivative, symbolic\n u_deriv = getattr(u, derivative)\n # Compute numerical FD\n stencil = Eq(du, u_deriv)\n op = Operator(stencil, subs={x.spacing: dx})\n op.apply()\n\n # Check exactness of the numerical derivative except inside space_brd\n space_border = space_order\n error = abs(du.data[space_border:-space_border] -\n Dpolyvalues[space_border:-space_border])\n assert np.isclose(np.mean(error), 0., atol=1e-3)\n\n @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])\n @pytest.mark.parametrize('stagger', [centered, left, right])\n # Only test x and t as y and z are the same as x\n def test_fd_space_staggered(self, space_order, stagger):\n \"\"\"\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p\n 
\"\"\"\n # dummy axis dimension\n nx = 101\n xx = np.linspace(-1, 1, nx)\n dx = xx[1] - xx[0]\n # Symbolic data\n grid = Grid(shape=(nx,), dtype=np.float32)\n x = grid.dimensions[0]\n\n # Location of the staggered function\n if stagger == left:\n off = -.5\n side = -x\n xx2 = xx + off * dx\n elif stagger == right:\n off = .5\n side = x\n xx2 = xx + off * dx\n else:\n side = NODE\n xx2 = xx\n\n u = Function(name=\"u\", grid=grid, space_order=space_order, staggered=side)\n du = Function(name=\"du\", grid=grid, space_order=space_order, staggered=side)\n # Define polynomial with exact fd\n coeffs = np.ones((space_order-1,), dtype=np.float32)\n polynome = sum([coeffs[i]*x**i for i in range(0, space_order-1)])\n polyvalues = np.array([polynome.subs(x, xi) for xi in xx2], np.float32)\n # Fill original data with the polynomial values\n u.data[:] = polyvalues\n # True derivative of the polynome\n Dpolynome = diff(polynome)\n Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx2], np.float32)\n # Compute numerical FD\n stencil = Eq(du, u.dx)\n op = Operator(stencil, subs={x.spacing: dx})\n op.apply()\n\n # Check exactness of the numerical derivative except inside space_brd\n space_border = space_order\n error = abs(du.data[space_border:-space_border] -\n Dpolyvalues[space_border:-space_border])\n\n assert np.isclose(np.mean(error), 0., atol=1e-3)\n\n @pytest.mark.parametrize('so', [2, 4, 6, 8])\n def test_fd_new_order(self, so):\n grid = Grid((10,))\n u = Function(name=\"u\", grid=grid, space_order=so)\n u1 = Function(name=\"u\", grid=grid, space_order=so//2)\n u2 = Function(name=\"u\", grid=grid, space_order=2*so)\n assert str(u.dx(fd_order=so//2).evaluate) == str(u1.dx.evaluate)\n assert str(u.dx(fd_order=2*so).evaluate) == str(u2.dx.evaluate)\n\n def test_fd_new_side(self):\n grid = Grid((10,))\n u = Function(name=\"u\", grid=grid, space_order=4)\n assert u.dx(side=left).evaluate == u.dxl.evaluate\n assert u.dx(side=right).evaluate == u.dxr.evaluate\n assert u.dxl(side=centered).evaluate == u.dx.evaluate\n\n @pytest.mark.parametrize('so, expected', [\n (2, '1.0*u(x)/h_x - 1.0*u(x - 1.0*h_x)/h_x'),\n (4, '1.125*u(x)/h_x + 0.0416666667*u(x - 2.0*h_x)/h_x - '\n '1.125*u(x - 1.0*h_x)/h_x - 0.0416666667*u(x + 1.0*h_x)/h_x'),\n (6, '1.171875*u(x)/h_x - 0.0046875*u(x - 3.0*h_x)/h_x + '\n '0.0651041667*u(x - 2.0*h_x)/h_x - 1.171875*u(x - 1.0*h_x)/h_x - '\n '0.0651041667*u(x + 1.0*h_x)/h_x + 0.0046875*u(x + 2.0*h_x)/h_x'),\n (8, '1.19628906*u(x)/h_x + 0.000697544643*u(x - 4.0*h_x)/h_x - '\n '0.0095703125*u(x - 3.0*h_x)/h_x + 0.0797526042*u(x - 2.0*h_x)/h_x - '\n '1.19628906*u(x - 1.0*h_x)/h_x - 0.0797526042*u(x + 1.0*h_x)/h_x + '\n '0.0095703125*u(x + 2.0*h_x)/h_x - 0.000697544643*u(x + 3.0*h_x)/h_x')])\n def test_fd_new_x0(self, so, expected):\n grid = Grid((10,))\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=so)\n assert u.dx(x0=x + x.spacing).evaluate == u.dx.evaluate.subs({x: x + x.spacing})\n assert u.dx(x0=x - x.spacing).evaluate == u.dx.evaluate.subs({x: x - x.spacing})\n # half shifted compare to explicit coeffs (Forneberg)\n assert str(u.dx(x0=x - .5 * x.spacing).evaluate) == expected\n\n def test_new_x0_eval_at(self):\n \"\"\"\n Make sure that explicitly set x0 does not get overwritten by eval_at.\n \"\"\"\n grid = Grid((10,))\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=2)\n v = Function(name=\"v\", grid=grid, space_order=2)\n assert u.dx(x0=x - x.spacing/2)._eval_at(v).x0 == {x: x - x.spacing/2}\n\n def test_fd_new_lo(self):\n 
grid = Grid((10,))\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=2)\n\n dplus = \"-1.0*u(x)/h_x + 1.0*u(x + 1.0*h_x)/h_x\"\n dminus = \"1.0*u(x)/h_x - 1.0*u(x - 1.0*h_x)/h_x\"\n assert str(u.dx(x0=x + .5 * x.spacing).evaluate) == dplus\n assert str(u.dx(x0=x - .5 * x.spacing).evaluate) == dminus\n assert str(u.dx(x0=x + .5 * x.spacing, fd_order=1).evaluate) == dplus\n assert str(u.dx(x0=x - .5 * x.spacing, fd_order=1).evaluate) == dminus\n\n def test_subsampled_fd(self):\n \"\"\"\n Test that the symbolic interface is working for space subsampled\n functions.\n \"\"\"\n nt = 19\n grid = Grid(shape=(12, 12), extent=(11, 11))\n\n u = TimeFunction(name='u', grid=grid, save=nt, space_order=2)\n assert(grid.time_dim in u.indices)\n\n # Creates subsampled spatial dimensions and according grid\n dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)\n for d in u.grid.dimensions])\n grid2 = Grid((6, 6), dimensions=dims)\n u2 = TimeFunction(name='u2', grid=grid2, save=nt, space_order=1)\n for i in range(nt):\n for j in range(u2.data_with_halo.shape[2]):\n u2.data_with_halo[i, :, j] = np.arange(u2.data_with_halo.shape[2])\n\n eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2.dx)]\n op = Operator(eqns)\n op.apply(time_M=nt-2)\n # Verify that u2[1, x,y]= du2/dx[0, x, y]\n\n assert np.allclose(u.data[-1], nt-1)\n assert np.allclose(u2.data[1], 0.5)\n\n @pytest.mark.parametrize('expr,expected', [\n ('f.dx', '-f(x)/h_x + f(x + h_x)/h_x'),\n ('f.dx + g.dx', '-f(x)/h_x + f(x + h_x)/h_x - g(x)/h_x + g(x + h_x)/h_x'),\n ('-f', '-f(x)'),\n ('-(f + g)', '-f(x) - g(x)')\n ])\n def test_shortcuts(self, expr, expected):\n grid = Grid(shape=(10,))\n f = Function(name='f', grid=grid) # noqa\n g = Function(name='g', grid=grid) # noqa\n\n expr = eval(expr)\n\n assert isinstance(expr, Differentiable)\n assert expected == str(expr.evaluate)\n\n @pytest.mark.parametrize('so', [2, 5, 8])\n def test_all_shortcuts(self, so):\n \"\"\"\n Test that verify that all fd shortcuts are functional.\n \"\"\"\n grid = Grid(shape=(10, 10, 10))\n f = Function(name='f', grid=grid, space_order=so)\n g = TimeFunction(name='g', grid=grid, space_order=so)\n\n for fd in f._fd:\n assert getattr(f, fd)\n\n for fd in g._fd:\n assert getattr(g, fd)\n\n @pytest.mark.parametrize('so', [2, 4, 8, 12])\n @pytest.mark.parametrize('ndim', [1, 2])\n @pytest.mark.parametrize('derivative, adjoint_name', [\n ('dx', 'dx'),\n ('dx2', 'dx2'),\n ('dxl', 'dxr'),\n ('dxr', 'dxl')])\n def test_fd_adjoint(self, so, ndim, derivative, adjoint_name):\n grid = Grid(shape=tuple([51]*ndim), extent=tuple([25]*ndim))\n x = grid.dimensions[0]\n f = Function(name='f', grid=grid, space_order=so)\n f_deriv = Function(name='f_deriv', grid=grid, space_order=so)\n g = Function(name='g', grid=grid, space_order=so)\n g_deriv = Function(name='g_deriv', grid=grid, space_order=so)\n\n # Fill f and g with smooth cos/sin\n Operator([Eq(g, x*cos(2*np.pi*x/5)), Eq(f, sin(2*np.pi*x/8))]).apply()\n # Check symbolic expression are expected ones for the adjoint .T\n deriv = getattr(f, derivative)\n coeff = 1 if derivative == 'dx2' else -1\n expected = coeff * getattr(f, derivative).evaluate.subs({x.spacing: -x.spacing})\n assert simplify(deriv.T.evaluate) == simplify(expected)\n\n # Compute numerical derivatives and verify dot test\n # i.e <f.dx, g> = <f, g.dx.T>\n\n eq_f = Eq(f_deriv, deriv)\n eq_g = Eq(g_deriv, getattr(g, derivative).T)\n\n op = Operator([eq_f, eq_g])\n op()\n\n a = np.dot(f_deriv.data.reshape(-1), g.data.reshape(-1))\n b = 
np.dot(g_deriv.data.reshape(-1), f.data.reshape(-1))\n assert np.isclose(1 - a/b, 0, atol=1e-5)\n\n @pytest.mark.parametrize('shift', [None, .5, -.5])\n @pytest.mark.parametrize('ndim', [2, 3])\n def test_shifted_div(self, shift, ndim):\n grid = Grid(tuple([11]*ndim))\n f = Function(name=\"f\", grid=grid, space_order=4)\n df = div(f, shift=shift).evaluate\n ref = 0\n for d in grid.dimensions:\n x0 = None if shift is None else d + shift * d.spacing\n ref += getattr(f, 'd%s' % d.name)(x0=x0)\n assert df == ref.evaluate\n\n @pytest.mark.parametrize('shift', [None, .5, -.5])\n @pytest.mark.parametrize('ndim', [2, 3])\n def test_shifted_div_of_vectorfunction(self, shift, ndim):\n grid = Grid(tuple([11]*ndim))\n f = Function(name=\"f\", grid=grid, space_order=4)\n df = div(grad(f), shift=shift).evaluate\n ref = 0\n for i, d in enumerate(grid.dimensions):\n x0 = None if shift is None else d + shift * d.spacing\n ref += getattr(grad(f)[i], 'd%s' % d.name)(x0=x0)\n assert df == ref.evaluate\n\n @pytest.mark.parametrize('shift', [None, .5, -.5])\n @pytest.mark.parametrize('ndim', [2, 3])\n def test_shifted_grad(self, shift, ndim):\n grid = Grid(tuple([11]*ndim))\n f = Function(name=\"f\", grid=grid, space_order=4)\n g = grad(f, shift=shift).evaluate\n for d, gi in zip(grid.dimensions, g):\n x0 = None if shift is None else d + shift * d.spacing\n assert gi == getattr(f, 'd%s' % d.name)(x0=x0).evaluate\n"
] |
[
[
"numpy.allclose",
"numpy.linspace",
"numpy.arange",
"numpy.ones",
"numpy.mean",
"numpy.isclose"
]
] |
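The row above bundles Devito's derivative tests; its `test_fd_space` relies on the property that an order-p finite-difference scheme is exact for polynomials of degree below p. A minimal sketch of that exactness check, assuming plain NumPy only (no Devito grid or `Operator`) and a hand-rolled centred second-order stencil rather than the library's generated one:

```python
# Sketch: a centred 2nd-order first-derivative stencil is exact for a quadratic.
# Assumption: pure NumPy, synthetic axis; not the Devito test itself.
import numpy as np

nx = 101
xx = np.linspace(-1.0, 1.0, nx)
dx = xx[1] - xx[0]

poly = 1.0 + 2.0 * xx + 3.0 * xx**2      # p(x) = 1 + 2x + 3x^2
dpoly = 2.0 + 6.0 * xx                   # exact derivative p'(x)

# Centred difference on interior points: (p(x+h) - p(x-h)) / (2h)
fd = (poly[2:] - poly[:-2]) / (2.0 * dx)

error = np.abs(fd - dpoly[1:-1])
assert np.isclose(np.mean(error), 0.0, atol=1e-10)   # exact up to round-off
```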
Leedk3/pvcnn
|
[
"8e3bddbc0719bdc262c5d438273eb2a54e45d9d4"
] |
[
"data/kitti/example.py"
] |
[
"''' Prepare KITTI data for 3D object detection.\n\nAuthor: Charles R. Qi\nDate: September 2017\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nimport cv2\nfrom PIL import Image\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR, 'mayavi'))\nimport kitti_util as utils\nimport cPickle as pickle\nfrom kitti_object import *\nimport argparse\n\n\ndef in_hull(p, hull):\n from scipy.spatial import Delaunay\n if not isinstance(hull,Delaunay):\n hull = Delaunay(hull)\n return hull.find_simplex(p)>=0\n\ndef extract_pc_in_box3d(pc, box3d):\n ''' pc: (N,3), box3d: (8,3) '''\n box3d_roi_inds = in_hull(pc[:,0:3], box3d)\n return pc[box3d_roi_inds,:], box3d_roi_inds\n\ndef extract_pc_in_box2d(pc, box2d):\n ''' pc: (N,2), box2d: (xmin,ymin,xmax,ymax) '''\n box2d_corners = np.zeros((4,2))\n box2d_corners[0,:] = [box2d[0],box2d[1]] \n box2d_corners[1,:] = [box2d[2],box2d[1]] \n box2d_corners[2,:] = [box2d[2],box2d[3]] \n box2d_corners[3,:] = [box2d[0],box2d[3]] \n box2d_roi_inds = in_hull(pc[:,0:2], box2d_corners)\n return pc[box2d_roi_inds,:], box2d_roi_inds\n \ndef demo():\n import mayavi.mlab as mlab\n from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d\n dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'))\n data_idx = 0\n\n # Load data from dataset\n objects = dataset.get_label_objects(data_idx)\n objects[0].print_object()\n img = dataset.get_image(data_idx)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) \n img_height, img_width, img_channel = img.shape\n print(('Image shape: ', img.shape))\n pc_velo = dataset.get_lidar(data_idx)[:,0:3]\n calib = dataset.get_calibration(data_idx)\n\n ## Draw lidar in rect camera coord\n #print(' -------- LiDAR points in rect camera coordination --------')\n #pc_rect = calib.project_velo_to_rect(pc_velo)\n #fig = draw_lidar_simple(pc_rect)\n #raw_input()\n\n # Draw 2d and 3d boxes on image\n print(' -------- 2D/3D bounding boxes in images --------')\n show_image_with_boxes(img, objects, calib)\n raw_input()\n\n # Show all LiDAR points. 
Draw 3d box in LiDAR point cloud\n print(' -------- LiDAR points and 3D boxes in velodyne coordinate --------')\n #show_lidar_with_boxes(pc_velo, objects, calib)\n #raw_input()\n show_lidar_with_boxes(pc_velo, objects, calib, True, img_width, img_height)\n raw_input()\n\n # Visualize LiDAR points on images\n print(' -------- LiDAR points projected to image plane --------')\n show_lidar_on_image(pc_velo, img, calib, img_width, img_height) \n raw_input()\n \n # Show LiDAR points that are in the 3d box\n print(' -------- LiDAR points in a 3D bounding box --------')\n box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P) \n box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)\n box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo)\n print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))\n\n fig = mlab.figure(figure=None, bgcolor=(0,0,0),\n fgcolor=None, engine=None, size=(1000, 500))\n draw_lidar(box3droi_pc_velo, fig=fig)\n draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)\n mlab.show(1)\n raw_input()\n \n # UVDepth Image and its backprojection to point clouds\n print(' -------- LiDAR points in a frustum from a 2D box --------')\n imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(pc_velo,\n calib, 0, 0, img_width, img_height, True)\n imgfov_pts_2d = pts_2d[fov_inds,:]\n imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)\n\n cameraUVDepth = np.zeros_like(imgfov_pc_rect)\n cameraUVDepth[:,0:2] = imgfov_pts_2d\n cameraUVDepth[:,2] = imgfov_pc_rect[:,2]\n\n # Show that the points are exactly the same\n backprojected_pc_velo = calib.project_image_to_velo(cameraUVDepth)\n print(imgfov_pc_velo[0:20])\n print(backprojected_pc_velo[0:20])\n\n fig = mlab.figure(figure=None, bgcolor=(0,0,0),\n fgcolor=None, engine=None, size=(1000, 500))\n draw_lidar(backprojected_pc_velo, fig=fig)\n raw_input()\n\n # Only display those points that fall into 2d box\n print(' -------- LiDAR points in a frustum from a 2D box --------')\n xmin,ymin,xmax,ymax = \\\n objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax\n boxfov_pc_velo = \\\n get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax)\n print(('2d box FOV point num: ', boxfov_pc_velo.shape[0]))\n\n fig = mlab.figure(figure=None, bgcolor=(0,0,0),\n fgcolor=None, engine=None, size=(1000, 500))\n draw_lidar(boxfov_pc_velo, fig=fig)\n mlab.show(1)\n raw_input()\n\ndef random_shift_box2d(box2d, shift_ratio=0.1):\n ''' Randomly shift box center, randomly scale width and height \n '''\n r = shift_ratio\n xmin,ymin,xmax,ymax = box2d\n h = ymax-ymin\n w = xmax-xmin\n cx = (xmin+xmax)/2.0\n cy = (ymin+ymax)/2.0\n cx2 = cx + w*r*(np.random.random()*2-1)\n cy2 = cy + h*r*(np.random.random()*2-1)\n h2 = h*(1+np.random.random()*2*r-r) # 0.9 to 1.1\n w2 = w*(1+np.random.random()*2*r-r) # 0.9 to 1.1\n return np.array([cx2-w2/2.0, cy2-h2/2.0, cx2+w2/2.0, cy2+h2/2.0])\n \ndef extract_frustum_data(idx_filename, split, output_filename, viz=False,\n perturb_box2d=False, augmentX=1, type_whitelist=['Car']):\n ''' Extract point clouds and corresponding annotations in frustums\n defined generated from 2D bounding boxes\n Lidar points and 3d boxes are in *rect camera* coord system\n (as that in 3d box label files)\n \n Input:\n idx_filename: string, each line of the file is a sample ID\n split: string, either trianing or testing\n output_filename: string, the name for output .pickle file\n viz: bool, whether to visualize extracted data\n perturb_box2d: bool, whether to perturb the box2d\n (used for 
data augmentation in train set)\n augmentX: scalar, how many augmentations to have for each 2D box.\n type_whitelist: a list of strings, object types we are interested in.\n Output:\n None (will write a .pickle file to the disk)\n '''\n dataset = kitti_object(os.path.join(ROOT_DIR,'dataset/KITTI/object'), split)\n data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]\n\n id_list = [] # int number\n box2d_list = [] # [xmin,ymin,xmax,ymax]\n box3d_list = [] # (8,3) array in rect camera coord\n input_list = [] # channel number = 4, xyz,intensity in rect camera coord\n label_list = [] # 1 for roi object, 0 for clutter\n type_list = [] # string e.g. Car\n heading_list = [] # ry (along y-axis in rect camera coord) radius of\n # (cont.) clockwise angle from positive x axis in velo coord.\n box3d_size_list = [] # array of l,w,h\n frustum_angle_list = [] # angle of 2d box center from pos x-axis\n\n pos_cnt = 0\n all_cnt = 0\n for data_idx in data_idx_list:\n print('------------- ', data_idx)\n calib = dataset.get_calibration(data_idx) # 3 by 4 matrix\n objects = dataset.get_label_objects(data_idx)\n pc_velo = dataset.get_lidar(data_idx)\n pc_rect = np.zeros_like(pc_velo)\n pc_rect[:,0:3] = calib.project_velo_to_rect(pc_velo[:,0:3])\n pc_rect[:,3] = pc_velo[:,3]\n img = dataset.get_image(data_idx)\n img_height, img_width, img_channel = img.shape\n _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(pc_velo[:,0:3],\n calib, 0, 0, img_width, img_height, True)\n\n for obj_idx in range(len(objects)):\n if objects[obj_idx].type not in type_whitelist :continue\n\n # 2D BOX: Get pts rect backprojected \n box2d = objects[obj_idx].box2d\n for _ in range(augmentX):\n # Augment data by box2d perturbation\n if perturb_box2d:\n xmin,ymin,xmax,ymax = random_shift_box2d(box2d)\n print(box2d)\n print(xmin,ymin,xmax,ymax)\n else:\n xmin,ymin,xmax,ymax = box2d\n box_fov_inds = (pc_image_coord[:,0]<xmax) & \\\n (pc_image_coord[:,0]>=xmin) & \\\n (pc_image_coord[:,1]<ymax) & \\\n (pc_image_coord[:,1]>=ymin)\n box_fov_inds = box_fov_inds & img_fov_inds\n pc_in_box_fov = pc_rect[box_fov_inds,:]\n # Get frustum angle (according to center pixel in 2D BOX)\n box2d_center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0])\n uvdepth = np.zeros((1,3))\n uvdepth[0,0:2] = box2d_center\n uvdepth[0,2] = 20 # some random depth\n box2d_center_rect = calib.project_image_to_rect(uvdepth)\n frustum_angle = -1 * np.arctan2(box2d_center_rect[0,2],\n box2d_center_rect[0,0])\n # 3D BOX: Get pts velo in 3d box\n obj = objects[obj_idx]\n box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P) \n _,inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)\n label = np.zeros((pc_in_box_fov.shape[0]))\n label[inds] = 1\n # Get 3D BOX heading\n heading_angle = obj.ry\n # Get 3D BOX size\n box3d_size = np.array([obj.l, obj.w, obj.h])\n\n # Reject too far away object or object without points\n if ymax-ymin<25 or np.sum(label)==0:\n continue\n\n id_list.append(data_idx)\n box2d_list.append(np.array([xmin,ymin,xmax,ymax]))\n box3d_list.append(box3d_pts_3d)\n input_list.append(pc_in_box_fov)\n label_list.append(label)\n type_list.append(objects[obj_idx].type)\n heading_list.append(heading_angle)\n box3d_size_list.append(box3d_size)\n frustum_angle_list.append(frustum_angle)\n \n # collect statistics\n pos_cnt += np.sum(label)\n all_cnt += pc_in_box_fov.shape[0]\n \n print('Average pos ratio: %f' % (pos_cnt/float(all_cnt)))\n print('Average npoints: %f' % (float(all_cnt)/len(id_list)))\n \n with open(output_filename,'wb') as fp:\n 
pickle.dump(id_list, fp)\n pickle.dump(box2d_list,fp)\n pickle.dump(box3d_list,fp)\n pickle.dump(input_list, fp)\n pickle.dump(label_list, fp)\n pickle.dump(type_list, fp)\n pickle.dump(heading_list, fp)\n pickle.dump(box3d_size_list, fp)\n pickle.dump(frustum_angle_list, fp)\n \n if viz:\n import mayavi.mlab as mlab\n for i in range(10):\n p1 = input_list[i]\n seg = label_list[i] \n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,0], p1[:,1], p1[:,2], seg, mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,2], -p1[:,0], -p1[:,1], seg, mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n raw_input()\n\ndef get_box3d_dim_statistics(idx_filename):\n ''' Collect and dump 3D bounding box statistics '''\n dataset = kitti_object(os.path.join(ROOT_DIR,'dataset/KITTI/object'))\n dimension_list = []\n type_list = []\n ry_list = []\n data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]\n for data_idx in data_idx_list:\n print('------------- ', data_idx)\n calib = dataset.get_calibration(data_idx) # 3 by 4 matrix\n objects = dataset.get_label_objects(data_idx)\n for obj_idx in range(len(objects)):\n obj = objects[obj_idx]\n if obj.type=='DontCare':continue\n dimension_list.append(np.array([obj.l,obj.w,obj.h])) \n type_list.append(obj.type) \n ry_list.append(obj.ry)\n\n with open('box3d_dimensions.pickle','wb') as fp:\n pickle.dump(type_list, fp)\n pickle.dump(dimension_list, fp)\n pickle.dump(ry_list, fp)\n\ndef read_det_file(det_filename):\n ''' Parse lines in 2D detection output files '''\n det_id2str = {1:'Pedestrian', 2:'Car', 3:'Cyclist'}\n id_list = []\n type_list = []\n prob_list = []\n box2d_list = []\n for line in open(det_filename, 'r'):\n t = line.rstrip().split(\" \")\n id_list.append(int(os.path.basename(t[0]).rstrip('.png')))\n type_list.append(det_id2str[int(t[1])])\n prob_list.append(float(t[2]))\n box2d_list.append(np.array([float(t[i]) for i in range(3,7)]))\n return id_list, type_list, box2d_list, prob_list\n\n \ndef extract_frustum_data_rgb_detection(det_filename, split, output_filename,\n viz=False,\n type_whitelist=['Car'],\n img_height_threshold=25,\n lidar_point_threshold=5):\n ''' Extract point clouds in frustums extruded from 2D detection boxes.\n Update: Lidar points and 3d boxes are in *rect camera* coord system\n (as that in 3d box label files)\n \n Input:\n det_filename: string, each line is\n img_path typeid confidence xmin ymin xmax ymax\n split: string, either trianing or testing\n output_filename: string, the name for output .pickle file\n type_whitelist: a list of strings, object types we are interested in.\n img_height_threshold: int, neglect image with height lower than that.\n lidar_point_threshold: int, neglect frustum with too few points.\n Output:\n None (will write a .pickle file to the disk)\n '''\n dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'), split)\n det_id_list, det_type_list, det_box2d_list, det_prob_list = \\\n read_det_file(det_filename)\n cache_id = -1\n cache = None\n \n id_list = []\n type_list = []\n box2d_list = []\n prob_list = []\n input_list = [] # channel number = 4, xyz,intensity in rect camera coord\n frustum_angle_list = [] # angle of 2d box center from pos x-axis\n\n for det_idx in range(len(det_id_list)):\n data_idx = det_id_list[det_idx]\n print('det idx: %d/%d, data idx: %d' % 
\\\n (det_idx, len(det_id_list), data_idx))\n if cache_id != data_idx:\n calib = dataset.get_calibration(data_idx) # 3 by 4 matrix\n pc_velo = dataset.get_lidar(data_idx)\n pc_rect = np.zeros_like(pc_velo)\n pc_rect[:,0:3] = calib.project_velo_to_rect(pc_velo[:,0:3])\n pc_rect[:,3] = pc_velo[:,3]\n img = dataset.get_image(data_idx)\n img_height, img_width, img_channel = img.shape\n _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(\\\n pc_velo[:,0:3], calib, 0, 0, img_width, img_height, True)\n cache = [calib,pc_rect,pc_image_coord,img_fov_inds]\n cache_id = data_idx\n else:\n calib,pc_rect,pc_image_coord,img_fov_inds = cache\n\n if det_type_list[det_idx] not in type_whitelist: continue\n\n # 2D BOX: Get pts rect backprojected \n xmin,ymin,xmax,ymax = det_box2d_list[det_idx]\n box_fov_inds = (pc_image_coord[:,0]<xmax) & \\\n (pc_image_coord[:,0]>=xmin) & \\\n (pc_image_coord[:,1]<ymax) & \\\n (pc_image_coord[:,1]>=ymin)\n box_fov_inds = box_fov_inds & img_fov_inds\n pc_in_box_fov = pc_rect[box_fov_inds,:]\n # Get frustum angle (according to center pixel in 2D BOX)\n box2d_center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0])\n uvdepth = np.zeros((1,3))\n uvdepth[0,0:2] = box2d_center\n uvdepth[0,2] = 20 # some random depth\n box2d_center_rect = calib.project_image_to_rect(uvdepth)\n frustum_angle = -1 * np.arctan2(box2d_center_rect[0,2],\n box2d_center_rect[0,0])\n \n # Pass objects that are too small\n if ymax-ymin<img_height_threshold or \\\n len(pc_in_box_fov)<lidar_point_threshold:\n continue\n \n id_list.append(data_idx)\n type_list.append(det_type_list[det_idx])\n box2d_list.append(det_box2d_list[det_idx])\n prob_list.append(det_prob_list[det_idx])\n input_list.append(pc_in_box_fov)\n frustum_angle_list.append(frustum_angle)\n \n with open(output_filename,'wb') as fp:\n pickle.dump(id_list, fp)\n pickle.dump(box2d_list,fp)\n pickle.dump(input_list, fp)\n pickle.dump(type_list, fp)\n pickle.dump(frustum_angle_list, fp)\n pickle.dump(prob_list, fp)\n \n if viz:\n import mayavi.mlab as mlab\n for i in range(10):\n p1 = input_list[i]\n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,0], p1[:,1], p1[:,2], p1[:,1], mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,2], -p1[:,0], -p1[:,1], seg, mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n raw_input()\n\ndef write_2d_rgb_detection(det_filename, split, result_dir):\n ''' Write 2D detection results for KITTI evaluation.\n Convert from Wei's format to KITTI format. 
\n \n Input:\n det_filename: string, each line is\n img_path typeid confidence xmin ymin xmax ymax\n split: string, either trianing or testing\n result_dir: string, folder path for results dumping\n Output:\n None (will write <xxx>.txt files to disk)\n\n Usage:\n write_2d_rgb_detection(\"val_det.txt\", \"training\", \"results\")\n '''\n dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'), split)\n det_id_list, det_type_list, det_box2d_list, det_prob_list = \\\n read_det_file(det_filename)\n # map from idx to list of strings, each string is a line without \\n\n results = {} \n for i in range(len(det_id_list)):\n idx = det_id_list[i]\n typename = det_type_list[i]\n box2d = det_box2d_list[i]\n prob = det_prob_list[i]\n output_str = typename + \" -1 -1 -10 \"\n output_str += \"%f %f %f %f \" % (box2d[0],box2d[1],box2d[2],box2d[3])\n output_str += \"-1 -1 -1 -1000 -1000 -1000 -10 %f\" % (prob)\n if idx not in results: results[idx] = []\n results[idx].append(output_str)\n if not os.path.exists(result_dir): os.mkdir(result_dir)\n output_dir = os.path.join(result_dir, 'data')\n if not os.path.exists(output_dir): os.mkdir(output_dir)\n for idx in results:\n pred_filename = os.path.join(output_dir, '%06d.txt'%(idx))\n fout = open(pred_filename, 'w')\n for line in results[idx]:\n fout.write(line+'\\n')\n fout.close() \n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--demo', action='store_true', help='Run demo.')\n parser.add_argument('--gen_train', action='store_true', help='Generate train split frustum data with perturbed GT 2D boxes')\n parser.add_argument('--gen_val', action='store_true', help='Generate val split frustum data with GT 2D boxes')\n parser.add_argument('--gen_val_rgb_detection', action='store_true', help='Generate val split frustum data with RGB detection 2D boxes')\n parser.add_argument('--car_only', action='store_true', help='Only generate cars; otherwise cars, peds and cycs')\n args = parser.parse_args()\n\n if args.demo:\n demo()\n exit()\n\n if args.car_only:\n type_whitelist = ['Car']\n output_prefix = 'frustum_caronly_'\n else:\n type_whitelist = ['Car', 'Pedestrian', 'Cyclist']\n output_prefix = 'frustum_carpedcyc_'\n\n if args.gen_train:\n extract_frustum_data(\\\n os.path.join(BASE_DIR, 'image_sets/train.txt'),\n 'training',\n os.path.join(BASE_DIR, output_prefix+'train.pickle'), \n viz=False, perturb_box2d=True, augmentX=5,\n type_whitelist=type_whitelist)\n\n if args.gen_val:\n extract_frustum_data(\\\n os.path.join(BASE_DIR, 'image_sets/val.txt'),\n 'training',\n os.path.join(BASE_DIR, output_prefix+'val.pickle'),\n viz=False, perturb_box2d=False, augmentX=1,\n type_whitelist=type_whitelist)\n\n if args.gen_val_rgb_detection:\n extract_frustum_data_rgb_detection(\\\n os.path.join(BASE_DIR, 'rgb_detections/rgb_detection_val.txt'),\n 'training',\n os.path.join(BASE_DIR, output_prefix+'val_rgb_detection.pickle'),\n viz=False,\n type_whitelist=type_whitelist) \n{\"mode\":\"full\",\"isActive\":false}"
] |
[
[
"numpy.random.random",
"scipy.spatial.Delaunay",
"numpy.arctan2",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
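The row above is the KITTI frustum-extraction script; its core NumPy usage is a boolean point-in-2D-box mask over projected image coordinates plus a frustum angle from `np.arctan2`. A minimal sketch with synthetic coordinates (no KITTI data, calibration object, or back-projection; the box corners are made up):

```python
# Sketch of the point-in-2D-box mask and frustum-angle pattern, on fake data.
import numpy as np

rng = np.random.default_rng(0)
pc_image_coord = rng.uniform(0, 100, size=(1000, 2))   # projected (u, v) pixels
xmin, ymin, xmax, ymax = 20.0, 30.0, 60.0, 70.0        # hypothetical 2D box

box_fov_inds = (
    (pc_image_coord[:, 0] >= xmin) & (pc_image_coord[:, 0] < xmax) &
    (pc_image_coord[:, 1] >= ymin) & (pc_image_coord[:, 1] < ymax)
)
print("points inside the box:", int(np.sum(box_fov_inds)))

# Frustum angle measured from the positive x-axis, as in the script,
# here applied to an arbitrary (depth, lateral) pair instead of a
# back-projected box centre.
frustum_angle = -1 * np.arctan2(15.0, 5.0)
print("frustum angle (rad):", frustum_angle)
```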
vsuomi/fibroid-classification
|
[
"749e77af4dbd28b00184a9aa9e32b9d891493bd4"
] |
[
"scale_features.py"
] |
[
"# -*- coding: utf-8 -*-\n'''\nCreated on Thu May 31 11:38:48 2018\n\n@author:\n \n Visa Suomi\n Turku University Hospital\n May 2018\n \n@description:\n \n This function is used to scale features using different scaling types\n \n'''\n\n#%% import necessary packages\n\nimport numpy as np\nimport pandas as pd\n\n#%% define function\n\ndef scale_features(features, scaling):\n \n ''' Scales given features with standard deviation\n \n Args:\n features: pandas Dataframe of features\n scaling: type of scaling: linear ('linear'), logarithmic ('log') or\n z-score ('z-score')\n Returns:\n scaled_features: scaled features\n '''\n \n if scaling == 'linear':\n min_val = features.min()\n max_val = features.max()\n scale = (max_val - min_val) / 2.0\n a = (features - min_val)\n b = scale\n scaled_features = np.divide(a, b, out=np.zeros_like(a), where=b!=0) - 1.0 # NaN to zero - 1\n elif scaling == 'log':\n scaled_features = np.log(features + 1.0)\n elif scaling == 'z-score':\n a = (features - features.mean())\n b = features.std()\n scaled_features = np.divide(a, b, out=np.zeros_like(a), where=b!=0) # NaN to zero\n else:\n print('Unknown scaling type')\n scaled_features = features\n \n scaled_features = pd.DataFrame(scaled_features, columns = list(features), \n index = features.index, dtype = float)\n \n return scaled_features"
] |
[
[
"numpy.log",
"numpy.zeros_like"
]
] |
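The row above is a small feature-scaling helper; its z-score branch divides by the per-column standard deviation while mapping zero-variance columns to zero through `np.divide(..., out=..., where=...)`. A minimal sketch of that branch on a synthetic DataFrame (column names and values are made up):

```python
# Sketch of z-score scaling with a divide-safe zero-variance column.
import numpy as np
import pandas as pd

features = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [5.0, 5.0, 5.0]})

a = (features - features.mean()).to_numpy()
b = features.std().to_numpy()                     # column "b" has std == 0
scaled = np.divide(a, b, out=np.zeros_like(a), where=b != 0)

print(pd.DataFrame(scaled, columns=features.columns, index=features.index))
```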
MaiRajborirug/scikit-learn
|
[
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5",
"c18d015372f7041099d19c215cd4c36ffd6fe5c5"
] |
[
"sklearn/tests/test_config.py",
"sklearn/decomposition/_base.py",
"examples/multioutput/plot_classifier_chain_yeast.py",
"sklearn/metrics/cluster/tests/test_unsupervised.py",
"sklearn/manifold/_spectral_embedding.py",
"sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py",
"sklearn/random_projection.py",
"sklearn/discriminant_analysis.py",
"sklearn/datasets/_olivetti_faces.py",
"examples/ensemble/plot_adaboost_multiclass.py",
"sklearn/metrics/_plot/confusion_matrix.py",
"examples/svm/plot_oneclass.py",
"sklearn/decomposition/tests/test_factor_analysis.py",
"sklearn/cluster/tests/test_dbscan.py",
"examples/classification/plot_classifier_comparison.py",
"examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py"
] |
[
"import time\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom joblib import Parallel\nimport joblib\nimport pytest\n\nfrom sklearn import get_config, set_config, config_context\nfrom sklearn.utils.fixes import delayed\nfrom sklearn.utils.fixes import parse_version\n\n\ndef test_config_context():\n assert get_config() == {\n \"assume_finite\": False,\n \"working_memory\": 1024,\n \"print_changed_only\": True,\n \"display\": \"text\",\n }\n\n # Not using as a context manager affects nothing\n config_context(assume_finite=True)\n assert get_config()[\"assume_finite\"] is False\n\n with config_context(assume_finite=True):\n assert get_config() == {\n \"assume_finite\": True,\n \"working_memory\": 1024,\n \"print_changed_only\": True,\n \"display\": \"text\",\n }\n assert get_config()[\"assume_finite\"] is False\n\n with config_context(assume_finite=True):\n with config_context(assume_finite=None):\n assert get_config()[\"assume_finite\"] is True\n\n assert get_config()[\"assume_finite\"] is True\n\n with config_context(assume_finite=False):\n assert get_config()[\"assume_finite\"] is False\n\n with config_context(assume_finite=None):\n assert get_config()[\"assume_finite\"] is False\n\n # global setting will not be retained outside of context that\n # did not modify this setting\n set_config(assume_finite=True)\n assert get_config()[\"assume_finite\"] is True\n\n assert get_config()[\"assume_finite\"] is False\n\n assert get_config()[\"assume_finite\"] is True\n\n assert get_config() == {\n \"assume_finite\": False,\n \"working_memory\": 1024,\n \"print_changed_only\": True,\n \"display\": \"text\",\n }\n\n # No positional arguments\n with pytest.raises(TypeError):\n config_context(True)\n\n # No unknown arguments\n with pytest.raises(TypeError):\n config_context(do_something_else=True).__enter__()\n\n\ndef test_config_context_exception():\n assert get_config()[\"assume_finite\"] is False\n try:\n with config_context(assume_finite=True):\n assert get_config()[\"assume_finite\"] is True\n raise ValueError()\n except ValueError:\n pass\n assert get_config()[\"assume_finite\"] is False\n\n\ndef test_set_config():\n assert get_config()[\"assume_finite\"] is False\n set_config(assume_finite=None)\n assert get_config()[\"assume_finite\"] is False\n set_config(assume_finite=True)\n assert get_config()[\"assume_finite\"] is True\n set_config(assume_finite=None)\n assert get_config()[\"assume_finite\"] is True\n set_config(assume_finite=False)\n assert get_config()[\"assume_finite\"] is False\n\n # No unknown arguments\n with pytest.raises(TypeError):\n set_config(do_something_else=True)\n\n\ndef set_assume_finite(assume_finite, sleep_duration):\n \"\"\"Return the value of assume_finite after waiting `sleep_duration`.\"\"\"\n with config_context(assume_finite=assume_finite):\n time.sleep(sleep_duration)\n return get_config()[\"assume_finite\"]\n\n\[email protected](\"backend\", [\"loky\", \"multiprocessing\", \"threading\"])\ndef test_config_threadsafe_joblib(backend):\n \"\"\"Test that the global config is threadsafe with all joblib backends.\n Two jobs are spawned and sets assume_finite to two different values.\n When the job with a duration 0.1s completes, the assume_finite value\n should be the same as the value passed to the function. 
In other words,\n it is not influenced by the other job setting assume_finite to True.\n \"\"\"\n\n if parse_version(joblib.__version__) < parse_version(\"0.12\") and backend == \"loky\":\n pytest.skip(\"loky backend does not exist in joblib <0.12\") # noqa\n\n assume_finites = [False, True]\n sleep_durations = [0.1, 0.2]\n\n items = Parallel(backend=backend, n_jobs=2)(\n delayed(set_assume_finite)(assume_finite, sleep_dur)\n for assume_finite, sleep_dur in zip(assume_finites, sleep_durations)\n )\n\n assert items == [False, True]\n\n\ndef test_config_threadsafe():\n \"\"\"Uses threads directly to test that the global config does not change\n between threads. Same test as `test_config_threadsafe_joblib` but with\n `ThreadPoolExecutor`.\"\"\"\n\n assume_finites = [False, True]\n sleep_durations = [0.1, 0.2]\n\n with ThreadPoolExecutor(max_workers=2) as e:\n items = [\n output\n for output in e.map(set_assume_finite, assume_finites, sleep_durations)\n ]\n\n assert items == [False, True]\n",
"\"\"\"Principal Component Analysis Base Classes\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n# Olivier Grisel <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Denis A. Engemann <[email protected]>\n# Kyle Kastner <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin\nfrom ..utils.validation import check_is_fitted\nfrom abc import ABCMeta, abstractmethod\n\n\nclass _BasePCA(\n _ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta\n):\n \"\"\"Base class for PCA methods.\n\n Warning: This class should not be used directly.\n Use derived classes instead.\n \"\"\"\n\n def get_covariance(self):\n \"\"\"Compute data covariance with the generative model.\n\n ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``\n where S**2 contains the explained variances, and sigma2 contains the\n noise variances.\n\n Returns\n -------\n cov : array of shape=(n_features, n_features)\n Estimated covariance of data.\n \"\"\"\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * np.sqrt(exp_var[:, np.newaxis])\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)\n cov = np.dot(components_.T * exp_var_diff, components_)\n cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace\n return cov\n\n def get_precision(self):\n \"\"\"Compute data precision matrix with the generative model.\n\n Equals the inverse of the covariance but computed with\n the matrix inversion lemma for efficiency.\n\n Returns\n -------\n precision : array, shape=(n_features, n_features)\n Estimated precision of data.\n \"\"\"\n n_features = self.components_.shape[1]\n\n # handle corner cases first\n if self.n_components_ == 0:\n return np.eye(n_features) / self.noise_variance_\n if self.n_components_ == n_features:\n return linalg.inv(self.get_covariance())\n\n # Get precision using matrix inversion lemma\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * np.sqrt(exp_var[:, np.newaxis])\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)\n precision = np.dot(components_, components_.T) / self.noise_variance_\n precision.flat[:: len(precision) + 1] += 1.0 / exp_var_diff\n precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))\n precision /= -(self.noise_variance_ ** 2)\n precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_\n return precision\n\n @abstractmethod\n def fit(self, X, y=None):\n \"\"\"Placeholder for fit. 
Subclasses should implement this method!\n\n Fit the model with X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n\n def transform(self, X):\n \"\"\"Apply dimensionality reduction to X.\n\n X is projected on the first principal components previously extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n New data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : array-like of shape (n_samples, n_components)\n Projection of X in the first principal components, where `n_samples`\n is the number of samples and `n_components` is the number of the components.\n \"\"\"\n check_is_fitted(self)\n\n X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)\n if self.mean_ is not None:\n X = X - self.mean_\n X_transformed = np.dot(X, self.components_.T)\n if self.whiten:\n X_transformed /= np.sqrt(self.explained_variance_)\n return X_transformed\n\n def inverse_transform(self, X):\n \"\"\"Transform data back to its original space.\n\n In other words, return an input `X_original` whose transform would be X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_components)\n New data, where `n_samples` is the number of samples\n and `n_components` is the number of components.\n\n Returns\n -------\n X_original array-like of shape (n_samples, n_features)\n Original data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Notes\n -----\n If whitening is enabled, inverse_transform will compute the\n exact inverse operation, which includes reversing whitening.\n \"\"\"\n if self.whiten:\n return (\n np.dot(\n X,\n np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_,\n )\n + self.mean_\n )\n else:\n return np.dot(X, self.components_) + self.mean_\n\n @property\n def _n_features_out(self):\n \"\"\"Number of transformed output features.\"\"\"\n return self.components_.shape[0]\n",
"\"\"\"\n============================\nClassifier Chain\n============================\nExample of using classifier chain on a multilabel dataset.\n\nFor this example we will use the `yeast\n<https://www.openml.org/d/40597>`_ dataset which contains\n2417 datapoints each with 103 features and 14 possible labels. Each\ndata point has at least one label. As a baseline we first train a logistic\nregression classifier for each of the 14 labels. To evaluate the performance of\nthese classifiers we predict on a held-out test set and calculate the\n:ref:`jaccard score <jaccard_similarity_score>` for each sample.\n\nNext we create 10 classifier chains. Each classifier chain contains a\nlogistic regression model for each of the 14 labels. The models in each\nchain are ordered randomly. In addition to the 103 features in the dataset,\neach model gets the predictions of the preceding models in the chain as\nfeatures (note that by default at training time each model gets the true\nlabels as features). These additional features allow each chain to exploit\ncorrelations among the classes. The Jaccard similarity score for each chain\ntends to be greater than that of the set independent logistic models.\n\nBecause the models in each chain are arranged randomly there is significant\nvariation in performance among the chains. Presumably there is an optimal\nordering of the classes in a chain that will yield the best performance.\nHowever we do not know that ordering a priori. Instead we can construct an\nvoting ensemble of classifier chains by averaging the binary predictions of\nthe chains and apply a threshold of 0.5. The Jaccard similarity score of the\nensemble is greater than that of the independent models and tends to exceed\nthe score of each chain in the ensemble (although this is not guaranteed\nwith randomly ordered chains).\n\n\"\"\"\n\n# Author: Adam Kleczewski\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.multioutput import ClassifierChain\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.metrics import jaccard_score\nfrom sklearn.linear_model import LogisticRegression\n\n# Load a multi-label dataset from https://www.openml.org/d/40597\nX, Y = fetch_openml(\"yeast\", version=4, return_X_y=True)\nY = Y == \"TRUE\"\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n# Fit an independent logistic regression model for each class using the\n# OneVsRestClassifier wrapper.\nbase_lr = LogisticRegression()\novr = OneVsRestClassifier(base_lr)\novr.fit(X_train, Y_train)\nY_pred_ovr = ovr.predict(X_test)\novr_jaccard_score = jaccard_score(Y_test, Y_pred_ovr, average=\"samples\")\n\n# Fit an ensemble of logistic regression classifier chains and take the\n# take the average prediction of all the chains.\nchains = [ClassifierChain(base_lr, order=\"random\", random_state=i) for i in range(10)]\nfor chain in chains:\n chain.fit(X_train, Y_train)\n\nY_pred_chains = np.array([chain.predict(X_test) for chain in chains])\nchain_jaccard_scores = [\n jaccard_score(Y_test, Y_pred_chain >= 0.5, average=\"samples\")\n for Y_pred_chain in Y_pred_chains\n]\n\nY_pred_ensemble = Y_pred_chains.mean(axis=0)\nensemble_jaccard_score = jaccard_score(\n Y_test, Y_pred_ensemble >= 0.5, average=\"samples\"\n)\n\nmodel_scores = [ovr_jaccard_score] + chain_jaccard_scores\nmodel_scores.append(ensemble_jaccard_score)\n\nmodel_names = (\n 
\"Independent\",\n \"Chain 1\",\n \"Chain 2\",\n \"Chain 3\",\n \"Chain 4\",\n \"Chain 5\",\n \"Chain 6\",\n \"Chain 7\",\n \"Chain 8\",\n \"Chain 9\",\n \"Chain 10\",\n \"Ensemble\",\n)\n\nx_pos = np.arange(len(model_names))\n\n# Plot the Jaccard similarity scores for the independent model, each of the\n# chains, and the ensemble (note that the vertical axis on this plot does\n# not begin at 0).\n\nfig, ax = plt.subplots(figsize=(7, 4))\nax.grid(True)\nax.set_title(\"Classifier Chain Ensemble Performance Comparison\")\nax.set_xticks(x_pos)\nax.set_xticklabels(model_names, rotation=\"vertical\")\nax.set_ylabel(\"Jaccard Similarity Score\")\nax.set_ylim([min(model_scores) * 0.9, max(model_scores) * 1.1])\ncolors = [\"r\"] + [\"b\"] * len(chain_jaccard_scores) + [\"g\"]\nax.bar(x_pos, model_scores, alpha=0.5, color=colors)\nplt.tight_layout()\nplt.show()\n",
"import numpy as np\nimport scipy.sparse as sp\nimport pytest\nfrom scipy.sparse import csr_matrix\n\nfrom sklearn import datasets\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.metrics.cluster import silhouette_score\nfrom sklearn.metrics.cluster import silhouette_samples\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.metrics.cluster import calinski_harabasz_score\nfrom sklearn.metrics.cluster import davies_bouldin_score\n\n\ndef test_silhouette():\n # Tests the Silhouette Coefficient.\n dataset = datasets.load_iris()\n X_dense = dataset.data\n X_csr = csr_matrix(X_dense)\n X_dok = sp.dok_matrix(X_dense)\n X_lil = sp.lil_matrix(X_dense)\n y = dataset.target\n\n for X in [X_dense, X_csr, X_dok, X_lil]:\n D = pairwise_distances(X, metric=\"euclidean\")\n # Given that the actual labels are used, we can assume that S would be\n # positive.\n score_precomputed = silhouette_score(D, y, metric=\"precomputed\")\n assert score_precomputed > 0\n # Test without calculating D\n score_euclidean = silhouette_score(X, y, metric=\"euclidean\")\n pytest.approx(score_precomputed, score_euclidean)\n\n if X is X_dense:\n score_dense_without_sampling = score_precomputed\n else:\n pytest.approx(score_euclidean, score_dense_without_sampling)\n\n # Test with sampling\n score_precomputed = silhouette_score(\n D, y, metric=\"precomputed\", sample_size=int(X.shape[0] / 2), random_state=0\n )\n score_euclidean = silhouette_score(\n X, y, metric=\"euclidean\", sample_size=int(X.shape[0] / 2), random_state=0\n )\n assert score_precomputed > 0\n assert score_euclidean > 0\n pytest.approx(score_euclidean, score_precomputed)\n\n if X is X_dense:\n score_dense_with_sampling = score_precomputed\n else:\n pytest.approx(score_euclidean, score_dense_with_sampling)\n\n\ndef test_cluster_size_1():\n # Assert Silhouette Coefficient == 0 when there is 1 sample in a cluster\n # (cluster 0). We also test the case where there are identical samples\n # as the only members of a cluster (cluster 2). 
To our knowledge, this case\n # is not discussed in reference material, and we choose for it a sample\n # score of 1.\n X = [[0.0], [1.0], [1.0], [2.0], [3.0], [3.0]]\n labels = np.array([0, 1, 1, 1, 2, 2])\n\n # Cluster 0: 1 sample -> score of 0 by Rousseeuw's convention\n # Cluster 1: intra-cluster = [.5, .5, 1]\n # inter-cluster = [1, 1, 1]\n # silhouette = [.5, .5, 0]\n # Cluster 2: intra-cluster = [0, 0]\n # inter-cluster = [arbitrary, arbitrary]\n # silhouette = [1., 1.]\n\n silhouette = silhouette_score(X, labels)\n assert not np.isnan(silhouette)\n ss = silhouette_samples(X, labels)\n assert_array_equal(ss, [0, 0.5, 0.5, 0, 1, 1])\n\n\ndef test_silhouette_paper_example():\n # Explicitly check per-sample results against Rousseeuw (1987)\n # Data from Table 1\n lower = [\n 5.58,\n 7.00,\n 6.50,\n 7.08,\n 7.00,\n 3.83,\n 4.83,\n 5.08,\n 8.17,\n 5.83,\n 2.17,\n 5.75,\n 6.67,\n 6.92,\n 4.92,\n 6.42,\n 5.00,\n 5.58,\n 6.00,\n 4.67,\n 6.42,\n 3.42,\n 5.50,\n 6.42,\n 6.42,\n 5.00,\n 3.92,\n 6.17,\n 2.50,\n 4.92,\n 6.25,\n 7.33,\n 4.50,\n 2.25,\n 6.33,\n 2.75,\n 6.08,\n 6.67,\n 4.25,\n 2.67,\n 6.00,\n 6.17,\n 6.17,\n 6.92,\n 6.17,\n 5.25,\n 6.83,\n 4.50,\n 3.75,\n 5.75,\n 5.42,\n 6.08,\n 5.83,\n 6.67,\n 3.67,\n 4.75,\n 3.00,\n 6.08,\n 6.67,\n 5.00,\n 5.58,\n 4.83,\n 6.17,\n 5.67,\n 6.50,\n 6.92,\n ]\n D = np.zeros((12, 12))\n D[np.tril_indices(12, -1)] = lower\n D += D.T\n\n names = [\n \"BEL\",\n \"BRA\",\n \"CHI\",\n \"CUB\",\n \"EGY\",\n \"FRA\",\n \"IND\",\n \"ISR\",\n \"USA\",\n \"USS\",\n \"YUG\",\n \"ZAI\",\n ]\n\n # Data from Figure 2\n labels1 = [1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 2, 1]\n expected1 = {\n \"USA\": 0.43,\n \"BEL\": 0.39,\n \"FRA\": 0.35,\n \"ISR\": 0.30,\n \"BRA\": 0.22,\n \"EGY\": 0.20,\n \"ZAI\": 0.19,\n \"CUB\": 0.40,\n \"USS\": 0.34,\n \"CHI\": 0.33,\n \"YUG\": 0.26,\n \"IND\": -0.04,\n }\n score1 = 0.28\n\n # Data from Figure 3\n labels2 = [1, 2, 3, 3, 1, 1, 2, 1, 1, 3, 3, 2]\n expected2 = {\n \"USA\": 0.47,\n \"FRA\": 0.44,\n \"BEL\": 0.42,\n \"ISR\": 0.37,\n \"EGY\": 0.02,\n \"ZAI\": 0.28,\n \"BRA\": 0.25,\n \"IND\": 0.17,\n \"CUB\": 0.48,\n \"USS\": 0.44,\n \"YUG\": 0.31,\n \"CHI\": 0.31,\n }\n score2 = 0.33\n\n for labels, expected, score in [\n (labels1, expected1, score1),\n (labels2, expected2, score2),\n ]:\n expected = [expected[name] for name in names]\n # we check to 2dp because that's what's in the paper\n pytest.approx(\n expected,\n silhouette_samples(D, np.array(labels), metric=\"precomputed\"),\n abs=1e-2,\n )\n pytest.approx(\n score, silhouette_score(D, np.array(labels), metric=\"precomputed\"), abs=1e-2\n )\n\n\ndef test_correct_labelsize():\n # Assert 1 < n_labels < n_samples\n dataset = datasets.load_iris()\n X = dataset.data\n\n # n_labels = n_samples\n y = np.arange(X.shape[0])\n err_msg = (\n r\"Number of labels is %d\\. Valid values are 2 \"\n r\"to n_samples - 1 \\(inclusive\\)\" % len(np.unique(y))\n )\n with pytest.raises(ValueError, match=err_msg):\n silhouette_score(X, y)\n\n # n_labels = 1\n y = np.zeros(X.shape[0])\n err_msg = (\n r\"Number of labels is %d\\. 
Valid values are 2 \"\n r\"to n_samples - 1 \\(inclusive\\)\" % len(np.unique(y))\n )\n with pytest.raises(ValueError, match=err_msg):\n silhouette_score(X, y)\n\n\ndef test_non_encoded_labels():\n dataset = datasets.load_iris()\n X = dataset.data\n labels = dataset.target\n assert silhouette_score(X, labels * 2 + 10) == silhouette_score(X, labels)\n assert_array_equal(\n silhouette_samples(X, labels * 2 + 10), silhouette_samples(X, labels)\n )\n\n\ndef test_non_numpy_labels():\n dataset = datasets.load_iris()\n X = dataset.data\n y = dataset.target\n assert silhouette_score(list(X), list(y)) == silhouette_score(X, y)\n\n\[email protected](\"dtype\", (np.float32, np.float64))\ndef test_silhouette_nonzero_diag(dtype):\n # Make sure silhouette_samples requires diagonal to be zero.\n # Non-regression test for #12178\n\n # Construct a zero-diagonal matrix\n dists = pairwise_distances(\n np.array([[0.2, 0.1, 0.12, 1.34, 1.11, 1.6]], dtype=dtype).T\n )\n labels = [0, 0, 0, 1, 1, 1]\n\n # small values on the diagonal are OK\n dists[2][2] = np.finfo(dists.dtype).eps * 10\n silhouette_samples(dists, labels, metric=\"precomputed\")\n\n # values bigger than eps * 100 are not\n dists[2][2] = np.finfo(dists.dtype).eps * 1000\n with pytest.raises(ValueError, match=\"contains non-zero\"):\n silhouette_samples(dists, labels, metric=\"precomputed\")\n\n\ndef assert_raises_on_only_one_label(func):\n \"\"\"Assert message when there is only one label\"\"\"\n rng = np.random.RandomState(seed=0)\n with pytest.raises(ValueError, match=\"Number of labels is\"):\n func(rng.rand(10, 2), np.zeros(10))\n\n\ndef assert_raises_on_all_points_same_cluster(func):\n \"\"\"Assert message when all point are in different clusters\"\"\"\n rng = np.random.RandomState(seed=0)\n with pytest.raises(ValueError, match=\"Number of labels is\"):\n func(rng.rand(10, 2), np.arange(10))\n\n\ndef test_calinski_harabasz_score():\n assert_raises_on_only_one_label(calinski_harabasz_score)\n\n assert_raises_on_all_points_same_cluster(calinski_harabasz_score)\n\n # Assert the value is 1. when all samples are equals\n assert 1.0 == calinski_harabasz_score(np.ones((10, 2)), [0] * 5 + [1] * 5)\n\n # Assert the value is 0. when all the mean cluster are equal\n assert 0.0 == calinski_harabasz_score([[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10)\n\n # General case (with non numpy arrays)\n X = (\n [[0, 0], [1, 1]] * 5\n + [[3, 3], [4, 4]] * 5\n + [[0, 4], [1, 3]] * 5\n + [[3, 1], [4, 0]] * 5\n )\n labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10\n pytest.approx(calinski_harabasz_score(X, labels), 45 * (40 - 4) / (5 * (4 - 1)))\n\n\ndef test_davies_bouldin_score():\n assert_raises_on_only_one_label(davies_bouldin_score)\n assert_raises_on_all_points_same_cluster(davies_bouldin_score)\n\n # Assert the value is 0. when all samples are equals\n assert davies_bouldin_score(np.ones((10, 2)), [0] * 5 + [1] * 5) == pytest.approx(\n 0.0\n )\n\n # Assert the value is 0. 
when all the mean cluster are equal\n assert davies_bouldin_score(\n [[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10\n ) == pytest.approx(0.0)\n\n # General case (with non numpy arrays)\n X = (\n [[0, 0], [1, 1]] * 5\n + [[3, 3], [4, 4]] * 5\n + [[0, 4], [1, 3]] * 5\n + [[3, 1], [4, 0]] * 5\n )\n labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10\n pytest.approx(davies_bouldin_score(X, labels), 2 * np.sqrt(0.5) / 3)\n\n # Ensure divide by zero warning is not raised in general case\n with pytest.warns(None) as record:\n davies_bouldin_score(X, labels)\n div_zero_warnings = [\n warning\n for warning in record\n if \"divide by zero encountered\" in warning.message.args[0]\n ]\n assert len(div_zero_warnings) == 0\n\n # General case - cluster have one sample\n X = [[0, 0], [2, 2], [3, 3], [5, 5]]\n labels = [0, 0, 1, 2]\n pytest.approx(davies_bouldin_score(X, labels), (5.0 / 4) / 3)\n",
"\"\"\"Spectral Embedding.\"\"\"\n\n# Author: Gael Varoquaux <[email protected]>\n# Wei LI <[email protected]>\n# License: BSD 3 clause\n\n\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.linalg import eigh\nfrom scipy.sparse.linalg import eigsh\nfrom scipy.sparse.csgraph import connected_components\nfrom scipy.sparse.csgraph import laplacian as csgraph_laplacian\n\nfrom ..base import BaseEstimator\nfrom ..utils import (\n check_array,\n check_random_state,\n check_symmetric,\n)\nfrom ..utils._arpack import _init_arpack_v0\nfrom ..utils.extmath import _deterministic_vector_sign_flip\nfrom ..utils.fixes import lobpcg\nfrom ..metrics.pairwise import rbf_kernel\nfrom ..neighbors import kneighbors_graph, NearestNeighbors\nfrom ..utils.deprecation import deprecated\n\n\ndef _graph_connected_component(graph, node_id):\n \"\"\"Find the largest graph connected components that contains one\n given node.\n\n Parameters\n ----------\n graph : array-like of shape (n_samples, n_samples)\n Adjacency matrix of the graph, non-zero weight means an edge\n between the nodes.\n\n node_id : int\n The index of the query node of the graph.\n\n Returns\n -------\n connected_components_matrix : array-like of shape (n_samples,)\n An array of bool value indicating the indexes of the nodes\n belonging to the largest connected components of the given query\n node.\n \"\"\"\n n_node = graph.shape[0]\n if sparse.issparse(graph):\n # speed up row-wise access to boolean connection mask\n graph = graph.tocsr()\n connected_nodes = np.zeros(n_node, dtype=bool)\n nodes_to_explore = np.zeros(n_node, dtype=bool)\n nodes_to_explore[node_id] = True\n for _ in range(n_node):\n last_num_component = connected_nodes.sum()\n np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)\n if last_num_component >= connected_nodes.sum():\n break\n indices = np.where(nodes_to_explore)[0]\n nodes_to_explore.fill(False)\n for i in indices:\n if sparse.issparse(graph):\n neighbors = graph[i].toarray().ravel()\n else:\n neighbors = graph[i]\n np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)\n return connected_nodes\n\n\ndef _graph_is_connected(graph):\n \"\"\"Return whether the graph is connected (True) or Not (False).\n\n Parameters\n ----------\n graph : {array-like, sparse matrix} of shape (n_samples, n_samples)\n Adjacency matrix of the graph, non-zero weight means an edge\n between the nodes.\n\n Returns\n -------\n is_connected : bool\n True means the graph is fully connected and False means not.\n \"\"\"\n if sparse.isspmatrix(graph):\n # sparse graph, find all the connected components\n n_connected_components, _ = connected_components(graph)\n return n_connected_components == 1\n else:\n # dense graph, find all connected components start from node 0\n return _graph_connected_component(graph, 0).sum() == graph.shape[0]\n\n\ndef _set_diag(laplacian, value, norm_laplacian):\n \"\"\"Set the diagonal of the laplacian matrix and convert it to a\n sparse format well suited for eigenvalue decomposition.\n\n Parameters\n ----------\n laplacian : {ndarray, sparse matrix}\n The graph laplacian.\n\n value : float\n The value of the diagonal.\n\n norm_laplacian : bool\n Whether the value of the diagonal should be changed or not.\n\n Returns\n -------\n laplacian : {array, sparse matrix}\n An array of matrix in a form that is well suited to fast\n eigenvalue decomposition, depending on the band width of the\n matrix.\n \"\"\"\n n_nodes = laplacian.shape[0]\n # We need all entries in the diagonal 
to values\n if not sparse.isspmatrix(laplacian):\n if norm_laplacian:\n laplacian.flat[:: n_nodes + 1] = value\n else:\n laplacian = laplacian.tocoo()\n if norm_laplacian:\n diag_idx = laplacian.row == laplacian.col\n laplacian.data[diag_idx] = value\n # If the matrix has a small number of diagonals (as in the\n # case of structured matrices coming from images), the\n # dia format might be best suited for matvec products:\n n_diags = np.unique(laplacian.row - laplacian.col).size\n if n_diags <= 7:\n # 3 or less outer diagonals on each side\n laplacian = laplacian.todia()\n else:\n # csr has the fastest matvec and is thus best suited to\n # arpack\n laplacian = laplacian.tocsr()\n return laplacian\n\n\ndef spectral_embedding(\n adjacency,\n *,\n n_components=8,\n eigen_solver=None,\n random_state=None,\n eigen_tol=0.0,\n norm_laplacian=True,\n drop_first=True,\n):\n \"\"\"Project the sample on the first eigenvectors of the graph Laplacian.\n\n The adjacency matrix is used to compute a normalized graph Laplacian\n whose spectrum (especially the eigenvectors associated to the\n smallest eigenvalues) has an interpretation in terms of minimal\n number of cuts necessary to split the graph into comparably sized\n components.\n\n This embedding can also 'work' even if the ``adjacency`` variable is\n not strictly the adjacency matrix of a graph but more generally\n an affinity or similarity matrix between samples (for instance the\n heat kernel of a euclidean distance matrix or a k-NN matrix).\n\n However care must taken to always make the affinity matrix symmetric\n so that the eigenvector decomposition works as expected.\n\n Note : Laplacian Eigenmaps is the actual algorithm implemented here.\n\n Read more in the :ref:`User Guide <spectral_embedding>`.\n\n Parameters\n ----------\n adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)\n The adjacency matrix of the graph to embed.\n\n n_components : int, default=8\n The dimension of the projection subspace.\n\n eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None\n The eigenvalue decomposition strategy to use. AMG requires pyamg\n to be installed. It can be faster on very large, sparse problems,\n but may also lead to instabilities. If None, then ``'arpack'`` is\n used.\n\n random_state : int, RandomState instance or None, default=None\n A pseudo random number generator used for the initialization\n of the lobpcg eigen vectors decomposition when `eigen_solver ==\n 'amg'`, and for the K-Means initialization. Use an int to make\n the results deterministic across calls (See\n :term:`Glossary <random_state>`).\n\n .. note::\n When using `eigen_solver == 'amg'`,\n it is necessary to also fix the global numpy seed with\n `np.random.seed(int)` to get deterministic results. See\n https://github.com/pyamg/pyamg/issues/139 for further\n information.\n\n eigen_tol : float, default=0.0\n Stopping criterion for eigendecomposition of the Laplacian matrix\n when using arpack eigen_solver.\n\n norm_laplacian : bool, default=True\n If True, then compute symmetric normalized Laplacian.\n\n drop_first : bool, default=True\n Whether to drop the first eigenvector. 
For spectral embedding, this\n should be True as the first eigenvector should be constant vector for\n connected graph, but for spectral clustering, this should be kept as\n False to retain the first eigenvector.\n\n Returns\n -------\n embedding : ndarray of shape (n_samples, n_components)\n The reduced samples.\n\n Notes\n -----\n Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph\n has one connected component. If there graph has many components, the first\n few eigenvectors will simply uncover the connected components of the graph.\n\n References\n ----------\n * https://en.wikipedia.org/wiki/LOBPCG\n\n * Toward the Optimal Preconditioned Eigensolver: Locally Optimal\n Block Preconditioned Conjugate Gradient Method\n Andrew V. Knyazev\n https://doi.org/10.1137%2FS1064827500366124\n \"\"\"\n adjacency = check_symmetric(adjacency)\n\n try:\n from pyamg import smoothed_aggregation_solver\n except ImportError as e:\n if eigen_solver == \"amg\":\n raise ValueError(\n \"The eigen_solver was set to 'amg', but pyamg is not available.\"\n ) from e\n\n if eigen_solver is None:\n eigen_solver = \"arpack\"\n elif eigen_solver not in (\"arpack\", \"lobpcg\", \"amg\"):\n raise ValueError(\n \"Unknown value for eigen_solver: '%s'.\"\n \"Should be 'amg', 'arpack', or 'lobpcg'\" % eigen_solver\n )\n\n random_state = check_random_state(random_state)\n\n n_nodes = adjacency.shape[0]\n # Whether to drop the first eigenvector\n if drop_first:\n n_components = n_components + 1\n\n if not _graph_is_connected(adjacency):\n warnings.warn(\n \"Graph is not fully connected, spectral embedding may not work as expected.\"\n )\n\n laplacian, dd = csgraph_laplacian(\n adjacency, normed=norm_laplacian, return_diag=True\n )\n if (\n eigen_solver == \"arpack\"\n or eigen_solver != \"lobpcg\"\n and (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)\n ):\n # lobpcg used with eigen_solver='amg' has bugs for low number of nodes\n # for details see the source code in scipy:\n # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen\n # /lobpcg/lobpcg.py#L237\n # or matlab:\n # https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m\n laplacian = _set_diag(laplacian, 1, norm_laplacian)\n\n # Here we'll use shift-invert mode for fast eigenvalues\n # (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html\n # for a short explanation of what this means)\n # Because the normalized Laplacian has eigenvalues between 0 and 2,\n # I - L has eigenvalues between -1 and 1. ARPACK is most efficient\n # when finding eigenvalues of largest magnitude (keyword which='LM')\n # and when these eigenvalues are very large compared to the rest.\n # For very large, very sparse graphs, I - L can have many, many\n # eigenvalues very near 1.0. This leads to slow convergence. So\n # instead, we'll use ARPACK's shift-invert mode, asking for the\n # eigenvalues near 1.0. 
This effectively spreads-out the spectrum\n # near 1.0 and leads to much faster convergence: potentially an\n # orders-of-magnitude speedup over simply using keyword which='LA'\n # in standard mode.\n try:\n # We are computing the opposite of the laplacian inplace so as\n # to spare a memory allocation of a possibly very large array\n laplacian *= -1\n v0 = _init_arpack_v0(laplacian.shape[0], random_state)\n _, diffusion_map = eigsh(\n laplacian, k=n_components, sigma=1.0, which=\"LM\", tol=eigen_tol, v0=v0\n )\n embedding = diffusion_map.T[n_components::-1]\n if norm_laplacian:\n # recover u = D^-1/2 x from the eigenvector output x\n embedding = embedding / dd\n except RuntimeError:\n # When submatrices are exactly singular, an LU decomposition\n # in arpack fails. We fallback to lobpcg\n eigen_solver = \"lobpcg\"\n # Revert the laplacian to its opposite to have lobpcg work\n laplacian *= -1\n\n elif eigen_solver == \"amg\":\n # Use AMG to get a preconditioner and speed up the eigenvalue\n # problem.\n if not sparse.issparse(laplacian):\n warnings.warn(\"AMG works better for sparse matrices\")\n laplacian = check_array(\n laplacian, dtype=[np.float64, np.float32], accept_sparse=True\n )\n laplacian = _set_diag(laplacian, 1, norm_laplacian)\n\n # The Laplacian matrix is always singular, having at least one zero\n # eigenvalue, corresponding to the trivial eigenvector, which is a\n # constant. Using a singular matrix for preconditioning may result in\n # random failures in LOBPCG and is not supported by the existing\n # theory:\n # see https://doi.org/10.1007/s10208-015-9297-1\n # Shift the Laplacian so its diagononal is not all ones. The shift\n # does change the eigenpairs however, so we'll feed the shifted\n # matrix to the solver and afterward set it back to the original.\n diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])\n laplacian += diag_shift\n ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse=\"csr\"))\n laplacian -= diag_shift\n\n M = ml.aspreconditioner()\n # Create initial approximation X to eigenvectors\n X = random_state.rand(laplacian.shape[0], n_components + 1)\n X[:, 0] = dd.ravel()\n X = X.astype(laplacian.dtype)\n _, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.0e-5, largest=False)\n embedding = diffusion_map.T\n if norm_laplacian:\n # recover u = D^-1/2 x from the eigenvector output x\n embedding = embedding / dd\n if embedding.shape[0] == 1:\n raise ValueError\n\n if eigen_solver == \"lobpcg\":\n laplacian = check_array(\n laplacian, dtype=[np.float64, np.float32], accept_sparse=True\n )\n if n_nodes < 5 * n_components + 1:\n # see note above under arpack why lobpcg has problems with small\n # number of nodes\n # lobpcg will fallback to eigh, so we short circuit it\n if sparse.isspmatrix(laplacian):\n laplacian = laplacian.toarray()\n _, diffusion_map = eigh(laplacian, check_finite=False)\n embedding = diffusion_map.T[:n_components]\n if norm_laplacian:\n # recover u = D^-1/2 x from the eigenvector output x\n embedding = embedding / dd\n else:\n laplacian = _set_diag(laplacian, 1, norm_laplacian)\n # We increase the number of eigenvectors requested, as lobpcg\n # doesn't behave well in low dimension and create initial\n # approximation X to eigenvectors\n X = random_state.rand(laplacian.shape[0], n_components + 1)\n X[:, 0] = dd.ravel()\n X = X.astype(laplacian.dtype)\n _, diffusion_map = lobpcg(\n laplacian, X, tol=1e-5, largest=False, maxiter=2000\n )\n embedding = diffusion_map.T[:n_components]\n if norm_laplacian:\n # recover u = D^-1/2 x 
from the eigenvector output x\n embedding = embedding / dd\n if embedding.shape[0] == 1:\n raise ValueError\n\n embedding = _deterministic_vector_sign_flip(embedding)\n if drop_first:\n return embedding[1:n_components].T\n else:\n return embedding[:n_components].T\n\n\nclass SpectralEmbedding(BaseEstimator):\n \"\"\"Spectral embedding for non-linear dimensionality reduction.\n\n Forms an affinity matrix given by the specified function and\n applies spectral decomposition to the corresponding graph laplacian.\n The resulting transformation is given by the value of the\n eigenvectors for each data point.\n\n Note : Laplacian Eigenmaps is the actual algorithm implemented here.\n\n Read more in the :ref:`User Guide <spectral_embedding>`.\n\n Parameters\n ----------\n n_components : int, default=2\n The dimension of the projected subspace.\n\n affinity : {'nearest_neighbors', 'rbf', 'precomputed', \\\n 'precomputed_nearest_neighbors'} or callable, \\\n default='nearest_neighbors'\n How to construct the affinity matrix.\n - 'nearest_neighbors' : construct the affinity matrix by computing a\n graph of nearest neighbors.\n - 'rbf' : construct the affinity matrix by computing a radial basis\n function (RBF) kernel.\n - 'precomputed' : interpret ``X`` as a precomputed affinity matrix.\n - 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph\n of precomputed nearest neighbors, and constructs the affinity matrix\n by selecting the ``n_neighbors`` nearest neighbors.\n - callable : use passed in function as affinity\n the function takes in data matrix (n_samples, n_features)\n and return affinity matrix (n_samples, n_samples).\n\n gamma : float, default=None\n Kernel coefficient for rbf kernel. If None, gamma will be set to\n 1/n_features.\n\n random_state : int, RandomState instance or None, default=None\n A pseudo random number generator used for the initialization\n of the lobpcg eigen vectors decomposition when `eigen_solver ==\n 'amg'`, and for the K-Means initialization. Use an int to make\n the results deterministic across calls (See\n :term:`Glossary <random_state>`).\n\n .. note::\n When using `eigen_solver == 'amg'`,\n it is necessary to also fix the global numpy seed with\n `np.random.seed(int)` to get deterministic results. See\n https://github.com/pyamg/pyamg/issues/139 for further\n information.\n\n eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None\n The eigenvalue decomposition strategy to use. AMG requires pyamg\n to be installed. It can be faster on very large, sparse problems.\n If None, then ``'arpack'`` is used.\n\n n_neighbors : int, default=None\n Number of nearest neighbors for nearest_neighbors graph building.\n If None, n_neighbors will be set to max(n_samples/10, 1).\n\n n_jobs : int, default=None\n The number of parallel jobs to run.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n Attributes\n ----------\n embedding_ : ndarray of shape (n_samples, n_components)\n Spectral embedding of the training matrix.\n\n affinity_matrix_ : ndarray of shape (n_samples, n_samples)\n Affinity_matrix constructed from samples or precomputed.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. 
versionadded:: 1.0\n\n n_neighbors_ : int\n Number of nearest neighbors effectively used.\n\n See Also\n --------\n Isomap : Non-linear dimensionality reduction through Isometric Mapping.\n\n References\n ----------\n\n - A Tutorial on Spectral Clustering, 2007\n Ulrike von Luxburg\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323\n\n - On Spectral Clustering: Analysis and an algorithm, 2001\n Andrew Y. Ng, Michael I. Jordan, Yair Weiss\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100\n\n - Normalized cuts and image segmentation, 2000\n Jianbo Shi, Jitendra Malik\n http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324\n\n Examples\n --------\n >>> from sklearn.datasets import load_digits\n >>> from sklearn.manifold import SpectralEmbedding\n >>> X, _ = load_digits(return_X_y=True)\n >>> X.shape\n (1797, 64)\n >>> embedding = SpectralEmbedding(n_components=2)\n >>> X_transformed = embedding.fit_transform(X[:100])\n >>> X_transformed.shape\n (100, 2)\n \"\"\"\n\n def __init__(\n self,\n n_components=2,\n *,\n affinity=\"nearest_neighbors\",\n gamma=None,\n random_state=None,\n eigen_solver=None,\n n_neighbors=None,\n n_jobs=None,\n ):\n self.n_components = n_components\n self.affinity = affinity\n self.gamma = gamma\n self.random_state = random_state\n self.eigen_solver = eigen_solver\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n def _more_tags(self):\n return {\n \"pairwise\": self.affinity\n in [\"precomputed\", \"precomputed_nearest_neighbors\"]\n }\n\n # TODO: Remove in 1.1\n # mypy error: Decorated property not supported\n @deprecated( # type: ignore\n \"Attribute `_pairwise` was deprecated in \"\n \"version 0.24 and will be removed in 1.1 (renaming of 0.26).\"\n )\n @property\n def _pairwise(self):\n return self.affinity in [\"precomputed\", \"precomputed_nearest_neighbors\"]\n\n def _get_affinity_matrix(self, X, Y=None):\n \"\"\"Calculate the affinity matrix from data\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n If affinity is \"precomputed\"\n X : array-like of shape (n_samples, n_samples),\n Interpret X as precomputed adjacency graph computed from\n samples.\n\n Y: Ignored\n\n Returns\n -------\n affinity_matrix of shape (n_samples, n_samples)\n \"\"\"\n if self.affinity == \"precomputed\":\n self.affinity_matrix_ = X\n return self.affinity_matrix_\n if self.affinity == \"precomputed_nearest_neighbors\":\n estimator = NearestNeighbors(\n n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric=\"precomputed\"\n ).fit(X)\n connectivity = estimator.kneighbors_graph(X=X, mode=\"connectivity\")\n self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)\n return self.affinity_matrix_\n if self.affinity == \"nearest_neighbors\":\n if sparse.issparse(X):\n warnings.warn(\n \"Nearest neighbors affinity currently does \"\n \"not support sparse input, falling back to \"\n \"rbf affinity\"\n )\n self.affinity = \"rbf\"\n else:\n self.n_neighbors_ = (\n self.n_neighbors\n if self.n_neighbors is not None\n else max(int(X.shape[0] / 10), 1)\n )\n self.affinity_matrix_ = kneighbors_graph(\n X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs\n )\n # currently only symmetric affinity_matrix supported\n self.affinity_matrix_ = 0.5 * (\n self.affinity_matrix_ + self.affinity_matrix_.T\n )\n return self.affinity_matrix_\n if self.affinity == \"rbf\":\n self.gamma_ = self.gamma if self.gamma 
is not None else 1.0 / X.shape[1]\n self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)\n return self.affinity_matrix_\n self.affinity_matrix_ = self.affinity(X)\n return self.affinity_matrix_\n\n def fit(self, X, y=None):\n \"\"\"Fit the model from data in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n If affinity is \"precomputed\"\n X : {array-like, sparse matrix}, shape (n_samples, n_samples),\n Interpret X as precomputed adjacency graph computed from\n samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n\n X = self._validate_data(X, accept_sparse=\"csr\", ensure_min_samples=2)\n\n random_state = check_random_state(self.random_state)\n if isinstance(self.affinity, str):\n if self.affinity not in {\n \"nearest_neighbors\",\n \"rbf\",\n \"precomputed\",\n \"precomputed_nearest_neighbors\",\n }:\n raise ValueError(\n \"%s is not a valid affinity. Expected \"\n \"'precomputed', 'rbf', 'nearest_neighbors' \"\n \"or a callable.\"\n % self.affinity\n )\n elif not callable(self.affinity):\n raise ValueError(\n \"'affinity' is expected to be an affinity name or a callable. Got: %s\"\n % self.affinity\n )\n\n affinity_matrix = self._get_affinity_matrix(X)\n self.embedding_ = spectral_embedding(\n affinity_matrix,\n n_components=self.n_components,\n eigen_solver=self.eigen_solver,\n random_state=random_state,\n )\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model from data in X and transform X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n If affinity is \"precomputed\"\n X : {array-like, sparse matrix} of shape (n_samples, n_samples),\n Interpret X as precomputed adjacency graph computed from\n samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n X_new : array-like of shape (n_samples, n_components)\n Spectral embedding of the training matrix.\n \"\"\"\n self.fit(X)\n return self.embedding_\n",
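The row above embeds scikit-learn's spectral embedding module (Laplacian Eigenmaps). As a minimal, illustrative usage sketch of the public API that module defines — assuming a standard scikit-learn installation; the sample size and kernel choice below are arbitrary and not taken from the row itself:

import numpy as np
from sklearn.datasets import load_digits
from sklearn.manifold import SpectralEmbedding
from sklearn.metrics.pairwise import rbf_kernel

X, _ = load_digits(return_X_y=True)
X = X[:200]  # keep the example small

# Default affinity: a symmetrized k-nearest-neighbors graph.
emb_knn = SpectralEmbedding(n_components=2, random_state=0).fit_transform(X)

# Same estimator driven with a precomputed affinity matrix
# (an RBF kernel, using the module's documented default gamma = 1 / n_features).
affinity = rbf_kernel(X, gamma=1.0 / X.shape[1])
emb_rbf = SpectralEmbedding(
    n_components=2, affinity="precomputed", random_state=0
).fit_transform(affinity)

print(emb_knn.shape, emb_rbf.shape)  # (200, 2) (200, 2)

Both calls reduce the 64-dimensional digit images to two coordinates; the second path makes explicit the affinity matrix that the first path builds internally.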
"import numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose, assert_array_equal\nfrom sklearn.datasets import make_classification, make_regression\nfrom sklearn.datasets import make_low_rank_matrix\nfrom sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler, OneHotEncoder\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.base import clone, BaseEstimator, TransformerMixin\nfrom sklearn.base import is_regressor\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.metrics import mean_poisson_deviance\nfrom sklearn.dummy import DummyRegressor\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.compose import make_column_transformer\n\nfrom sklearn.ensemble import HistGradientBoostingRegressor\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.ensemble._hist_gradient_boosting.loss import _LOSSES\nfrom sklearn.ensemble._hist_gradient_boosting.loss import LeastSquares\nfrom sklearn.ensemble._hist_gradient_boosting.loss import BinaryCrossEntropy\nfrom sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower\nfrom sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper\nfrom sklearn.utils import shuffle\nfrom sklearn.utils._openmp_helpers import _openmp_effective_n_threads\n\nn_threads = _openmp_effective_n_threads()\n\n\nX_classification, y_classification = make_classification(random_state=0)\nX_regression, y_regression = make_regression(random_state=0)\nX_multi_classification, y_multi_classification = make_classification(\n n_classes=3, n_informative=3, random_state=0\n)\n\n\ndef _make_dumb_dataset(n_samples):\n \"\"\"Make a dumb dataset to test early stopping.\"\"\"\n rng = np.random.RandomState(42)\n X_dumb = rng.randn(n_samples, 1)\n y_dumb = (X_dumb[:, 0] > 0).astype(\"int64\")\n return X_dumb, y_dumb\n\n\[email protected](\n \"GradientBoosting, X, y\",\n [\n (HistGradientBoostingClassifier, X_classification, y_classification),\n (HistGradientBoostingRegressor, X_regression, y_regression),\n ],\n)\[email protected](\n \"params, err_msg\",\n [\n ({\"loss\": \"blah\"}, \"Loss blah is not supported for\"),\n ({\"learning_rate\": 0}, \"learning_rate=0 must be strictly positive\"),\n ({\"learning_rate\": -1}, \"learning_rate=-1 must be strictly positive\"),\n ({\"max_iter\": 0}, \"max_iter=0 must not be smaller than 1\"),\n ({\"max_leaf_nodes\": 0}, \"max_leaf_nodes=0 should not be smaller than 2\"),\n ({\"max_leaf_nodes\": 1}, \"max_leaf_nodes=1 should not be smaller than 2\"),\n ({\"max_depth\": 0}, \"max_depth=0 should not be smaller than 1\"),\n ({\"min_samples_leaf\": 0}, \"min_samples_leaf=0 should not be smaller\"),\n ({\"l2_regularization\": -1}, \"l2_regularization=-1 must be positive\"),\n ({\"max_bins\": 1}, \"max_bins=1 should be no smaller than 2 and no larger\"),\n ({\"max_bins\": 256}, \"max_bins=256 should be no smaller than 2 and no\"),\n ({\"n_iter_no_change\": -1}, \"n_iter_no_change=-1 must be positive\"),\n ({\"validation_fraction\": -1}, \"validation_fraction=-1 must be strictly\"),\n ({\"validation_fraction\": 0}, \"validation_fraction=0 must be strictly\"),\n ({\"tol\": -1}, \"tol=-1 must not be smaller than 0\"),\n ],\n)\ndef test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):\n\n with pytest.raises(ValueError, match=err_msg):\n GradientBoosting(**params).fit(X, y)\n\n\ndef test_invalid_classification_loss():\n binary_clf = HistGradientBoostingClassifier(loss=\"binary_crossentropy\")\n err_msg = (\n \"loss='binary_crossentropy' is not 
defined for multiclass \"\n \"classification with n_classes=3, use \"\n \"loss='categorical_crossentropy' instead\"\n )\n with pytest.raises(ValueError, match=err_msg):\n binary_clf.fit(np.zeros(shape=(3, 2)), np.arange(3))\n\n\[email protected](\n \"scoring, validation_fraction, early_stopping, n_iter_no_change, tol\",\n [\n (\"neg_mean_squared_error\", 0.1, True, 5, 1e-7), # use scorer\n (\"neg_mean_squared_error\", None, True, 5, 1e-1), # use scorer on train\n (None, 0.1, True, 5, 1e-7), # same with default scorer\n (None, None, True, 5, 1e-1),\n (\"loss\", 0.1, True, 5, 1e-7), # use loss\n (\"loss\", None, True, 5, 1e-1), # use loss on training data\n (None, None, False, 5, 0.0), # no early stopping\n ],\n)\ndef test_early_stopping_regression(\n scoring, validation_fraction, early_stopping, n_iter_no_change, tol\n):\n\n max_iter = 200\n\n X, y = make_regression(n_samples=50, random_state=0)\n\n gb = HistGradientBoostingRegressor(\n verbose=1, # just for coverage\n min_samples_leaf=5, # easier to overfit fast\n scoring=scoring,\n tol=tol,\n early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n max_iter=max_iter,\n n_iter_no_change=n_iter_no_change,\n random_state=0,\n )\n gb.fit(X, y)\n\n if early_stopping:\n assert n_iter_no_change <= gb.n_iter_ < max_iter\n else:\n assert gb.n_iter_ == max_iter\n\n\[email protected](\n \"data\",\n (\n make_classification(n_samples=30, random_state=0),\n make_classification(\n n_samples=30, n_classes=3, n_clusters_per_class=1, random_state=0\n ),\n ),\n)\[email protected](\n \"scoring, validation_fraction, early_stopping, n_iter_no_change, tol\",\n [\n (\"accuracy\", 0.1, True, 5, 1e-7), # use scorer\n (\"accuracy\", None, True, 5, 1e-1), # use scorer on training data\n (None, 0.1, True, 5, 1e-7), # same with default scorer\n (None, None, True, 5, 1e-1),\n (\"loss\", 0.1, True, 5, 1e-7), # use loss\n (\"loss\", None, True, 5, 1e-1), # use loss on training data\n (None, None, False, 5, 0.0), # no early stopping\n ],\n)\ndef test_early_stopping_classification(\n data, scoring, validation_fraction, early_stopping, n_iter_no_change, tol\n):\n\n max_iter = 50\n\n X, y = data\n\n gb = HistGradientBoostingClassifier(\n verbose=1, # just for coverage\n min_samples_leaf=5, # easier to overfit fast\n scoring=scoring,\n tol=tol,\n early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n max_iter=max_iter,\n n_iter_no_change=n_iter_no_change,\n random_state=0,\n )\n gb.fit(X, y)\n\n if early_stopping is True:\n assert n_iter_no_change <= gb.n_iter_ < max_iter\n else:\n assert gb.n_iter_ == max_iter\n\n\[email protected](\n \"GradientBoosting, X, y\",\n [\n (HistGradientBoostingClassifier, *_make_dumb_dataset(10000)),\n (HistGradientBoostingClassifier, *_make_dumb_dataset(10001)),\n (HistGradientBoostingRegressor, *_make_dumb_dataset(10000)),\n (HistGradientBoostingRegressor, *_make_dumb_dataset(10001)),\n ],\n)\ndef test_early_stopping_default(GradientBoosting, X, y):\n # Test that early stopping is enabled by default if and only if there\n # are more than 10000 samples\n gb = GradientBoosting(max_iter=10, n_iter_no_change=2, tol=1e-1)\n gb.fit(X, y)\n if X.shape[0] > 10000:\n assert gb.n_iter_ < gb.max_iter\n else:\n assert gb.n_iter_ == gb.max_iter\n\n\[email protected](\n \"scores, n_iter_no_change, tol, stopping\",\n [\n ([], 1, 0.001, False), # not enough iterations\n ([1, 1, 1], 5, 0.001, False), # not enough iterations\n ([1, 1, 1, 1, 1], 5, 0.001, False), # not enough iterations\n ([1, 2, 3, 4, 5, 6], 5, 0.001, 
False), # significant improvement\n ([1, 2, 3, 4, 5, 6], 5, 0.0, False), # significant improvement\n ([1, 2, 3, 4, 5, 6], 5, 0.999, False), # significant improvement\n ([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False), # significant improvement\n ([1] * 6, 5, 0.0, True), # no significant improvement\n ([1] * 6, 5, 0.001, True), # no significant improvement\n ([1] * 6, 5, 5, True), # no significant improvement\n ],\n)\ndef test_should_stop(scores, n_iter_no_change, tol, stopping):\n\n gbdt = HistGradientBoostingClassifier(n_iter_no_change=n_iter_no_change, tol=tol)\n assert gbdt._should_stop(scores) == stopping\n\n\ndef test_absolute_error():\n # For coverage only.\n X, y = make_regression(n_samples=500, random_state=0)\n gbdt = HistGradientBoostingRegressor(loss=\"absolute_error\", random_state=0)\n gbdt.fit(X, y)\n assert gbdt.score(X, y) > 0.9\n\n\ndef test_absolute_error_sample_weight():\n # non regression test for issue #19400\n # make sure no error is thrown during fit of\n # HistGradientBoostingRegressor with absolute_error loss function\n # and passing sample_weight\n rng = np.random.RandomState(0)\n n_samples = 100\n X = rng.uniform(-1, 1, size=(n_samples, 2))\n y = rng.uniform(-1, 1, size=n_samples)\n sample_weight = rng.uniform(0, 1, size=n_samples)\n gbdt = HistGradientBoostingRegressor(loss=\"absolute_error\")\n gbdt.fit(X, y, sample_weight=sample_weight)\n\n\[email protected](\"y\", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])\ndef test_poisson_y_positive(y):\n # Test that ValueError is raised if either one y_i < 0 or sum(y_i) <= 0.\n err_msg = r\"loss='poisson' requires non-negative y and sum\\(y\\) > 0.\"\n gbdt = HistGradientBoostingRegressor(loss=\"poisson\", random_state=0)\n with pytest.raises(ValueError, match=err_msg):\n gbdt.fit(np.zeros(shape=(len(y), 1)), y)\n\n\ndef test_poisson():\n # For Poisson distributed target, Poisson loss should give better results\n # than least squares measured in Poisson deviance as metric.\n rng = np.random.RandomState(42)\n n_train, n_test, n_features = 500, 100, 100\n X = make_low_rank_matrix(\n n_samples=n_train + n_test, n_features=n_features, random_state=rng\n )\n # We create a log-linear Poisson model and downscale coef as it will get\n # exponentiated.\n coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)\n y = rng.poisson(lam=np.exp(X @ coef))\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=n_test, random_state=rng\n )\n gbdt_pois = HistGradientBoostingRegressor(loss=\"poisson\", random_state=rng)\n gbdt_ls = HistGradientBoostingRegressor(loss=\"squared_error\", random_state=rng)\n gbdt_pois.fit(X_train, y_train)\n gbdt_ls.fit(X_train, y_train)\n dummy = DummyRegressor(strategy=\"mean\").fit(X_train, y_train)\n\n for X, y in [(X_train, y_train), (X_test, y_test)]:\n metric_pois = mean_poisson_deviance(y, gbdt_pois.predict(X))\n # squared_error might produce non-positive predictions => clip\n metric_ls = mean_poisson_deviance(y, np.clip(gbdt_ls.predict(X), 1e-15, None))\n metric_dummy = mean_poisson_deviance(y, dummy.predict(X))\n assert metric_pois < metric_ls\n assert metric_pois < metric_dummy\n\n\ndef test_binning_train_validation_are_separated():\n # Make sure training and validation data are binned separately.\n # See issue 13926\n\n rng = np.random.RandomState(0)\n validation_fraction = 0.2\n gb = HistGradientBoostingClassifier(\n early_stopping=True, validation_fraction=validation_fraction, random_state=rng\n )\n gb.fit(X_classification, y_classification)\n mapper_training_data = 
gb._bin_mapper\n\n # Note that since the data is small there is no subsampling and the\n # random_state doesn't matter\n mapper_whole_data = _BinMapper(random_state=0)\n mapper_whole_data.fit(X_classification)\n\n n_samples = X_classification.shape[0]\n assert np.all(\n mapper_training_data.n_bins_non_missing_\n == int((1 - validation_fraction) * n_samples)\n )\n assert np.all(\n mapper_training_data.n_bins_non_missing_\n != mapper_whole_data.n_bins_non_missing_\n )\n\n\ndef test_missing_values_trivial():\n # sanity check for missing values support. With only one feature and\n # y == isnan(X), the gbdt is supposed to reach perfect accuracy on the\n # training set.\n\n n_samples = 100\n n_features = 1\n rng = np.random.RandomState(0)\n\n X = rng.normal(size=(n_samples, n_features))\n mask = rng.binomial(1, 0.5, size=X.shape).astype(bool)\n X[mask] = np.nan\n y = mask.ravel()\n gb = HistGradientBoostingClassifier()\n gb.fit(X, y)\n\n assert gb.score(X, y) == pytest.approx(1)\n\n\[email protected](\"problem\", (\"classification\", \"regression\"))\[email protected](\n \"missing_proportion, expected_min_score_classification, \"\n \"expected_min_score_regression\",\n [(0.1, 0.97, 0.89), (0.2, 0.93, 0.81), (0.5, 0.79, 0.52)],\n)\ndef test_missing_values_resilience(\n problem,\n missing_proportion,\n expected_min_score_classification,\n expected_min_score_regression,\n):\n # Make sure the estimators can deal with missing values and still yield\n # decent predictions\n\n rng = np.random.RandomState(0)\n n_samples = 1000\n n_features = 2\n if problem == \"regression\":\n X, y = make_regression(\n n_samples=n_samples,\n n_features=n_features,\n n_informative=n_features,\n random_state=rng,\n )\n gb = HistGradientBoostingRegressor()\n expected_min_score = expected_min_score_regression\n else:\n X, y = make_classification(\n n_samples=n_samples,\n n_features=n_features,\n n_informative=n_features,\n n_redundant=0,\n n_repeated=0,\n random_state=rng,\n )\n gb = HistGradientBoostingClassifier()\n expected_min_score = expected_min_score_classification\n\n mask = rng.binomial(1, missing_proportion, size=X.shape).astype(bool)\n X[mask] = np.nan\n\n gb.fit(X, y)\n\n assert gb.score(X, y) > expected_min_score\n\n\[email protected](\n \"data\",\n [\n make_classification(random_state=0, n_classes=2),\n make_classification(random_state=0, n_classes=3, n_informative=3),\n ],\n ids=[\"binary_crossentropy\", \"categorical_crossentropy\"],\n)\ndef test_zero_division_hessians(data):\n # non regression test for issue #14018\n # make sure we avoid zero division errors when computing the leaves values.\n\n # If the learning rate is too high, the raw predictions are bad and will\n # saturate the softmax (or sigmoid in binary classif). 
This leads to\n # probabilities being exactly 0 or 1, gradients being constant, and\n # hessians being zero.\n X, y = data\n gb = HistGradientBoostingClassifier(learning_rate=100, max_iter=10)\n gb.fit(X, y)\n\n\ndef test_small_trainset():\n # Make sure that the small trainset is stratified and has the expected\n # length (10k samples)\n n_samples = 20000\n original_distrib = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}\n rng = np.random.RandomState(42)\n X = rng.randn(n_samples).reshape(n_samples, 1)\n y = [\n [class_] * int(prop * n_samples) for (class_, prop) in original_distrib.items()\n ]\n y = shuffle(np.concatenate(y))\n gb = HistGradientBoostingClassifier()\n\n # Compute the small training set\n X_small, y_small, _ = gb._get_small_trainset(\n X, y, seed=42, sample_weight_train=None\n )\n\n # Compute the class distribution in the small training set\n unique, counts = np.unique(y_small, return_counts=True)\n small_distrib = {class_: count / 10000 for (class_, count) in zip(unique, counts)}\n\n # Test that the small training set has the expected length\n assert X_small.shape[0] == 10000\n assert y_small.shape[0] == 10000\n\n # Test that the class distributions in the whole dataset and in the small\n # training set are identical\n assert small_distrib == pytest.approx(original_distrib)\n\n\ndef test_missing_values_minmax_imputation():\n # Compare the buit-in missing value handling of Histogram GBC with an\n # a-priori missing value imputation strategy that should yield the same\n # results in terms of decision function.\n #\n # Each feature (containing NaNs) is replaced by 2 features:\n # - one where the nans are replaced by min(feature) - 1\n # - one where the nans are replaced by max(feature) + 1\n # A split where nans go to the left has an equivalent split in the\n # first (min) feature, and a split where nans go to the right has an\n # equivalent split in the second (max) feature.\n #\n # Assuming the data is such that there is never a tie to select the best\n # feature to split on during training, the learned decision trees should be\n # strictly equivalent (learn a sequence of splits that encode the same\n # decision function).\n #\n # The MinMaxImputer transformer is meant to be a toy implementation of the\n # \"Missing In Attributes\" (MIA) missing value handling for decision trees\n # https://www.sciencedirect.com/science/article/abs/pii/S0167865508000305\n # The implementation of MIA as an imputation transformer was suggested by\n # \"Remark 3\" in https://arxiv.org/abs/1902.06931\n\n class MinMaxImputer(TransformerMixin, BaseEstimator):\n def fit(self, X, y=None):\n mm = MinMaxScaler().fit(X)\n self.data_min_ = mm.data_min_\n self.data_max_ = mm.data_max_\n return self\n\n def transform(self, X):\n X_min, X_max = X.copy(), X.copy()\n\n for feature_idx in range(X.shape[1]):\n nan_mask = np.isnan(X[:, feature_idx])\n X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1\n X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1\n\n return np.concatenate([X_min, X_max], axis=1)\n\n def make_missing_value_data(n_samples=int(1e4), seed=0):\n rng = np.random.RandomState(seed)\n X, y = make_regression(n_samples=n_samples, n_features=4, random_state=rng)\n\n # Pre-bin the data to ensure a deterministic handling by the 2\n # strategies and also make it easier to insert np.nan in a structured\n # way:\n X = KBinsDiscretizer(n_bins=42, encode=\"ordinal\").fit_transform(X)\n\n # First feature has missing values completely at random:\n rnd_mask = rng.rand(X.shape[0]) > 0.9\n 
X[rnd_mask, 0] = np.nan\n\n # Second and third features have missing values for extreme values\n # (censoring missingness):\n low_mask = X[:, 1] == 0\n X[low_mask, 1] = np.nan\n\n high_mask = X[:, 2] == X[:, 2].max()\n X[high_mask, 2] = np.nan\n\n # Make the last feature nan pattern very informative:\n y_max = np.percentile(y, 70)\n y_max_mask = y >= y_max\n y[y_max_mask] = y_max\n X[y_max_mask, 3] = np.nan\n\n # Check that there is at least one missing value in each feature:\n for feature_idx in range(X.shape[1]):\n assert any(np.isnan(X[:, feature_idx]))\n\n # Let's use a test set to check that the learned decision function is\n # the same as evaluated on unseen data. Otherwise it could just be the\n # case that we find two independent ways to overfit the training set.\n return train_test_split(X, y, random_state=rng)\n\n # n_samples need to be large enough to minimize the likelihood of having\n # several candidate splits with the same gain value in a given tree.\n X_train, X_test, y_train, y_test = make_missing_value_data(\n n_samples=int(1e4), seed=0\n )\n\n # Use a small number of leaf nodes and iterations so as to keep\n # under-fitting models to minimize the likelihood of ties when training the\n # model.\n gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0)\n gbm1.fit(X_train, y_train)\n\n gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))\n gbm2.fit(X_train, y_train)\n\n # Check that the model reach the same score:\n assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train))\n\n assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test))\n\n # Check the individual prediction match as a finer grained\n # decision function check.\n assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))\n assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))\n\n\ndef test_infinite_values():\n # Basic test for infinite values\n\n X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)\n y = np.array([0, 0, 1, 1])\n\n gbdt = HistGradientBoostingRegressor(min_samples_leaf=1)\n gbdt.fit(X, y)\n np.testing.assert_allclose(gbdt.predict(X), y, atol=1e-4)\n\n\ndef test_consistent_lengths():\n X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)\n y = np.array([0, 0, 1, 1])\n sample_weight = np.array([0.1, 0.3, 0.1])\n gbdt = HistGradientBoostingRegressor()\n with pytest.raises(ValueError, match=r\"sample_weight.shape == \\(3,\\), expected\"):\n gbdt.fit(X, y, sample_weight)\n\n with pytest.raises(\n ValueError, match=\"Found input variables with inconsistent number\"\n ):\n gbdt.fit(X, y[1:])\n\n\ndef test_infinite_values_missing_values():\n # High level test making sure that inf and nan values are properly handled\n # when both are present. This is similar to\n # test_split_on_nan_with_infinite_values() in test_grower.py, though we\n # cannot check the predictions for binned values here.\n\n X = np.asarray([-np.inf, 0, 1, np.inf, np.nan]).reshape(-1, 1)\n y_isnan = np.isnan(X.ravel())\n y_isinf = X.ravel() == np.inf\n\n stump_clf = HistGradientBoostingClassifier(\n min_samples_leaf=1, max_iter=1, learning_rate=1, max_depth=2\n )\n\n assert stump_clf.fit(X, y_isinf).score(X, y_isinf) == 1\n assert stump_clf.fit(X, y_isnan).score(X, y_isnan) == 1\n\n\ndef test_crossentropy_binary_problem():\n # categorical_crossentropy should only be used if there are more than two\n # classes present. 
PR #14869\n X = [[1], [0]]\n y = [0, 1]\n gbrt = HistGradientBoostingClassifier(loss=\"categorical_crossentropy\")\n with pytest.raises(\n ValueError, match=\"'categorical_crossentropy' is not suitable for\"\n ):\n gbrt.fit(X, y)\n\n\[email protected](\"scoring\", [None, \"loss\"])\ndef test_string_target_early_stopping(scoring):\n # Regression tests for #14709 where the targets need to be encoded before\n # to compute the score\n rng = np.random.RandomState(42)\n X = rng.randn(100, 10)\n y = np.array([\"x\"] * 50 + [\"y\"] * 50, dtype=object)\n gbrt = HistGradientBoostingClassifier(n_iter_no_change=10, scoring=scoring)\n gbrt.fit(X, y)\n\n\ndef test_zero_sample_weights_regression():\n # Make sure setting a SW to zero amounts to ignoring the corresponding\n # sample\n\n X = [[1, 0], [1, 0], [1, 0], [0, 1]]\n y = [0, 0, 1, 0]\n # ignore the first 2 training samples by setting their weight to 0\n sample_weight = [0, 0, 1, 1]\n gb = HistGradientBoostingRegressor(min_samples_leaf=1)\n gb.fit(X, y, sample_weight=sample_weight)\n assert gb.predict([[1, 0]])[0] > 0.5\n\n\ndef test_zero_sample_weights_classification():\n # Make sure setting a SW to zero amounts to ignoring the corresponding\n # sample\n\n X = [[1, 0], [1, 0], [1, 0], [0, 1]]\n y = [0, 0, 1, 0]\n # ignore the first 2 training samples by setting their weight to 0\n sample_weight = [0, 0, 1, 1]\n gb = HistGradientBoostingClassifier(loss=\"binary_crossentropy\", min_samples_leaf=1)\n gb.fit(X, y, sample_weight=sample_weight)\n assert_array_equal(gb.predict([[1, 0]]), [1])\n\n X = [[1, 0], [1, 0], [1, 0], [0, 1], [1, 1]]\n y = [0, 0, 1, 0, 2]\n # ignore the first 2 training samples by setting their weight to 0\n sample_weight = [0, 0, 1, 1, 1]\n gb = HistGradientBoostingClassifier(\n loss=\"categorical_crossentropy\", min_samples_leaf=1\n )\n gb.fit(X, y, sample_weight=sample_weight)\n assert_array_equal(gb.predict([[1, 0]]), [1])\n\n\[email protected](\n \"problem\", (\"regression\", \"binary_classification\", \"multiclass_classification\")\n)\[email protected](\"duplication\", (\"half\", \"all\"))\ndef test_sample_weight_effect(problem, duplication):\n # High level test to make sure that duplicating a sample is equivalent to\n # giving it weight of 2.\n\n # fails for n_samples > 255 because binning does not take sample weights\n # into account. 
Keeping n_samples <= 255 makes\n # sure only unique values are used so SW have no effect on binning.\n n_samples = 255\n n_features = 2\n if problem == \"regression\":\n X, y = make_regression(\n n_samples=n_samples,\n n_features=n_features,\n n_informative=n_features,\n random_state=0,\n )\n Klass = HistGradientBoostingRegressor\n else:\n n_classes = 2 if problem == \"binary_classification\" else 3\n X, y = make_classification(\n n_samples=n_samples,\n n_features=n_features,\n n_informative=n_features,\n n_redundant=0,\n n_clusters_per_class=1,\n n_classes=n_classes,\n random_state=0,\n )\n Klass = HistGradientBoostingClassifier\n\n # This test can't pass if min_samples_leaf > 1 because that would force 2\n # samples to be in the same node in est_sw, while these samples would be\n # free to be separate in est_dup: est_dup would just group together the\n # duplicated samples.\n est = Klass(min_samples_leaf=1)\n\n # Create dataset with duplicate and corresponding sample weights\n if duplication == \"half\":\n lim = n_samples // 2\n else:\n lim = n_samples\n X_dup = np.r_[X, X[:lim]]\n y_dup = np.r_[y, y[:lim]]\n sample_weight = np.ones(shape=(n_samples))\n sample_weight[:lim] = 2\n\n est_sw = clone(est).fit(X, y, sample_weight=sample_weight)\n est_dup = clone(est).fit(X_dup, y_dup)\n\n # checking raw_predict is stricter than just predict for classification\n assert np.allclose(est_sw._raw_predict(X_dup), est_dup._raw_predict(X_dup))\n\n\[email protected](\"loss_name\", (\"squared_error\", \"absolute_error\"))\ndef test_sum_hessians_are_sample_weight(loss_name):\n # For losses with constant hessians, the sum_hessians field of the\n # histograms must be equal to the sum of the sample weight of samples at\n # the corresponding bin.\n\n rng = np.random.RandomState(0)\n n_samples = 1000\n n_features = 2\n X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=rng)\n bin_mapper = _BinMapper()\n X_binned = bin_mapper.fit_transform(X)\n\n sample_weight = rng.normal(size=n_samples)\n\n loss = _LOSSES[loss_name](sample_weight=sample_weight, n_threads=n_threads)\n gradients, hessians = loss.init_gradients_and_hessians(\n n_samples=n_samples, prediction_dim=1, sample_weight=sample_weight\n )\n raw_predictions = rng.normal(size=(1, n_samples))\n loss.update_gradients_and_hessians(\n gradients, hessians, y, raw_predictions, sample_weight\n )\n\n # build sum_sample_weight which contains the sum of the sample weights at\n # each bin (for each feature). 
This must be equal to the sum_hessians\n # field of the corresponding histogram\n sum_sw = np.zeros(shape=(n_features, bin_mapper.n_bins))\n for feature_idx in range(n_features):\n for sample_idx in range(n_samples):\n sum_sw[feature_idx, X_binned[sample_idx, feature_idx]] += sample_weight[\n sample_idx\n ]\n\n # Build histogram\n grower = TreeGrower(X_binned, gradients[0], hessians[0], n_bins=bin_mapper.n_bins)\n histograms = grower.histogram_builder.compute_histograms_brute(\n grower.root.sample_indices\n )\n\n for feature_idx in range(n_features):\n for bin_idx in range(bin_mapper.n_bins):\n assert histograms[feature_idx, bin_idx][\"sum_hessians\"] == (\n pytest.approx(sum_sw[feature_idx, bin_idx], rel=1e-5)\n )\n\n\ndef test_max_depth_max_leaf_nodes():\n # Non regression test for\n # https://github.com/scikit-learn/scikit-learn/issues/16179\n # there was a bug when the max_depth and the max_leaf_nodes criteria were\n # met at the same time, which would lead to max_leaf_nodes not being\n # respected.\n X, y = make_classification(random_state=0)\n est = HistGradientBoostingClassifier(max_depth=2, max_leaf_nodes=3, max_iter=1).fit(\n X, y\n )\n tree = est._predictors[0][0]\n assert tree.get_max_depth() == 2\n assert tree.get_n_leaf_nodes() == 3 # would be 4 prior to bug fix\n\n\ndef test_early_stopping_on_test_set_with_warm_start():\n # Non regression test for #16661 where second fit fails with\n # warm_start=True, early_stopping is on, and no validation set\n X, y = make_classification(random_state=0)\n gb = HistGradientBoostingClassifier(\n max_iter=1,\n scoring=\"loss\",\n warm_start=True,\n early_stopping=True,\n n_iter_no_change=1,\n validation_fraction=None,\n )\n\n gb.fit(X, y)\n # does not raise on second call\n gb.set_params(max_iter=2)\n gb.fit(X, y)\n\n\[email protected](\n \"Est\", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)\n)\ndef test_single_node_trees(Est):\n # Make sure it's still possible to build single-node trees. In that case\n # the value of the root is set to 0. 
That's a correct value: if the tree is\n # single-node that's because min_gain_to_split is not respected right from\n # the root, so we don't want the tree to have any impact on the\n # predictions.\n\n X, y = make_classification(random_state=0)\n y[:] = 1 # constant target will lead to a single root node\n\n est = Est(max_iter=20)\n est.fit(X, y)\n\n assert all(len(predictor[0].nodes) == 1 for predictor in est._predictors)\n assert all(predictor[0].nodes[0][\"value\"] == 0 for predictor in est._predictors)\n # Still gives correct predictions thanks to the baseline prediction\n assert_allclose(est.predict(X), y)\n\n\[email protected](\n \"Est, loss, X, y\",\n [\n (\n HistGradientBoostingClassifier,\n BinaryCrossEntropy(sample_weight=None),\n X_classification,\n y_classification,\n ),\n (\n HistGradientBoostingRegressor,\n LeastSquares(sample_weight=None),\n X_regression,\n y_regression,\n ),\n ],\n)\ndef test_custom_loss(Est, loss, X, y):\n est = Est(loss=loss, max_iter=20)\n est.fit(X, y)\n\n\[email protected](\n \"HistGradientBoosting, X, y\",\n [\n (HistGradientBoostingClassifier, X_classification, y_classification),\n (HistGradientBoostingRegressor, X_regression, y_regression),\n (\n HistGradientBoostingClassifier,\n X_multi_classification,\n y_multi_classification,\n ),\n ],\n)\ndef test_staged_predict(HistGradientBoosting, X, y):\n\n # Test whether staged predictor eventually gives\n # the same prediction.\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.5, random_state=0\n )\n gb = HistGradientBoosting(max_iter=10)\n\n # test raise NotFittedError if not fitted\n with pytest.raises(NotFittedError):\n next(gb.staged_predict(X_test))\n\n gb.fit(X_train, y_train)\n\n # test if the staged predictions of each iteration\n # are equal to the corresponding predictions of the same estimator\n # trained from scratch.\n # this also test limit case when max_iter = 1\n method_names = (\n [\"predict\"]\n if is_regressor(gb)\n else [\"predict\", \"predict_proba\", \"decision_function\"]\n )\n for method_name in method_names:\n\n staged_method = getattr(gb, \"staged_\" + method_name)\n staged_predictions = list(staged_method(X_test))\n assert len(staged_predictions) == gb.n_iter_\n for n_iter, staged_predictions in enumerate(staged_method(X_test), 1):\n aux = HistGradientBoosting(max_iter=n_iter)\n aux.fit(X_train, y_train)\n pred_aux = getattr(aux, method_name)(X_test)\n\n assert_allclose(staged_predictions, pred_aux)\n assert staged_predictions.shape == pred_aux.shape\n\n\[email protected](\"insert_missing\", [False, True])\[email protected](\n \"Est\", (HistGradientBoostingRegressor, HistGradientBoostingClassifier)\n)\[email protected](\"bool_categorical_parameter\", [True, False])\ndef test_unknown_categories_nan(insert_missing, Est, bool_categorical_parameter):\n # Make sure no error is raised at predict if a category wasn't seen during\n # fit. 
We also make sure they're treated as nans.\n\n rng = np.random.RandomState(0)\n n_samples = 1000\n f1 = rng.rand(n_samples)\n f2 = rng.randint(4, size=n_samples)\n X = np.c_[f1, f2]\n y = np.zeros(shape=n_samples)\n y[X[:, 1] % 2 == 0] = 1\n\n if bool_categorical_parameter:\n categorical_features = [False, True]\n else:\n categorical_features = [1]\n\n if insert_missing:\n mask = rng.binomial(1, 0.01, size=X.shape).astype(bool)\n assert mask.sum() > 0\n X[mask] = np.nan\n\n est = Est(max_iter=20, categorical_features=categorical_features).fit(X, y)\n assert_array_equal(est.is_categorical_, [False, True])\n\n # Make sure no error is raised on unknown categories and nans\n # unknown categories will be treated as nans\n X_test = np.zeros((10, X.shape[1]), dtype=float)\n X_test[:5, 1] = 30\n X_test[5:, 1] = np.nan\n assert len(np.unique(est.predict(X_test))) == 1\n\n\ndef test_categorical_encoding_strategies():\n # Check native categorical handling vs different encoding strategies. We\n # make sure that native encoding needs only 1 split to achieve a perfect\n # prediction on a simple dataset. In contrast, OneHotEncoded data needs\n # more depth / splits, and treating categories as ordered (just using\n # OrdinalEncoder) requires even more depth.\n\n # dataset with one random continuous feature, and one categorical feature\n # with values in [0, 5], e.g. from an OrdinalEncoder.\n # class == 1 iff categorical value in {0, 2, 4}\n rng = np.random.RandomState(0)\n n_samples = 10_000\n f1 = rng.rand(n_samples)\n f2 = rng.randint(6, size=n_samples)\n X = np.c_[f1, f2]\n y = np.zeros(shape=n_samples)\n y[X[:, 1] % 2 == 0] = 1\n\n # make sure dataset is balanced so that the baseline_prediction doesn't\n # influence predictions too much with max_iter = 1\n assert 0.49 < y.mean() < 0.51\n\n clf_cat = HistGradientBoostingClassifier(\n max_iter=1, max_depth=1, categorical_features=[False, True]\n )\n\n # Using native categorical encoding, we get perfect predictions with just\n # one split\n assert cross_val_score(clf_cat, X, y).mean() == 1\n\n # quick sanity check for the bitset: 0, 2, 4 = 2**0 + 2**2 + 2**4 = 21\n expected_left_bitset = [21, 0, 0, 0, 0, 0, 0, 0]\n left_bitset = clf_cat.fit(X, y)._predictors[0][0].raw_left_cat_bitsets[0]\n assert_array_equal(left_bitset, expected_left_bitset)\n\n # Treating categories as ordered, we need more depth / more splits to get\n # the same predictions\n clf_no_cat = HistGradientBoostingClassifier(\n max_iter=1, max_depth=4, categorical_features=None\n )\n assert cross_val_score(clf_no_cat, X, y).mean() < 0.9\n\n clf_no_cat.set_params(max_depth=5)\n assert cross_val_score(clf_no_cat, X, y).mean() == 1\n\n # Using OHEd data, we need less splits than with pure OEd data, but we\n # still need more splits than with the native categorical splits\n ct = make_column_transformer(\n (OneHotEncoder(sparse=False), [1]), remainder=\"passthrough\"\n )\n X_ohe = ct.fit_transform(X)\n clf_no_cat.set_params(max_depth=2)\n assert cross_val_score(clf_no_cat, X_ohe, y).mean() < 0.9\n\n clf_no_cat.set_params(max_depth=3)\n assert cross_val_score(clf_no_cat, X_ohe, y).mean() == 1\n\n\[email protected](\n \"Est\", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)\n)\[email protected](\n \"categorical_features, monotonic_cst, expected_msg\",\n [\n (\n [\"hello\", \"world\"],\n None,\n \"categorical_features must be an array-like of bools or array-like of \"\n \"ints.\",\n ),\n (\n [0, -1],\n None,\n (\n r\"categorical_features set as integer indices must be in \"\n 
r\"\\[0, n_features - 1\\]\"\n ),\n ),\n (\n [True, True, False, False, True],\n None,\n r\"categorical_features set as a boolean mask must have shape \"\n r\"\\(n_features,\\)\",\n ),\n (\n [True, True, False, False],\n [0, -1, 0, 1],\n \"Categorical features cannot have monotonic constraints\",\n ),\n ],\n)\ndef test_categorical_spec_errors(\n Est, categorical_features, monotonic_cst, expected_msg\n):\n # Test errors when categories are specified incorrectly\n n_samples = 100\n X, y = make_classification(random_state=0, n_features=4, n_samples=n_samples)\n rng = np.random.RandomState(0)\n X[:, 0] = rng.randint(0, 10, size=n_samples)\n X[:, 1] = rng.randint(0, 10, size=n_samples)\n est = Est(categorical_features=categorical_features, monotonic_cst=monotonic_cst)\n\n with pytest.raises(ValueError, match=expected_msg):\n est.fit(X, y)\n\n\[email protected](\n \"Est\", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)\n)\[email protected](\"categorical_features\", ([False, False], []))\[email protected](\"as_array\", (True, False))\ndef test_categorical_spec_no_categories(Est, categorical_features, as_array):\n # Make sure we can properly detect that no categorical features are present\n # even if the categorical_features parameter is not None\n X = np.arange(10).reshape(5, 2)\n y = np.arange(5)\n if as_array:\n categorical_features = np.asarray(categorical_features)\n est = Est(categorical_features=categorical_features).fit(X, y)\n assert est.is_categorical_ is None\n\n\[email protected](\n \"Est\", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)\n)\ndef test_categorical_bad_encoding_errors(Est):\n # Test errors when categories are encoded incorrectly\n\n gb = Est(categorical_features=[True], max_bins=2)\n\n X = np.array([[0, 1, 2]]).T\n y = np.arange(3)\n msg = \"Categorical feature at index 0 is expected to have a cardinality <= 2\"\n with pytest.raises(ValueError, match=msg):\n gb.fit(X, y)\n\n X = np.array([[0, 2]]).T\n y = np.arange(2)\n msg = \"Categorical feature at index 0 is expected to be encoded with values < 2\"\n with pytest.raises(ValueError, match=msg):\n gb.fit(X, y)\n\n # nans are ignored in the counts\n X = np.array([[0, 1, np.nan]]).T\n y = np.arange(3)\n gb.fit(X, y)\n\n\[email protected](\n \"Est\", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)\n)\ndef test_uint8_predict(Est):\n # Non regression test for\n # https://github.com/scikit-learn/scikit-learn/issues/18408\n # Make sure X can be of dtype uint8 (i.e. X_BINNED_DTYPE) in predict. It\n # will be converted to X_DTYPE.\n\n rng = np.random.RandomState(0)\n\n X = rng.randint(0, 100, size=(10, 2)).astype(np.uint8)\n y = rng.randint(0, 2, size=10).astype(np.uint8)\n est = Est()\n est.fit(X, y)\n est.predict(X)\n\n\n# TODO: Remove in v1.2\[email protected](\n \"old_loss, new_loss\",\n [\n (\"least_squares\", \"squared_error\"),\n (\"least_absolute_deviation\", \"absolute_error\"),\n ],\n)\ndef test_loss_deprecated(old_loss, new_loss):\n X, y = make_regression(n_samples=50, random_state=0)\n est1 = HistGradientBoostingRegressor(loss=old_loss, random_state=0)\n\n with pytest.warns(FutureWarning, match=f\"The loss '{old_loss}' was deprecated\"):\n est1.fit(X, y)\n\n est2 = HistGradientBoostingRegressor(loss=new_loss, random_state=0)\n est2.fit(X, y)\n assert_allclose(est1.predict(X), est2.predict(X))\n",
"# -*- coding: utf8\n\"\"\"Random Projection transformers.\n\nRandom Projections are a simple and computationally efficient way to\nreduce the dimensionality of the data by trading a controlled amount\nof accuracy (as additional variance) for faster processing times and\nsmaller model sizes.\n\nThe dimensions and distribution of Random Projections matrices are\ncontrolled so as to preserve the pairwise distances between any two\nsamples of the dataset.\n\nThe main theoretical result behind the efficiency of random projection is the\n`Johnson-Lindenstrauss lemma (quoting Wikipedia)\n<https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:\n\n In mathematics, the Johnson-Lindenstrauss lemma is a result\n concerning low-distortion embeddings of points from high-dimensional\n into low-dimensional Euclidean space. The lemma states that a small set\n of points in a high-dimensional space can be embedded into a space of\n much lower dimension in such a way that distances between the points are\n nearly preserved. The map used for the embedding is at least Lipschitz,\n and can even be taken to be an orthogonal projection.\n\n\"\"\"\n# Authors: Olivier Grisel <[email protected]>,\n# Arnaud Joly <[email protected]>\n# License: BSD 3 clause\n\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom .base import BaseEstimator, TransformerMixin\nfrom .base import _ClassNamePrefixFeaturesOutMixin\n\nfrom .utils import check_random_state\nfrom .utils.extmath import safe_sparse_dot\nfrom .utils.random import sample_without_replacement\nfrom .utils.validation import check_is_fitted\nfrom .exceptions import DataDimensionalityWarning\n\n\n__all__ = [\n \"SparseRandomProjection\",\n \"GaussianRandomProjection\",\n \"johnson_lindenstrauss_min_dim\",\n]\n\n\ndef johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1):\n \"\"\"Find a 'safe' number of components to randomly project to.\n\n The distortion introduced by a random projection `p` only changes the\n distance between two points by a factor (1 +- eps) in an euclidean space\n with good probability. The projection `p` is an eps-embedding as defined\n by:\n\n (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2\n\n Where u and v are any rows taken from a dataset of shape (n_samples,\n n_features), eps is in ]0, 1[ and p is a projection by a random Gaussian\n N(0, 1) matrix of shape (n_components, n_features) (or a sparse\n Achlioptas matrix).\n\n The minimum number of components to guarantee the eps-embedding is\n given by:\n\n n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)\n\n Note that the number of dimensions is independent of the original\n number of features but instead depends on the size of the dataset:\n the larger the dataset, the higher is the minimal dimensionality of\n an eps-embedding.\n\n Read more in the :ref:`User Guide <johnson_lindenstrauss>`.\n\n Parameters\n ----------\n n_samples : int or array-like of int\n Number of samples that should be a integer greater than 0. If an array\n is given, it will compute a safe number of components array-wise.\n\n eps : float or ndarray of shape (n_components,), dtype=float, \\\n default=0.1\n Maximum distortion rate in the range (0,1 ) as defined by the\n Johnson-Lindenstrauss lemma. 
If an array is given, it will compute a\n safe number of components array-wise.\n\n Returns\n -------\n n_components : int or ndarray of int\n The minimal number of components to guarantee with good probability\n an eps-embedding with n_samples.\n\n Examples\n --------\n >>> from sklearn.random_projection import johnson_lindenstrauss_min_dim\n >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)\n 663\n\n >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])\n array([ 663, 11841, 1112658])\n\n >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)\n array([ 7894, 9868, 11841])\n\n References\n ----------\n\n .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma\n\n .. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,\n \"An elementary proof of the Johnson-Lindenstrauss Lemma.\"\n http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654\n\n \"\"\"\n eps = np.asarray(eps)\n n_samples = np.asarray(n_samples)\n\n if np.any(eps <= 0.0) or np.any(eps >= 1):\n raise ValueError(\"The JL bound is defined for eps in ]0, 1[, got %r\" % eps)\n\n if np.any(n_samples) <= 0:\n raise ValueError(\n \"The JL bound is defined for n_samples greater than zero, got %r\"\n % n_samples\n )\n\n denominator = (eps ** 2 / 2) - (eps ** 3 / 3)\n return (4 * np.log(n_samples) / denominator).astype(np.int64)\n\n\ndef _check_density(density, n_features):\n \"\"\"Factorize density check according to Li et al.\"\"\"\n if density == \"auto\":\n density = 1 / np.sqrt(n_features)\n\n elif density <= 0 or density > 1:\n raise ValueError(\"Expected density in range ]0, 1], got: %r\" % density)\n return density\n\n\ndef _check_input_size(n_components, n_features):\n \"\"\"Factorize argument checking for random matrix generation.\"\"\"\n if n_components <= 0:\n raise ValueError(\n \"n_components must be strictly positive, got %d\" % n_components\n )\n if n_features <= 0:\n raise ValueError(\"n_features must be strictly positive, got %d\" % n_features)\n\n\ndef _gaussian_random_matrix(n_components, n_features, random_state=None):\n \"\"\"Generate a dense Gaussian random matrix.\n\n The components of the random matrix are drawn from\n\n N(0, 1.0 / n_components).\n\n Read more in the :ref:`User Guide <gaussian_random_matrix>`.\n\n Parameters\n ----------\n n_components : int,\n Dimensionality of the target projection space.\n\n n_features : int,\n Dimensionality of the original source space.\n\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo random number generator used to generate the matrix\n at fit time.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n components : ndarray of shape (n_components, n_features)\n The generated Gaussian random matrix.\n\n See Also\n --------\n GaussianRandomProjection\n \"\"\"\n _check_input_size(n_components, n_features)\n rng = check_random_state(random_state)\n components = rng.normal(\n loc=0.0, scale=1.0 / np.sqrt(n_components), size=(n_components, n_features)\n )\n return components\n\n\ndef _sparse_random_matrix(n_components, n_features, density=\"auto\", random_state=None):\n \"\"\"Generalized Achlioptas random sparse matrix for random projection.\n\n Setting density to 1 / 3 will yield the original matrix by Dimitris\n Achlioptas while setting a lower value will yield the generalization\n by Ping Li et al.\n\n If we note :math:`s = 1 / density`, the components of the random matrix are\n drawn from:\n\n - -sqrt(s) / sqrt(n_components) with 
probability 1 / 2s\n - 0 with probability 1 - 1 / s\n - +sqrt(s) / sqrt(n_components) with probability 1 / 2s\n\n Read more in the :ref:`User Guide <sparse_random_matrix>`.\n\n Parameters\n ----------\n n_components : int,\n Dimensionality of the target projection space.\n\n n_features : int,\n Dimensionality of the original source space.\n\n density : float or 'auto', default='auto'\n Ratio of non-zero component in the random projection matrix in the\n range `(0, 1]`\n\n If density = 'auto', the value is set to the minimum density\n as recommended by Ping Li et al.: 1 / sqrt(n_features).\n\n Use density = 1 / 3.0 if you want to reproduce the results from\n Achlioptas, 2001.\n\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo random number generator used to generate the matrix\n at fit time.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n components : {ndarray, sparse matrix} of shape (n_components, n_features)\n The generated Gaussian random matrix. Sparse matrix will be of CSR\n format.\n\n See Also\n --------\n SparseRandomProjection\n\n References\n ----------\n\n .. [1] Ping Li, T. Hastie and K. W. Church, 2006,\n \"Very Sparse Random Projections\".\n https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf\n\n .. [2] D. Achlioptas, 2001, \"Database-friendly random projections\",\n http://www.cs.ucsc.edu/~optas/papers/jl.pdf\n\n \"\"\"\n _check_input_size(n_components, n_features)\n density = _check_density(density, n_features)\n rng = check_random_state(random_state)\n\n if density == 1:\n # skip index generation if totally dense\n components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1\n return 1 / np.sqrt(n_components) * components\n\n else:\n # Generate location of non zero elements\n indices = []\n offset = 0\n indptr = [offset]\n for _ in range(n_components):\n # find the indices of the non-zero components for row i\n n_nonzero_i = rng.binomial(n_features, density)\n indices_i = sample_without_replacement(\n n_features, n_nonzero_i, random_state=rng\n )\n indices.append(indices_i)\n offset += n_nonzero_i\n indptr.append(offset)\n\n indices = np.concatenate(indices)\n\n # Among non zero components the probability of the sign is 50%/50%\n data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1\n\n # build the CSR structure by concatenating the rows\n components = sp.csr_matrix(\n (data, indices, indptr), shape=(n_components, n_features)\n )\n\n return np.sqrt(1 / density) / np.sqrt(n_components) * components\n\n\nclass BaseRandomProjection(\n TransformerMixin, BaseEstimator, _ClassNamePrefixFeaturesOutMixin, metaclass=ABCMeta\n):\n \"\"\"Base class for random projections.\n\n Warning: This class should not be used directly.\n Use derived classes instead.\n \"\"\"\n\n @abstractmethod\n def __init__(\n self, n_components=\"auto\", *, eps=0.1, dense_output=False, random_state=None\n ):\n self.n_components = n_components\n self.eps = eps\n self.dense_output = dense_output\n self.random_state = random_state\n\n @abstractmethod\n def _make_random_matrix(self, n_components, n_features):\n \"\"\"Generate the random projection matrix.\n\n Parameters\n ----------\n n_components : int,\n Dimensionality of the target projection space.\n\n n_features : int,\n Dimensionality of the original source space.\n\n Returns\n -------\n components : {ndarray, sparse matrix} of shape \\\n (n_components, n_features)\n The generated random matrix. 
Sparse matrix will be of CSR format.\n\n \"\"\"\n\n def fit(self, X, y=None):\n \"\"\"Generate a sparse random projection matrix.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training set: only the shape is used to find optimal random\n matrix dimensions based on the theory referenced in the\n afore mentioned papers.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n BaseRandomProjection class instance.\n \"\"\"\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"])\n\n n_samples, n_features = X.shape\n\n if self.n_components == \"auto\":\n self.n_components_ = johnson_lindenstrauss_min_dim(\n n_samples=n_samples, eps=self.eps\n )\n\n if self.n_components_ <= 0:\n raise ValueError(\n \"eps=%f and n_samples=%d lead to a target dimension of \"\n \"%d which is invalid\" % (self.eps, n_samples, self.n_components_)\n )\n\n elif self.n_components_ > n_features:\n raise ValueError(\n \"eps=%f and n_samples=%d lead to a target dimension of \"\n \"%d which is larger than the original space with \"\n \"n_features=%d\"\n % (self.eps, n_samples, self.n_components_, n_features)\n )\n else:\n if self.n_components <= 0:\n raise ValueError(\n \"n_components must be greater than 0, got %s\" % self.n_components\n )\n\n elif self.n_components > n_features:\n warnings.warn(\n \"The number of components is higher than the number of\"\n \" features: n_features < n_components (%s < %s).\"\n \"The dimensionality of the problem will not be reduced.\"\n % (n_features, self.n_components),\n DataDimensionalityWarning,\n )\n\n self.n_components_ = self.n_components\n\n # Generate a projection matrix of size [n_components, n_features]\n self.components_ = self._make_random_matrix(self.n_components_, n_features)\n\n # Check contract\n assert self.components_.shape == (self.n_components_, n_features), (\n \"An error has occurred the self.components_ matrix has \"\n \" not the proper shape.\"\n )\n\n return self\n\n def transform(self, X):\n \"\"\"Project the data by using matrix product with the random matrix.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The input data to project into a smaller dimensional space.\n\n Returns\n -------\n X_new : {ndarray, sparse matrix} of shape (n_samples, n_components)\n Projected array.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"], reset=False)\n\n if X.shape[1] != self.components_.shape[1]:\n raise ValueError(\n \"Impossible to perform projection:\"\n \"X at fit stage had a different number of features. \"\n \"(%s != %s)\" % (X.shape[1], self.components_.shape[1])\n )\n\n X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)\n return X_new\n\n @property\n def _n_features_out(self):\n \"\"\"Number of transformed output features.\n\n Used by _ClassNamePrefixFeaturesOutMixin.get_feature_names_out.\n \"\"\"\n return self.n_components\n\n\nclass GaussianRandomProjection(BaseRandomProjection):\n \"\"\"Reduce dimensionality through Gaussian random projection.\n\n The components of the random matrix are drawn from N(0, 1 / n_components).\n\n Read more in the :ref:`User Guide <gaussian_random_matrix>`.\n\n .. 
versionadded:: 0.13\n\n Parameters\n ----------\n n_components : int or 'auto', default='auto'\n Dimensionality of the target projection space.\n\n n_components can be automatically adjusted according to the\n number of samples in the dataset and the bound given by the\n Johnson-Lindenstrauss lemma. In that case the quality of the\n embedding is controlled by the ``eps`` parameter.\n\n It should be noted that Johnson-Lindenstrauss lemma can yield\n very conservative estimated of the required number of components\n as it makes no assumption on the structure of the dataset.\n\n eps : float, default=0.1\n Parameter to control the quality of the embedding according to\n the Johnson-Lindenstrauss lemma when `n_components` is set to\n 'auto'. The value should be strictly positive.\n\n Smaller values lead to better embedding and higher number of\n dimensions (n_components) in the target projection space.\n\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo random number generator used to generate the\n projection matrix at fit time.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n n_components_ : int\n Concrete number of components computed when n_components=\"auto\".\n\n components_ : ndarray of shape (n_components, n_features)\n Random matrix used for the projection.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n SparseRandomProjection : Reduce dimensionality through sparse\n random projection.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.random_projection import GaussianRandomProjection\n >>> rng = np.random.RandomState(42)\n >>> X = rng.rand(25, 3000)\n >>> transformer = GaussianRandomProjection(random_state=rng)\n >>> X_new = transformer.fit_transform(X)\n >>> X_new.shape\n (25, 2759)\n \"\"\"\n\n def __init__(self, n_components=\"auto\", *, eps=0.1, random_state=None):\n super().__init__(\n n_components=n_components,\n eps=eps,\n dense_output=True,\n random_state=random_state,\n )\n\n def _make_random_matrix(self, n_components, n_features):\n \"\"\" Generate the random projection matrix.\n\n Parameters\n ----------\n n_components : int,\n Dimensionality of the target projection space.\n\n n_features : int,\n Dimensionality of the original source space.\n\n Returns\n -------\n components : {ndarray, sparse matrix} of shape \\\n (n_components, n_features)\n The generated random matrix. 
Sparse matrix will be of CSR format.\n\n \"\"\"\n random_state = check_random_state(self.random_state)\n return _gaussian_random_matrix(\n n_components, n_features, random_state=random_state\n )\n\n\nclass SparseRandomProjection(BaseRandomProjection):\n \"\"\"Reduce dimensionality through sparse random projection.\n\n Sparse random matrix is an alternative to dense random\n projection matrix that guarantees similar embedding quality while being\n much more memory efficient and allowing faster computation of the\n projected data.\n\n If we note `s = 1 / density` the components of the random matrix are\n drawn from:\n\n - -sqrt(s) / sqrt(n_components) with probability 1 / 2s\n - 0 with probability 1 - 1 / s\n - +sqrt(s) / sqrt(n_components) with probability 1 / 2s\n\n Read more in the :ref:`User Guide <sparse_random_matrix>`.\n\n .. versionadded:: 0.13\n\n Parameters\n ----------\n n_components : int or 'auto', default='auto'\n Dimensionality of the target projection space.\n\n n_components can be automatically adjusted according to the\n number of samples in the dataset and the bound given by the\n Johnson-Lindenstrauss lemma. In that case the quality of the\n embedding is controlled by the ``eps`` parameter.\n\n It should be noted that Johnson-Lindenstrauss lemma can yield\n very conservative estimated of the required number of components\n as it makes no assumption on the structure of the dataset.\n\n density : float or 'auto', default='auto'\n Ratio in the range (0, 1] of non-zero component in the random\n projection matrix.\n\n If density = 'auto', the value is set to the minimum density\n as recommended by Ping Li et al.: 1 / sqrt(n_features).\n\n Use density = 1 / 3.0 if you want to reproduce the results from\n Achlioptas, 2001.\n\n eps : float, default=0.1\n Parameter to control the quality of the embedding according to\n the Johnson-Lindenstrauss lemma when n_components is set to\n 'auto'. This value should be strictly positive.\n\n Smaller values lead to better embedding and higher number of\n dimensions (n_components) in the target projection space.\n\n dense_output : bool, default=False\n If True, ensure that the output of the random projection is a\n dense numpy array even if the input and random projection matrix\n are both sparse. In practice, if the number of components is\n small the number of zero components in the projected data will\n be very small and it will be more CPU and memory efficient to\n use a dense representation.\n\n If False, the projected data uses a sparse representation if\n the input is sparse.\n\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo random number generator used to generate the\n projection matrix at fit time.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n n_components_ : int\n Concrete number of components computed when n_components=\"auto\".\n\n components_ : sparse matrix of shape (n_components, n_features)\n Random matrix used for the projection. Sparse matrix will be of CSR\n format.\n\n density_ : float in range 0.0 - 1.0\n Concrete density computed from when density = \"auto\".\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. 
versionadded:: 1.0\n\n See Also\n --------\n GaussianRandomProjection : Reduce dimensionality through Gaussian\n random projection.\n\n References\n ----------\n\n .. [1] Ping Li, T. Hastie and K. W. Church, 2006,\n \"Very Sparse Random Projections\".\n https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf\n\n .. [2] D. Achlioptas, 2001, \"Database-friendly random projections\",\n https://users.soe.ucsc.edu/~optas/papers/jl.pdf\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.random_projection import SparseRandomProjection\n >>> rng = np.random.RandomState(42)\n >>> X = rng.rand(25, 3000)\n >>> transformer = SparseRandomProjection(random_state=rng)\n >>> X_new = transformer.fit_transform(X)\n >>> X_new.shape\n (25, 2759)\n >>> # very few components are non-zero\n >>> np.mean(transformer.components_ != 0)\n 0.0182...\n \"\"\"\n\n def __init__(\n self,\n n_components=\"auto\",\n *,\n density=\"auto\",\n eps=0.1,\n dense_output=False,\n random_state=None,\n ):\n super().__init__(\n n_components=n_components,\n eps=eps,\n dense_output=dense_output,\n random_state=random_state,\n )\n\n self.density = density\n\n def _make_random_matrix(self, n_components, n_features):\n \"\"\" Generate the random projection matrix\n\n Parameters\n ----------\n n_components : int\n Dimensionality of the target projection space.\n\n n_features : int\n Dimensionality of the original source space.\n\n Returns\n -------\n components : {ndarray, sparse matrix} of shape \\\n (n_components, n_features)\n The generated random matrix. Sparse matrix will be of CSR format.\n\n \"\"\"\n random_state = check_random_state(self.random_state)\n self.density_ = _check_density(self.density, n_features)\n return _sparse_random_matrix(\n n_components, n_features, density=self.density_, random_state=random_state\n )\n",
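To accompany the random_projection.py entry above, a short usage sketch combining `johnson_lindenstrauss_min_dim` with `SparseRandomProjection`. The matrix shape, seed, and eps value are assumptions chosen for illustration; the point is that the JL bound depends only on the number of samples, and that the fitted transformer exposes the chosen `density_`.

import numpy as np
from sklearn.random_projection import (
    SparseRandomProjection,
    johnson_lindenstrauss_min_dim,
)

rng = np.random.RandomState(42)
X = rng.rand(100, 10000)

# Smallest target dimensionality guaranteeing an eps=0.1 embedding of 100 samples.
n_components = johnson_lindenstrauss_min_dim(n_samples=X.shape[0], eps=0.1)
print(n_components)  # 3947, independent of the 10000 original features

transformer = SparseRandomProjection(n_components=n_components, random_state=42)
X_new = transformer.fit_transform(X)
print(X_new.shape)           # (100, 3947)
print(transformer.density_)  # 1 / sqrt(10000) = 0.01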
"\"\"\"\nLinear Discriminant Analysis and Quadratic Discriminant Analysis\n\"\"\"\n\n# Authors: Clemens Brunner\n# Martin Billinger\n# Matthieu Perrot\n# Mathieu Blondel\n\n# License: BSD 3-Clause\n\nimport warnings\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.special import expit\n\nfrom .base import BaseEstimator, TransformerMixin, ClassifierMixin\nfrom .linear_model._base import LinearClassifierMixin\nfrom .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance\nfrom .utils.multiclass import unique_labels\nfrom .utils.validation import check_is_fitted\nfrom .utils.multiclass import check_classification_targets\nfrom .utils.extmath import softmax\nfrom .preprocessing import StandardScaler\n\n\n__all__ = [\"LinearDiscriminantAnalysis\", \"QuadraticDiscriminantAnalysis\"]\n\n\ndef _cov(X, shrinkage=None, covariance_estimator=None):\n \"\"\"Estimate covariance matrix (using optional covariance_estimator).\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n shrinkage : {'empirical', 'auto'} or float, default=None\n Shrinkage parameter, possible values:\n - None or 'empirical': no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n Shrinkage parameter is ignored if `covariance_estimator`\n is not None.\n\n covariance_estimator : estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying on the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in :mod:`sklearn.covariance``.\n if None the shrinkage parameter drives the estimate.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n s : ndarray of shape (n_features, n_features)\n Estimated covariance matrix.\n \"\"\"\n if covariance_estimator is None:\n shrinkage = \"empirical\" if shrinkage is None else shrinkage\n if isinstance(shrinkage, str):\n if shrinkage == \"auto\":\n sc = StandardScaler() # standardize features\n X = sc.fit_transform(X)\n s = ledoit_wolf(X)[0]\n # rescale\n s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]\n elif shrinkage == \"empirical\":\n s = empirical_covariance(X)\n else:\n raise ValueError(\"unknown shrinkage parameter\")\n elif isinstance(shrinkage, float) or isinstance(shrinkage, int):\n if shrinkage < 0 or shrinkage > 1:\n raise ValueError(\"shrinkage parameter must be between 0 and 1\")\n s = shrunk_covariance(empirical_covariance(X), shrinkage)\n else:\n raise TypeError(\"shrinkage must be a float or a string\")\n else:\n if shrinkage is not None and shrinkage != 0:\n raise ValueError(\n \"covariance_estimator and shrinkage parameters \"\n \"are not None. 
Only one of the two can be set.\"\n )\n covariance_estimator.fit(X)\n if not hasattr(covariance_estimator, \"covariance_\"):\n raise ValueError(\n \"%s does not have a covariance_ attribute\"\n % covariance_estimator.__class__.__name__\n )\n s = covariance_estimator.covariance_\n return s\n\n\ndef _class_means(X, y):\n \"\"\"Compute class means.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Returns\n -------\n means : array-like of shape (n_classes, n_features)\n Class means.\n \"\"\"\n classes, y = np.unique(y, return_inverse=True)\n cnt = np.bincount(y)\n means = np.zeros(shape=(len(classes), X.shape[1]))\n np.add.at(means, y, X)\n means /= cnt[:, None]\n return means\n\n\ndef _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None):\n \"\"\"Compute weighted within-class covariance matrix.\n\n The per-class covariance are weighted by the class priors.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n priors : array-like of shape (n_classes,)\n Class priors.\n\n shrinkage : 'auto' or float, default=None\n Shrinkage parameter, possible values:\n - None: no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n Shrinkage parameter is ignored if `covariance_estimator` is not None.\n\n covariance_estimator : estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in sklearn.covariance.\n If None, the shrinkage parameter drives the estimate.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n cov : array-like of shape (n_features, n_features)\n Weighted within-class covariance matrix\n \"\"\"\n classes = np.unique(y)\n cov = np.zeros(shape=(X.shape[1], X.shape[1]))\n for idx, group in enumerate(classes):\n Xg = X[y == group, :]\n cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage, covariance_estimator))\n return cov\n\n\nclass LinearDiscriminantAnalysis(\n LinearClassifierMixin, TransformerMixin, BaseEstimator\n):\n \"\"\"Linear Discriminant Analysis.\n\n A classifier with a linear decision boundary, generated by fitting class\n conditional densities to the data and using Bayes' rule.\n\n The model fits a Gaussian density to each class, assuming that all classes\n share the same covariance matrix.\n\n The fitted model can also be used to reduce the dimensionality of the input\n by projecting it to the most discriminative directions, using the\n `transform` method.\n\n .. 
versionadded:: 0.17\n *LinearDiscriminantAnalysis*.\n\n Read more in the :ref:`User Guide <lda_qda>`.\n\n Parameters\n ----------\n solver : {'svd', 'lsqr', 'eigen'}, default='svd'\n Solver to use, possible values:\n - 'svd': Singular value decomposition (default).\n Does not compute the covariance matrix, therefore this solver is\n recommended for data with a large number of features.\n - 'lsqr': Least squares solution.\n Can be combined with shrinkage or custom covariance estimator.\n - 'eigen': Eigenvalue decomposition.\n Can be combined with shrinkage or custom covariance estimator.\n\n shrinkage : 'auto' or float, default=None\n Shrinkage parameter, possible values:\n - None: no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n This should be left to None if `covariance_estimator` is used.\n Note that shrinkage works only with 'lsqr' and 'eigen' solvers.\n\n priors : array-like of shape (n_classes,), default=None\n The class prior probabilities. By default, the class proportions are\n inferred from the training data.\n\n n_components : int, default=None\n Number of components (<= min(n_classes - 1, n_features)) for\n dimensionality reduction. If None, will be set to\n min(n_classes - 1, n_features). This parameter only affects the\n `transform` method.\n\n store_covariance : bool, default=False\n If True, explicitly compute the weighted within-class covariance\n matrix when solver is 'svd'. The matrix is always computed\n and stored for the other solvers.\n\n .. versionadded:: 0.17\n\n tol : float, default=1.0e-4\n Absolute threshold for a singular value of X to be considered\n significant, used to estimate the rank of X. Dimensions whose\n singular values are non-significant are discarded. Only used if\n solver is 'svd'.\n\n .. versionadded:: 0.17\n\n covariance_estimator : covariance estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying on the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in :mod:`sklearn.covariance`.\n if None the shrinkage parameter drives the estimate.\n\n This should be left to None if `shrinkage` is used.\n Note that `covariance_estimator` works only with 'lsqr' and 'eigen'\n solvers.\n\n .. versionadded:: 0.24\n\n Attributes\n ----------\n coef_ : ndarray of shape (n_features,) or (n_classes, n_features)\n Weight vector(s).\n\n intercept_ : ndarray of shape (n_classes,)\n Intercept term.\n\n covariance_ : array-like of shape (n_features, n_features)\n Weighted within-class covariance matrix. It corresponds to\n `sum_k prior_k * C_k` where `C_k` is the covariance matrix of the\n samples in class `k`. The `C_k` are estimated using the (potentially\n shrunk) biased estimator of covariance. If solver is 'svd', only\n exists when `store_covariance` is True.\n\n explained_variance_ratio_ : ndarray of shape (n_components,)\n Percentage of variance explained by each of the selected components.\n If ``n_components`` is not set then all components are stored and the\n sum of explained variances is equal to 1.0. 
Only available when eigen\n or svd solver is used.\n\n means_ : array-like of shape (n_classes, n_features)\n Class-wise means.\n\n priors_ : array-like of shape (n_classes,)\n Class priors (sum to 1).\n\n scalings_ : array-like of shape (rank, n_classes - 1)\n Scaling of the features in the space spanned by the class centroids.\n Only available for 'svd' and 'eigen' solvers.\n\n xbar_ : array-like of shape (n_features,)\n Overall mean. Only present if solver is 'svd'.\n\n classes_ : array-like of shape (n_classes,)\n Unique class labels.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n QuadraticDiscriminantAnalysis : Quadratic Discriminant Analysis.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> y = np.array([1, 1, 1, 2, 2, 2])\n >>> clf = LinearDiscriminantAnalysis()\n >>> clf.fit(X, y)\n LinearDiscriminantAnalysis()\n >>> print(clf.predict([[-0.8, -1]]))\n [1]\n \"\"\"\n\n def __init__(\n self,\n solver=\"svd\",\n shrinkage=None,\n priors=None,\n n_components=None,\n store_covariance=False,\n tol=1e-4,\n covariance_estimator=None,\n ):\n self.solver = solver\n self.shrinkage = shrinkage\n self.priors = priors\n self.n_components = n_components\n self.store_covariance = store_covariance # used only in svd solver\n self.tol = tol # used only in svd solver\n self.covariance_estimator = covariance_estimator\n\n def _solve_lsqr(self, X, y, shrinkage, covariance_estimator):\n \"\"\"Least squares solver.\n\n The least squares solver computes a straightforward solution of the\n optimal decision rule based directly on the discriminant functions. It\n can only be used for classification (with any covariance estimator),\n because\n estimation of eigenvectors is not performed. Therefore, dimensionality\n reduction with the transform is not supported.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_classes)\n Target values.\n\n shrinkage : 'auto', float or None\n Shrinkage parameter, possible values:\n - None: no shrinkage.\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n Shrinkage parameter is ignored if `covariance_estimator` i\n not None\n\n covariance_estimator : estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in sklearn.covariance.\n if None the shrinkage parameter drives the estimate.\n\n .. versionadded:: 0.24\n\n Notes\n -----\n This solver is based on [1]_, section 2.6.2, pp. 39-41.\n\n References\n ----------\n .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification\n (Second Edition). John Wiley & Sons, Inc., New York, 2001. 
ISBN\n 0-471-05669-3.\n \"\"\"\n self.means_ = _class_means(X, y)\n self.covariance_ = _class_cov(\n X, y, self.priors_, shrinkage, covariance_estimator\n )\n self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T\n self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(\n self.priors_\n )\n\n def _solve_eigen(self, X, y, shrinkage, covariance_estimator):\n \"\"\"Eigenvalue solver.\n\n The eigenvalue solver computes the optimal solution of the Rayleigh\n coefficient (basically the ratio of between class scatter to within\n class scatter). This solver supports both classification and\n dimensionality reduction (with any covariance estimator).\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n shrinkage : 'auto', float or None\n Shrinkage parameter, possible values:\n - None: no shrinkage.\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage constant.\n\n Shrinkage parameter is ignored if `covariance_estimator` i\n not None\n\n covariance_estimator : estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in sklearn.covariance.\n if None the shrinkage parameter drives the estimate.\n\n .. versionadded:: 0.24\n\n Notes\n -----\n This solver is based on [1]_, section 3.8.3, pp. 121-124.\n\n References\n ----------\n .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification\n (Second Edition). John Wiley & Sons, Inc., New York, 2001. 
ISBN\n 0-471-05669-3.\n \"\"\"\n self.means_ = _class_means(X, y)\n self.covariance_ = _class_cov(\n X, y, self.priors_, shrinkage, covariance_estimator\n )\n\n Sw = self.covariance_ # within scatter\n St = _cov(X, shrinkage, covariance_estimator) # total scatter\n Sb = St - Sw # between scatter\n\n evals, evecs = linalg.eigh(Sb, Sw)\n self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][\n : self._max_components\n ]\n evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors\n\n self.scalings_ = evecs\n self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)\n self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(\n self.priors_\n )\n\n def _solve_svd(self, X, y):\n \"\"\"SVD solver.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n \"\"\"\n n_samples, n_features = X.shape\n n_classes = len(self.classes_)\n\n self.means_ = _class_means(X, y)\n if self.store_covariance:\n self.covariance_ = _class_cov(X, y, self.priors_)\n\n Xc = []\n for idx, group in enumerate(self.classes_):\n Xg = X[y == group, :]\n Xc.append(Xg - self.means_[idx])\n\n self.xbar_ = np.dot(self.priors_, self.means_)\n\n Xc = np.concatenate(Xc, axis=0)\n\n # 1) within (univariate) scaling by with classes std-dev\n std = Xc.std(axis=0)\n # avoid division by zero in normalization\n std[std == 0] = 1.0\n fac = 1.0 / (n_samples - n_classes)\n\n # 2) Within variance scaling\n X = np.sqrt(fac) * (Xc / std)\n # SVD of centered (within)scaled data\n U, S, Vt = linalg.svd(X, full_matrices=False)\n\n rank = np.sum(S > self.tol)\n # Scaling of within covariance is: V' 1/S\n scalings = (Vt[:rank] / std).T / S[:rank]\n\n # 3) Between variance scaling\n # Scale weighted centers\n X = np.dot(\n (\n (np.sqrt((n_samples * self.priors_) * fac))\n * (self.means_ - self.xbar_).T\n ).T,\n scalings,\n )\n # Centers are living in a space with n_classes-1 dim (maximum)\n # Use SVD to find projection in the space spanned by the\n # (n_classes) centers\n _, S, Vt = linalg.svd(X, full_matrices=0)\n\n if self._max_components == 0:\n self.explained_variance_ratio_ = np.empty((0,), dtype=S.dtype)\n else:\n self.explained_variance_ratio_ = (S ** 2 / np.sum(S ** 2))[\n : self._max_components\n ]\n\n rank = np.sum(S > self.tol * S[0])\n self.scalings_ = np.dot(scalings, Vt.T[:, :rank])\n coef = np.dot(self.means_ - self.xbar_, self.scalings_)\n self.intercept_ = -0.5 * np.sum(coef ** 2, axis=1) + np.log(self.priors_)\n self.coef_ = np.dot(coef, self.scalings_.T)\n self.intercept_ -= np.dot(self.xbar_, self.coef_.T)\n\n def fit(self, X, y):\n \"\"\"Fit the Linear Discriminant Analysis model.\n\n .. versionchanged:: 0.19\n *store_covariance* has been moved to main constructor.\n\n .. 
versionchanged:: 0.19\n *tol* has been moved to main constructor.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n X, y = self._validate_data(\n X, y, ensure_min_samples=2, dtype=[np.float64, np.float32]\n )\n self.classes_ = unique_labels(y)\n n_samples, _ = X.shape\n n_classes = len(self.classes_)\n\n if n_samples == n_classes:\n raise ValueError(\n \"The number of samples must be more than the number of classes.\"\n )\n\n if self.priors is None: # estimate priors from sample\n _, y_t = np.unique(y, return_inverse=True) # non-negative ints\n self.priors_ = np.bincount(y_t) / float(len(y))\n else:\n self.priors_ = np.asarray(self.priors)\n\n if (self.priors_ < 0).any():\n raise ValueError(\"priors must be non-negative\")\n if not np.isclose(self.priors_.sum(), 1.0):\n warnings.warn(\"The priors do not sum to 1. Renormalizing\", UserWarning)\n self.priors_ = self.priors_ / self.priors_.sum()\n\n # Maximum number of components no matter what n_components is\n # specified:\n max_components = min(len(self.classes_) - 1, X.shape[1])\n\n if self.n_components is None:\n self._max_components = max_components\n else:\n if self.n_components > max_components:\n raise ValueError(\n \"n_components cannot be larger than min(n_features, n_classes - 1).\"\n )\n self._max_components = self.n_components\n\n if self.solver == \"svd\":\n if self.shrinkage is not None:\n raise NotImplementedError(\"shrinkage not supported\")\n if self.covariance_estimator is not None:\n raise ValueError(\n \"covariance estimator \"\n \"is not supported \"\n \"with svd solver. Try another solver\"\n )\n self._solve_svd(X, y)\n elif self.solver == \"lsqr\":\n self._solve_lsqr(\n X,\n y,\n shrinkage=self.shrinkage,\n covariance_estimator=self.covariance_estimator,\n )\n elif self.solver == \"eigen\":\n self._solve_eigen(\n X,\n y,\n shrinkage=self.shrinkage,\n covariance_estimator=self.covariance_estimator,\n )\n else:\n raise ValueError(\n \"unknown solver {} (valid solvers are 'svd', \"\n \"'lsqr', and 'eigen').\".format(self.solver)\n )\n if self.classes_.size == 2: # treat binary case as a special case\n self.coef_ = np.array(\n self.coef_[1, :] - self.coef_[0, :], ndmin=2, dtype=X.dtype\n )\n self.intercept_ = np.array(\n self.intercept_[1] - self.intercept_[0], ndmin=1, dtype=X.dtype\n )\n return self\n\n def transform(self, X):\n \"\"\"Project data to maximize class separation.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Transformed data.\n \"\"\"\n if self.solver == \"lsqr\":\n raise NotImplementedError(\n \"transform not implemented for 'lsqr' solver (use 'svd' or 'eigen').\"\n )\n check_is_fitted(self)\n\n X = self._validate_data(X, reset=False)\n if self.solver == \"svd\":\n X_new = np.dot(X - self.xbar_, self.scalings_)\n elif self.solver == \"eigen\":\n X_new = np.dot(X, self.scalings_)\n\n return X_new[:, : self._max_components]\n\n def predict_proba(self, X):\n \"\"\"Estimate probability.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n C : ndarray of shape (n_samples, n_classes)\n Estimated probabilities.\n \"\"\"\n check_is_fitted(self)\n\n decision = self.decision_function(X)\n if self.classes_.size == 2:\n proba = expit(decision)\n return np.vstack([1 
- proba, proba]).T\n else:\n return softmax(decision)\n\n def predict_log_proba(self, X):\n \"\"\"Estimate log probability.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n C : ndarray of shape (n_samples, n_classes)\n Estimated log probabilities.\n \"\"\"\n prediction = self.predict_proba(X)\n prediction[prediction == 0.0] += np.finfo(prediction.dtype).tiny\n return np.log(prediction)\n\n def decision_function(self, X):\n \"\"\"Apply decision function to an array of samples.\n\n The decision function is equal (up to a constant factor) to the\n log-posterior of the model, i.e. `log p(y = k | x)`. In a binary\n classification setting this instead corresponds to the difference\n `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Array of samples (test vectors).\n\n Returns\n -------\n C : ndarray of shape (n_samples,) or (n_samples, n_classes)\n Decision function values related to each class, per sample.\n In the two-class case, the shape is (n_samples,), giving the\n log likelihood ratio of the positive class.\n \"\"\"\n # Only override for the doc\n return super().decision_function(X)\n\n\nclass QuadraticDiscriminantAnalysis(ClassifierMixin, BaseEstimator):\n \"\"\"Quadratic Discriminant Analysis.\n\n A classifier with a quadratic decision boundary, generated\n by fitting class conditional densities to the data\n and using Bayes' rule.\n\n The model fits a Gaussian density to each class.\n\n .. versionadded:: 0.17\n *QuadraticDiscriminantAnalysis*\n\n Read more in the :ref:`User Guide <lda_qda>`.\n\n Parameters\n ----------\n priors : ndarray of shape (n_classes,), default=None\n Class priors. By default, the class proportions are inferred from the\n training data.\n\n reg_param : float, default=0.0\n Regularizes the per-class covariance estimates by transforming S2 as\n ``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``,\n where S2 corresponds to the `scaling_` attribute of a given class.\n\n store_covariance : bool, default=False\n If True, the class covariance matrices are explicitly computed and\n stored in the `self.covariance_` attribute.\n\n .. versionadded:: 0.17\n\n tol : float, default=1.0e-4\n Absolute threshold for a singular value to be considered significant,\n used to estimate the rank of `Xk` where `Xk` is the centered matrix\n of samples in class k. This parameter does not affect the\n predictions. It only controls a warning that is raised when features\n are considered to be colinear.\n\n .. versionadded:: 0.17\n\n Attributes\n ----------\n covariance_ : list of len n_classes of ndarray \\\n of shape (n_features, n_features)\n For each class, gives the covariance matrix estimated using the\n samples of that class. The estimations are unbiased. Only present if\n `store_covariance` is True.\n\n means_ : array-like of shape (n_classes, n_features)\n Class-wise means.\n\n priors_ : array-like of shape (n_classes,)\n Class priors (sum to 1).\n\n rotations_ : list of len n_classes of ndarray of shape (n_features, n_k)\n For each class k an array of shape (n_features, n_k), where\n ``n_k = min(n_features, number of elements in class k)``\n It is the rotation of the Gaussian distribution, i.e. its\n principal axis. 
It corresponds to `V`, the matrix of eigenvectors\n coming from the SVD of `Xk = U S Vt` where `Xk` is the centered\n matrix of samples from class k.\n\n scalings_ : list of len n_classes of ndarray of shape (n_k,)\n For each class, contains the scaling of\n the Gaussian distributions along its principal axes, i.e. the\n variance in the rotated coordinate system. It corresponds to `S^2 /\n (n_samples - 1)`, where `S` is the diagonal matrix of singular values\n from the SVD of `Xk`, where `Xk` is the centered matrix of samples\n from class k.\n\n classes_ : ndarray of shape (n_classes,)\n Unique class labels.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n LinearDiscriminantAnalysis : Linear Discriminant Analysis.\n\n Examples\n --------\n >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n >>> import numpy as np\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> y = np.array([1, 1, 1, 2, 2, 2])\n >>> clf = QuadraticDiscriminantAnalysis()\n >>> clf.fit(X, y)\n QuadraticDiscriminantAnalysis()\n >>> print(clf.predict([[-0.8, -1]]))\n [1]\n \"\"\"\n\n def __init__(\n self, *, priors=None, reg_param=0.0, store_covariance=False, tol=1.0e-4\n ):\n self.priors = np.asarray(priors) if priors is not None else None\n self.reg_param = reg_param\n self.store_covariance = store_covariance\n self.tol = tol\n\n def fit(self, X, y):\n \"\"\"Fit the model according to the given training data and parameters.\n\n .. versionchanged:: 0.19\n ``store_covariances`` has been moved to main constructor as\n ``store_covariance``\n\n .. 
versionchanged:: 0.19\n ``tol`` has been moved to main constructor.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values (integers).\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n X, y = self._validate_data(X, y)\n check_classification_targets(y)\n self.classes_, y = np.unique(y, return_inverse=True)\n n_samples, n_features = X.shape\n n_classes = len(self.classes_)\n if n_classes < 2:\n raise ValueError(\n \"The number of classes has to be greater than one; got %d class\"\n % (n_classes)\n )\n if self.priors is None:\n self.priors_ = np.bincount(y) / float(n_samples)\n else:\n self.priors_ = self.priors\n\n cov = None\n store_covariance = self.store_covariance\n if store_covariance:\n cov = []\n means = []\n scalings = []\n rotations = []\n for ind in range(n_classes):\n Xg = X[y == ind, :]\n meang = Xg.mean(0)\n means.append(meang)\n if len(Xg) == 1:\n raise ValueError(\n \"y has only 1 sample in class %s, covariance is ill defined.\"\n % str(self.classes_[ind])\n )\n Xgc = Xg - meang\n # Xgc = U * S * V.T\n _, S, Vt = np.linalg.svd(Xgc, full_matrices=False)\n rank = np.sum(S > self.tol)\n if rank < n_features:\n warnings.warn(\"Variables are collinear\")\n S2 = (S ** 2) / (len(Xg) - 1)\n S2 = ((1 - self.reg_param) * S2) + self.reg_param\n if self.store_covariance or store_covariance:\n # cov = V * (S^2 / (n-1)) * V.T\n cov.append(np.dot(S2 * Vt.T, Vt))\n scalings.append(S2)\n rotations.append(Vt.T)\n if self.store_covariance or store_covariance:\n self.covariance_ = cov\n self.means_ = np.asarray(means)\n self.scalings_ = scalings\n self.rotations_ = rotations\n return self\n\n def _decision_function(self, X):\n # return log posterior, see eq (4.12) p. 110 of the ESL.\n check_is_fitted(self)\n\n X = self._validate_data(X, reset=False)\n norm2 = []\n for i in range(len(self.classes_)):\n R = self.rotations_[i]\n S = self.scalings_[i]\n Xm = X - self.means_[i]\n X2 = np.dot(Xm, R * (S ** (-0.5)))\n norm2.append(np.sum(X2 ** 2, axis=1))\n norm2 = np.array(norm2).T # shape = [len(X), n_classes]\n u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])\n return -0.5 * (norm2 + u) + np.log(self.priors_)\n\n def decision_function(self, X):\n \"\"\"Apply decision function to an array of samples.\n\n The decision function is equal (up to a constant factor) to the\n log-posterior of the model, i.e. `log p(y = k | x)`. In a binary\n classification setting this instead corresponds to the difference\n `log p(y = 1 | x) - log p(y = 0 | x)`. 
See :ref:`lda_qda_math`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Array of samples (test vectors).\n\n Returns\n -------\n C : ndarray of shape (n_samples,) or (n_samples, n_classes)\n Decision function values related to each class, per sample.\n In the two-class case, the shape is (n_samples,), giving the\n log likelihood ratio of the positive class.\n \"\"\"\n dec_func = self._decision_function(X)\n # handle special case of two classes\n if len(self.classes_) == 2:\n return dec_func[:, 1] - dec_func[:, 0]\n return dec_func\n\n def predict(self, X):\n \"\"\"Perform classification on an array of test vectors X.\n\n The predicted class C for each sample in X is returned.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Vector to be scored, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n C : ndarray of shape (n_samples,)\n Estimated probabilities.\n \"\"\"\n d = self._decision_function(X)\n y_pred = self.classes_.take(d.argmax(1))\n return y_pred\n\n def predict_proba(self, X):\n \"\"\"Return posterior probabilities of classification.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Array of samples/test vectors.\n\n Returns\n -------\n C : ndarray of shape (n_samples, n_classes)\n Posterior probabilities of classification per class.\n \"\"\"\n values = self._decision_function(X)\n # compute the likelihood of the underlying gaussian models\n # up to a multiplicative constant.\n likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])\n # compute posterior probabilities\n return likelihood / likelihood.sum(axis=1)[:, np.newaxis]\n\n def predict_log_proba(self, X):\n \"\"\"Return log of posterior probabilities of classification.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Array of samples/test vectors.\n\n Returns\n -------\n C : ndarray of shape (n_samples, n_classes)\n Posterior log-probabilities of classification per class.\n \"\"\"\n # XXX : can do better to avoid precision overflows\n probas_ = self.predict_proba(X)\n return np.log(probas_)\n",
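Alongside the discriminant_analysis.py entry above, a small sketch contrasting the solvers documented there: 'lsqr' supports shrinkage but only classification, while 'eigen' (like 'svd') also provides supervised dimensionality reduction via `transform`. The synthetic dataset and its parameters are assumptions for illustration only.

from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = make_classification(n_samples=300, n_features=20, n_informative=5,
                           n_classes=3, n_clusters_per_class=1, random_state=0)

# 'lsqr' + Ledoit-Wolf shrinkage: classification only, no transform().
lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr", shrinkage="auto").fit(X, y)
print(lda_lsqr.score(X, y))

# 'eigen' also exposes supervised dimensionality reduction (here to n_classes - 1 = 2).
lda_eigen = LinearDiscriminantAnalysis(solver="eigen", shrinkage="auto",
                                       n_components=2).fit(X, y)
print(lda_eigen.transform(X).shape)  # (300, 2)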
"\"\"\"Modified Olivetti faces dataset.\n\nThe original database was available from (now defunct)\n\n https://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html\n\nThe version retrieved here comes in MATLAB format from the personal\nweb page of Sam Roweis:\n\n https://cs.nyu.edu/~roweis/\n\"\"\"\n\n# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>\n# License: BSD 3 clause\n\nfrom os.path import exists\nfrom os import makedirs, remove\n\nimport numpy as np\nfrom scipy.io.matlab import loadmat\nimport joblib\n\nfrom . import get_data_home\nfrom ._base import _fetch_remote\nfrom ._base import RemoteFileMetadata\nfrom ._base import _pkl_filepath\nfrom ._base import load_descr\nfrom ..utils import check_random_state, Bunch\n\n# The original data can be found at:\n# https://cs.nyu.edu/~roweis/data/olivettifaces.mat\nFACES = RemoteFileMetadata(\n filename=\"olivettifaces.mat\",\n url=\"https://ndownloader.figshare.com/files/5976027\",\n checksum=\"b612fb967f2dc77c9c62d3e1266e0c73d5fca46a4b8906c18e454d41af987794\",\n)\n\n\ndef fetch_olivetti_faces(\n *,\n data_home=None,\n shuffle=False,\n random_state=0,\n download_if_missing=True,\n return_X_y=False,\n):\n \"\"\"Load the Olivetti faces data-set from AT&T (classification).\n\n Download it if necessary.\n\n ================= =====================\n Classes 40\n Samples total 400\n Dimensionality 4096\n Features real, between 0 and 1\n ================= =====================\n\n Read more in the :ref:`User Guide <olivetti_faces_dataset>`.\n\n Parameters\n ----------\n data_home : str, default=None\n Specify another download and cache folder for the datasets. By default\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n shuffle : bool, default=False\n If True the order of the dataset is shuffled to avoid having\n images of the same person grouped.\n\n random_state : int, RandomState instance or None, default=0\n Determines random number generation for dataset shuffling. Pass an int\n for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n download_if_missing : bool, default=True\n If False, raise a IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n return_X_y : bool, default=False\n If True, returns `(data, target)` instead of a `Bunch` object. See\n below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.22\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data: ndarray, shape (400, 4096)\n Each row corresponds to a ravelled\n face image of original size 64 x 64 pixels.\n images : ndarray, shape (400, 64, 64)\n Each row is a face image\n corresponding to one of the 40 subjects of the dataset.\n target : ndarray, shape (400,)\n Labels associated to each face image.\n Those labels are ranging from 0-39 and correspond to the\n Subject IDs.\n DESCR : str\n Description of the modified Olivetti Faces Dataset.\n\n (data, target) : tuple if `return_X_y=True`\n .. 
versionadded:: 0.22\n \"\"\"\n data_home = get_data_home(data_home=data_home)\n if not exists(data_home):\n makedirs(data_home)\n filepath = _pkl_filepath(data_home, \"olivetti.pkz\")\n if not exists(filepath):\n if not download_if_missing:\n raise IOError(\"Data not found and `download_if_missing` is False\")\n\n print(\"downloading Olivetti faces from %s to %s\" % (FACES.url, data_home))\n mat_path = _fetch_remote(FACES, dirname=data_home)\n mfile = loadmat(file_name=mat_path)\n # delete raw .mat data\n remove(mat_path)\n\n faces = mfile[\"faces\"].T.copy()\n joblib.dump(faces, filepath, compress=6)\n del mfile\n else:\n faces = joblib.load(filepath)\n\n # We want floating point data, but float32 is enough (there is only\n # one byte of precision in the original uint8s anyway)\n faces = np.float32(faces)\n faces = faces - faces.min()\n faces /= faces.max()\n faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)\n # 10 images per class, 400 images total, each class is contiguous.\n target = np.array([i // 10 for i in range(400)])\n if shuffle:\n random_state = check_random_state(random_state)\n order = random_state.permutation(len(faces))\n faces = faces[order]\n target = target[order]\n faces_vectorized = faces.reshape(len(faces), -1)\n\n fdescr = load_descr(\"olivetti_faces.rst\")\n\n if return_X_y:\n return faces_vectorized, target\n\n return Bunch(data=faces_vectorized, images=faces, target=target, DESCR=fdescr)\n",
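For the Olivetti faces loader entry above, a brief usage sketch of the public `fetch_olivetti_faces` API. Note that the first call downloads and caches the data, so this sketch assumes network access (or a pre-populated cache).

from sklearn.datasets import fetch_olivetti_faces

# First call downloads the ~4 MB .mat file and caches it under data_home.
faces = fetch_olivetti_faces(shuffle=True, random_state=0)
print(faces.data.shape)    # (400, 4096): ravelled 64 x 64 grayscale images
print(faces.images.shape)  # (400, 64, 64)
print(faces.target.min(), faces.target.max())  # 0 39 (subject ids)

# return_X_y=True returns just the (data, target) pair.
X, y = fetch_olivetti_faces(return_X_y=True)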
"r\"\"\"\n=====================================\nMulti-class AdaBoosted Decision Trees\n=====================================\n\nThis example reproduces Figure 1 of Zhu et al [1]_ and shows how boosting can\nimprove prediction accuracy on a multi-class problem. The classification\ndataset is constructed by taking a ten-dimensional standard normal distribution\nand defining three classes separated by nested concentric ten-dimensional\nspheres such that roughly equal numbers of samples are in each class (quantiles\nof the :math:`\\chi^2` distribution).\n\nThe performance of the SAMME and SAMME.R [1]_ algorithms are compared. SAMME.R\nuses the probability estimates to update the additive model, while SAMME uses\nthe classifications only. As the example illustrates, the SAMME.R algorithm\ntypically converges faster than SAMME, achieving a lower test error with fewer\nboosting iterations. The error of each algorithm on the test set after each\nboosting iteration is shown on the left, the classification error on the test\nset of each tree is shown in the middle, and the boost weight of each tree is\nshown on the right. All trees have a weight of one in the SAMME.R algorithm and\ntherefore are not shown.\n\n.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, \"Multi-class AdaBoost\", 2009.\n\n\"\"\"\n\n# Author: Noel Dawe <[email protected]>\n#\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_gaussian_quantiles\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.tree import DecisionTreeClassifier\n\n\nX, y = make_gaussian_quantiles(\n n_samples=13000, n_features=10, n_classes=3, random_state=1\n)\n\nn_split = 3000\n\nX_train, X_test = X[:n_split], X[n_split:]\ny_train, y_test = y[:n_split], y[n_split:]\n\nbdt_real = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=2), n_estimators=600, learning_rate=1\n)\n\nbdt_discrete = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=2),\n n_estimators=600,\n learning_rate=1.5,\n algorithm=\"SAMME\",\n)\n\nbdt_real.fit(X_train, y_train)\nbdt_discrete.fit(X_train, y_train)\n\nreal_test_errors = []\ndiscrete_test_errors = []\n\nfor real_test_predict, discrete_train_predict in zip(\n bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)\n):\n real_test_errors.append(1.0 - accuracy_score(real_test_predict, y_test))\n discrete_test_errors.append(1.0 - accuracy_score(discrete_train_predict, y_test))\n\nn_trees_discrete = len(bdt_discrete)\nn_trees_real = len(bdt_real)\n\n# Boosting might terminate early, but the following arrays are always\n# n_estimators long. 
We crop them to the actual number of trees here:\ndiscrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]\nreal_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]\ndiscrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]\n\nplt.figure(figsize=(15, 5))\n\nplt.subplot(131)\nplt.plot(range(1, n_trees_discrete + 1), discrete_test_errors, c=\"black\", label=\"SAMME\")\nplt.plot(\n range(1, n_trees_real + 1),\n real_test_errors,\n c=\"black\",\n linestyle=\"dashed\",\n label=\"SAMME.R\",\n)\nplt.legend()\nplt.ylim(0.18, 0.62)\nplt.ylabel(\"Test Error\")\nplt.xlabel(\"Number of Trees\")\n\nplt.subplot(132)\nplt.plot(\n range(1, n_trees_discrete + 1),\n discrete_estimator_errors,\n \"b\",\n label=\"SAMME\",\n alpha=0.5,\n)\nplt.plot(\n range(1, n_trees_real + 1), real_estimator_errors, \"r\", label=\"SAMME.R\", alpha=0.5\n)\nplt.legend()\nplt.ylabel(\"Error\")\nplt.xlabel(\"Number of Trees\")\nplt.ylim((0.2, max(real_estimator_errors.max(), discrete_estimator_errors.max()) * 1.2))\nplt.xlim((-20, len(bdt_discrete) + 20))\n\nplt.subplot(133)\nplt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights, \"b\", label=\"SAMME\")\nplt.legend()\nplt.ylabel(\"Weight\")\nplt.xlabel(\"Number of Trees\")\nplt.ylim((0, discrete_estimator_weights.max() * 1.2))\nplt.xlim((-20, n_trees_discrete + 20))\n\n# prevent overlapping y-axis labels\nplt.subplots_adjust(wspace=0.25)\nplt.show()\n",
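As a compact, non-plotting variant of the AdaBoost example above, the sketch below fits both algorithms and reports only the final test accuracy. The smaller sample count and reduced number of estimators are assumptions made to keep the run short; they are not the settings used in the example itself.

from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = make_gaussian_quantiles(n_samples=4000, n_features=10, n_classes=3,
                               random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

for algorithm in ("SAMME", "SAMME.R"):
    clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),
                             n_estimators=100, algorithm=algorithm,
                             random_state=1)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    print(algorithm, accuracy_score(y_test, y_pred))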
"from itertools import product\n\nimport numpy as np\n\nfrom .. import confusion_matrix\nfrom ...utils import check_matplotlib_support\nfrom ...utils import deprecated\nfrom ...utils.multiclass import unique_labels\nfrom ...base import is_classifier\n\n\nclass ConfusionMatrixDisplay:\n \"\"\"Confusion Matrix visualization.\n\n It is recommend to use\n :func:`~sklearn.metrics.ConfusionMatrixDisplay.from_estimator` or\n :func:`~sklearn.metrics.ConfusionMatrixDisplay.from_predictions` to\n create a :class:`ConfusionMatrixDisplay`. All parameters are stored as\n attributes.\n\n Read more in the :ref:`User Guide <visualizations>`.\n\n Parameters\n ----------\n confusion_matrix : ndarray of shape (n_classes, n_classes)\n Confusion matrix.\n\n display_labels : ndarray of shape (n_classes,), default=None\n Display labels for plot. If None, display labels are set from 0 to\n `n_classes - 1`.\n\n Attributes\n ----------\n im_ : matplotlib AxesImage\n Image representing the confusion matrix.\n\n text_ : ndarray of shape (n_classes, n_classes), dtype=matplotlib Text, \\\n or None\n Array of matplotlib axes. `None` if `include_values` is false.\n\n ax_ : matplotlib Axes\n Axes with confusion matrix.\n\n figure_ : matplotlib Figure\n Figure containing the confusion matrix.\n\n See Also\n --------\n confusion_matrix : Compute Confusion Matrix to evaluate the accuracy of a\n classification.\n ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix\n given an estimator, the data, and the label.\n ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix\n given the true and predicted labels.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.svm import SVC\n >>> X, y = make_classification(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(X, y,\n ... random_state=0)\n >>> clf = SVC(random_state=0)\n >>> clf.fit(X_train, y_train)\n SVC(random_state=0)\n >>> predictions = clf.predict(X_test)\n >>> cm = confusion_matrix(y_test, predictions, labels=clf.classes_)\n >>> disp = ConfusionMatrixDisplay(confusion_matrix=cm,\n ... display_labels=clf.classes_)\n >>> disp.plot()\n <...>\n >>> plt.show()\n \"\"\"\n\n def __init__(self, confusion_matrix, *, display_labels=None):\n self.confusion_matrix = confusion_matrix\n self.display_labels = display_labels\n\n def plot(\n self,\n *,\n include_values=True,\n cmap=\"viridis\",\n xticks_rotation=\"horizontal\",\n values_format=None,\n ax=None,\n colorbar=True,\n ):\n \"\"\"Plot visualization.\n\n Parameters\n ----------\n include_values : bool, default=True\n Includes values in confusion matrix.\n\n cmap : str or matplotlib Colormap, default='viridis'\n Colormap recognized by matplotlib.\n\n xticks_rotation : {'vertical', 'horizontal'} or float, \\\n default='horizontal'\n Rotation of xtick labels.\n\n values_format : str, default=None\n Format specification for values in confusion matrix. If `None`,\n the format specification is 'd' or '.2g' whichever is shorter.\n\n ax : matplotlib axes, default=None\n Axes object to plot on. 
If `None`, a new figure and axes is\n created.\n\n colorbar : bool, default=True\n Whether or not to add a colorbar to the plot.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`\n \"\"\"\n check_matplotlib_support(\"ConfusionMatrixDisplay.plot\")\n import matplotlib.pyplot as plt\n\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = ax.figure\n\n cm = self.confusion_matrix\n n_classes = cm.shape[0]\n self.im_ = ax.imshow(cm, interpolation=\"nearest\", cmap=cmap)\n self.text_ = None\n cmap_min, cmap_max = self.im_.cmap(0), self.im_.cmap(1.0)\n\n if include_values:\n self.text_ = np.empty_like(cm, dtype=object)\n\n # print text with appropriate color depending on background\n thresh = (cm.max() + cm.min()) / 2.0\n\n for i, j in product(range(n_classes), range(n_classes)):\n color = cmap_max if cm[i, j] < thresh else cmap_min\n\n if values_format is None:\n text_cm = format(cm[i, j], \".2g\")\n if cm.dtype.kind != \"f\":\n text_d = format(cm[i, j], \"d\")\n if len(text_d) < len(text_cm):\n text_cm = text_d\n else:\n text_cm = format(cm[i, j], values_format)\n\n self.text_[i, j] = ax.text(\n j, i, text_cm, ha=\"center\", va=\"center\", color=color\n )\n\n if self.display_labels is None:\n display_labels = np.arange(n_classes)\n else:\n display_labels = self.display_labels\n if colorbar:\n fig.colorbar(self.im_, ax=ax)\n ax.set(\n xticks=np.arange(n_classes),\n yticks=np.arange(n_classes),\n xticklabels=display_labels,\n yticklabels=display_labels,\n ylabel=\"True label\",\n xlabel=\"Predicted label\",\n )\n\n ax.set_ylim((n_classes - 0.5, -0.5))\n plt.setp(ax.get_xticklabels(), rotation=xticks_rotation)\n\n self.figure_ = fig\n self.ax_ = ax\n return self\n\n @classmethod\n def from_estimator(\n cls,\n estimator,\n X,\n y,\n *,\n labels=None,\n sample_weight=None,\n normalize=None,\n display_labels=None,\n include_values=True,\n xticks_rotation=\"horizontal\",\n values_format=None,\n cmap=\"viridis\",\n ax=None,\n colorbar=True,\n ):\n \"\"\"Plot Confusion Matrix given an estimator and some data.\n\n Read more in the :ref:`User Guide <confusion_matrix>`.\n\n .. versionadded:: 1.0\n\n Parameters\n ----------\n estimator : estimator instance\n Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`\n in which the last estimator is a classifier.\n\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input values.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n labels : array-like of shape (n_classes,), default=None\n List of labels to index the confusion matrix. This may be used to\n reorder or select a subset of labels. If `None` is given, those\n that appear at least once in `y_true` or `y_pred` are used in\n sorted order.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n normalize : {'true', 'pred', 'all'}, default=None\n Either to normalize the counts display in the matrix:\n\n - if `'true'`, the confusion matrix is normalized over the true\n conditions (e.g. rows);\n - if `'pred'`, the confusion matrix is normalized over the\n predicted conditions (e.g. columns);\n - if `'all'`, the confusion matrix is normalized by the total\n number of samples;\n - if `None` (default), the confusion matrix will not be normalized.\n\n display_labels : array-like of shape (n_classes,), default=None\n Target names used for plotting. 
By default, `labels` will be used\n if it is defined, otherwise the unique labels of `y_true` and\n `y_pred` will be used.\n\n include_values : bool, default=True\n Includes values in confusion matrix.\n\n xticks_rotation : {'vertical', 'horizontal'} or float, \\\n default='horizontal'\n Rotation of xtick labels.\n\n values_format : str, default=None\n Format specification for values in confusion matrix. If `None`, the\n format specification is 'd' or '.2g' whichever is shorter.\n\n cmap : str or matplotlib Colormap, default='viridis'\n Colormap recognized by matplotlib.\n\n ax : matplotlib Axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n colorbar : bool, default=True\n Whether or not to add a colorbar to the plot.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`\n\n See Also\n --------\n ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix\n given the true and predicted labels.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import ConfusionMatrixDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.svm import SVC\n >>> X, y = make_classification(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> clf = SVC(random_state=0)\n >>> clf.fit(X_train, y_train)\n SVC(random_state=0)\n >>> ConfusionMatrixDisplay.from_estimator(\n ... clf, X_test, y_test)\n <...>\n >>> plt.show()\n \"\"\"\n method_name = f\"{cls.__name__}.from_estimator\"\n check_matplotlib_support(method_name)\n if not is_classifier(estimator):\n raise ValueError(f\"{method_name} only supports classifiers\")\n y_pred = estimator.predict(X)\n\n return cls.from_predictions(\n y,\n y_pred,\n sample_weight=sample_weight,\n labels=labels,\n normalize=normalize,\n display_labels=display_labels,\n include_values=include_values,\n cmap=cmap,\n ax=ax,\n xticks_rotation=xticks_rotation,\n values_format=values_format,\n colorbar=colorbar,\n )\n\n @classmethod\n def from_predictions(\n cls,\n y_true,\n y_pred,\n *,\n labels=None,\n sample_weight=None,\n normalize=None,\n display_labels=None,\n include_values=True,\n xticks_rotation=\"horizontal\",\n values_format=None,\n cmap=\"viridis\",\n ax=None,\n colorbar=True,\n ):\n \"\"\"Plot Confusion Matrix given true and predicted labels.\n\n Read more in the :ref:`User Guide <confusion_matrix>`.\n\n .. versionadded:: 0.24\n\n Parameters\n ----------\n y_true : array-like of shape (n_samples,)\n True labels.\n\n y_pred : array-like of shape (n_samples,)\n The predicted labels given by the method `predict` of an\n classifier.\n\n labels : array-like of shape (n_classes,), default=None\n List of labels to index the confusion matrix. This may be used to\n reorder or select a subset of labels. If `None` is given, those\n that appear at least once in `y_true` or `y_pred` are used in\n sorted order.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n normalize : {'true', 'pred', 'all'}, default=None\n Either to normalize the counts display in the matrix:\n\n - if `'true'`, the confusion matrix is normalized over the true\n conditions (e.g. rows);\n - if `'pred'`, the confusion matrix is normalized over the\n predicted conditions (e.g. 
columns);\n - if `'all'`, the confusion matrix is normalized by the total\n number of samples;\n - if `None` (default), the confusion matrix will not be normalized.\n\n display_labels : array-like of shape (n_classes,), default=None\n Target names used for plotting. By default, `labels` will be used\n if it is defined, otherwise the unique labels of `y_true` and\n `y_pred` will be used.\n\n include_values : bool, default=True\n Includes values in confusion matrix.\n\n xticks_rotation : {'vertical', 'horizontal'} or float, \\\n default='horizontal'\n Rotation of xtick labels.\n\n values_format : str, default=None\n Format specification for values in confusion matrix. If `None`, the\n format specification is 'd' or '.2g' whichever is shorter.\n\n cmap : str or matplotlib Colormap, default='viridis'\n Colormap recognized by matplotlib.\n\n ax : matplotlib Axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n colorbar : bool, default=True\n Whether or not to add a colorbar to the plot.\n\n Returns\n -------\n display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`\n\n See Also\n --------\n ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix\n given an estimator, the data, and the label.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import ConfusionMatrixDisplay\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.svm import SVC\n >>> X, y = make_classification(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=0)\n >>> clf = SVC(random_state=0)\n >>> clf.fit(X_train, y_train)\n SVC(random_state=0)\n >>> y_pred = clf.predict(X_test)\n >>> ConfusionMatrixDisplay.from_predictions(\n ... y_test, y_pred)\n <...>\n >>> plt.show()\n \"\"\"\n check_matplotlib_support(f\"{cls.__name__}.from_predictions\")\n\n if display_labels is None:\n if labels is None:\n display_labels = unique_labels(y_true, y_pred)\n else:\n display_labels = labels\n\n cm = confusion_matrix(\n y_true,\n y_pred,\n sample_weight=sample_weight,\n labels=labels,\n normalize=normalize,\n )\n\n disp = cls(confusion_matrix=cm, display_labels=display_labels)\n\n return disp.plot(\n include_values=include_values,\n cmap=cmap,\n ax=ax,\n xticks_rotation=xticks_rotation,\n values_format=values_format,\n colorbar=colorbar,\n )\n\n\n@deprecated(\n \"Function `plot_confusion_matrix` is deprecated in 1.0 and will be \"\n \"removed in 1.2. Use one of the class methods: \"\n \"ConfusionMatrixDisplay.from_predictions or \"\n \"ConfusionMatrixDisplay.from_estimator.\"\n)\ndef plot_confusion_matrix(\n estimator,\n X,\n y_true,\n *,\n labels=None,\n sample_weight=None,\n normalize=None,\n display_labels=None,\n include_values=True,\n xticks_rotation=\"horizontal\",\n values_format=None,\n cmap=\"viridis\",\n ax=None,\n colorbar=True,\n):\n \"\"\"Plot Confusion Matrix.\n\n Read more in the :ref:`User Guide <confusion_matrix>`.\n\n .. deprecated:: 1.0\n `plot_confusion_matrix` is deprecated in 1.0 and will be removed in\n 1.2. 
Use one of the following class methods:\n :func:`~sklearn.metrics.ConfusionMatrixDisplay.from_predictions` or\n :func:`~sklearn.metrics.ConfusionMatrixDisplay.from_estimator`.\n\n Parameters\n ----------\n estimator : estimator instance\n Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`\n in which the last estimator is a classifier.\n\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Input values.\n\n y_true : array-like of shape (n_samples,)\n Target values.\n\n labels : array-like of shape (n_classes,), default=None\n List of labels to index the matrix. This may be used to reorder or\n select a subset of labels. If `None` is given, those that appear at\n least once in `y_true` or `y_pred` are used in sorted order.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n normalize : {'true', 'pred', 'all'}, default=None\n Either to normalize the counts display in the matrix:\n\n - if `'true'`, the confusion matrix is normalized over the true\n conditions (e.g. rows);\n - if `'pred'`, the confusion matrix is normalized over the\n predicted conditions (e.g. columns);\n - if `'all'`, the confusion matrix is normalized by the total\n number of samples;\n - if `None` (default), the confusion matrix will not be normalized.\n\n display_labels : array-like of shape (n_classes,), default=None\n Target names used for plotting. By default, `labels` will be used if\n it is defined, otherwise the unique labels of `y_true` and `y_pred`\n will be used.\n\n include_values : bool, default=True\n Includes values in confusion matrix.\n\n xticks_rotation : {'vertical', 'horizontal'} or float, \\\n default='horizontal'\n Rotation of xtick labels.\n\n values_format : str, default=None\n Format specification for values in confusion matrix. If `None`,\n the format specification is 'd' or '.2g' whichever is shorter.\n\n cmap : str or matplotlib Colormap, default='viridis'\n Colormap recognized by matplotlib.\n\n ax : matplotlib Axes, default=None\n Axes object to plot on. If `None`, a new figure and axes is\n created.\n\n colorbar : bool, default=True\n Whether or not to add a colorbar to the plot.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`\n\n See Also\n --------\n confusion_matrix : Compute Confusion Matrix to evaluate the accuracy of a\n classification.\n ConfusionMatrixDisplay : Confusion Matrix visualization.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import make_classification\n >>> from sklearn.metrics import plot_confusion_matrix\n >>> from sklearn.model_selection import train_test_split\n >>> from sklearn.svm import SVC\n >>> X, y = make_classification(random_state=0)\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... 
X, y, random_state=0)\n >>> clf = SVC(random_state=0)\n >>> clf.fit(X_train, y_train)\n SVC(random_state=0)\n >>> plot_confusion_matrix(clf, X_test, y_test) # doctest: +SKIP\n >>> plt.show()\n \"\"\"\n check_matplotlib_support(\"plot_confusion_matrix\")\n\n if not is_classifier(estimator):\n raise ValueError(\"plot_confusion_matrix only supports classifiers\")\n\n y_pred = estimator.predict(X)\n cm = confusion_matrix(\n y_true, y_pred, sample_weight=sample_weight, labels=labels, normalize=normalize\n )\n\n if display_labels is None:\n if labels is None:\n display_labels = unique_labels(y_true, y_pred)\n else:\n display_labels = labels\n\n disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels)\n return disp.plot(\n include_values=include_values,\n cmap=cmap,\n ax=ax,\n xticks_rotation=xticks_rotation,\n values_format=values_format,\n colorbar=colorbar,\n )\n",
"\"\"\"\n==========================================\nOne-class SVM with non-linear kernel (RBF)\n==========================================\n\nAn example using a one-class SVM for novelty detection.\n\n:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised\nalgorithm that learns a decision function for novelty detection:\nclassifying new data as similar or different to the training set.\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager\nfrom sklearn import svm\n\nxx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))\n# Generate train data\nX = 0.3 * np.random.randn(100, 2)\nX_train = np.r_[X + 2, X - 2]\n# Generate some regular novel observations\nX = 0.3 * np.random.randn(20, 2)\nX_test = np.r_[X + 2, X - 2]\n# Generate some abnormal novel observations\nX_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))\n\n# fit the model\nclf = svm.OneClassSVM(nu=0.1, kernel=\"rbf\", gamma=0.1)\nclf.fit(X_train)\ny_pred_train = clf.predict(X_train)\ny_pred_test = clf.predict(X_test)\ny_pred_outliers = clf.predict(X_outliers)\nn_error_train = y_pred_train[y_pred_train == -1].size\nn_error_test = y_pred_test[y_pred_test == -1].size\nn_error_outliers = y_pred_outliers[y_pred_outliers == 1].size\n\n# plot the line, the points, and the nearest vectors to the plane\nZ = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\nZ = Z.reshape(xx.shape)\n\nplt.title(\"Novelty Detection\")\nplt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)\na = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors=\"darkred\")\nplt.contourf(xx, yy, Z, levels=[0, Z.max()], colors=\"palevioletred\")\n\ns = 40\nb1 = plt.scatter(X_train[:, 0], X_train[:, 1], c=\"white\", s=s, edgecolors=\"k\")\nb2 = plt.scatter(X_test[:, 0], X_test[:, 1], c=\"blueviolet\", s=s, edgecolors=\"k\")\nc = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c=\"gold\", s=s, edgecolors=\"k\")\nplt.axis(\"tight\")\nplt.xlim((-5, 5))\nplt.ylim((-5, 5))\nplt.legend(\n [a.collections[0], b1, b2, c],\n [\n \"learned frontier\",\n \"training observations\",\n \"new regular observations\",\n \"new abnormal observations\",\n ],\n loc=\"upper left\",\n prop=matplotlib.font_manager.FontProperties(size=11),\n)\nplt.xlabel(\n \"error train: %d/200 ; errors novel regular: %d/40 ; errors novel abnormal: %d/40\"\n % (n_error_train, n_error_test, n_error_outliers)\n)\nplt.show()\n",
"# Author: Christian Osendorfer <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# License: BSD3\n\nfrom itertools import combinations\n\nimport numpy as np\nimport pytest\n\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.decomposition import FactorAnalysis\nfrom sklearn.utils._testing import ignore_warnings\nfrom sklearn.decomposition._factor_analysis import _ortho_rotation\n\n\n# Ignore warnings from switching to more power iterations in randomized_svd\n@ignore_warnings\ndef test_factor_analysis():\n # Test FactorAnalysis ability to recover the data covariance structure\n rng = np.random.RandomState(0)\n n_samples, n_features, n_components = 20, 5, 3\n\n # Some random settings for the generative model\n W = rng.randn(n_components, n_features)\n # latent variable of dim 3, 20 of it\n h = rng.randn(n_samples, n_components)\n # using gamma to model different noise variance\n # per component\n noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)\n\n # generate observations\n # wlog, mean is 0\n X = np.dot(h, W) + noise\n\n with pytest.raises(ValueError):\n FactorAnalysis(svd_method=\"foo\")\n fa_fail = FactorAnalysis()\n fa_fail.svd_method = \"foo\"\n with pytest.raises(ValueError):\n fa_fail.fit(X)\n fas = []\n for method in [\"randomized\", \"lapack\"]:\n fa = FactorAnalysis(n_components=n_components, svd_method=method)\n fa.fit(X)\n fas.append(fa)\n\n X_t = fa.transform(X)\n assert X_t.shape == (n_samples, n_components)\n\n assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())\n assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))\n\n diff = np.all(np.diff(fa.loglike_))\n assert diff > 0.0, \"Log likelihood dif not increase\"\n\n # Sample Covariance\n scov = np.cov(X, rowvar=0.0, bias=1.0)\n\n # Model Covariance\n mcov = fa.get_covariance()\n diff = np.sum(np.abs(scov - mcov)) / W.size\n assert diff < 0.1, \"Mean absolute difference is %f\" % diff\n fa = FactorAnalysis(\n n_components=n_components, noise_variance_init=np.ones(n_features)\n )\n with pytest.raises(ValueError):\n fa.fit(X[:, :2])\n\n def f(x, y):\n return np.abs(getattr(x, y)) # sign will not be equal\n\n fa1, fa2 = fas\n for attr in [\"loglike_\", \"components_\", \"noise_variance_\"]:\n assert_almost_equal(f(fa1, attr), f(fa2, attr))\n\n fa1.max_iter = 1\n fa1.verbose = True\n with pytest.warns(ConvergenceWarning):\n fa1.fit(X)\n\n # Test get_covariance and get_precision with n_components == n_features\n # with n_components < n_features and with n_components == 0\n for n_components in [0, 2, X.shape[1]]:\n fa.n_components = n_components\n fa.fit(X)\n cov = fa.get_covariance()\n precision = fa.get_precision()\n assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)\n\n # test rotation\n n_components = 2\n\n results, projections = {}, {}\n for method in (None, \"varimax\", \"quartimax\"):\n fa_var = FactorAnalysis(n_components=n_components, rotation=method)\n results[method] = fa_var.fit_transform(X)\n projections[method] = fa_var.get_covariance()\n for rot1, rot2 in combinations([None, \"varimax\", \"quartimax\"], 2):\n assert not np.allclose(results[rot1], results[rot2])\n assert np.allclose(projections[rot1], projections[rot2], atol=3)\n\n with pytest.raises(ValueError):\n FactorAnalysis(rotation=\"not_implemented\").fit_transform(X)\n\n # test against R's psych::principal with rotate=\"varimax\"\n # (i.e., the values 
below stem from rotating the components in R)\n # R's factor analysis returns quite different values; therefore, we only\n # test the rotation itself\n factors = np.array(\n [\n [0.89421016, -0.35854928, -0.27770122, 0.03773647],\n [-0.45081822, -0.89132754, 0.0932195, -0.01787973],\n [0.99500666, -0.02031465, 0.05426497, -0.11539407],\n [0.96822861, -0.06299656, 0.24411001, 0.07540887],\n ]\n )\n r_solution = np.array(\n [[0.962, 0.052], [-0.141, 0.989], [0.949, -0.300], [0.937, -0.251]]\n )\n rotated = _ortho_rotation(factors[:, :n_components], method=\"varimax\").T\n assert_array_almost_equal(np.abs(rotated), np.abs(r_solution), decimal=3)\n",
"\"\"\"\nTests for DBSCAN clustering algorithm\n\"\"\"\n\nimport pickle\n\nimport numpy as np\n\nimport warnings\n\nfrom scipy.spatial import distance\nfrom scipy import sparse\n\nimport pytest\n\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.cluster import dbscan\nfrom sklearn.cluster.tests.common import generate_clustered_data\nfrom sklearn.metrics.pairwise import pairwise_distances\n\n\nn_clusters = 3\nX = generate_clustered_data(n_clusters=n_clusters)\n\n\ndef test_dbscan_similarity():\n # Tests the DBSCAN algorithm with a similarity array.\n # Parameters chosen specifically for this task.\n eps = 0.15\n min_samples = 10\n # Compute similarities\n D = distance.squareform(distance.pdist(X))\n D /= np.max(D)\n # Compute DBSCAN\n core_samples, labels = dbscan(\n D, metric=\"precomputed\", eps=eps, min_samples=min_samples\n )\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)\n\n assert n_clusters_1 == n_clusters\n\n db = DBSCAN(metric=\"precomputed\", eps=eps, min_samples=min_samples)\n labels = db.fit(D).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert n_clusters_2 == n_clusters\n\n\ndef test_dbscan_feature():\n # Tests the DBSCAN algorithm with a feature vector array.\n # Parameters chosen specifically for this task.\n # Different eps to other test, because distance is not normalised.\n eps = 0.8\n min_samples = 10\n metric = \"euclidean\"\n # Compute DBSCAN\n # parameters chosen for task\n core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples)\n\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - int(-1 in labels)\n assert n_clusters_1 == n_clusters\n\n db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)\n labels = db.fit(X).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert n_clusters_2 == n_clusters\n\n\ndef test_dbscan_sparse():\n core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=0.8, min_samples=10)\n core_dense, labels_dense = dbscan(X, eps=0.8, min_samples=10)\n assert_array_equal(core_dense, core_sparse)\n assert_array_equal(labels_dense, labels_sparse)\n\n\[email protected](\"include_self\", [False, True])\ndef test_dbscan_sparse_precomputed(include_self):\n D = pairwise_distances(X)\n nn = NearestNeighbors(radius=0.9).fit(X)\n X_ = X if include_self else None\n D_sparse = nn.radius_neighbors_graph(X=X_, mode=\"distance\")\n # Ensure it is sparse not merely on diagonals:\n assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)\n core_sparse, labels_sparse = dbscan(\n D_sparse, eps=0.8, min_samples=10, metric=\"precomputed\"\n )\n core_dense, labels_dense = dbscan(D, eps=0.8, min_samples=10, metric=\"precomputed\")\n assert_array_equal(core_dense, core_sparse)\n assert_array_equal(labels_dense, labels_sparse)\n\n\ndef test_dbscan_sparse_precomputed_different_eps():\n # test that precomputed neighbors graph is filtered if computed with\n # a radius larger than DBSCAN's eps.\n lower_eps = 0.2\n nn = NearestNeighbors(radius=lower_eps).fit(X)\n D_sparse = nn.radius_neighbors_graph(X, mode=\"distance\")\n dbscan_lower = dbscan(D_sparse, eps=lower_eps, metric=\"precomputed\")\n\n higher_eps = lower_eps + 0.7\n nn = NearestNeighbors(radius=higher_eps).fit(X)\n D_sparse = nn.radius_neighbors_graph(X, mode=\"distance\")\n dbscan_higher = dbscan(D_sparse, eps=lower_eps, metric=\"precomputed\")\n\n 
assert_array_equal(dbscan_lower[0], dbscan_higher[0])\n assert_array_equal(dbscan_lower[1], dbscan_higher[1])\n\n\[email protected](\"use_sparse\", [True, False])\[email protected](\"metric\", [\"precomputed\", \"minkowski\"])\ndef test_dbscan_input_not_modified(use_sparse, metric):\n # test that the input is not modified by dbscan\n X = np.random.RandomState(0).rand(10, 10)\n X = sparse.csr_matrix(X) if use_sparse else X\n X_copy = X.copy()\n dbscan(X, metric=metric)\n\n if use_sparse:\n assert_array_equal(X.toarray(), X_copy.toarray())\n else:\n assert_array_equal(X, X_copy)\n\n\ndef test_dbscan_no_core_samples():\n rng = np.random.RandomState(0)\n X = rng.rand(40, 10)\n X[X < 0.8] = 0\n\n for X_ in [X, sparse.csr_matrix(X)]:\n db = DBSCAN(min_samples=6).fit(X_)\n assert_array_equal(db.components_, np.empty((0, X_.shape[1])))\n assert_array_equal(db.labels_, -1)\n assert db.core_sample_indices_.shape == (0,)\n\n\ndef test_dbscan_callable():\n # Tests the DBSCAN algorithm with a callable metric.\n # Parameters chosen specifically for this task.\n # Different eps to other test, because distance is not normalised.\n eps = 0.8\n min_samples = 10\n # metric is the function reference, not the string key.\n metric = distance.euclidean\n # Compute DBSCAN\n # parameters chosen for task\n core_samples, labels = dbscan(\n X, metric=metric, eps=eps, min_samples=min_samples, algorithm=\"ball_tree\"\n )\n\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - int(-1 in labels)\n assert n_clusters_1 == n_clusters\n\n db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples, algorithm=\"ball_tree\")\n labels = db.fit(X).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert n_clusters_2 == n_clusters\n\n\ndef test_dbscan_metric_params():\n # Tests that DBSCAN works with the metrics_params argument.\n eps = 0.8\n min_samples = 10\n p = 1\n\n # Compute DBSCAN with metric_params arg\n\n with warnings.catch_warnings(record=True) as warns:\n db = DBSCAN(\n metric=\"minkowski\",\n metric_params={\"p\": p},\n eps=eps,\n p=None,\n min_samples=min_samples,\n algorithm=\"ball_tree\",\n ).fit(X)\n assert not warns, warns[0].message\n core_sample_1, labels_1 = db.core_sample_indices_, db.labels_\n\n # Test that sample labels are the same as passing Minkowski 'p' directly\n db = DBSCAN(\n metric=\"minkowski\", eps=eps, min_samples=min_samples, algorithm=\"ball_tree\", p=p\n ).fit(X)\n core_sample_2, labels_2 = db.core_sample_indices_, db.labels_\n\n assert_array_equal(core_sample_1, core_sample_2)\n assert_array_equal(labels_1, labels_2)\n\n # Minkowski with p=1 should be equivalent to Manhattan distance\n db = DBSCAN(\n metric=\"manhattan\", eps=eps, min_samples=min_samples, algorithm=\"ball_tree\"\n ).fit(X)\n core_sample_3, labels_3 = db.core_sample_indices_, db.labels_\n\n assert_array_equal(core_sample_1, core_sample_3)\n assert_array_equal(labels_1, labels_3)\n\n with pytest.warns(\n SyntaxWarning,\n match=(\n \"Parameter p is found in metric_params. 
\"\n \"The corresponding parameter from __init__ \"\n \"is ignored.\"\n ),\n ):\n # Test that checks p is ignored in favor of metric_params={'p': <val>}\n db = DBSCAN(\n metric=\"minkowski\",\n metric_params={\"p\": p},\n eps=eps,\n p=p + 1,\n min_samples=min_samples,\n algorithm=\"ball_tree\",\n ).fit(X)\n core_sample_4, labels_4 = db.core_sample_indices_, db.labels_\n\n assert_array_equal(core_sample_1, core_sample_4)\n assert_array_equal(labels_1, labels_4)\n\n\ndef test_dbscan_balltree():\n # Tests the DBSCAN algorithm with balltree for neighbor calculation.\n eps = 0.8\n min_samples = 10\n\n D = pairwise_distances(X)\n core_samples, labels = dbscan(\n D, metric=\"precomputed\", eps=eps, min_samples=min_samples\n )\n\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - int(-1 in labels)\n assert n_clusters_1 == n_clusters\n\n db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm=\"ball_tree\")\n labels = db.fit(X).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert n_clusters_2 == n_clusters\n\n db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm=\"kd_tree\")\n labels = db.fit(X).labels_\n\n n_clusters_3 = len(set(labels)) - int(-1 in labels)\n assert n_clusters_3 == n_clusters\n\n db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm=\"ball_tree\")\n labels = db.fit(X).labels_\n\n n_clusters_4 = len(set(labels)) - int(-1 in labels)\n assert n_clusters_4 == n_clusters\n\n db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples, algorithm=\"ball_tree\")\n labels = db.fit(X).labels_\n\n n_clusters_5 = len(set(labels)) - int(-1 in labels)\n assert n_clusters_5 == n_clusters\n\n\ndef test_input_validation():\n # DBSCAN.fit should accept a list of lists.\n X = [[1.0, 2.0], [3.0, 4.0]]\n DBSCAN().fit(X) # must not raise exception\n\n\[email protected](\n \"args\",\n [\n {\"algorithm\": \"blah\"},\n {\"metric\": \"blah\"},\n ],\n)\ndef test_dbscan_badargs(args):\n # Test bad argument values: these should all raise ValueErrors\n with pytest.raises(ValueError):\n dbscan(X, **args)\n\n\ndef test_pickle():\n obj = DBSCAN()\n s = pickle.dumps(obj)\n assert type(pickle.loads(s)) == obj.__class__\n\n\ndef test_boundaries():\n # ensure min_samples is inclusive of core point\n core, _ = dbscan([[0], [1]], eps=2, min_samples=2)\n assert 0 in core\n # ensure eps is inclusive of circumference\n core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)\n assert 0 in core\n core, _ = dbscan([[0], [1], [1]], eps=0.99, min_samples=2)\n assert 0 not in core\n\n\ndef test_weighted_dbscan():\n # ensure sample_weight is validated\n with pytest.raises(ValueError):\n dbscan([[0], [1]], sample_weight=[2])\n with pytest.raises(ValueError):\n dbscan([[0], [1]], sample_weight=[2, 3, 4])\n\n # ensure sample_weight has an effect\n assert_array_equal([], dbscan([[0], [1]], sample_weight=None, min_samples=6)[0])\n assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5], min_samples=6)[0])\n assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5], min_samples=6)[0])\n assert_array_equal(\n [0, 1], dbscan([[0], [1]], sample_weight=[6, 6], min_samples=6)[0]\n )\n\n # points within eps of each other:\n assert_array_equal(\n [0, 1], dbscan([[0], [1]], eps=1.5, sample_weight=[5, 1], min_samples=6)[0]\n )\n # and effect of non-positive and non-integer sample_weight:\n assert_array_equal(\n [], dbscan([[0], [1]], sample_weight=[5, 0], eps=1.5, min_samples=6)[0]\n )\n assert_array_equal(\n [0, 1], dbscan([[0], [1]], 
sample_weight=[5.9, 0.1], eps=1.5, min_samples=6)[0]\n )\n assert_array_equal(\n [0, 1], dbscan([[0], [1]], sample_weight=[6, 0], eps=1.5, min_samples=6)[0]\n )\n assert_array_equal(\n [], dbscan([[0], [1]], sample_weight=[6, -1], eps=1.5, min_samples=6)[0]\n )\n\n # for non-negative sample_weight, cores should be identical to repetition\n rng = np.random.RandomState(42)\n sample_weight = rng.randint(0, 5, X.shape[0])\n core1, label1 = dbscan(X, sample_weight=sample_weight)\n assert len(label1) == len(X)\n\n X_repeated = np.repeat(X, sample_weight, axis=0)\n core_repeated, label_repeated = dbscan(X_repeated)\n core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)\n core_repeated_mask[core_repeated] = True\n core_mask = np.zeros(X.shape[0], dtype=bool)\n core_mask[core1] = True\n assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)\n\n # sample_weight should work with precomputed distance matrix\n D = pairwise_distances(X)\n core3, label3 = dbscan(D, sample_weight=sample_weight, metric=\"precomputed\")\n assert_array_equal(core1, core3)\n assert_array_equal(label1, label3)\n\n # sample_weight should work with estimator\n est = DBSCAN().fit(X, sample_weight=sample_weight)\n core4 = est.core_sample_indices_\n label4 = est.labels_\n assert_array_equal(core1, core4)\n assert_array_equal(label1, label4)\n\n est = DBSCAN()\n label5 = est.fit_predict(X, sample_weight=sample_weight)\n core5 = est.core_sample_indices_\n assert_array_equal(core1, core5)\n assert_array_equal(label1, label5)\n assert_array_equal(label1, est.labels_)\n\n\[email protected](\"algorithm\", [\"brute\", \"kd_tree\", \"ball_tree\"])\ndef test_dbscan_core_samples_toy(algorithm):\n X = [[0], [2], [3], [4], [6], [8], [10]]\n n_samples = len(X)\n\n # Degenerate case: every sample is a core sample, either with its own\n # cluster or including other close core samples.\n core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=1)\n assert_array_equal(core_samples, np.arange(n_samples))\n assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])\n\n # With eps=1 and min_samples=2 only the 3 samples from the denser area\n # are core samples. All other points are isolated and considered noise.\n core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=2)\n assert_array_equal(core_samples, [1, 2, 3])\n assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])\n\n # Only the sample in the middle of the dense area is core. Its two\n # neighbors are edge samples. 
Remaining samples are noise.\n core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=3)\n assert_array_equal(core_samples, [2])\n assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])\n\n # It's no longer possible to extract core samples with eps=1:\n # everything is noise.\n core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=4)\n assert_array_equal(core_samples, [])\n assert_array_equal(labels, np.full(n_samples, -1.0))\n\n\ndef test_dbscan_precomputed_metric_with_degenerate_input_arrays():\n # see https://github.com/scikit-learn/scikit-learn/issues/4641 for\n # more details\n X = np.eye(10)\n labels = DBSCAN(eps=0.5, metric=\"precomputed\").fit(X).labels_\n assert len(set(labels)) == 1\n\n X = np.zeros((10, 10))\n labels = DBSCAN(eps=0.5, metric=\"precomputed\").fit(X).labels_\n assert len(set(labels)) == 1\n\n\ndef test_dbscan_precomputed_metric_with_initial_rows_zero():\n # sample matrix with initial two row all zero\n ar = np.array(\n [\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],\n [0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.3],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1],\n [0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0],\n ]\n )\n matrix = sparse.csr_matrix(ar)\n labels = DBSCAN(eps=0.2, metric=\"precomputed\", min_samples=2).fit(matrix).labels_\n assert_array_equal(labels, [-1, -1, 0, 0, 0, 1, 1])\n\n\[email protected](\n \"params, err_type, err_msg\",\n [\n ({\"eps\": -1.0}, ValueError, \"eps == -1.0, must be > 0.0.\"),\n ({\"eps\": 0.0}, ValueError, \"eps == 0.0, must be > 0.0.\"),\n ({\"min_samples\": 0}, ValueError, \"min_samples == 0, must be >= 1.\"),\n (\n {\"min_samples\": 1.5},\n TypeError,\n \"min_samples must be an instance of <class 'numbers.Integral'>, not <class\"\n \" 'float'>.\",\n ),\n ({\"min_samples\": -2}, ValueError, \"min_samples == -2, must be >= 1.\"),\n ({\"leaf_size\": 0}, ValueError, \"leaf_size == 0, must be >= 1.\"),\n (\n {\"leaf_size\": 2.5},\n TypeError,\n \"leaf_size must be an instance of <class 'numbers.Integral'>, not <class\"\n \" 'float'>.\",\n ),\n ({\"leaf_size\": -3}, ValueError, \"leaf_size == -3, must be >= 1.\"),\n ({\"p\": -2}, ValueError, \"p == -2, must be >= 0.0.\"),\n (\n {\"n_jobs\": 2.5},\n TypeError,\n \"n_jobs must be an instance of <class 'numbers.Integral'>, not <class\"\n \" 'float'>.\",\n ),\n ],\n)\ndef test_dbscan_params_validation(params, err_type, err_msg):\n \"\"\"Check the parameters validation in `DBSCAN`.\"\"\"\n with pytest.raises(err_type, match=err_msg):\n DBSCAN(**params).fit(X)\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n=====================\nClassifier comparison\n=====================\n\nA comparison of a several classifiers in scikit-learn on synthetic datasets.\nThe point of this example is to illustrate the nature of decision boundaries\nof different classifiers.\nThis should be taken with a grain of salt, as the intuition conveyed by\nthese examples does not necessarily carry over to real datasets.\n\nParticularly in high-dimensional spaces, data can more easily be separated\nlinearly and the simplicity of classifiers such as naive Bayes and linear SVMs\nmight lead to better generalization than is achieved by other classifiers.\n\nThe plots show training points in solid colors and testing points\nsemi-transparent. The lower right shows the classification accuracy on the test\nset.\n\n\"\"\"\n\n# Code source: Gaël Varoquaux\n# Andreas Müller\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.datasets import make_moons, make_circles, make_classification\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n\nh = 0.02 # step size in the mesh\n\nnames = [\n \"Nearest Neighbors\",\n \"Linear SVM\",\n \"RBF SVM\",\n \"Gaussian Process\",\n \"Decision Tree\",\n \"Random Forest\",\n \"Neural Net\",\n \"AdaBoost\",\n \"Naive Bayes\",\n \"QDA\",\n]\n\nclassifiers = [\n KNeighborsClassifier(3),\n SVC(kernel=\"linear\", C=0.025),\n SVC(gamma=2, C=1),\n GaussianProcessClassifier(1.0 * RBF(1.0)),\n DecisionTreeClassifier(max_depth=5),\n RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),\n MLPClassifier(alpha=1, max_iter=1000),\n AdaBoostClassifier(),\n GaussianNB(),\n QuadraticDiscriminantAnalysis(),\n]\n\nX, y = make_classification(\n n_features=2, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1\n)\nrng = np.random.RandomState(2)\nX += 2 * rng.uniform(size=X.shape)\nlinearly_separable = (X, y)\n\ndatasets = [\n make_moons(noise=0.3, random_state=0),\n make_circles(noise=0.2, factor=0.5, random_state=1),\n linearly_separable,\n]\n\nfigure = plt.figure(figsize=(27, 9))\ni = 1\n# iterate over datasets\nfor ds_cnt, ds in enumerate(datasets):\n # preprocess dataset, split into training and test part\n X, y = ds\n X = StandardScaler().fit_transform(X)\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.4, random_state=42\n )\n\n x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5\n y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n # just plot the dataset first\n cm = plt.cm.RdBu\n cm_bright = ListedColormap([\"#FF0000\", \"#0000FF\"])\n ax = plt.subplot(len(datasets), len(classifiers) + 1, i)\n if ds_cnt == 0:\n ax.set_title(\"Input data\")\n # Plot the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors=\"k\")\n # Plot the 
testing points\n ax.scatter(\n X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors=\"k\"\n )\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n i += 1\n\n # iterate over classifiers\n for name, clf in zip(names, classifiers):\n ax = plt.subplot(len(datasets), len(classifiers) + 1, i)\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, x_max]x[y_min, y_max].\n if hasattr(clf, \"decision_function\"):\n Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\n else:\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=0.8)\n\n # Plot the training points\n ax.scatter(\n X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors=\"k\"\n )\n # Plot the testing points\n ax.scatter(\n X_test[:, 0],\n X_test[:, 1],\n c=y_test,\n cmap=cm_bright,\n edgecolors=\"k\",\n alpha=0.6,\n )\n\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n if ds_cnt == 0:\n ax.set_title(name)\n ax.text(\n xx.max() - 0.3,\n yy.min() + 0.3,\n (\"%.2f\" % score).lstrip(\"0\"),\n size=15,\n horizontalalignment=\"right\",\n )\n i += 1\n\nplt.tight_layout()\nplt.show()\n",
"\"\"\"\n==============================================\nFeature agglomeration vs. univariate selection\n==============================================\n\nThis example compares 2 dimensionality reduction strategies:\n\n- univariate feature selection with Anova\n\n- feature agglomeration with Ward hierarchical clustering\n\nBoth methods are compared in a regression problem using\na BayesianRidge as supervised estimator.\n\n\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n# License: BSD 3 clause\n\nimport shutil\nimport tempfile\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import linalg, ndimage\nfrom joblib import Memory\n\nfrom sklearn.feature_extraction.image import grid_to_graph\nfrom sklearn import feature_selection\nfrom sklearn.cluster import FeatureAgglomeration\nfrom sklearn.linear_model import BayesianRidge\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import KFold\n\n# #############################################################################\n# Generate data\nn_samples = 200\nsize = 40 # image size\nroi_size = 15\nsnr = 5.0\nnp.random.seed(0)\nmask = np.ones([size, size], dtype=bool)\n\ncoef = np.zeros((size, size))\ncoef[0:roi_size, 0:roi_size] = -1.0\ncoef[-roi_size:, -roi_size:] = 1.0\n\nX = np.random.randn(n_samples, size ** 2)\nfor x in X: # smooth data\n x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()\nX -= X.mean(axis=0)\nX /= X.std(axis=0)\n\ny = np.dot(X, coef.ravel())\nnoise = np.random.randn(y.shape[0])\nnoise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.0)) / linalg.norm(noise, 2)\ny += noise_coef * noise # add noise\n\n# #############################################################################\n# Compute the coefs of a Bayesian Ridge with GridSearch\ncv = KFold(2) # cross-validation generator for model selection\nridge = BayesianRidge()\ncachedir = tempfile.mkdtemp()\nmem = Memory(location=cachedir, verbose=1)\n\n# Ward agglomeration followed by BayesianRidge\nconnectivity = grid_to_graph(n_x=size, n_y=size)\nward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity, memory=mem)\nclf = Pipeline([(\"ward\", ward), (\"ridge\", ridge)])\n# Select the optimal number of parcels with grid search\nclf = GridSearchCV(clf, {\"ward__n_clusters\": [10, 20, 30]}, n_jobs=1, cv=cv)\nclf.fit(X, y) # set the best parameters\ncoef_ = clf.best_estimator_.steps[-1][1].coef_\ncoef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)\ncoef_agglomeration_ = coef_.reshape(size, size)\n\n# Anova univariate feature selection followed by BayesianRidge\nf_regression = mem.cache(feature_selection.f_regression) # caching function\nanova = feature_selection.SelectPercentile(f_regression)\nclf = Pipeline([(\"anova\", anova), (\"ridge\", ridge)])\n# Select the optimal percentage of features with grid search\nclf = GridSearchCV(clf, {\"anova__percentile\": [5, 10, 20]}, cv=cv)\nclf.fit(X, y) # set the best parameters\ncoef_ = clf.best_estimator_.steps[-1][1].coef_\ncoef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))\ncoef_selection_ = coef_.reshape(size, size)\n\n# #############################################################################\n# Inverse the transformation to plot the results on an image\nplt.close(\"all\")\nplt.figure(figsize=(7.3, 2.7))\nplt.subplot(1, 3, 1)\nplt.imshow(coef, interpolation=\"nearest\", cmap=plt.cm.RdBu_r)\nplt.title(\"True weights\")\nplt.subplot(1, 3, 2)\nplt.imshow(coef_selection_, 
interpolation=\"nearest\", cmap=plt.cm.RdBu_r)\nplt.title(\"Feature Selection\")\nplt.subplot(1, 3, 3)\nplt.imshow(coef_agglomeration_, interpolation=\"nearest\", cmap=plt.cm.RdBu_r)\nplt.title(\"Feature Agglomeration\")\nplt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)\nplt.show()\n\n# Attempt to remove the temporary cachedir, but don't worry if it fails\nshutil.rmtree(cachedir, ignore_errors=True)\n"
] |
[
[
"sklearn.utils.fixes.delayed",
"sklearn.config_context",
"sklearn.get_config",
"sklearn.utils.fixes.parse_version",
"sklearn.set_config"
],
[
"numpy.dot",
"numpy.maximum",
"numpy.sqrt",
"numpy.eye",
"scipy.linalg.inv"
],
[
"sklearn.multioutput.ClassifierChain",
"matplotlib.pyplot.tight_layout",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.jaccard_score",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.subplots",
"sklearn.datasets.fetch_openml",
"sklearn.multiclass.OneVsRestClassifier",
"matplotlib.pyplot.show"
],
[
"numpy.sqrt",
"sklearn.metrics.cluster.calinski_harabasz_score",
"sklearn.metrics.cluster.silhouette_score",
"sklearn.metrics.cluster.silhouette_samples",
"numpy.unique",
"numpy.tril_indices",
"numpy.arange",
"scipy.sparse.dok_matrix",
"numpy.finfo",
"numpy.zeros",
"sklearn.utils._testing.assert_array_equal",
"numpy.isnan",
"sklearn.datasets.load_iris",
"scipy.sparse.csr_matrix",
"sklearn.metrics.cluster.davies_bouldin_score",
"numpy.random.RandomState",
"numpy.array",
"sklearn.metrics.pairwise_distances",
"numpy.ones",
"scipy.sparse.lil_matrix"
],
[
"scipy.sparse.isspmatrix",
"scipy.sparse.issparse",
"numpy.unique",
"scipy.sparse.eye",
"scipy.sparse.csgraph.laplacian",
"numpy.logical_or",
"scipy.linalg.eigh",
"scipy.sparse.linalg.eigsh",
"numpy.zeros",
"numpy.where",
"scipy.sparse.csgraph.connected_components"
],
[
"sklearn.datasets.make_classification",
"numpy.asarray",
"sklearn.ensemble.HistGradientBoostingClassifier",
"numpy.all",
"numpy.max",
"numpy.concatenate",
"sklearn.base.clone",
"sklearn.ensemble._hist_gradient_boosting.loss.LeastSquares",
"sklearn.ensemble._hist_gradient_boosting.loss.BinaryCrossEntropy",
"numpy.exp",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.ensemble._hist_gradient_boosting.binning._BinMapper",
"numpy.unique",
"sklearn.utils._openmp_helpers._openmp_effective_n_threads",
"numpy.arange",
"sklearn.base.is_regressor",
"numpy.zeros",
"sklearn.ensemble.HistGradientBoostingRegressor",
"numpy.isnan",
"sklearn.dummy.DummyRegressor",
"sklearn.model_selection.train_test_split",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.RandomState",
"sklearn.ensemble._hist_gradient_boosting.grower.TreeGrower",
"sklearn.model_selection.cross_val_score",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.preprocessing.KBinsDiscretizer",
"sklearn.datasets.make_low_rank_matrix",
"numpy.ones",
"numpy.testing.assert_array_equal",
"sklearn.datasets.make_regression",
"numpy.percentile"
],
[
"numpy.log",
"numpy.sqrt",
"numpy.asarray",
"scipy.sparse.csr_matrix",
"numpy.concatenate",
"numpy.size",
"numpy.any"
],
[
"numpy.dot",
"scipy.linalg.svd",
"numpy.sqrt",
"numpy.asarray",
"numpy.concatenate",
"numpy.linalg.svd",
"numpy.unique",
"scipy.linalg.lstsq",
"numpy.finfo",
"scipy.linalg.eigh",
"numpy.zeros",
"numpy.log",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.add.at",
"scipy.special.expit",
"numpy.empty",
"numpy.bincount",
"numpy.vstack"
],
[
"scipy.io.matlab.loadmat",
"numpy.float32"
],
[
"matplotlib.pyplot.legend",
"sklearn.datasets.make_gaussian_quantiles",
"matplotlib.pyplot.ylim",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.empty_like",
"numpy.arange",
"matplotlib.pyplot.subplots"
],
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.linspace",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.contour",
"sklearn.svm.OneClassSVM",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.axis",
"numpy.random.randn",
"numpy.random.uniform",
"matplotlib.pyplot.show"
],
[
"numpy.dot",
"numpy.allclose",
"numpy.abs",
"numpy.eye",
"sklearn.decomposition._factor_analysis._ortho_rotation",
"numpy.ones",
"sklearn.decomposition.FactorAnalysis",
"numpy.cov",
"numpy.diff",
"numpy.array",
"numpy.random.RandomState"
],
[
"numpy.array",
"sklearn.cluster.dbscan",
"sklearn.cluster.tests.common.generate_clustered_data",
"numpy.eye",
"numpy.arange",
"sklearn.cluster.DBSCAN",
"scipy.sparse.csr_matrix",
"numpy.full",
"numpy.max",
"numpy.empty",
"scipy.spatial.distance.pdist",
"sklearn.neighbors.NearestNeighbors",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.repeat",
"numpy.random.RandomState",
"sklearn.utils._testing.assert_array_equal",
"numpy.zeros",
"scipy.sparse.lil_matrix"
],
[
"sklearn.neural_network.MLPClassifier",
"sklearn.datasets.make_classification",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.tight_layout",
"sklearn.ensemble.RandomForestClassifier",
"numpy.arange",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.figure",
"sklearn.naive_bayes.GaussianNB",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.AdaBoostClassifier",
"matplotlib.colors.ListedColormap",
"sklearn.svm.SVC",
"sklearn.gaussian_process.kernels.RBF",
"numpy.random.RandomState",
"matplotlib.pyplot.show",
"sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis",
"sklearn.datasets.make_moons",
"sklearn.datasets.make_circles",
"sklearn.preprocessing.StandardScaler"
],
[
"matplotlib.pyplot.imshow",
"sklearn.model_selection.KFold",
"numpy.random.randn",
"numpy.exp",
"sklearn.pipeline.Pipeline",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"scipy.linalg.norm",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"sklearn.feature_extraction.image.grid_to_graph",
"sklearn.feature_selection.SelectPercentile",
"sklearn.linear_model.BayesianRidge",
"matplotlib.pyplot.show",
"sklearn.cluster.FeatureAgglomeration",
"sklearn.model_selection.GridSearchCV",
"numpy.random.seed",
"numpy.ones"
]
] |
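The feature-agglomeration example in the code column above tunes a pipeline step's parameter (`ward__n_clusters`) through `GridSearchCV`. As a minimal, self-contained sketch of scikit-learn's `<step_name>__<parameter>` addressing convention that the example relies on — illustrative only, not part of the dataset row; the toy data, sizes, and printed value are assumptions:

# Minimal sketch (assumed toy data, not the example's image data) of tuning a
# pipeline step's parameter via the "<step_name>__<parameter>" convention.
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.model_selection import GridSearchCV

rng = np.random.RandomState(0)
X = rng.randn(60, 16)                              # 60 samples, 16 features (toy sizes)
y = X[:, :4].sum(axis=1) + 0.1 * rng.randn(60)     # target depends on the first 4 features

pipe = Pipeline([("ward", FeatureAgglomeration(n_clusters=4)),
                 ("ridge", BayesianRidge())])
# Parameters of the "ward" step are addressed as "ward__<parameter>"
search = GridSearchCV(pipe, {"ward__n_clusters": [2, 4, 8]}, cv=3)
search.fit(X, y)
print(search.best_params_)                         # e.g. {'ward__n_clusters': 8}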
davidpneal/adventofcode
|
[
"f31b5132462b44aeadfdbcffe75f25215961a9ae"
] |
[
"2018/day11/day11p2.py"
] |
[
"#12/24/2018\n#Find the square which has the largest total power, the square can be anywhere from 1x1 to 300x300\n\n#The package numpy has some tools that can help with the multidimensional arrays and creating the summed area table\n#Note that numpy uses matrix indexing (i,j / row,col) vs cartesian indexing (x,y) --> if the matrix is printed out, it will be \"rotated\"\nimport numpy as np\n\n\n#Puzzle input\nserialNum = 9995\n\n\n\ndef calcCellPower(x,y,serialNum):\n\trackID = x + 10\n\tvalue = ((y * (x + 10)) + serialNum) * rackID\n\n\t#Keep the hundredths digit\n\ts = str(value) \n\thundreds = int(s[len(s)-3])\n\n\tpowerLevel = hundreds - 5\n\n\treturn powerLevel\n\n\n\ndef calcAreaPower(x,y,s):\n\t#Need to bound these params - if we are on the edge (ie, 0) will index outside the table!\n\t#This method will probably cause issues if the solution is near the edge of the grid, but works for the problem here\n\tif x == 0:\n\t\tx = 1\n\t\n\tif y == 0:\n\t\ty = 1\n\t\n\t#Must subtract 1 from the size (s) since the grid size is inclusive; ie, if the grid is 3x3, adding 3 would check a grid that is 4x4\n\tpower = sumTable[x+(s-1)][y+(s-1)] + sumTable[x-1][y-1] - sumTable[x-1][y+(s-1)] - sumTable[x+(s-1)][y-1]\n\treturn power\t\n\n\n\n#Create fuel grid: 300x300, use ints (defaults to float)\nfuelGrid = np.zeros(shape=(300,300),dtype=int)\n\n#Populate the values in the fuelGrid\nfor x in range(300):\n\tfor y in range(300):\n\t\tfuelGrid[x][y] = calcCellPower(x+1, y+1, serialNum)\n\n#Calculate summed area table\nsumTable = fuelGrid.cumsum(axis=0).cumsum(axis=1)\n\n\n#Find the square with the highest power rating, it is identified by the x,y coordinate in the upper left corner\nmax = 0\nfor s in range(299):\n\tfor x in range(300-s):\n\t\tfor y in range(300-s):\n\t\t\tsum = calcAreaPower(x,y,s)\n\t\t\tif sum > max:\n\t\t\t\tmax = sum\n\t\t\t\t#Add one to the answer since the matrix starts from 0\n\t\t\t\tloc = x+1,y+1,s\n\t\t\t\t##print(\"new max:\",max,loc)\n\t\t\nprint(\"Largest total power:\",loc)\nprint(\"Total power:\",max)\n\n\n\n''' MISC\n\nCorrect answer: 233,116,15 \n\n\n#print a partial grid\nfor x in range(10):\n\tprint(fuelGrid[x][:10])\n\t\n\t\n\t\n'''"
] |
[
[
"numpy.zeros"
]
] |
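The day11p2.py entry above builds a summed-area table with `cumsum(axis=0).cumsum(axis=1)` and then clamps x == 0 / y == 0 to avoid indexing outside the table, which its own comment flags as fragile near the grid edge. A minimal sketch of the same square-sum query with a zero-padded table, which removes that edge case — the toy random grid, seed, and the `square_sum` helper name are illustrative assumptions, not the puzzle input:

# Illustrative sketch: summed-area table with a leading zero row/column,
# so the inclusion-exclusion query needs no special-casing at x == 0 or y == 0.
import numpy as np

rng = np.random.default_rng(0)
grid = rng.integers(-5, 5, size=(300, 300))        # toy stand-in for the fuel grid

# S[a, b] = sum of grid[:a, :b]; the extra zero row/column covers a == 0 or b == 0.
S = np.zeros((301, 301), dtype=np.int64)
S[1:, 1:] = grid.cumsum(axis=0).cumsum(axis=1)

def square_sum(x, y, s):
    """Sum of the s-by-s square whose top-left corner is grid[x, y] (0-based)."""
    return S[x + s, y + s] + S[x, y] - S[x, y + s] - S[x + s, y]

# Spot-check against a direct sum over the same square.
assert square_sum(10, 20, 3) == grid[10:13, 20:23].sum()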
ddempsey/python_for_geoscientists
|
[
"428e2eaeb869f8478a3517d01a5fdff6de30e7d2"
] |
[
"2_visualisation/mesh_plot.py"
] |
[
"# import tools for 3D axes\nfrom matplotlib import pyplot as plt \nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm \nimport numpy as np \n\n# create a grid\nxg = np.linspace(0,1,31) # evenly spaced grid points\nyg = np.linspace(0,1,31)\nymin,ymax = [0.15,0.85] # create a smaller subgrid in the y-dir for coloring\ni1 = np.argmin(abs(yg-ymin))\ni2 = np.argmin(abs(yg-ymax))\nyg2 = yg[i1:i2+1] # subsample y coords\n[X,Y] = np.meshgrid(xg,yg) # create the two mesh grids\n[X2,Y2] = np.meshgrid(xg,yg2)\n\n# create a custom surface\n # parameters\nxm = np.mean(xg)*0.8\nym = np.mean(yg)*1.2\nsx = 0.02*3.\nsy = 0.04*3.\n # function defining the surface in terms of x, y and parameters\ndef r(X,Y): \n return (5-np.exp(-((X-xm)**2/sx+(Y-ym)**2/sy)))*(1-(X/4)**2)*(1+(Y/4)**2)\n\n# create a figure with a 3D projection\nfig = plt.figure(figsize=[15,8])\nax = fig.add_subplot(111, projection='3d')\n\n# plot the function as a wireframe over the large grid\nax.plot_wireframe(X, Y, r(X,Y), lw = 0.5, color = 'k')\n # shade part of the wireframe according to the function value\nCS = ax.plot_surface(X2, Y2, r(X2,Y2), rstride=1, cstride=1,cmap=cm.Oranges, lw = 0.5)\nplt.colorbar(CS, ax=ax)\n\n# display the interactive figure to the screen\nplt.show()"
] |
[
[
"numpy.linspace",
"matplotlib.pyplot.colorbar",
"numpy.mean",
"numpy.exp",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
ing-bank/popmon
|
[
"729d61a4bfe45715d3970326d28b70b09d7fc13a"
] |
[
"popmon/pipeline/report.py"
] |
[
"# Copyright (c) 2021 ING Wholesale Banking Advanced Analytics\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nimport logging\n\nimport pandas as pd\nfrom histogrammar.dfinterface.make_histograms import (\n get_bin_specs,\n get_time_axes,\n make_histograms,\n)\n\nfrom ..base import Module\nfrom ..config import config\nfrom ..pipeline.report_pipelines import (\n ReportPipe,\n expanding_reference,\n external_reference,\n rolling_reference,\n self_reference,\n)\nfrom ..resources import templates_env\n\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s %(levelname)s [%(module)s]: %(message)s\"\n)\nlogger = logging.getLogger()\n\n_report_pipeline = {\n \"self\": self_reference,\n \"external\": external_reference,\n \"rolling\": rolling_reference,\n \"expanding\": expanding_reference,\n}\n\n\ndef stability_report(\n hists,\n reference_type=\"self\",\n reference=None,\n time_axis=\"\",\n window=10,\n shift=1,\n monitoring_rules=None,\n pull_rules=None,\n features=None,\n skip_empty_plots=True,\n last_n=0,\n plot_hist_n=2,\n report_filepath=None,\n extended_report=True,\n show_stats=config[\"limited_stats\"],\n **kwargs,\n):\n \"\"\"Create a data stability monitoring html report for given dict of input histograms.\n\n :param dict hists: input histograms to be profiled and monitored over time.\n :param reference_type: type or reference used for comparisons. Options [self, external, rolling, expanding].\n default is 'self'.\n :param reference: histograms used as reference. default is None\n :param str time_axis: name of datetime feature, used as time axis, eg 'date'. auto-guessed when not provided.\n :param int window: size of rolling window and/or trend detection. default is 10.\n :param int shift: shift of time-bins in rolling/expanding window. default is 1.\n :param dict monitoring_rules: monitoring rules to generate traffic light alerts.\n The default setting is:\n\n .. code-block:: python\n\n monitoring_rules = {\"*_pull\": [7, 4, -4, -7],\n \"*_zscore\": [7, 4, -4, -7],\n \"[!p]*_unknown_labels\": [0.5, 0.5, 0, 0]}\n\n Note that the (filename based) wildcards such as * apply to all statistic names matching that pattern.\n For example, ``\"*_pull\"`` applies for all features to all statistics ending on \"_pull\".\n You can also specify rules for specific features and/or statistics by leaving out wildcard and putting the\n feature name in front. E.g.\n\n .. 
code-block:: python\n\n monitoring_rules = {\"featureA:*_pull\": [5, 3, -3, -5],\n \"featureA:nan\": [4, 1, 0, 0],\n \"*_pull\": [7, 4, -4, -7],\n \"nan\": [8, 1, 0, 0]}\n\n In case of multiple rules could apply for a feature's statistic, the most specific one applies.\n So in case of the statistic \"nan\": \"featureA:nan\" is used for \"featureA\", and the other \"nan\" rule\n for all other features.\n :param dict pull_rules: red and yellow (possibly dynamic) boundaries shown in plots in the report.\n Default is:\n\n .. code-block:: python\n\n pull_rules = {\"*_pull\": [7, 4, -4, -7]}\n\n This means that the shown yellow boundaries are at -4, +4 standard deviations around the (reference) mean,\n and the shown red boundaries are at -7, +7 standard deviations around the (reference) mean.\n Note that the (filename based) wildcards such as * apply to all statistic names matching that pattern.\n (The same string logic applies as for monitoring_rules.)\n :param list features: histograms to pick up from the 'hists' dictionary (default is all keys)\n :param bool skip_empty_plots: if false, also show empty plots in report with only nans or zeroes (optional)\n :param int last_n: plot statistic data for last 'n' periods (optional)\n :param int plot_hist_n: plot histograms for last 'n' periods. default is 2 (optional)\n :param str report_filepath: the file path where to output the report (optional)\n :param bool extended_report: if True, show all the generated statistics in the report (optional)\n :param list show_stats: list of statistic name patterns to show in the report. If None, show all (optional)\n :param kwargs: residual keyword arguments passed on to report pipeline.\n :return: dict with results of reporting pipeline\n \"\"\"\n # perform basic input checks\n reference_types = list(_report_pipeline.keys())\n if reference_type not in reference_types:\n raise ValueError(f\"reference_type should be one of {str(reference_types)}.\")\n if not isinstance(hists, dict):\n raise TypeError(\"hists should be a dict of histogrammar histograms.\")\n if reference_type == \"external\" and not isinstance(reference, dict):\n raise TypeError(\"reference should be a dict of histogrammar histograms.\")\n if not isinstance(monitoring_rules, dict):\n monitoring_rules = {\n \"*_pull\": [7, 4, -4, -7],\n \"*_zscore\": [7, 4, -4, -7],\n \"[!p]*_unknown_labels\": [0.5, 0.5, 0, 0],\n }\n if not isinstance(pull_rules, dict):\n pull_rules = {\"*_pull\": [7, 4, -4, -7]}\n\n if (isinstance(time_axis, str) and len(time_axis) == 0) or (\n isinstance(time_axis, bool) and time_axis\n ):\n # auto guess the time_axis: find the most frequent first column name in the histograms list\n first_cols = [k.split(\":\")[0] for k in list(hists.keys())]\n time_axis = max(set(first_cols), key=first_cols.count)\n\n # if limited report is selected, check if stats list is provided, if not, get a default minimal list\n show_stats = show_stats if not extended_report else None\n\n # configuration and datastore for report pipeline\n cfg = {\n \"hists_key\": \"hists\",\n \"ref_hists_key\": \"ref_hists\",\n \"time_axis\": time_axis,\n \"window\": window,\n \"shift\": shift,\n \"monitoring_rules\": monitoring_rules,\n \"pull_rules\": pull_rules,\n \"features\": features,\n \"skip_empty_plots\": skip_empty_plots,\n \"last_n\": last_n,\n \"plot_hist_n\": plot_hist_n,\n \"report_filepath\": report_filepath,\n \"show_stats\": show_stats,\n **kwargs,\n }\n\n datastore = {\"hists\": hists}\n if reference_type == \"external\":\n datastore[\"ref_hists\"] = 
reference\n\n # execute reporting pipeline\n pipeline = _report_pipeline[reference_type](**cfg)\n stability_report = StabilityReport()\n stability_report.transform(pipeline.transform(datastore))\n return stability_report\n\n\ndef df_stability_report(\n df,\n time_axis,\n features=None,\n binning=\"auto\",\n bin_specs=None,\n time_width=None,\n time_offset=0,\n var_dtype=None,\n reference_type=\"self\",\n reference=None,\n window=10,\n shift=1,\n monitoring_rules=None,\n pull_rules=None,\n skip_empty_plots=True,\n last_n=0,\n plot_hist_n=2,\n report_filepath=None,\n extended_report=True,\n show_stats=config[\"limited_stats\"],\n **kwargs,\n):\n \"\"\"Create a data stability monitoring html report for given pandas or spark dataframe.\n\n :param df: input pandas/spark dataframe to be profiled and monitored over time.\n :param str time_axis: name of datetime feature, used as time axis, eg 'date'. if True, will be auto-guessed.\n If time_axis is set or found, and if no features provided, features becomes: ['date:x', 'date:y', 'date:z'] etc.\n :param list features: columns to pick up from input data. (default is all features).\n For multi-dimensional histograms, separate the column names with a ':'. Example features list is:\n\n .. code-block:: python\n\n features = ['x', 'date', 'date:x', 'date:y', 'date:x:y']\n\n :param str binning: default binning to revert to in case bin_specs not supplied. options are:\n \"unit\" or \"auto\", default is \"auto\". When using \"auto\", semi-clever binning is automatically done.\n :param dict bin_specs: dictionaries used for rebinning numeric or timestamp features.\n An example bin_specs dictionary is:\n\n .. code-block:: python\n\n bin_specs = {'x': {'bin_width': 1, 'bin_offset': 0},\n 'y': {'num': 10, 'low': 0.0, 'high': 2.0},\n 'x:y': [{}, {'num': 5, 'low': 0.0, 'high': 1.0}]}\n\n In the bin specs for x:y, x is not provided (here) and reverts to the 1-dim setting.\n The 'bin_width', 'bin_offset' notation makes an open-ended histogram (for that feature) with given bin width\n and offset. The notation 'num', 'low', 'high' gives a fixed range histogram from 'low' to 'high' with 'num'\n number of bins.\n :param time_width: bin width of time axis. str or number (ns). note: bin_specs takes precedence. (optional)\n\n .. code-block:: text\n\n Examples: '1w', 3600e9 (number of ns),\n anything understood by pd.Timedelta(time_width).value\n\n :param time_offset: bin offset of time axis. str or number (ns). note: bin_specs takes precedence. (optional)\n\n .. code-block:: text\n\n Examples: '1-1-2020', 0 (number of ns since 1-1-1970),\n anything parsed by pd.Timestamp(time_offset).value\n\n :param dict var_dtype: dictionary with specified datatype per feature. auto-guessed when not provided.\n :param reference_type: type or reference used for comparisons. Options [self, external, rolling, expanding].\n default is 'self'.\n :param reference: reference dataframe or histograms. default is None\n :param int window: size of rolling window and/or trend detection. default is 10.\n :param int shift: shift of time-bins in rolling/expanding window. default is 1.\n :param dict monitoring_rules: monitoring rules to generate traffic light alerts.\n The default setting is:\n\n .. 
code-block:: python\n\n monitoring_rules = {\"*_pull\": [7, 4, -4, -7],\n \"*_zscore\": [7, 4, -4, -7],\n \"[!p]*_unknown_labels\": [0.5, 0.5, 0, 0]}\n\n Note that the (filename based) wildcards such as * apply to all statistic names matching that pattern.\n For example, ``\"*_pull\"`` applies for all features to all statistics ending on \"_pull\".\n You can also specify rules for specific features and/or statistics by leaving out wildcard and putting the\n feature name in front. E.g.\n\n .. code-block:: python\n\n monitoring_rules = {\"featureA:*_pull\": [5, 3, -3, -5],\n \"featureA:nan\": [4, 1, 0, 0],\n \"*_pull\": [7, 4, -4, -7],\n \"nan\": [8, 1, 0, 0]}\n\n In case of multiple rules could apply for a feature's statistic, the most specific one applies.\n So in case of the statistic \"nan\": \"featureA:nan\" is used for \"featureA\", and the other \"nan\" rule\n for all other features.\n :param dict pull_rules: red and yellow (possibly dynamic) boundaries shown in plots in the report.\n Default is:\n\n .. code-block:: python\n\n pull_rules = {\"*_pull\": [7, 4, -4, -7]}\n\n This means that the shown yellow boundaries are at -4, +4 standard deviations around the (reference) mean,\n and the shown red boundaries are at -7, +7 standard deviations around the (reference) mean.\n Note that the (filename based) wildcards such as * apply to all statistic names matching that pattern.\n (The same string logic applies as for monitoring_rules.)\n :param bool skip_empty_plots: if false, also show empty plots in report with only nans or zeroes (optional)\n :param int last_n: plot statistic data for last 'n' periods (optional)\n :param int plot_hist_n: plot histograms for last 'n' periods. default is 2 (optional)\n :param str report_filepath: the file path where to output the report (optional)\n :param bool extended_report: if True, show all the generated statistics in the report (optional)\n :param list show_stats: list of statistic name patterns to show in the report. If None, show all (optional)\n :param kwargs: residual keyword arguments, passed on to stability_report()\n :return: dict with results of reporting pipeline\n \"\"\"\n # basic checks on presence of time_axis\n if not (isinstance(time_axis, str) and len(time_axis) > 0) and not (\n isinstance(time_axis, bool) and time_axis\n ):\n raise ValueError(\"time_axis needs to be a filled string or set to True\")\n if isinstance(time_axis, str) and time_axis not in df.columns:\n raise ValueError(f'time_axis \"{time_axis}\" not found in columns of dataframe.')\n if reference is not None and not isinstance(reference, dict):\n if isinstance(time_axis, str) and time_axis not in reference.columns:\n raise ValueError(\n f'time_axis \"{time_axis}\" not found in columns of reference dataframe.'\n )\n if isinstance(time_axis, bool):\n time_axes = get_time_axes(df)\n num = len(time_axes)\n if num == 1:\n time_axis = time_axes[0]\n logger.info(f'Time-axis automatically set to \"{time_axis}\"')\n elif num == 0:\n raise ValueError(\n \"No obvious time-axes found. Cannot generate stability report.\"\n )\n else:\n raise ValueError(\n f\"Found {num} time-axes: {time_axes}. Set *one* time_axis manually!\"\n )\n if features is not None:\n # by now time_axis is defined. 
ensure that all histograms start with it.\n if not isinstance(features, list):\n raise TypeError(\n \"features should be list of columns (or combos) to pick up from input data.\"\n )\n features = [\n c if c.startswith(time_axis) else f\"{time_axis}:{c}\" for c in features\n ]\n\n # interpret time_width and time_offset\n if isinstance(time_width, (str, int, float)) and isinstance(\n time_offset, (str, int, float)\n ):\n if bin_specs is None:\n bin_specs = {}\n elif not isinstance(bin_specs, dict):\n raise ValueError(\"bin_specs object is not a dictionary\")\n\n if time_axis in bin_specs:\n raise ValueError(\n f'time-axis \"{time_axis}\" already found in binning specifications.'\n )\n # convert time width and offset to nanoseconds\n time_specs = {\n \"bin_width\": float(pd.Timedelta(time_width).value),\n \"bin_offset\": float(pd.Timestamp(time_offset).value),\n }\n bin_specs[time_axis] = time_specs\n\n reference_hists = None\n if reference is not None:\n reference_type = \"external\"\n if isinstance(reference, dict):\n # 1. reference is dict of histograms\n # extract features and bin_specs from reference histograms\n reference_hists = reference\n features = list(reference_hists.keys())\n bin_specs = get_bin_specs(reference_hists)\n else:\n # 2. reference is pandas or spark dataframe\n # generate histograms and return updated features, bin_specs, time_axis, etc.\n (\n reference_hists,\n features,\n bin_specs,\n time_axis,\n var_dtype,\n ) = make_histograms(\n reference,\n features,\n binning,\n bin_specs,\n time_axis,\n var_dtype,\n ret_specs=True,\n )\n\n # use the same features, bin_specs, time_axis, etc as for reference hists\n hists = make_histograms(\n df,\n features=features,\n binning=binning,\n bin_specs=bin_specs,\n time_axis=time_axis,\n var_dtype=var_dtype,\n )\n\n # generate data stability report\n return stability_report(\n hists,\n reference_type,\n reference_hists,\n time_axis,\n window,\n shift,\n monitoring_rules,\n pull_rules,\n features,\n skip_empty_plots,\n last_n,\n plot_hist_n,\n report_filepath,\n extended_report,\n show_stats,\n **kwargs,\n )\n\n\nclass StabilityReport(Module):\n \"\"\"Representation layer of the report.\n\n Stability report module wraps the representation functionality of the report\n after running the pipeline and generating the report. Report can be represented\n as a HTML string, HTML file or Jupyter notebook's cell output.\n \"\"\"\n\n def __init__(self, read_key=\"html_report\"):\n \"\"\"Initialize an instance of StabilityReport.\n\n :param str read_key: key of HTML report data to read from data store. default is html_report.\n \"\"\"\n super().__init__()\n self.read_key = read_key\n self.html_report = \"\"\n self.datastore = {}\n\n def transform(self, datastore):\n self.datastore = datastore\n self.html_report = self.get_datastore_object(datastore, self.read_key, str)\n\n def _repr_html_(self):\n \"\"\"HTML representation of the class (report) embedded in an iframe.\n\n :return HTML: HTML report in an iframe\n \"\"\"\n from IPython.core.display import display\n\n return display(self.to_notebook_iframe())\n\n def __repr__(self):\n \"\"\"Override so that Jupyter Notebook does not print the object.\"\"\"\n return \"\"\n\n def to_html(self, escape=False):\n \"\"\"HTML code representation of the report (represented as a string).\n\n :param bool escape: escape characters which could conflict with other HTML code. 
default: False\n :return str: HTML code of the report\n \"\"\"\n import html\n\n return html.escape(self.html_report) if escape else self.html_report\n\n def to_file(self, filename):\n \"\"\"Store HTML report in the local file system.\n\n :param str filename: filename for the HTML report\n \"\"\"\n with open(filename, \"w+\") as file:\n file.write(self.to_html())\n\n def to_notebook_iframe(self, width=\"100%\", height=\"100%\"):\n \"\"\"HTML representation of the class (report) embedded in an iframe.\n\n :param str width: width of the frame to be shown\n :param str height: height of the frame to be shown\n :return HTML: HTML report in an iframe\n \"\"\"\n from IPython.core.display import HTML\n\n # get iframe's snippet code, insert report's HTML code and display it as HTML\n return HTML(\n templates_env(\n filename=\"notebook_iframe.html\",\n src=self.to_html(escape=True),\n width=width,\n height=height,\n )\n )\n\n def regenerate(\n self,\n last_n=0,\n skip_first_n=0,\n skip_last_n=0,\n plot_hist_n=2,\n skip_empty_plots=True,\n report_filepath=None,\n store_key=\"html_report\",\n sections_key=\"report_sections\",\n extended_report=True,\n show_stats=config[\"limited_stats\"],\n ):\n \"\"\"Regenerate HTML report with different plot settings\n\n :param int last_n: plot statistic data for last 'n' periods (optional)\n :param int skip_first_n: in plot skip first 'n' periods. last_n takes precedence (optional)\n :param int skip_last_n: in plot skip last 'n' periods. last_n takes precedence (optional)\n :param int plot_hist_n: plot histograms for last 'n' periods. default is 2 (optional)\n :param bool skip_empty_plots: if false, also show empty plots in report with only nans or zeroes (optional)\n :param str report_filepath: the file path where to output the report (optional)\n :param str sections_key: key to store sections data in the datastore. default is 'report_sections'.\n :param str store_key: key to store the HTML report data in the datastore. default is 'html_report'\n :param bool extended_report: if True, show all the generated statistics in the report (optional)\n :param list show_stats: list of statistic name patterns to show in the report. If None, show all (optional)\n :return HTML: HTML report in an iframe\n \"\"\"\n # basic checks\n if not self.datastore:\n self.logger.warning(\"Empty datastore, cannot regenerate report.\")\n return None\n\n # start from clean slate\n if sections_key in self.datastore:\n del self.datastore[sections_key]\n if store_key in self.datastore:\n del self.datastore[store_key]\n\n # if limited report is selected, check if stats list is provided, if not, get a default minimal list\n show_stats = show_stats if not extended_report else None\n\n pipeline = ReportPipe(\n sections_key=sections_key,\n last_n=last_n,\n skip_first_n=skip_first_n,\n skip_last_n=skip_last_n,\n skip_empty_plots=skip_empty_plots,\n plot_hist_n=plot_hist_n,\n report_filepath=report_filepath,\n show_stats=show_stats,\n )\n stability_report = StabilityReport()\n stability_report.transform(pipeline.transform(self.datastore))\n return stability_report\n"
] |
[
[
"pandas.Timestamp",
"pandas.Timedelta"
]
] |
HanChangHun/dsn_fewshot
|
[
"dbe8d637bce1cb17bfb7c7fd7784bcdebb79085c",
"dbe8d637bce1cb17bfb7c7fd7784bcdebb79085c"
] |
[
"Conv4/algorithm/subspace_projection.py",
"Resnet12/models/protonet_embedding.py"
] |
[
"import torch\nimport torch.nn as nn\n\nclass Subspace_Projection(nn.Module):\n def __init__(self, num_dim=5):\n super().__init__()\n self.num_dim = num_dim\n\n def create_subspace(self, supportset_features, class_size, sample_size):\n all_hyper_planes = []\n means = []\n for ii in range(class_size):\n num_sample = sample_size\n all_support_within_class_t = supportset_features[ii]\n meann = torch.mean(all_support_within_class_t, dim=0)\n means.append(meann)\n all_support_within_class_t = all_support_within_class_t - meann.unsqueeze(0).repeat(num_sample, 1)\n all_support_within_class = torch.transpose(all_support_within_class_t, 0, 1)\n uu, s, v = torch.svd(all_support_within_class.double(), some=False)\n uu = uu.float()\n all_hyper_planes.append(uu[:, :self.num_dim])\n\n all_hyper_planes = torch.stack(all_hyper_planes, dim=0)\n means = torch.stack(means)\n\n if len(all_hyper_planes.size()) < 3:\n all_hyper_planes = all_hyper_planes.unsqueeze(-1)\n\n return all_hyper_planes, means\n\n\n def projection_metric(self, target_features, hyperplanes, mu):\n eps = 1e-12\n batch_size = target_features.shape[0]\n class_size = hyperplanes.shape[0]\n\n similarities = []\n\n discriminative_loss = 0.0\n\n for j in range(class_size):\n h_plane_j = hyperplanes[j].unsqueeze(0).repeat(batch_size, 1, 1)\n target_features_expanded = (target_features - mu[j].expand_as(target_features)).unsqueeze(-1)\n projected_query_j = torch.bmm(h_plane_j, torch.bmm(torch.transpose(h_plane_j, 1, 2), target_features_expanded))\n projected_query_j = torch.squeeze(projected_query_j) + mu[j].unsqueeze(0).repeat(batch_size, 1)\n projected_query_dist_inter = target_features - projected_query_j\n\n #Training per epoch is slower but less epochs in total\n query_loss = -torch.sqrt(torch.sum(projected_query_dist_inter * projected_query_dist_inter, dim=-1) + eps) # norm ||.||\n\n #Training per epoch is faster but more epochs in total\n #query_loss = -torch.sum(projected_query_dist_inter * projected_query_dist_inter, dim=-1) # Squared norm ||.||^2\n\n similarities.append(query_loss)\n\n for k in range(class_size):\n if j != k:\n temp_loss = torch.mm(torch.transpose(hyperplanes[j], 0, 1), hyperplanes[k]) ## discriminative subspaces (Conv4 only, ResNet12 is computationally expensive)\n discriminative_loss = discriminative_loss + torch.sum(temp_loss*temp_loss)\n\n similarities = torch.stack(similarities, dim=1)\n\n return similarities, discriminative_loss\n",
"import torch.nn as nn\nimport math\n\nclass ConvBlock(nn.Module):\n def __init__(self, in_channels, out_channels, retain_activation=True):\n super(ConvBlock, self).__init__()\n \n self.block = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(out_channels)\n )\n \n if retain_activation:\n self.block.add_module(\"ReLU\", nn.ReLU(inplace=True))\n self.block.add_module(\"MaxPool2d\", nn.MaxPool2d(kernel_size=2, stride=2, padding=0))\n \n def forward(self, x):\n out = self.block(x)\n return out\n\n# Embedding network used in Matching Networks (Vinyals et al., NIPS 2016), Meta-LSTM (Ravi & Larochelle, ICLR 2017),\n# MAML (w/ h_dim=z_dim=32) (Finn et al., ICML 2017), Prototypical Networks (Snell et al. NIPS 2017).\n\nclass ProtoNetEmbedding(nn.Module):\n def __init__(self, x_dim=3, h_dim=64, z_dim=64, retain_last_activation=True):\n super(ProtoNetEmbedding, self).__init__()\n self.encoder = nn.Sequential(\n ConvBlock(x_dim, h_dim),\n ConvBlock(h_dim, h_dim),\n ConvBlock(h_dim, h_dim),\n ConvBlock(h_dim, z_dim, retain_activation=retain_last_activation),\n )\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n x = self.encoder(x)\n return x.view(x.size(0), -1)"
] |
[
[
"torch.mean",
"torch.transpose",
"torch.sum",
"torch.stack",
"torch.squeeze"
],
[
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
]
] |
rosefiero/AI-102-AIEngineer
|
[
"6d2ffa3b578e600fee908fa93107f73f3d74ece3"
] |
[
"20-ocr/Python/read-text/read-text.py"
] |
[
"from dotenv import load_dotenv\nimport os\nimport time\nfrom PIL import Image, ImageDraw\nfrom matplotlib import pyplot as plt\n\n# Import namespaces\n# import namespaces\nfrom azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom azure.cognitiveservices.vision.computervision.models import OperationStatusCodes\nfrom msrest.authentication import CognitiveServicesCredentials\n\n\ndef main():\n\n global cv_client\n\n try:\n # Get Configuration Settings\n load_dotenv()\n cog_endpoint = os.getenv('COG_SERVICE_ENDPOINT')\n cog_key = os.getenv('COG_SERVICE_KEY')\n\n # Authenticate Computer Vision client\n credential = CognitiveServicesCredentials(cog_key) \n cv_client = ComputerVisionClient(cog_endpoint, credential)\n \n # Menu for text reading functions\n print('1: Use OCR API\\n2: Use Read API\\n3: Read handwriting\\nAny other key to quit')\n command = input('Enter a number:')\n if command == '1':\n image_file = os.path.join('images','Lincoln.jpg')\n GetTextOcr(image_file)\n elif command =='2':\n image_file = os.path.join('images','Rome.pdf')\n GetTextRead(image_file)\n elif command =='3':\n image_file = os.path.join('images','Note.jpg')\n GetTextRead(image_file)\n \n\n except Exception as ex:\n print(ex)\n\ndef GetTextOcr(image_file):\n print('Reading text in {}\\n'.format(image_file))\n # Use OCR API to read text in image\n with open(image_file, mode=\"rb\") as image_data:\n ocr_results = cv_client.recognize_printed_text_in_stream(image_data)\n\n # Prepare image for drawing\n fig = plt.figure(figsize=(7, 7))\n img = Image.open(image_file)\n draw = ImageDraw.Draw(img)\n\n # Process the text line by line\n for region in ocr_results.regions:\n for line in region.lines:\n\n # Show the position of the line of text\n l,t,w,h = list(map(int, line.bounding_box.split(',')))\n draw.rectangle(((l,t), (l+w, t+h)), outline='magenta', width=5)\n\n # Read the words in the line of text\n line_text = ''\n for word in line.words:\n line_text += word.text + ' '\n print(line_text.rstrip())\n\n # Save the image with the text locations highlighted\n plt.axis('off')\n plt.imshow(img)\n outputfile = 'ocr_results.jpg'\n fig.savefig(outputfile)\n print('Results saved in', outputfile)\n\n\n\ndef GetTextRead(image_file):\n print('Reading text in {}\\n'.format(image_file))\n # Use Read API to read text in image\n with open(image_file, mode=\"rb\") as image_data:\n read_op = cv_client.read_in_stream(image_data, raw=True)\n\n # Get the async operation ID so we can check for the results\n operation_location = read_op.headers[\"Operation-Location\"]\n operation_id = operation_location.split(\"/\")[-1]\n\n # Wait for the asynchronous operation to complete\n while True:\n read_results = cv_client.get_read_result(operation_id)\n if read_results.status not in [OperationStatusCodes.running, OperationStatusCodes.not_started]:\n break\n time.sleep(1)\n\n # If the operation was successfuly, process the text line by line\n if read_results.status == OperationStatusCodes.succeeded:\n for page in read_results.analyze_result.read_results:\n for line in page.lines:\n print(line.text)\n\n\n\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
]
] |
goncaloperes/bokeh
|
[
"b857d2d17d7c19779bb0a7be2601d8238fb1d5e9",
"894731860c53b7c9ddd0057dee85cf064278dc0e",
"894731860c53b7c9ddd0057dee85cf064278dc0e",
"894731860c53b7c9ddd0057dee85cf064278dc0e",
"b857d2d17d7c19779bb0a7be2601d8238fb1d5e9",
"b857d2d17d7c19779bb0a7be2601d8238fb1d5e9",
"894731860c53b7c9ddd0057dee85cf064278dc0e"
] |
[
"tests/unit/bokeh/core/property/test_primitive.py",
"examples/plotting/file/multi_legend.py",
"examples/plotting/file/toolbar_autohide.py",
"examples/plotting/file/candlestick.py",
"examples/app/movies/main.py",
"tests/unit/bokeh/util/test_hex.py",
"sphinx/source/docs/user_guide/examples/styling_legend_dimensions.py"
] |
[
"#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport pytest ; pytest\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# External imports\nimport numpy as np\n\n# Bokeh imports\nfrom _util_property import _TestHasProps, _TestModel\nfrom bokeh._testing.util.api import verify_all\n\n# Module under test\nimport bokeh.core.property.primitive as bcpp # isort:skip\n\n#-----------------------------------------------------------------------------\n# Setup\n#-----------------------------------------------------------------------------\n\nALL = (\n 'Bool',\n 'Complex',\n 'Int',\n 'Float',\n 'Null',\n 'String',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n\nclass Test_Bool:\n def test_valid(self) -> None:\n prop = bcpp.Bool()\n\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n assert prop.is_valid(np.bool8(False))\n assert prop.is_valid(np.bool8(True))\n\n def test_invalid(self) -> None:\n prop = bcpp.Bool()\n\n assert not prop.is_valid(None)\n assert not prop.is_valid(0)\n assert not prop.is_valid(1)\n assert not prop.is_valid(0.0)\n assert not prop.is_valid(1.0)\n assert not prop.is_valid(1.0+1.0j)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.int8(0))\n assert not prop.is_valid(np.int8(1))\n assert not prop.is_valid(np.int16(0))\n assert not prop.is_valid(np.int16(1))\n assert not prop.is_valid(np.int32(0))\n assert not prop.is_valid(np.int32(1))\n assert not prop.is_valid(np.int64(0))\n assert not prop.is_valid(np.int64(1))\n assert not prop.is_valid(np.uint8(0))\n assert not prop.is_valid(np.uint8(1))\n assert not prop.is_valid(np.uint16(0))\n assert not prop.is_valid(np.uint16(1))\n assert not prop.is_valid(np.uint32(0))\n assert not prop.is_valid(np.uint32(1))\n assert not prop.is_valid(np.uint64(0))\n assert not prop.is_valid(np.uint64(1))\n assert not prop.is_valid(np.float16(0))\n assert not prop.is_valid(np.float16(1))\n assert not prop.is_valid(np.float32(0))\n assert not prop.is_valid(np.float32(1))\n assert not prop.is_valid(np.float64(0))\n assert not prop.is_valid(np.float64(1))\n assert not prop.is_valid(np.complex64(1.0+1.0j))\n assert not prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert not prop.is_valid(np.complex256(1.0+1.0j))\n\n def test_has_ref(self) -> None:\n prop = bcpp.Bool()\n assert not prop.has_ref\n\n def test_str(self) -> None:\n prop = bcpp.Bool()\n assert str(prop) == \"Bool\"\n\n\nclass Test_Complex:\n def test_valid(self) -> None:\n prop = bcpp.Complex()\n\n assert prop.is_valid(0)\n assert prop.is_valid(1)\n assert prop.is_valid(0.0)\n assert prop.is_valid(1.0)\n assert prop.is_valid(1.0+1.0j)\n\n assert 
prop.is_valid(np.int8(0))\n assert prop.is_valid(np.int8(1))\n assert prop.is_valid(np.int16(0))\n assert prop.is_valid(np.int16(1))\n assert prop.is_valid(np.int32(0))\n assert prop.is_valid(np.int32(1))\n assert prop.is_valid(np.int64(0))\n assert prop.is_valid(np.int64(1))\n assert prop.is_valid(np.uint8(0))\n assert prop.is_valid(np.uint8(1))\n assert prop.is_valid(np.uint16(0))\n assert prop.is_valid(np.uint16(1))\n assert prop.is_valid(np.uint32(0))\n assert prop.is_valid(np.uint32(1))\n assert prop.is_valid(np.uint64(0))\n assert prop.is_valid(np.uint64(1))\n assert prop.is_valid(np.float16(0))\n assert prop.is_valid(np.float16(1))\n assert prop.is_valid(np.float32(0))\n assert prop.is_valid(np.float32(1))\n assert prop.is_valid(np.float64(0))\n assert prop.is_valid(np.float64(1))\n assert prop.is_valid(np.complex64(1.0+1.0j))\n assert prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert prop.is_valid(np.complex256(1.0+1.0j))\n\n # TODO (bev) should fail\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n def test_invalid(self) -> None:\n prop = bcpp.Complex()\n\n assert not prop.is_valid(None)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.bool8(False))\n assert not prop.is_valid(np.bool8(True))\n\n def test_has_ref(self) -> None:\n prop = bcpp.Complex()\n assert not prop.has_ref\n\n def test_str(self) -> None:\n prop = bcpp.Complex()\n assert str(prop) == \"Complex\"\n\n\nclass Test_Float:\n def test_valid(self) -> None:\n prop = bcpp.Float()\n\n assert prop.is_valid(0)\n assert prop.is_valid(1)\n assert prop.is_valid(0.0)\n assert prop.is_valid(1.0)\n\n assert prop.is_valid(np.int8(0))\n assert prop.is_valid(np.int8(1))\n assert prop.is_valid(np.int16(0))\n assert prop.is_valid(np.int16(1))\n assert prop.is_valid(np.int32(0))\n assert prop.is_valid(np.int32(1))\n assert prop.is_valid(np.int64(0))\n assert prop.is_valid(np.int64(1))\n assert prop.is_valid(np.uint8(0))\n assert prop.is_valid(np.uint8(1))\n assert prop.is_valid(np.uint16(0))\n assert prop.is_valid(np.uint16(1))\n assert prop.is_valid(np.uint32(0))\n assert prop.is_valid(np.uint32(1))\n assert prop.is_valid(np.uint64(0))\n assert prop.is_valid(np.uint64(1))\n assert prop.is_valid(np.float16(0))\n assert prop.is_valid(np.float16(1))\n assert prop.is_valid(np.float32(0))\n assert prop.is_valid(np.float32(1))\n assert prop.is_valid(np.float64(0))\n assert prop.is_valid(np.float64(1))\n\n # TODO (bev) should fail\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n def test_invalid(self) -> None:\n prop = bcpp.Float()\n\n assert not prop.is_valid(None)\n assert not prop.is_valid(1.0+1.0j)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.bool8(False))\n assert not prop.is_valid(np.bool8(True))\n assert not prop.is_valid(np.complex64(1.0+1.0j))\n assert not prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert not prop.is_valid(np.complex256(1.0+1.0j))\n\n def test_has_ref(self) -> None:\n prop = bcpp.Float()\n assert not prop.has_ref\n\n def test_str(self) -> None:\n prop = bcpp.Float()\n assert str(prop) == \"Float\"\n\n\nclass Test_Int:\n def test_valid(self) 
-> None:\n prop = bcpp.Int()\n\n assert prop.is_valid(0)\n assert prop.is_valid(1)\n\n assert prop.is_valid(np.int8(0))\n assert prop.is_valid(np.int8(1))\n assert prop.is_valid(np.int16(0))\n assert prop.is_valid(np.int16(1))\n assert prop.is_valid(np.int32(0))\n assert prop.is_valid(np.int32(1))\n assert prop.is_valid(np.int64(0))\n assert prop.is_valid(np.int64(1))\n assert prop.is_valid(np.uint8(0))\n assert prop.is_valid(np.uint8(1))\n assert prop.is_valid(np.uint16(0))\n assert prop.is_valid(np.uint16(1))\n assert prop.is_valid(np.uint32(0))\n assert prop.is_valid(np.uint32(1))\n assert prop.is_valid(np.uint64(0))\n assert prop.is_valid(np.uint64(1))\n\n # TODO (bev) should fail\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n def test_invalid(self) -> None:\n prop = bcpp.Int()\n\n assert not prop.is_valid(None)\n assert not prop.is_valid(0.0)\n assert not prop.is_valid(1.0)\n assert not prop.is_valid(1.0+1.0j)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.bool8(False))\n assert not prop.is_valid(np.bool8(True))\n assert not prop.is_valid(np.float16(0))\n assert not prop.is_valid(np.float16(1))\n assert not prop.is_valid(np.float32(0))\n assert not prop.is_valid(np.float32(1))\n assert not prop.is_valid(np.float64(0))\n assert not prop.is_valid(np.float64(1))\n assert not prop.is_valid(np.complex64(1.0+1.0j))\n assert not prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert not prop.is_valid(np.complex256(1.0+1.0j))\n\n def test_has_ref(self) -> None:\n prop = bcpp.Int()\n assert not prop.has_ref\n\n def test_str(self) -> None:\n prop = bcpp.Int()\n assert str(prop) == \"Int\"\n\n\nclass Test_String:\n def test_valid(self) -> None:\n prop = bcpp.String()\n\n assert prop.is_valid(\"\")\n assert prop.is_valid(\"6\")\n\n def test_invalid(self) -> None:\n prop = bcpp.String()\n\n assert not prop.is_valid(None)\n assert not prop.is_valid(False)\n assert not prop.is_valid(True)\n assert not prop.is_valid(0)\n assert not prop.is_valid(1)\n assert not prop.is_valid(0.0)\n assert not prop.is_valid(1.0)\n assert not prop.is_valid(1.0+1.0j)\n\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n def test_has_ref(self) -> None:\n prop = bcpp.String()\n assert not prop.has_ref\n\n def test_str(self) -> None:\n prop = bcpp.String()\n assert str(prop) == \"String\"\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\nTest___all__ = verify_all(bcpp, ALL)\n",
"import numpy as np\n\nfrom bokeh.models import Legend, LegendItem\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.sampledata.stocks import AAPL, MSFT\n\n\ndef datetime(x):\n return np.array(x, dtype=np.datetime64)\n\np = figure(background_fill_color=\"#fafafa\", x_axis_type=\"datetime\",\n plot_width=800, plot_height=350)\n\nr = p.multi_line([datetime(AAPL['date']), datetime(MSFT['date'])],\n [AAPL['adj_close'], MSFT['adj_close']],\n color=[\"navy\", \"crimson\"], line_width=2, alpha=0.6)\n\nlegend = Legend(items=[\n LegendItem(label=\"AAPL\", renderers=[r], index=0),\n LegendItem(label=\"MSFT\", renderers=[r], index=1),\n], location=\"top_left\")\np.add_layout(legend)\n\noutput_file(\"multi_legend.html\")\n\nshow(p)\n",
"import numpy as np\n\nfrom bokeh.layouts import row\nfrom bokeh.plotting import figure, output_file, show\n\nN = 1000\nx = np.random.random(size=N) * 100\ny = np.random.random(size=N) * 100\nradii = np.random.random(size=N) * 1.5\ncolors = [\"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)]\n\ndef make_plot(autohide=None):\n p = figure(width=300, height=300, title='Autohiding toolbar' if autohide else 'Not autohiding toolbar')\n p.scatter(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)\n p.toolbar.autohide = autohide\n return p\n\noutput_file(\"toolbar_autohide.html\", title=\"toolbar_autohide example\")\n\nshow(row(make_plot(True), make_plot(False)))\n",
"from math import pi\n\nimport pandas as pd\n\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.sampledata.stocks import MSFT\n\ndf = pd.DataFrame(MSFT)[:50]\ndf[\"date\"] = pd.to_datetime(df[\"date\"])\n\ninc = df.close > df.open\ndec = df.open > df.close\nw = 12*60*60*1000 # half day in ms\n\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save\"\n\np = figure(x_axis_type=\"datetime\", tools=TOOLS, plot_width=1000, title = \"MSFT Candlestick\")\np.xaxis.major_label_orientation = pi/4\np.grid.grid_line_alpha=0.3\n\np.segment(df.date, df.high, df.date, df.low, color=\"black\")\np.vbar(df.date[inc], w, df.open[inc], df.close[inc], fill_color=\"#D5E1DD\", line_color=\"black\")\np.vbar(df.date[dec], w, df.open[dec], df.close[dec], fill_color=\"#F2583E\", line_color=\"black\")\n\noutput_file(\"candlestick.html\", title=\"candlestick.py example\")\n\nshow(p) # open a browser\n",
"import sqlite3 as sql\nfrom os.path import dirname, join\n\nimport numpy as np\nimport pandas.io.sql as psql\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import column, row\nfrom bokeh.models import ColumnDataSource, Div, Select, Slider, TextInput\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.movies_data import movie_path\n\nconn = sql.connect(movie_path)\nquery = open(join(dirname(__file__), 'query.sql')).read()\nmovies = psql.read_sql(query, conn)\n\nmovies[\"color\"] = np.where(movies[\"Oscars\"] > 0, \"orange\", \"grey\")\nmovies[\"alpha\"] = np.where(movies[\"Oscars\"] > 0, 0.9, 0.25)\nmovies.fillna(0, inplace=True) # just replace missing values with zero\nmovies[\"revenue\"] = movies.BoxOffice.apply(lambda x: '{:,d}'.format(int(x)))\n\nwith open(join(dirname(__file__), \"razzies-clean.csv\")) as f:\n razzies = f.read().splitlines()\nmovies.loc[movies.imdbID.isin(razzies), \"color\"] = \"purple\"\nmovies.loc[movies.imdbID.isin(razzies), \"alpha\"] = 0.9\n\naxis_map = {\n \"Tomato Meter\": \"Meter\",\n \"Numeric Rating\": \"numericRating\",\n \"Number of Reviews\": \"Reviews\",\n \"Box Office (dollars)\": \"BoxOffice\",\n \"Length (minutes)\": \"Runtime\",\n \"Year\": \"Year\",\n}\n\ndesc = Div(text=open(join(dirname(__file__), \"description.html\")).read(), sizing_mode=\"stretch_width\")\n\n# Create Input controls\nreviews = Slider(title=\"Minimum number of reviews\", value=80, start=10, end=300, step=10)\nmin_year = Slider(title=\"Year released\", start=1940, end=2014, value=1970, step=1)\nmax_year = Slider(title=\"End Year released\", start=1940, end=2014, value=2014, step=1)\noscars = Slider(title=\"Minimum number of Oscar wins\", start=0, end=4, value=0, step=1)\nboxoffice = Slider(title=\"Dollars at Box Office (millions)\", start=0, end=800, value=0, step=1)\ngenre = Select(title=\"Genre\", value=\"All\",\n options=open(join(dirname(__file__), 'genres.txt')).read().split())\ndirector = TextInput(title=\"Director name contains\")\ncast = TextInput(title=\"Cast names contains\")\nx_axis = Select(title=\"X Axis\", options=sorted(axis_map.keys()), value=\"Tomato Meter\")\ny_axis = Select(title=\"Y Axis\", options=sorted(axis_map.keys()), value=\"Number of Reviews\")\n\n# Create Column Data Source that will be used by the plot\nsource = ColumnDataSource(data=dict(x=[], y=[], color=[], title=[], year=[], revenue=[], alpha=[]))\n\nTOOLTIPS=[\n (\"Title\", \"@title\"),\n (\"Year\", \"@year\"),\n (\"$\", \"@revenue\")\n]\n\np = figure(plot_height=600, plot_width=700, title=\"\", toolbar_location=None, tooltips=TOOLTIPS, sizing_mode=\"scale_both\")\np.circle(x=\"x\", y=\"y\", source=source, size=7, color=\"color\", line_color=None, fill_alpha=\"alpha\")\n\n\ndef select_movies():\n genre_val = genre.value\n director_val = director.value.strip()\n cast_val = cast.value.strip()\n selected = movies[\n (movies.Reviews >= reviews.value) &\n (movies.BoxOffice >= (boxoffice.value * 1e6)) &\n (movies.Year >= min_year.value) &\n (movies.Year <= max_year.value) &\n (movies.Oscars >= oscars.value)\n ]\n if (genre_val != \"All\"):\n selected = selected[selected.Genre.str.contains(genre_val)==True]\n if (director_val != \"\"):\n selected = selected[selected.Director.str.contains(director_val)==True]\n if (cast_val != \"\"):\n selected = selected[selected.Cast.str.contains(cast_val)==True]\n return selected\n\n\ndef update():\n df = select_movies()\n x_name = axis_map[x_axis.value]\n y_name = axis_map[y_axis.value]\n\n p.xaxis.axis_label = x_axis.value\n p.yaxis.axis_label = 
y_axis.value\n p.title.text = \"%d movies selected\" % len(df)\n source.data = dict(\n x=df[x_name],\n y=df[y_name],\n color=df[\"color\"],\n title=df[\"Title\"],\n year=df[\"Year\"],\n revenue=df[\"revenue\"],\n alpha=df[\"alpha\"],\n )\n\ncontrols = [reviews, boxoffice, genre, min_year, max_year, oscars, director, cast, x_axis, y_axis]\nfor control in controls:\n control.on_change('value', lambda attr, old, new: update())\n\ninputs = column(*controls, width=320)\n\nl = column(desc, row(inputs, p), sizing_mode=\"scale_both\")\n\nupdate() # initial load of the data\n\ncurdoc().add_root(l)\ncurdoc().title = \"Movies\"\n",
"#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport pytest ; pytest\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# External imports\nimport numpy as np\n\n# Module under test\nimport bokeh.util.hex as buh # isort:skip\n\n#-----------------------------------------------------------------------------\n# Setup\n#-----------------------------------------------------------------------------\n\nnp.random.seed(0)\nn = 500\nx = 2 + np.random.standard_normal(n)\ny = 2 + np.random.standard_normal(n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n\nclass Test_axial_to_cartesian:\n def test_default_aspect_pointytop(self) -> None:\n q = np.array([0, 0, 0, 1, -1, 1, -1])\n r = np.array([0, 1, -1, 0, 1, -1, 0])\n\n x, y = buh.axial_to_cartesian(q, r, 1, \"pointytop\")\n\n sq3 = np.sqrt(3)\n assert list(x) == [0, sq3/2, -sq3/2, sq3, -sq3/2, sq3/2, -sq3]\n assert list(y) == [-0.0, -1.5, 1.5, -0.0, -1.5, 1.5, -0.0]\n\n\n def test_default_aspect_flattop(self) -> None:\n q = np.array([0, 0, 0, 1, -1, 1, -1])\n r = np.array([0, 1, -1, 0, 1, -1, 0])\n\n x, y = buh.axial_to_cartesian(q, r, 1, \"flattop\")\n\n sq3 = np.sqrt(3)\n assert list(x) == [0.0, 0.0, 0.0, 1.5, -1.5, 1.5, -1.5]\n assert list(y) == [0, -sq3, sq3, -sq3/2, -sq3/2, sq3/2, sq3/2]\n\n\nclass Test_cartesian_to_axial:\n def test_default_aspect_pointytop(self) -> None:\n x = np.array([0, -2, 2, -1.5, -1.5, 1.5, 1.5])\n y = np.array([0, 0, 0, 1.5, -1.5, 1.5, -1.5])\n\n q, r = buh.cartesian_to_axial(x, y, 1, \"pointytop\")\n\n assert list(zip(q, r)) == [\n (0,0), (-1, 0), (1,0), (0,-1), (-1, 1), (1, -1), (0,1)\n ]\n\n def test_default_aspect_flattop(self) -> None:\n x = np.array([0, 0, 0, 1.5, -1.5, 1.5, -1.5])\n y = np.array([0, -2, 2, -1.5, -1.5, 1.5, 1.5])\n\n q, r = buh.cartesian_to_axial(x, y, 1, \"flattop\")\n\n assert list(zip(q, r)) == [\n (0,0), (0,1), (0,-1), (1, 0), (-1, 1), (1, -1), (-1,0)\n ]\n\n\nclass Test_hexbin:\n # hexbin requires pandas\n\n def test_gaussian_pointytop(self, pd) -> None:\n bins = buh.hexbin(x, y, 2)\n assert list(bins.q) == [0,0,1,1,1,2,2]\n assert list(bins.r) == [-1,0,-2,-1,0,-2,-1]\n assert list(bins.counts) == [9,54,1,313,98,3,22]\n\n assert bins.equals(buh.hexbin(x, y, 2, \"pointytop\"))\n\n def test_gaussian_flattop(self, pd) -> None:\n bins = buh.hexbin(x, y, 2, \"flattop\")\n assert list(bins.q) == [0, 0, 1, 1, 1, 2]\n assert list(bins.r) == [-1, 0, -2, -1, 0, -2]\n assert list(bins.counts) == [95, 57, 14, 324, 8, 2]\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private 
API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n",
"import numpy as np\n\nfrom bokeh.plotting import figure, output_file, show\n\nx = np.linspace(0, 4*np.pi, 100)\ny = np.sin(x)\n\noutput_file(\"legend_labels.html\")\n\np = figure()\n\np.circle(x, y, legend_label=\"sin(x)\")\np.line(x, y, legend_label=\"sin(x)\")\n\np.line(x, 2*y, legend_label=\"2*sin(x)\",\n line_dash=[4, 4], line_color=\"orange\", line_width=2)\n\np.square(x, 3*y, legend_label=\"3*sin(x)\", fill_color=None, line_color=\"green\")\np.line(x, 3*y, legend_label=\"3*sin(x)\", line_color=\"green\")\n\np.legend.label_standoff = 5\np.legend.glyph_width = 50\np.legend.spacing = 10\np.legend.padding = 50\np.legend.margin = 50\n\nshow(p)\n"
] |
[
[
"numpy.complex128",
"numpy.uint32",
"numpy.uint8",
"numpy.float16",
"numpy.int32",
"numpy.int8",
"numpy.int16",
"numpy.int64",
"numpy.uint16",
"numpy.uint64",
"numpy.bool8",
"numpy.float32",
"numpy.float64",
"numpy.complex256",
"numpy.complex64"
],
[
"numpy.array"
],
[
"numpy.random.random"
],
[
"pandas.to_datetime",
"pandas.DataFrame"
],
[
"numpy.where",
"pandas.io.sql.read_sql"
],
[
"numpy.random.standard_normal",
"numpy.array",
"numpy.sqrt",
"numpy.random.seed"
],
[
"numpy.linspace",
"numpy.sin"
]
] |
chinvib66/Niffler
|
[
"6fcf46c505249ac116b16ed2efda92685ba153c1"
] |
[
"modules/png-extraction/ImageExtractor.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nimport glob \nfrom shutil import copyfile\nimport hashlib\nimport json\nimport sys\nimport subprocess\nimport logging\nfrom multiprocessing import Pool\nimport pdb\nimport time\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport pydicom as dicom \nimport png\n# pydicom imports needed to handle data errors\nfrom pydicom import config\nfrom pydicom import datadict\nfrom pydicom import values \n\nimport pathlib\nconfigs = {}\n\n\ndef initialize_config_and_execute(config_values):\n global configs\n configs = config_values\n # Applying checks for paths\n \n p1 = pathlib.PurePath(configs['DICOMHome'])\n dicom_home = p1.as_posix() # the folder containing your dicom files\n\n p2 = pathlib.PurePath(configs['OutputDirectory'])\n output_directory = p2.as_posix()\n\n print_images = configs['PrintImages']\n print_only_common_headers = configs['CommonHeadersOnly']\n depth = int(configs['Depth'])\n processes = int(configs['UseProcesses']) # how many processes to use.\n flattened_to_level = configs['FlattenedToLevel']\n email = configs['YourEmail']\n send_email = configs['SendEmail']\n no_splits = int(configs['SplitIntoChunks'])\n is16Bit = configs['is16Bit']\n \n metadata_col_freq_threshold = 0.1\n\n png_destination = output_directory + '/extracted-images/'\n failed = output_directory + '/failed-dicom/'\n maps_directory = output_directory + '/maps/'\n meta_directory = output_directory + '/meta/'\n\n LOG_FILENAME = output_directory + '/ImageExtractor.out'\n pickle_file = output_directory + '/ImageExtractor.pickle'\n\n # record the start time\n t_start = time.time()\n\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)\n\n if not os.path.exists(maps_directory):\n os.makedirs(maps_directory)\n\n if not os.path.exists(meta_directory):\n os.makedirs(meta_directory)\n\n if not os.path.exists(png_destination):\n os.makedirs(png_destination)\n\n if not os.path.exists(failed):\n os.makedirs(failed)\n\n if not os.path.exists(failed + \"/1\"):\n os.makedirs(failed + \"/1\")\n\n if not os.path.exists(failed + \"/2\"):\n os.makedirs(failed + \"/2\")\n\n if not os.path.exists(failed + \"/3\"):\n os.makedirs(failed + \"/3\")\n\n if not os.path.exists(failed + \"/4\"):\n os.makedirs(failed + \"/4\")\n\n logging.info(\"------- Values Initialization DONE -------\")\n final_res = execute(pickle_file, dicom_home, output_directory, print_images, print_only_common_headers, depth,\n processes, flattened_to_level, email, send_email, no_splits, is16Bit, png_destination,\n failed, maps_directory, meta_directory, LOG_FILENAME, metadata_col_freq_threshold, t_start)\n return final_res\n\n\n# Function for getting tuple for field,val pairs\ndef get_tuples(plan, outlist = None, key = \"\"):\n if len(key)>0:\n key = key + \"_\"\n if not outlist:\n outlist = []\n for aa in plan.dir():\n try:\n hasattr(plan,aa)\n except TypeError as e:\n logging.warning('Type Error encountered')\n if hasattr(plan, aa) and aa!= 'PixelData':\n value = getattr(plan, aa)\n start = len(outlist)\n # if dicom sequence extract tags from each element\n if type(value) is dicom.sequence.Sequence:\n for nn, ss in enumerate(list(value)):\n newkey = \"_\".join([key,(\"%d\"%nn),aa]) if len(key) else \"_\".join([(\"%d\"%nn),aa])\n candidate = get_tuples(ss,outlist=None,key=newkey)\n # if extracted tuples are too big condense to a string\n if len(candidate)>2000:\n outlist.append((newkey,str(candidate)))\n 
else:\n outlist.extend(candidate)\n else:\n if type(value) is dicom.valuerep.DSfloat:\n value = float(value)\n elif type(value) is dicom.valuerep.IS:\n value = str(value)\n elif type(value) is dicom.valuerep.MultiValue:\n value = tuple(value)\n elif type(value) is dicom.uid.UID:\n value = str(value)\n outlist.append((key + aa, value))\n # appends name, value pair for this file. these are later concatenated to the dataframe\n return outlist\n\n\ndef extract_headers(f_list_elem):\n nn,ff = f_list_elem # unpack enumerated list\n plan = dicom.dcmread(ff, force=True) # reads in dicom file\n # checks if this file has an image\n c=True\n try:\n check = plan.pixel_array # throws error if dicom file has no image\n except:\n c = False\n kv = get_tuples(plan) # gets tuple for field,val pairs for this file. function defined above\n # dicom images should not have more than 300\n if len(kv)>500:\n logging.debug(str(len(kv)) + \" dicoms produced by \" + ff)\n kv.append(('file', f_list_elem[1])) # adds my custom field with the original filepath\n kv.append(('has_pix_array',c)) # adds my custom field with if file has image\n if c:\n # adds my custom category field - useful if classifying images before processing\n kv.append(('category','uncategorized'))\n else:\n kv.append(('category','no image')) # adds my custom category field, makes note as imageless\n return dict(kv)\n\n\n# Function to extract pixel array information\n# takes an integer used to index into the global filedata dataframe\n# returns tuple of\n# filemapping: dicom to png paths (as str)\n# fail_path: dicom to failed folder (as tuple)\n# found_err: error code produced when processing\ndef extract_images(filedata, i, png_destination, flattened_to_level, failed, is16Bit):\n ds = dicom.dcmread(filedata.iloc[i].loc['file'], force=True) # read file in\n found_err=None\n filemapping = \"\"\n fail_path = \"\"\n try:\n im = ds.pixel_array # pull image from read dicom\n imName=os.path.split(filedata.iloc[i].loc['file'])[1][:-4] # get file name ex: IM-0107-0022\n\n if flattened_to_level == 'patient':\n ID = filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n folderName = hashlib.sha224(ID.encode('utf-8')).hexdigest()\n # check for existence of patient folder. Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n elif flattened_to_level == 'study':\n ID1 = filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n try:\n ID2 = filedata.iloc[i].loc['StudyInstanceUID'] # Unique identifier for the Study.\n except:\n ID2='ALL-STUDIES'\n folderName = hashlib.sha224(ID1.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID2.encode('utf-8')).hexdigest()\n # check for existence of the folder tree patient/study/series. Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n else:\n ID1=filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n try:\n ID2=filedata.iloc[i].loc['StudyInstanceUID'] # Unique identifier for the Study.\n ID3=filedata.iloc[i].loc['SeriesInstanceUID'] # Unique identifier of the Series.\n except:\n ID2='ALL-STUDIES'\n ID3='ALL-SERIES'\n folderName = hashlib.sha224(ID1.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID2.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID3.encode('utf-8')).hexdigest()\n # check for existence of the folder tree patient/study/series. 
Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n\n\n pngfile = png_destination+folderName + '/' + hashlib.sha224(imName.encode('utf-8')).hexdigest() + '.png'\n dicom_path = filedata.iloc[i].loc['file']\n image_path = png_destination+folderName+'/' + hashlib.sha224(imName.encode('utf-8')).hexdigest() + '.png'\n if is16Bit == 'True' or is16Bit == 'true':\n # write the PNG file as a 16-bit greyscale \n image_2d = ds.pixel_array.astype(np.double) \n # # Rescaling grey scale between 0-255\n image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 65535.0 \n # # Convert to uint\n shape = ds.pixel_array.shape\n image_2d_scaled = np.uint16(image_2d_scaled) \n with open(pngfile , 'wb') as png_file:\n w = png.Writer(shape[1], shape[0], greyscale=True,bitdepth=16)\n w.write(png_file, image_2d_scaled)\n else: \n shape = ds.pixel_array.shape\n # Convert to float to avoid overflow or underflow losses.\n image_2d = ds.pixel_array.astype(float)\n # Rescaling grey scale between 0-255\n image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 255.0\n # onvert to uint\n image_2d_scaled = np.uint8(image_2d_scaled)\n # Write the PNG file\n with open(pngfile , 'wb') as png_file:\n w = png.Writer(shape[1], shape[0], greyscale=True)\n w.write(png_file, image_2d_scaled)\n filemapping = filedata.iloc[i].loc['file'] + ', ' + pngfile + '\\n'\n except AttributeError as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '1/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except ValueError as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '2/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except BaseException as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '3/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except Exception as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '4/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n return (filemapping, fail_path, found_err)\n\n\n# Function when pydicom fails to read a value attempt to read as other types.\ndef fix_mismatch_callback(raw_elem, **kwargs):\n try:\n if raw_elem.VR: \n values.convert_value(raw_elem.VR, raw_elem)\n except BaseException as err:\n for vr in kwargs['with_VRs']:\n try:\n values.convert_value(vr, raw_elem)\n except ValueError:\n pass\n else:\n raw_elem = raw_elem._replace(VR=vr)\n return raw_elem\n\n\ndef get_path(depth, dicom_home):\n directory = dicom_home + '/'\n i = 0\n while i < depth:\n directory += \"*/\"\n i += 1\n return directory + \"*.dcm\"\n\n \n# Function used by pydicom.\ndef fix_mismatch(with_VRs=['PN', 'DS', 'IS']):\n \"\"\"A callback function to check that RawDataElements are translatable\n with their provided VRs. If not, re-attempt translation using\n some other translators.\n Parameters\n ----------\n with_VRs : list, [['PN', 'DS', 'IS']]\n A list of VR strings to attempt if the raw data element value cannot\n be translated with the raw data element's VR.\n Returns\n -------\n No return value. 
The callback function will return either\n the original RawDataElement instance, or one with a fixed VR.\n \"\"\"\n dicom.config.data_element_callback = fix_mismatch_callback\n config.data_element_callback_kwargs = {\n 'with_VRs': with_VRs,\n } \n\n\ndef execute(pickle_file, dicom_home, output_directory, print_images, print_only_common_headers, depth,\n processes, flattened_to_level, email, send_email, no_splits, is16Bit, png_destination,\n failed, maps_directory, meta_directory, LOG_FILENAME, metadata_col_freq_threshold, t_start):\n err = None\n fix_mismatch()\n if processes == 0.5: # use half the cores to avoid high ram usage\n core_count = int(os.cpu_count()/2)\n elif processes == 0: # use all the cores\n core_count = int(os.cpu_count())\n elif processes < os.cpu_count(): # use the specified number of cores to avoid high ram usage\n core_count = processes\n else:\n core_count = int(os.cpu_count())\n # get set up to create dataframe\n dirs = os.listdir(dicom_home)\n # gets all dicom files. if editing this code, get filelist into the format of a list of strings,\n # with each string as the file path to a different dicom file.\n file_path = get_path(depth, dicom_home)\n\n if os.path.isfile(pickle_file):\n f=open(pickle_file,'rb')\n filelist=pickle.load(f)\n else:\n filelist=glob.glob(file_path, recursive=True) # search the folders at the depth we request and finds all dicoms\n pickle.dump(filelist,open(pickle_file,'wb'))\n file_chunks = np.array_split(filelist,no_splits)\n logging.info('Number of dicom files: ' + str(len(filelist)))\n\n try:\n ff = filelist[0] # load first file as a template to look at all\n except IndexError:\n logging.error(\"There is no file present in the given folder in \" + file_path)\n sys.exit(1)\n\n plan = dicom.dcmread(ff, force=True)\n logging.debug('Loaded the first file successfully')\n\n keys = [(aa) for aa in plan.dir() if (hasattr(plan, aa) and aa != 'PixelData')]\n # checks for images in fields and prints where they are\n for field in plan.dir():\n if (hasattr(plan, field) and field!='PixelData'):\n entry = getattr(plan, field)\n if type(entry) is bytes:\n logging.debug(field)\n logging.debug(str(entry))\n\n for i,chunk in enumerate(file_chunks):\n csv_destination = \"{}/meta/metadata_{}.csv\".format(output_directory,i)\n mappings = \"{}/maps/mapping_{}.csv\".format(output_directory,i)\n fm = open(mappings, \"w+\")\n filemapping = 'Original DICOM file location, PNG location \\n'\n fm.write(filemapping)\n\n # add a check to see if the metadata has already been extracted\n # step through whole file list, read in file, append fields to future dataframe of all files\n\n headerlist = []\n # start up a multi processing pool\n # for every item in filelist send data to a subprocess and run extract_headers func\n # output is then added to headerlist as they are completed (no ordering is done)\n with Pool(core_count) as p:\n res= p.imap_unordered(extract_headers, enumerate(chunk))\n for i,e in enumerate(res):\n headerlist.append(e)\n data = pd.DataFrame(headerlist)\n logging.info('Chunk ' + str(i) + ' Number of fields per file : ' + str(len(data.columns)))\n # find common fields\n # make dataframe containing all fields and all files minus those removed in previous block\n # export csv file of final dataframe\n export_csv = data.to_csv(csv_destination, index = None, header=True)\n fields=data.keys()\n count = 0 # potential painpoint\n # writting of log handled by main process\n if print_images:\n logging.info(\"Start processing Images\")\n filedata = data\n total = 
len(chunk)\n stamp = time.time()\n for i in range(len(filedata)):\n (fmap,fail_path,err) = extract_images(filedata, i, png_destination, flattened_to_level, failed, is16Bit)\n if err:\n count +=1\n copyfile(fail_path[0],fail_path[1])\n err_msg = str(count) + ' out of ' + str(len(chunk)) + ' dicom images have failed extraction'\n logging.error(err_msg)\n else:\n fm.write(fmap)\n fm.close()\n logging.info('Chunk run time: %s %s', time.time() - t_start, ' seconds!')\n\n logging.info('Generating final metadata file')\n\n col_names = dict()\n all_headers = dict()\n total_length = 0\n\n metas = glob.glob( \"{}*.csv\".format(meta_directory))\n # for each meta file identify the columns that are not na's for at least 10% (metadata_col_freq_threshold) of data\n for meta in metas:\n m = pd.read_csv(meta,dtype='str')\n d_len = m.shape[0]\n total_length += d_len\n\n for e in m.columns:\n col_pop = d_len - np.sum(m[e].isna()) # number of populated rows for this column in this metadata file\n\n if e in col_names:\n col_names[e] += col_pop\n else:\n col_names[e] = col_pop\n \n # all_headers keeps track of number of appearances of each header. We later use this count to ensure that\n # the headers we use are present in all metadata files.\n if e in all_headers:\n all_headers[e] += 1\n else:\n all_headers[e] = 1\n\n loadable_names = list()\n for k in col_names.keys():\n if k in all_headers and all_headers[k] >= no_splits: # no_splits == number of batches used \n if col_names[k] >= metadata_col_freq_threshold*total_length:\n loadable_names.append(k) # use header only if it's present in every metadata file\n \n # load every metadata file using only valid columns\n meta_list = list()\n for meta in metas:\n m = pd.read_csv(meta,dtype='str',usecols=loadable_names)\n meta_list.append(m)\n merged_meta = pd.concat(meta_list,ignore_index=True)\n merged_meta.to_csv('{}/metadata.csv'.format(output_directory),index=False)\n # getting a single mapping file\n logging.info('Generatign final mapping file')\n mappings = glob.glob(\"{}/maps/*.csv\".format(output_directory))\n map_list = list()\n for mapping in mappings:\n map_list.append(pd.read_csv(mapping,dtype='str'))\n merged_maps = pd.concat(map_list,ignore_index=True)\n if print_only_common_headers == 'True' or print_only_common_headers == 'true':\n mask_common_fields = merged_maps.isnull().mean() < 0.1\n common_fields = set(np.asarray(merged_maps.columns)[mask_common_fields])\n merged_maps = merged_maps[common_fields]\n merged_maps.to_csv('{}/mapping.csv'.format(output_directory),index=False)\n\n if send_email == 'True' or send_email == 'true':\n subprocess.call('echo \"Niffler has successfully completed the png conversion\" | mail -s \"The image conversion'\n ' has been complete\" {0}'.format(email), shell=True)\n # Record the total run-time\n logging.info('Total run time: %s %s', time.time() - t_start, ' seconds!')\n logging.shutdown() # Closing logging file after extraction is done !!\n logs = []\n logs.append(err)\n logs.append(\"The PNG conversion is SUCCESSFUL\")\n return logs\n\n\nif __name__ == \"__main__\":\n with open('config.json', 'r') as f:\n niffler = json.load(f)\n\n initialize_config_and_execute(niffler)\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"numpy.maximum",
"numpy.asarray",
"numpy.uint8",
"pandas.DataFrame",
"numpy.uint16",
"numpy.array_split"
]
] |
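Side note on the row above (the DICOM-to-PNG extractor): its 16-bit branch rescales ds.pixel_array to the 0..65535 range and streams it to pypng. A minimal standalone sketch of just that rescaling-and-write step, using a synthetic array in place of a real pydicom pixel array; the helper name write_16bit_png, the fake_pixels gradient and the output path are illustrative only, not part of the dataset.

import numpy as np
import png  # pypng, the same writer used in the extractor row above


def write_16bit_png(pixels: np.ndarray, path: str) -> None:
    """Rescale a 2-D pixel array to 0..65535 and write it as a 16-bit greyscale PNG."""
    image_2d = pixels.astype(np.double)
    # Same normalisation as the extractor: clamp negatives, divide by the max,
    # then stretch to the full uint16 range.
    image_2d_scaled = (np.maximum(image_2d, 0) / image_2d.max()) * 65535.0
    image_2d_scaled = np.uint16(image_2d_scaled)
    height, width = image_2d_scaled.shape
    with open(path, "wb") as png_file:
        writer = png.Writer(width, height, greyscale=True, bitdepth=16)
        writer.write(png_file, image_2d_scaled)  # pypng accepts any iterable of rows


if __name__ == "__main__":
    # Synthetic gradient standing in for ds.pixel_array read from a DICOM file.
    fake_pixels = np.arange(64 * 64, dtype=np.int32).reshape(64, 64)
    write_16bit_png(fake_pixels, "example.png")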
jasonfan1997/umd_icecube_analysis_tutorial
|
[
"50bf3af27f81d719953ac225f199e733b5c0bddf"
] |
[
"mla/mla/sensitivtiy.py"
] |
[
"'''Core functionality'''\r\n\r\nfrom __future__ import print_function, division\r\nimport os, sys, glob, numpy as np, matplotlib, scipy, time\r\nfrom scipy import stats, interpolate, optimize\r\nfrom math import pi\r\nimport numpy.lib.recfunctions as rf\r\nfrom mla.spectral import *\r\nfrom mla.tools import *\r\nfrom mla.timing import *\r\nfrom mla.core import *\r\nfrom mla.injection import *\r\nimport scipy.stats\r\nfrom copy import deepcopy\r\nfrom matplotlib import pyplot as plt, colors\r\n\r\n\r\nclass PS_sensitivity():\r\n def __init__(self):\r\n pass\r\n \r\n def background_building(self, data, sim, bkg_bins=np.linspace(-1.0, 1.0, 501), bkg_2dbins=[np.linspace(-1,1,100),np.linspace(1,8,100)],gamma_points = np.arange(-4, -1, 0.25),save_file = None):\r\n r''' Building the background distribution\r\n args:\r\n data:The Background\r\n sim: Monte Carlo simulation\r\n spectrum: Spectrum , could be a BaseSpectrum object or a string name PowerLaw\r\n bkg_2dbins: The sindec and logE binning for energy S/B histogram.\r\n gamma_points: The set of gamma for PowerLaw energy weighting.\r\n save_file: location to save the background file.Default is not saving.\r\n '''\r\n self.energybins = bkg_2dbins\r\n if save_file is not None:\r\n bkg_file = save_file + \"bkg_dec.pkl\"\r\n sob_file = save_file + \"bkd_SOB.npy\"\r\n self.bkg_spline = build_bkg_spline(data , bins = bkg_bins , file_name = bkg_file)\r\n self.ratio,self.gamma_points = build_energy_2dhistogram(data, sim ,bkg_2dbins ,gamma_points,file_name = sob_file)\r\n else:\r\n self.bkg_spline = build_bkg_spline(data , bins = bkg_bins )\r\n self.ratio,self.gamma_points = build_energy_2dhistogram(data, sim ,bkg_2dbins ,gamma_points)\r\n return\r\n \r\n def load_background(self, dir_name, bkg_bins=np.linspace(-1.0, 1.0, 501), bkg_2dbins=[np.linspace(-1,1,100),np.linspace(1,8,100)],gamma_points = np.arange(-4, -1, 0.25)):\r\n r''' Loading background distribution\r\n args:\r\n dir_name:Location to the file\r\n spectrum: Spectrum , could be a BaseSpectrum object or a string name PowerLaw\r\n bkg_2dbins: The sindec and logE binning for energy S/B histogram.\r\n gamma_points: The set of gamma for PowerLaw energy weighting.\r\n '''\r\n self.energybins = bkg_2dbins\r\n bkg_file = dir_name + \"bkg_dec.pkl\"\r\n sob_file = dir_name + \"bkd_SOB.npy\"\r\n with open(bkg_file, 'rb') as f:\r\n self.bkg_spline = pickle.load(f)\r\n self.ratio = np.load(sob_file)\r\n self.gamma_points = gamma_points\r\n return\r\n \r\n def set_point_source(self, ra , dec , data , sim , spectrum , signal_time_profile = None , background_time_profile = (0,1)):\r\n r'''Set the location of the source and load the information of the model.\r\n ra: RA of the source in rad\r\n dec: Declination of the source in rad\r\n data:The data\r\n sim: Monte Carlo simulation\r\n spectrum: Spectrum , could be a BaseSpectrum object or a string name PowerLaw\r\n signal_time_profile: generic_profile object. This is the signal time profile.Default is the same as background_time_profile.\r\n background_time_profile: generic_profile object or the list of the start time and end time. 
This is the background time profile.Default is a (0,1) tuple which will create a uniform_profile from 0 to 1.\r\n '''\r\n self.point_source=LLH_point_source(ra , dec , data , sim , spectrum , signal_time_profile = signal_time_profile , background_time_profile = background_time_profile,gamma_points=self.gamma_points,bkg_dec_spline=self.bkg_spline,sob_maps = self.ratio)\r\n self.background_time_profile = deepcopy(self.point_source.background_time_profile)\r\n self.signal_time_profile = deepcopy(self.point_source.signal_time_profile)\r\n return\r\n \r\n def set_backround(self, background ,grl ,background_window = 14):\r\n r'''Setting the background information which will later be used when drawing data as background\r\n args:\r\n background:Background data\r\n grl:The good run list\r\n background_window: The time window(days) that will be used to estimated the background rate and drawn sample from.Default is 14 days\r\n '''\r\n start_time = self.background_time_profile.get_range()[0]\r\n fully_contained = (grl['start'] >= start_time-background_window) &\\\r\n (grl['stop'] < start_time)\r\n start_contained = (grl['start'] < start_time-background_window) &\\\r\n (grl['stop'] > start_time-background_window)\r\n background_runs = (fully_contained | start_contained)\r\n if not np.any(background_runs):\r\n print(\"ERROR: No runs found in GRL for calculation of \"\r\n \"background rates!\")\r\n raise RuntimeError\r\n background_grl = grl[background_runs]\r\n \r\n # Get the number of events we see from these runs and scale \r\n # it to the number we expect for our search livetime.\r\n n_background = background_grl['events'].sum()\r\n n_background /= background_grl['livetime'].sum()\r\n n_background *= self.background_time_profile.effective_exposure()\r\n self.n_background = n_background\r\n self.background = background\r\n return\r\n \r\n def set_injection( self, sim , gamma = -2, signal_time_profile = None , background_time_profile = (0,1), sampling_width = np.radians(1) ,ra = None,dec = None):\r\n r'''Set the details of the injection.\r\n sim: Simulation data\r\n gamma: Spectral index of the injection spectrum\r\n signal_time_profile: generic_profile object. This is the signal time profile.Default is the same as background_time_profile.\r\n background_time_profile: generic_profile object or the list of the start time and end time. 
This is the background time profile.Default is a (0,1) tuple which will create a uniform_profile from 0 to 1.\r\n '''\r\n spectrum = PowerLaw( 100e3, 1, gamma)\r\n self.PS_injector = PSinjector(spectrum, sim , signal_time_profile = None , background_time_profile = background_time_profile)\r\n if ra is None:\r\n self.PS_injector.set_source_location(self.point_source.ra,self.point_source.dec,sampling_width = sampling_width)\r\n else:\r\n self.PS_injector.set_source_location(ra,dec,sampling_width = sampling_width)\r\n return\r\n \r\n def draw_data(self):\r\n r'''Draw data sample\r\n return:\r\n background: background sample\r\n '''\r\n n_background_observed = np.random.poisson(self.n_background)\r\n background = np.random.choice(self.background, n_background_observed).copy()\r\n background['time'] = self.background_time_profile.random(len(background))\r\n return background\r\n \r\n def draw_signal(self):\r\n r'''Draw signal sample\r\n return:\r\n signal: signal sample\r\n '''\r\n return self.PS_injector.sample_from_spectrum()\r\n \r\n \r\n \r\n def build_background_TS(self,n_trials = 1000):\r\n r'''build background TS distribution\r\n args:\r\n n_trials: Number of trials\r\n return:\r\n TS: The TS array\r\n '''\r\n TS = []\r\n for i in range(n_trials):\r\n self.point_source.update_data(self.draw_data())\r\n TS.append(self.point_source.eval_llh_fit_ns()[1])\r\n return np.array(TS)\r\n \r\n def build_signal_TS(self, signal_trials = 200 ,result = False ,result_file = None):\r\n r'''build signal TS distribution\r\n args:\r\n signal_trials: Number of trials\r\n result: Whether storing the full result in self.result.Default is False.\r\n result_file:Whether storing the full result in file.Default is False.\r\n \r\n return:\r\n TS: The TS array\r\n '''\r\n TS = []\r\n ts_result = []\r\n for i in range(signal_trials):\r\n data = self.draw_data()\r\n signal = self.draw_signal()\r\n signal = rf.drop_fields(signal, [n for n in signal.dtype.names \\\r\n if not n in data.dtype.names])\r\n self.point_source.update_data(np.concatenate([data,signal]))\r\n TS.append(self.point_source.eval_llh_fit_ns()[1])\r\n ts_result.append(self.point_source.get_fit_result)\r\n if result:\r\n np.save(result_file, np.array(ts_result))\r\n return np.array(TS)\r\n \r\n def calculate_ratio_passthreshold(self,bkg_trials = 1000, signal_trials = 200 ,result = False ,result_file = None):\r\n r'''Calculate the ratio of signal trials passing the threshold\r\n args:\r\n bkg_trials : Number of background trials\r\n signal_trials: Number of signal trials\r\n result: Whether storing the full result in self.result.Default is False.\r\n result_file:Whether storing the full result in file.Default is False.\r\n \r\n return:\r\n result:The ratio of passing(both for three sigma and median of the background\r\n '''\r\n signal_ts = self.build_signal_TS(signal_trials ,result = result ,result_file = result_file)\r\n result = [(signal_ts > self.bkg_three_sigma ).sum()/float(len(signal_ts)), (signal_ts > self.bkg_median).sum()/float(len(signal_ts))]\r\n return result\r\n \r\n def calculate_sensitivity(self, bkg_trials = 1000, signal_trials = 200, gamma = -2, list_N = [1e-17] ,N_factor = 2 , make_plot = None ,Threshold_list=[90] , Threshold_potential = [50],result_save = False ,result_file = None):\r\n r'''Calculate the sensitivity plus the discovery potential\r\n args:\r\n bkg_trials : Number of background trials\r\n signal_trials: Number of signal trials\r\n gamma: Spectral index of the injection signal\r\n list_N:The list of flux norm to test and 
build the spline\r\n N_factor: Factor for Flux increments .If the maximum in list_N still wasn't enough to pass the threshold, the program will enter a while loop with N_factor*N tested each times until the N passed the threshold.\r\n make_plot: The file name of the plot saved. Default is not saving\r\n Threshold_list: The list of threshold of signal TS passing Median of the background TS. \r\n Threshold_potential: The list of threshold of signal TS passing 3 sigma of the background TS. \r\n result: Whether storing the full result in self.result.Default is False.\r\n result_file:Whether storing the full result in file.Default is False.\r\n\r\n '''\r\n self.Threshold_list = Threshold_list\r\n self.Threshold_potential = Threshold_potential\r\n max_threshold = np.array(Threshold_list).max()\r\n max_potential = np.array(Threshold_potential).max()\r\n list_N = np.array(deepcopy(list_N))\r\n result = []\r\n self.ts_bkg = self.build_background_TS(bkg_trials)\r\n self.bkg_median = np.percentile(self.ts_bkg , 50)\r\n self.bkg_three_sigma = np.percentile(self.ts_bkg , 99.7)\r\n for N in list_N:\r\n print(\"Now testing : \"+ str(N))\r\n spectrum = PowerLaw( 100e3, N, gamma)\r\n self.PS_injector.update_spectrum(spectrum)\r\n tempresult = self.calculate_ratio_passthreshold(bkg_trials = 1000, signal_trials = 200, result = result_save ,result_file = result_file)\r\n print(tempresult)\r\n result.append(tempresult)\r\n if tempresult[0] < max_potential*0.01 or tempresult[1] < max_threshold*0.01:\r\n reach_max = False\r\n N = N * N_factor\r\n list_N = np.append(list_N,N)\r\n else:\r\n reach_max = True\r\n while not reach_max:\r\n print(\"Now testing : \"+ str(N))\r\n spectrum = PowerLaw( 100e3, N, gamma)\r\n self.PS_injector.update_spectrum(spectrum)\r\n tempresult = self.calculate_ratio_passthreshold(bkg_trials = 1000, signal_trials = 200, result = result_save ,result_file = result_file)\r\n print(tempresult)\r\n result.append(tempresult)\r\n if tempresult[0] < max_potential*0.01 or tempresult[1] < max_threshold*0.01:\r\n N = N * N_factor\r\n list_N = np.append(list_N,N)\r\n else:\r\n reach_max = True\r\n result = np.array(result)\r\n self.result = result\r\n self.list_N = list_N\r\n self.spline_sigma = interpolate.UnivariateSpline(list_N,result[:,0] , ext = 3)\r\n self.spline_sen = interpolate.UnivariateSpline( list_N,result[:,1] , ext = 3)\r\n Threshold_result = []\r\n Threshold_potential_result = []\r\n for i in Threshold_list:\r\n tempspline = interpolate.UnivariateSpline(list_N,result[:,1]-i*0.01 , ext = 3)\r\n Threshold_result.append(tempspline.roots()[0])\r\n print(\"Threshold: \" + str(i) + \", N : \" + str(self.spline_sen(i*0.01)))\r\n for i in Threshold_potential:\r\n tempspline = interpolate.UnivariateSpline(list_N,result[:,0]-i*0.01 , ext = 3)\r\n Threshold_potential_result.append(tempspline.roots()[0])\r\n print(\"Threshold_potential: \" + str(i) + \", N : \" + str(self.spline_sigma(i*0.01))) \r\n self.Threshold_result = Threshold_result\r\n self.Threshold_potential_result = Threshold_potential_result\r\n if make_plot != None :\r\n self.make_plot(make_plot)\r\n return\r\n \r\n def make_plot(self,file_name):\r\n r'''save plot to file_name\r\n '''\r\n fig, ax = plt.subplots(figsize = (12,12))\r\n ax.scatter(self.list_N,self.result[:,1],label = 'sensitiviy point',color='r')\r\n ax.scatter(self.list_N,self.result[:,0],label = 'potential point',color='b')\r\n ax.set_xlim(self.list_N[0],self.list_N[-1])\r\n 
ax.plot(np.linspace(self.list_N[0],self.list_N[-1],1000),self.spline_sen(np.linspace(self.list_N[0],self.list_N[-1],1000)),label = 'sensitiviy spline',color='r')\r\n ax.plot(np.linspace(self.list_N[0],self.list_N[-1],1000),self.spline_sigma(np.linspace(self.list_N[0],self.list_N[-1],1000)),label = 'potential spline',color='b')\r\n for i in range(len(self.Threshold_result)):\r\n ax.axvline(self.Threshold_result[i],label = 'sensitiviy '+str(self.Threshold_list[i]),color='r')\r\n for i in range(len(self.Threshold_potential_result)):\r\n ax.axvline(self.Threshold_potential_result[i],label = 'potential '+str(self.Threshold_potential[i]),color='b')\r\n ax.set_title(\"Flux norm vs passing ratio\",fontsize=14)\r\n ax.set_xlabel(r\"Flux Norm($GeV cm^{-2} s^{-1}$)\",fontsize=14)\r\n ax.set_ylabel(r\"Passing ratio\",fontsize=14)\r\n ax.legend(fontsize=14)\r\n fig.savefig(file_name)\r\n plt.close()"
] |
[
[
"scipy.interpolate.UnivariateSpline",
"numpy.radians",
"numpy.linspace",
"numpy.random.choice",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.percentile",
"numpy.concatenate",
"numpy.random.poisson",
"numpy.append",
"numpy.any",
"matplotlib.pyplot.close",
"numpy.load",
"numpy.lib.recfunctions.drop_fields",
"numpy.array"
]
] |
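Side note on the row above (the point-source sensitivity class): calculate_sensitivity fits a scipy UnivariateSpline of passing ratio versus flux norm, subtracts the target ratio, and reads the crossing off spline.roots(). A small sketch of that threshold-crossing trick with made-up numbers; flux_norms, passing_ratio and threshold are synthetic stand-ins for the list_N / result arrays, not values from the dataset.

import numpy as np
from scipy import interpolate

# Hypothetical passing ratios measured at a handful of injected flux norms.
flux_norms = np.array([1e-17, 2e-17, 4e-17, 8e-17, 1.6e-16])
passing_ratio = np.array([0.12, 0.35, 0.62, 0.88, 0.97])

threshold = 0.90  # e.g. the 90% entry of Threshold_list

# Spline the (ratio - threshold) curve and take its first root as the flux
# norm where the threshold is reached; ext=3 clamps extrapolation, as above.
spline = interpolate.UnivariateSpline(flux_norms, passing_ratio - threshold, ext=3)
roots = spline.roots()
if len(roots):
    print("flux norm at %.0f%% passing ratio: %g" % (threshold * 100, roots[0]))
else:
    print("threshold never crossed in the sampled range")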
yetyetanotherusername/vaex
|
[
"71ff313486f9ee3a142d9fb4e80c7bdc0e1270c5",
"71ff313486f9ee3a142d9fb4e80c7bdc0e1270c5",
"71ff313486f9ee3a142d9fb4e80c7bdc0e1270c5"
] |
[
"tests/join_test.py",
"tests/evaluate_test.py",
"packages/vaex-core/vaex/legacy.py"
] |
[
"import pytest\nimport vaex\nimport numpy as np\nimport numpy.ma\n\ndf_a = vaex.from_arrays(a=np.array(['A', 'B', 'C']),\n x=np.array([0., 1., 2.]),\n y=np.ma.array([0., 9., 2.], mask=[False, True, False]),\n m=np.ma.array([1, 2, 3], mask=[False, True, False])\n )\n\ndf_b = vaex.from_arrays(b=np.array(['A', 'B', 'D']),\n x=np.array([2., 1., 0.]),\n y=np.ma.array([9., 1., 2.], mask=[True, False, False]),\n m=np.ma.array([3, 1, 2], mask=[True, False, False])\n )\n\ndf_dup = vaex.from_arrays(b=np.array(['A', 'B', 'A']),\n x=np.array([2., 1., 2.]),\n y=np.ma.array([9., 1., 9.], mask=[True, False, False]),\n m=np.ma.array([3, 1, 2], mask=[True, True, False])\n )\n\ndf_c = vaex.from_arrays(c=np.array(['B', 'C']),\n z1=np.array([-1., -2.]),\n z2=np.array([True, False]),\n )\n\ndf_d = vaex.from_arrays(a=np.array(['B', 'C', 'D']),\n x1=np.array(['dog', 'cat', 'mouse']),\n x2=np.array([3.1, 25, np.nan]),\n )\n\ndf_e = vaex.from_arrays(a=np.array(['X', 'Y', 'Z']),\n x1=np.array(['dog', 'cat', 'mouse']),\n x2=np.array([3.1, 25, np.nan]),\n )\n\n\ndef test_no_on():\n # just adds the columns\n df = df_a.join(df_b, rsuffix='_r')\n assert df.columns['b'] is df_b.columns['b']\n\n\ndef test_join_masked():\n df = df_a.join(other=df_b, left_on='m', right_on='m', rsuffix='_r')\n assert df.evaluate('m').tolist() == [1, None, 3]\n assert df.evaluate('m_r').tolist() == [1, None, None]\n\n\ndef test_join_nomatch():\n df = df_a.join(df_e, on='a', rprefix='r_')\n assert df.x2.tolist() == [None, None, None]\n\n\ndef test_left_a_b():\n df = df_a.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')\n assert df.evaluate('a').tolist() == ['A', 'B', 'C']\n assert df.evaluate('b').tolist() == ['A', 'B', None]\n assert df.evaluate('x').tolist() == [0, 1, 2]\n assert df.evaluate('x_r').tolist() == [2, 1, None]\n assert df.evaluate('y').tolist() == [0, None, 2]\n assert df.evaluate('y_r').tolist() == [None, 1, None]\n\ndef test_join_indexed():\n df = df_a.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')\n df_X = df_a.join(df, left_on='a', right_on='b', rsuffix='_r')\n assert df_X.evaluate('b').tolist() == ['A', 'B', None]\n\n\ndef test_left_a_b_filtered():\n df_af = df_a[df_a.x > 0]\n df = df_af.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')\n assert df.evaluate('a').tolist() == ['B', 'C']\n assert df.evaluate('b').tolist() == ['B', None]\n assert df.evaluate('x').tolist() == [1, 2]\n assert df.evaluate('x_r').tolist() == [1, None]\n assert df.evaluate('y').tolist() == [None, 2]\n assert df.evaluate('y_r').tolist() == [1, None]\n\n # actually, even though the filter is applied, all rows will be matched\n # since the filter can change\n df.set_selection(None, vaex.dataset.FILTER_SELECTION_NAME)\n assert df.evaluate('a').tolist() == ['A', 'B', 'C']\n assert df.evaluate('b').tolist() == ['A', 'B', None]\n assert df.evaluate('x').tolist() == [0, 1, 2]\n assert df.evaluate('x_r').tolist() == [2, 1, None]\n assert df.evaluate('y').tolist() == [0, None, 2]\n assert df.evaluate('y_r').tolist() == [None, 1, None]\n\n # if we extract, that shouldn't be the case\n df_af = df_a[df_a.x > 0].extract()\n df = df_af.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')\n df.set_selection(None, vaex.dataset.FILTER_SELECTION_NAME)\n assert df.evaluate('a').tolist() == ['B', 'C']\n assert df.evaluate('b').tolist() == ['B', None]\n assert df.evaluate('x').tolist() == [1, 2]\n assert df.evaluate('x_r').tolist() == [1, None]\n assert df.evaluate('y').tolist() == [None, 2]\n assert df.evaluate('y_r').tolist() == [1, 
None]\n\ndef test_inner_a_b_filtered():\n df_a_filtered = df_a[df_a.x > 0]\n df = df_a_filtered.join(other=df_b, left_on='a', right_on='b', rsuffix='_r', how='inner')\n assert df.evaluate('a').tolist() == ['B']\n assert df.evaluate('b').tolist() == ['B']\n assert df.evaluate('x').tolist() == [1]\n assert df.evaluate('x_r').tolist() == [1]\n assert df.evaluate('y').tolist() == [None]\n assert df.evaluate('y_r').tolist() == [1]\n\ndef test_right_x_x():\n df = df_a.join(other=df_b, on='x', rsuffix='_r', how='right')\n assert df.evaluate('a').tolist() == ['C', 'B', 'A']\n assert df.evaluate('b').tolist() == ['A', 'B', 'D']\n assert df.evaluate('x').tolist() == [2, 1, 0]\n assert df.evaluate('x_r').tolist() == [2, 1, 0]\n assert df.evaluate('y').tolist() == [2, None, 0]\n assert df.evaluate('y_r').tolist() == [None, 1, 2]\n assert 'y_r' not in df_b\n\n\ndef test_left_dup():\n df = df_a.join(df_dup, left_on='a', right_on='b', rsuffix='_r', allow_duplication=True)\n assert len(df) == 4\n # df = df_a.join(df_dup, on='x', rsuffix='_r')\n # df = df_a.join(df_dup, on='m', rsuffix='_r')\n\n\ndef test_left_a_c():\n df = df_a.join(df_c, left_on='a', right_on='c', how='left')\n assert df.a.tolist() == ['A', 'B', 'C']\n assert df.x.tolist() == [0, 1, 2]\n assert df.y.tolist() == [0., None, 2.]\n assert df.m.tolist() == [1, None, 3]\n assert df.c.tolist() == [None, 'B', 'C']\n assert df.z1.tolist() == [None, -1., -2.]\n assert df.z2.tolist() == [None, True, False]\n\n\ndef test_join_a_a_suffix_check():\n df = df_a.join(df_a, on='a', lsuffix='_left', rsuffix='_right')\n assert set(df.column_names) == {'a_left', 'x_left', 'y_left', 'm_left', 'a_right', 'x_right', 'y_right', 'm_right'}\n\n\ndef test_join_a_a_prefix_check():\n df = df_a.join(df_a, on='a', lprefix='left_', rprefix='right_')\n assert set(df.column_names) == {'left_a', 'left_x', 'left_y', 'left_m', 'right_a', 'right_x', 'right_y', 'right_m'}\n\n\ndef test_inner_a_d():\n df = df_a.join(df_d, on='a', right_on='a', how='inner', rsuffix='_r')\n assert df.a.tolist() == ['B', 'C']\n assert df.x.tolist() == [1., 2.]\n assert df.y.tolist() == [None, 2.]\n assert df.m.tolist() == [None, 3.]\n assert df.x1.tolist() == ['dog', 'cat']\n assert df.x2.tolist() == [3.1, 25.]\n\n\[email protected](reason='full join not supported yet')\ndef test_full_a_d():\n df = df_a.join(df_d, on='a', right_on='a', how='full')\n assert df.a.tolist() == ['A', 'B', 'C', 'D']\n assert df.x.tolist() == [0., 1., 2., None]\n assert df.y.tolist() == [0., None, 2., None]\n assert df.m.tolist() == [1, None, 3, None]\n assert df.x1.tolist() == [None, 'dog', 'cat', 'mouse']\n assert df.x2.tolist() == [None, 3.1, 25., np.nan]\n np.testing.assert_array_equal(np.array(df_d.x2.values), np.array([3.1, 25., np.nan]))\n\n\ndef test_left_virtual_filter():\n df = df_a.join(df_d, on='a', how='left', rsuffix='_b')\n df['r'] = df.x + df.x2\n df = df[df.r > 10]\n assert set(df[0]) == {'C', 2.0, 2.0, 3, 'C', 'cat', 25.0, 27.0}\n\n\ndef test_left_on_virtual_col():\n mapper = {0: 'A', 1: 'B', 2: 'C'}\n df_a['aa'] = df_a.x.map(mapper=mapper)\n df = df_a.join(df_d, left_on='aa', right_on='a', rsuffix='_right')\n assert df.a.tolist() == ['A', 'B', 'C']\n assert df.aa.tolist() == ['A', 'B', 'C']\n assert df.x.tolist() == [0, 1, 2]\n assert df.y.tolist() == [0., None, 2.]\n assert df.m.tolist() == [1, None, 3]\n assert df.x1.tolist() == [None, 'dog', 'cat']\n assert df.x2.tolist() == [None, 3.1, 25.]\n assert df.a_right.tolist() == [None, 'B', 'C']\n\n\ndef test_join_filtered_inner():\n df_a_filtered = 
df_a[df_a.y > 0]\n df_joined = df_a_filtered.join(other=df_b, on='x', how='inner', rsuffix='_', allow_duplication=True)\n assert len(df_joined) == len(df_a_filtered)\n\n x = np.arange(20)\n df = vaex.from_arrays(x=x, y=x**2)\n df = df[df.x > 5]\n dfj = df.join(df, on='x', rsuffix='right_', how='inner')\n repr(dfj) # trigger issue with selection cache\n\n\ndef test_join_duplicate_column():\n df_left = vaex.from_arrays(index=[1, 2, 3], x=[10, 20, 30])\n df_right = vaex.from_arrays(index=[1, 2, 3], y=[0.1, 0.2, 0.3])\n\n df = df_left.join(df_right, on='index')\n assert df.column_count() == 3\n assert set(df.column_names) == {'index', 'x', 'y'}\n assert df['index'] == [1, 2, 3]\n assert df.x.tolist() == [10, 20, 30]\n assert df.y.tolist() == [0.1, 0.2, 0.3]\n",
"import vaex\nimport numpy as np\n\ndef test_evaluate_function_filtered_df():\n # Custom function to be applied to a filtered DataFrame\n def custom_func(x):\n assert 4 not in x; return x**2\n\n df = vaex.from_arrays(x=np.arange(10))\n df_filtered = df[df.x!=4]\n df_filtered.add_function('custom_function', custom_func)\n df_filtered['y'] = df_filtered.func.custom_function(df_filtered.x)\n assert df_filtered.y.tolist() == [0, 1, 4, 9, 25, 36, 49, 64, 81]\n\n # sliced exactly at the start of where we are going to filter\n # this used to trigger a bug in df.dtype, which would evaluate the first row\n df_sliced = df[4:]\n df_filtered = df_sliced[df_sliced.x!=4]\n df_filtered.add_function('custom_function', custom_func)\n df_filtered['y'] = df_filtered.func.custom_function(df_filtered.x)\n assert df_filtered.y.tolist() == [25, 36, 49, 64, 81]\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function\nimport numpy as np\nimport vaex\nfrom .tasks import Task, TaskMapReduce\nfrom .utils import _parse_f\nimport six\n\n\ndef _asfloat(a):\n if a.dtype.type == np.float64 and a.strides[0] == 8:\n return a\n else:\n return a.astype(np.float64, copy=False)\n\nclass TaskMapReduceLegacy(TaskMapReduce):\n def __init__(self, *args, **kwargs):\n kwargs = kwargs.copy()\n kwargs['ignore_filter'] = True\n TaskMapReduce.__init__(self, *args, **kwargs)\n\nclass TaskHistogram(Task):\n def __init__(self, df, subspace, expressions, size, limits, masked=False, weight=None):\n self.size = size\n self.limits = limits\n Task.__init__(self, df, expressions, name=\"histogram\")\n self.subspace = subspace\n self.dtype = np.float64\n self.masked = masked\n self.weight = weight\n # self.grids = vaex.grids.Grids(self.df, self.df.executor.thread_pool, *expressions)\n # self.grids.ranges = limits\n # self.grids.grids[\"counts\"] = vaex.grids.Grid(self.grids, size, self.dimension, None)\n shape1 = (self.size,) * self.dimension\n try:\n self.size[0]\n shape1 = tuple(self.size)\n except:\n pass\n shape = (self.subspace.executor.thread_pool.nthreads,) + shape1\n self.data = np.zeros(shape, dtype=self.dtype)\n self.ranges_flat = []\n self.minima = []\n self.maxima = []\n for limit in self.limits:\n self.ranges_flat.extend(limit)\n vmin, vmax = limit\n self.minima.append(vmin)\n self.maxima.append(vmax)\n if self.weight is not None:\n self.expressions_all.append(weight)\n # print self.ranges_flat\n\n def __repr__(self):\n name = self.__class__.__module__ + \".\" + self.__class__.__name__\n return \"<%s(df=%r, expressions=%r, size=%r, limits=%r)> instance at 0x%x\" % (name, self.df, self.expressions, self.size, self.limits, id(self))\n\n def map(self, thread_index, i1, i2, filter_mask, *blocks):\n class Info(object):\n pass\n info = Info()\n info.i1 = i1\n info.i2 = i2\n info.first = i1 == 0\n info.last = i2 == self.df.length_unfiltered()\n info.size = i2 - i1\n # print \"bin\", i1, i2, info.last\n # self.grids[\"counts\"].bin_block(info, *blocks)\n # mask = self.df.mask\n data = self.data[thread_index]\n\n blocks = [_asfloat(block) for block in blocks]\n\n if self.masked or self.df.filtered:\n mask = self.df.evaluate_selection_mask(\"default\" if self.masked else None, i1=i1, i2=i2)\n blocks = [block[mask] for block in blocks]\n\n subblock_weight = None\n if len(blocks) == len(self.expressions) + 1:\n subblock_weight = blocks[-1]\n blocks = list(blocks[:-1])\n # print subblocks[0]\n # print subblocks[1]\n\n if self.dimension == 1:\n vaex.vaexfast.histogram1d(blocks[0], subblock_weight, data, *self.ranges_flat)\n elif self.dimension == 2:\n # if subblock_weight is None:\n # #print \"speedup?\"\n # histogram_numba(blocks[0], blocks[1], subblock_weight, data, *self.ranges_flat)\n # else:\n vaex.vaexfast.histogram2d(blocks[0], blocks[1], subblock_weight, data, *self.ranges_flat)\n # vaex.vaexfast.statisticNd([blocks[0], blocks[1]], subblock_weight, data, self.minima, self.maxima, 0)\n elif self.dimension == 3:\n vaex.vaexfast.histogram3d(blocks[0], blocks[1], blocks[2], subblock_weight, data, *self.ranges_flat)\n else:\n blocks = list(blocks) # histogramNd wants blocks to be a list\n vaex.vaexfast.histogramNd(blocks, subblock_weight, data, self.minima, self.maxima)\n\n return i1\n # return map(self._map, blocks)#[self.map(block) for block in blocks]\n\n def reduce(self, results):\n for i in range(1, self.subspace.executor.thread_pool.nthreads):\n 
self.data[0] += self.data[i]\n return self.data[0]\n # return self.data\n\n\nclass SubspaceGridded(object):\n def __init__(self, subspace_bounded, grid, vx=None, vy=None, vcounts=None):\n self.subspace_bounded = subspace_bounded\n self.grid = grid\n self.vx = vx\n self.vy = vy\n self.vcounts = vcounts\n\n def vector(self, weightx, weighty, size=32):\n counts = self.subspace_bounded.gridded_by_histogram(size=size)\n vx = self.subspace_bounded.gridded_by_histogram(size=size, weight=weightx)\n vy = self.subspace_bounded.gridded_by_histogram(size=size, weight=weighty)\n return SubspaceGridded(self.subspace_bounded, self.grid, vx=vx, vy=vy, vcounts=counts)\n\n def filter_gaussian(self, sigmas=1):\n import scipy.ndimage\n return SubspaceGridded(self.subspace_bounded, scipy.ndimage.filters.gaussian_filter(self.grid, sigmas))\n\n def clip_relative(self, v1, v2):\n vmin = self.grid.min()\n vmax = self.grid.max()\n width = vmax - vmin\n return SubspaceGridded(self.subspace_bounded, np.clip(self.grid, vmin + v1 * width, vmin + v2 * width))\n\n def volr(self, **kwargs):\n import vaex.notebook\n return vaex.notebook.volr(subspace_gridded=self, **kwargs)\n\n def plot(self, axes=None, **kwargs):\n self.subspace_bounded.subspace.plot(np.log1p(self.grid), limits=self.subspace_bounded.bounds, axes=axes, **kwargs)\n\n def mean_line(self, axis=0, **kwargs):\n from matplotlib import pylab\n assert axis in [0, 1]\n other_axis = 0 if axis == 1 else 1\n xmin, xmax = self.subspace_bounded.bounds[axis]\n ymin, ymax = self.subspace_bounded.bounds[other_axis]\n x = vaex.utils.linspace_centers(xmin, xmax, self.grid.shape[axis])\n y = vaex.utils.linspace_centers(ymin, ymax, self.grid.shape[other_axis])\n print(y)\n if axis == 0:\n counts = np.sum(self.grid, axis=axis)\n means = np.sum(self.grid * y[np.newaxis, :].T, axis=axis) / counts\n else:\n counts = np.sum(self.grid, axis=axis)\n means = np.sum(self.grid * y[:, np.newaxis].T, axis=axis) / counts\n if axis == 0:\n result = pylab.plot(x, means, **kwargs)\n else:\n result = pylab.plot(means, x, **kwargs)\n\n self.subspace_bounded.lim()\n return result, x, means\n\n def _repr_png_(self):\n from matplotlib import pylab\n fig, ax = pylab.subplots()\n self.plot(axes=ax, f=np.log1p)\n import vaex.utils\n if all([k is not None for k in [self.vx, self.vy, self.vcounts]]):\n N = self.vx.grid.shape[0]\n bounds = self.subspace_bounded.bounds\n print(bounds)\n positions = [vaex.utils.linspace_centers(bounds[i][0], bounds[i][1], N) for i in range(self.subspace_bounded.subspace.dimension)]\n print(positions)\n mask = self.vcounts.grid > 0\n vx = np.zeros_like(self.vx.grid)\n vy = np.zeros_like(self.vy.grid)\n vx[mask] = self.vx.grid[mask] / self.vcounts.grid[mask]\n vy[mask] = self.vy.grid[mask] / self.vcounts.grid[mask]\n # vx = self.vx.grid / self.vcounts.grid\n # vy = self.vy.grid / self.vcounts.grid\n x2d, y2d = np.meshgrid(positions[0], positions[1])\n ax.quiver(x2d[mask], y2d[mask], vx[mask], vy[mask])\n # print x2d\n # print y2d\n # print vx\n # print vy\n # ax.quiver(x2d, y2d, vx, vy)\n ax.title.set_text(r\"$\\log(1+counts)$\")\n ax.set_xlabel(self.subspace_bounded.subspace.expressions[0])\n ax.set_ylabel(self.subspace_bounded.subspace.expressions[1])\n # pylab.savefig\n # from .io import StringIO\n from six import StringIO\n file_object = StringIO()\n fig.canvas.print_png(file_object)\n pylab.close(fig)\n return file_object.getvalue()\n\n def cube_png(self, f=np.log1p, colormap=\"afmhot\", file=\"cube.png\"):\n if self.grid.shape != ((128,) * 3):\n logger.error(\"only 
128**3 cubes are supported\")\n return None\n colormap_name = \"afmhot\"\n import matplotlib.cm\n colormap = matplotlib.cm.get_cmap(colormap_name)\n mapping = matplotlib.cm.ScalarMappable(cmap=colormap)\n # pixmap = QtGui.QPixmap(32*2, 32)\n data = np.zeros((128 * 8, 128 * 16, 4), dtype=np.uint8)\n\n # mi, ma = 1*10**self.mod1, self.data3d.max()*10**self.mod2\n grid = f(self.grid)\n vmin, vmax = grid.min(), grid.max()\n grid_normalized = (grid - vmin) / (vmax - vmin)\n # intensity_normalized = (np.log(self.data3d + 1.) - np.log(mi)) / (np.log(ma) - np.log(mi));\n import PIL.Image\n for y2d in range(8):\n for x2d in range(16):\n zindex = x2d + y2d * 16\n I = grid_normalized[zindex]\n rgba = mapping.to_rgba(I, bytes=True) # .reshape(Nx, 4)\n # print rgba.shape\n subdata = data[y2d * 128:(y2d + 1) * 128, x2d * 128:(x2d + 1) * 128]\n for i in range(3):\n subdata[:, :, i] = rgba[:, :, i]\n subdata[:, :, 3] = (grid_normalized[zindex] * 255).astype(np.uint8) # * 0 + 255\n if 0:\n filename = \"cube%03d.png\" % zindex\n img = PIL.Image.frombuffer(\"RGB\", (128, 128), subdata[:, :, 0:3] * 1)\n print((\"saving to\", filename))\n img.save(filename)\n img = PIL.Image.frombuffer(\"RGBA\", (128 * 16, 128 * 8), data, 'raw') # , \"RGBA\", 0, -1)\n # filename = \"cube.png\"\n # print \"saving to\", file\n img.save(file, \"png\")\n\n if 0:\n filename = \"colormap.png\"\n print((\"saving to\", filename))\n height, width = self.colormap_data.shape[:2]\n img = PIL.Image.frombuffer(\"RGB\", (width, height), self.colormap_data)\n img.save(filename)\n\n\nclass SubspaceBounded(object):\n def __init__(self, subspace, bounds):\n self.subspace = subspace\n self.bounds = bounds\n\n def histogram(self, size=256, weight=None):\n return self.subspace.histogram(limits=self.bounds, size=size, weight=weight)\n\n def gridded(self, size=256, weight=None):\n return self.gridded_by_histogram(size=size, weight=weight)\n\n def gridded_by_histogram(self, size=256, weight=None):\n grid = self.histogram(size=size, weight=weight)\n return SubspaceGridded(self, grid)\n\n def lim(self):\n from matplotlib import pylab\n xmin, xmax = self.bounds[0]\n ymin, ymax = self.bounds[1]\n pylab.xlim(xmin, xmax)\n pylab.ylim(ymin, ymax)\n\n\nclass Subspaces(object):\n \"\"\"\n :type: subspaces: list[Subspace]\n\n \"\"\"\n\n def __init__(self, subspaces):\n self.subspaces = subspaces\n self.expressions = set()\n first_subspace = self.subspaces[0]\n self.delay = first_subspace.delay\n self.dimension = first_subspace.dimension\n self.df = self.subspaces[0].df\n for subspace in self.subspaces:\n assert subspace.df == self.subspaces[0].df\n assert subspace.delay == self.subspaces[0].delay\n assert subspace.dimension == self.subspaces[0].dimension, \"subspace is of dimension %s, while first subspace if of dimension %s\" % (subspace.dimension, self.subspaces[0].dimension)\n # assert subspace.sele== self.subspaces[0].delay\n self.expressions.update(subspace.expressions)\n self.expressions = list(self.expressions)\n self.subspace = self.df(*list(self.expressions), delay=self.delay, executor=first_subspace.executor)\n\n # def _repr_html_(self):\n\n def __len__(self):\n return len(self.subspaces)\n\n def names(self, seperator=\" \"):\n return [seperator.join(subspace.expressions) for subspace in self.subspaces]\n\n def expressions_list(self):\n return [subspace.expressions for subspace in self.subspaces]\n\n def selected(self):\n return Subspaces([subspace.selected() for subspace in self.subspaces])\n\n def _unpack(self, values):\n value_map = 
dict(zip(self.expressions, values))\n return [[value_map[ex] for ex in subspace.expressions] for subspace in self.subspaces]\n\n def _pack(self, values):\n value_map = {}\n for subspace_values, subspace in zip(values, self.subspaces):\n for value, expression in zip(subspace_values, subspace.expressions):\n if expression in value_map:\n if isinstance(value, np.ndarray):\n assert np.all(value_map[expression] == value), \"inconsistency in subspaces, value for expression %r is %r in one case, and %r in the other\" % (expression, value, value_map[expression])\n else:\n assert value_map[expression] == value, \"inconsistency in subspaces, value for expression %r is %r in one case, and %r in the other\" % (expression, value, value_map[expression])\n else:\n value_map[expression] = value\n return [value_map[expression] for expression in self.expressions]\n\n def minmax(self):\n if self.delay:\n return self.subspace.minmax().then(self._unpack)\n else:\n return self._unpack(self.subspace.minmax())\n\n def limits_sigma(self, sigmas=3, square=False):\n if self.delay:\n return self.subspace.limits_sigma(sigmas=sigmas, square=square).then(self._unpack)\n else:\n return self._unpack(self.subspace.limits_sigma(sigmas=sigmas, square=square))\n\n def mutual_information(self, limits=None, size=256):\n if limits is not None:\n limits = self._pack(limits)\n\n def mutual_information(limits):\n return vaex.promise.listPromise([vaex.promise.Promise.fulfilled(subspace.mutual_information(subspace_limits, size=size)) for subspace_limits, subspace in zip(limits, self.subspaces)])\n # return histograms\n if limits is None:\n limits_promise = vaex.promise.Promise.fulfilled(self.subspace.minmax())\n else:\n limits_promise = vaex.promise.Promise.fulfilled(limits)\n limits_promise = limits_promise.then(self._unpack)\n promise = limits_promise.then(mutual_information)\n return promise if self.delay else promise.get()\n\n def mean(self):\n if self.delay:\n return self.subspace.mean().then(self._unpack)\n else:\n means = self.subspace.mean()\n return self._unpack(means)\n\n def var(self, means=None):\n # 'pack' means, and check if it makes sence\n if means is not None:\n means = self._pack(means)\n\n def var(means):\n return self.subspace.var(means=means)\n if self.delay:\n # if means is None:\n # return self.subspace.mean().then(var).then(self._unpack)\n # else:\n return var(means).then(self._unpack)\n else:\n # if means is None:\n # means = self.subspace.mean()\n # logger.debug(\"means: %r\", means)\n return self._unpack(var(means=means))\n\n def correlation(self, means=None, vars=None):\n def var(means):\n return self.subspace.var(means=means)\n\n def correlation(means_and_vars):\n means, vars = means_and_vars\n means, vars = self._unpack(means), self._unpack(vars)\n # return self.subspace.correlation(means=means, vars=vars)\n return vaex.promise.listPromise([subspace.correlation(means=subspace_mean, vars=subspace_var) for subspace_mean, subspace_var, subspace in zip(means, vars, self.subspaces)])\n if means is not None:\n means = self._pack(means)\n if vars is not None:\n vars = self._pack(vars)\n if self.delay:\n if means is None:\n mean_promise = self.subspace.mean()\n else:\n mean_promise = vaex.promise.Promise.fulfilled(means)\n if vars is None:\n var_promise = mean_promise.then(var)\n else:\n var_promise = vaex.promise.Promise.fulfilled(vars)\n mean_and_var_calculated = vaex.promise.listPromise(mean_promise, var_promise)\n return mean_and_var_calculated.then(correlation)\n else:\n if means is None:\n means = 
self.subspace.mean()\n if vars is None:\n vars = self.subspace.var(means=means)\n means = self._unpack(means)\n vars = self._unpack(vars)\n return [subspace.correlation(means=subspace_mean, vars=subspace_var) for subspace_mean, subspace_var, subspace in zip(means, vars, self.subspaces)]\n # return correlation((means, vars))\n\n # def bounded_by(self, limits_list):\n # return SubspacesBounded(SubspaceBounded(subspace, limits) for subspace, limit in zip(self.subspaces, limits_list))\n\n\nclass Subspace(object):\n \"\"\"A Subspace represent a subset of columns or expressions from a df.\n\n subspace are not instantiated directly, but by 'calling' the df like this:\n\n >>> subspace_xy = some_df(\"x\", \"y\")\n >>> subspace_r = some_df(\"sqrt(x**2+y**2)\")\n\n See `vaex.df.Dataset` for more documentation.\n\n \"\"\"\n\n def __init__(self, df, expressions, executor, delay, masked=False):\n \"\"\"\n\n :param Dataset df: the df the subspace refers to\n :param list[str] expressions: list of expressions that forms the subspace\n :param Executor executor: responsible for executing the tasks\n :param bool delay: return answers directly, or as a promise\n :param bool masked: work on the selection or not\n :return:\n \"\"\"\n self.df = df\n self.expressions = expressions\n self.executor = executor\n self.delay = delay\n self.is_masked = masked\n\n def __repr__(self):\n name = self.__class__.__module__ + \".\" + self.__class__.__name__\n return \"<%s(df=%r, expressions=%r, delay=%r, is_masked=%r)> instance at 0x%x\" % (name, self.df, self.expressions, self.delay, self.is_masked, id(self))\n\n @property\n def dimension(self):\n return len(self.expressions)\n\n def get_selection(self):\n return self.df.get_selection(\"default\") if self.is_masked else None\n\n def is_selected(self):\n return self.is_masked\n\n def selected(self):\n return self.__class__(self.df, expressions=self.expressions, executor=self.executor, delay=self.delay, masked=True)\n\n def delayhronous(self):\n return self.__class__(self.df, expressions=self.expressions, executor=self.executor, delay=True, masked=self.is_masked)\n\n def image_rgba_save(self, filename, data=None, rgba8=None, **kwargs):\n if rgba8 is not None:\n data = self.image_rgba_data(rgba8=rgba8, **kwargs)\n if data is None:\n data = self.image_rgba_data(**kwargs)\n with open(filename, \"wb\") as f:\n f.write(data)\n\n def image_rgba_notebook(self, data=None, rgba8=None, **kwargs):\n if rgba8 is not None:\n data = self.image_rgba_data(rgba8=rgba8, **kwargs)\n if data is None:\n data = self.image_rgba_data(**kwargs)\n from IPython.display import display, Image\n return Image(data=data)\n\n def image_rgba_data(self, rgba8=None, format=\"png\", pil_draw=False, **kwargs):\n import PIL.Image\n import PIL.ImageDraw\n from six import StringIO\n if rgba8 is None:\n rgba8 = self.image_rgba(**kwargs)\n img = PIL.Image.frombuffer(\"RGBA\", rgba8.shape[:2], rgba8, 'raw') # , \"RGBA\", 0, -1)\n if pil_draw:\n draw = PIL.ImageDraw.Draw(img)\n pil_draw(draw)\n\n f = StringIO()\n img.save(f, format)\n return f.getvalue()\n\n def image_rgba_url(self, rgba8=None, **kwargs):\n if rgba8 is None:\n rgba8 = self.image_rgba(**kwargs)\n import PIL.Image\n img = PIL.Image.frombuffer(\"RGBA\", rgba8.shape[:2], rgba8, 'raw') # , \"RGBA\", 0, -1)\n from six import StringIO\n f = StringIO()\n img.save(f, \"png\")\n from base64 import b64encode\n imgurl = \"data:image/png;base64,\" + b64encode(f.getvalue()) + \"\"\n return imgurl\n\n def normalize_grid(self, grid):\n grid = grid * 1 # copy\n mask = 
(grid > 0) & np.isfinite(grid)\n if grid.sum():\n grid -= grid[mask].min()\n grid /= grid[mask].max()\n else:\n grid[:] = 0\n return grid\n\n def limits(self, value, square=False):\n \"\"\"TODO: doc + server side implementation\"\"\"\n if isinstance(value, six.string_types):\n import re\n match = re.match(r\"(\\d*)(\\D*)\", value)\n if match is None:\n raise ValueError(\"do not understand limit specifier %r, examples are 90%, 3sigma\")\n else:\n value, type = match.groups()\n import ast\n value = ast.literal_eval(value)\n type = type.strip()\n if type in [\"s\", \"sigma\"]:\n return self.limits_sigma(value)\n elif type in [\"ss\", \"sigmasquare\"]:\n return self.limits_sigma(value, square=True)\n elif type in [\"%\", \"percent\"]:\n return self.limits_percentage(value)\n elif type in [\"%s\", \"%square\", \"percentsquare\"]:\n return self.limits_percentage(value, square=True)\n if value is None:\n return self.limits_percentage(square=square)\n else:\n return value\n\n def image_rgba(self, grid=None, size=256, limits=None, square=False, center=None, weight=None, weight_stat=\"mean\", figsize=None,\n aspect=\"auto\", f=lambda x: x, axes=None, xlabel=None, ylabel=None,\n group_by=None, group_limits=None, group_colors='jet', group_labels=None, group_count=10, cmap=\"afmhot\",\n vmin=None, vmax=None,\n pre_blend=False, background_color=\"white\", background_alpha=1., normalize=True, color=None):\n f = _parse_f(f)\n if grid is None:\n limits = self.limits(limits)\n if limits is None:\n limits = self.limits_sigma()\n if group_limits is None and group_by:\n group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)\n if weight_stat == \"mean\" and weight is not None:\n grid = self.bin_mean(weight, limits=limits, size=size, group_limits=group_limits, group_by=group_by)\n else:\n grid = self.histogram(limits=limits, size=size, weight=weight, group_limits=group_limits, group_by=group_by)\n if grid is None: # cancel occured\n return\n import matplotlib.cm\n background_color = np.array(matplotlib.colors.colorConverter.to_rgb(background_color))\n if group_by:\n gmin, gmax, group_count = group_limits\n if isinstance(group_colors, six.string_types):\n group_colors = matplotlib.cm.get_cmap(group_colors)\n if isinstance(group_colors, matplotlib.colors.Colormap):\n group_count = group_limits[2]\n colors = [group_colors(k / float(group_count - 1.)) for k in range(group_count)]\n else:\n colors = [matplotlib.colors.colorConverter.to_rgba(k) for k in group_colors]\n total = np.sum(grid, axis=0).T\n # grid /= total\n mask = total > 0\n alpha = total - total[mask].min()\n alpha[~mask] = 0\n alpha = total / alpha.max()\n rgba = grid.T.dot(colors)\n\n def _norm(data):\n mask = np.isfinite(data)\n data = data - data[mask].min()\n data /= data[mask].max()\n return data\n rgba[..., 3] = (f(alpha))\n # rgba[...,3] = 1\n rgba[total == 0, 3] = 0.\n mask = alpha > 0\n if 1:\n for i in range(3):\n rgba[..., i] /= total\n # rgba[...,i] /= rgba[...,0:3].max()\n rgba[~mask, i] = background_color[i]\n rgba = (np.swapaxes(rgba, 0, 1))\n else:\n if color:\n color = np.array(matplotlib.colors.colorConverter.to_rgba(color))\n rgba = np.zeros(grid.shape + (4,))\n rgba[..., 0:4] = color\n data = f(grid)\n mask = (grid > 0) & np.isfinite(data)\n if vmin is None:\n vmin = data[mask].min()\n if vmax is None:\n vmax = data[mask].max()\n if mask.sum():\n data -= vmin\n data /= vmax\n data[~mask] = 0\n else:\n data[:] = 0\n rgba[..., 3] = data\n else:\n cmap = matplotlib.cm.get_cmap(cmap)\n data = f(grid)\n if normalize:\n mask 
= (data > 0) & np.isfinite(data)\n if vmin is None:\n vmin = data[mask].min()\n if vmax is None:\n vmax = data[mask].max()\n if mask.sum():\n data -= vmin\n data /= vmax\n else:\n data[:] = 0\n data[~mask] = 0\n data = np.clip(data, 0, 1)\n rgba = cmap(data)\n if normalize:\n rgba[~mask, 3] = 0\n rgba[..., 3] = 1 # data\n # rgba8 = np.swapaxes(rgba8, 0, 1)\n # white = np.ones_like(rgba[...,0:3])\n if pre_blend:\n # rgba[...,3] = background_alpha\n rgb = rgba[..., :3].T\n alpha = rgba[..., 3].T\n rgb[:] = rgb * alpha + background_color[:3].reshape(3, 1, 1) * (1 - alpha)\n alpha[:] = alpha + background_alpha * (1 - alpha)\n rgba = np.clip(rgba, 0, 1)\n rgba8 = (rgba * 255).astype(np.uint8)\n return rgba8\n\n def plot_vectors(self, expression_x, expression_y, limits, wx=None, wy=None, counts=None, size=32, axes=None, **kwargs):\n import pylab\n # refactor: should go to bin_means_xy\n if counts is None:\n counts = self.histogram(size=size, limits=limits)\n if wx is None:\n wx = self.histogram(size=size, weight=expression_x, limits=limits)\n if wy is None:\n wy = self.histogram(size=size, weight=expression_y, limits=limits)\n N = size\n positions = [vaex.utils.linspace_centers(limits[i][0], limits[i][1], N) for i in range(self.dimension)]\n # print(positions)\n mask = counts > 0\n vx = wx / counts\n vy = wy / counts\n vx[counts == 0] = 0\n vy[counts == 0] = 0\n # vx = self.vx.grid / self.vcounts.grid\n # vy = self.vy.grid / self.vcounts.grid\n x2d, y2d = np.meshgrid(positions[0], positions[1])\n if axes is None:\n axes = pylab.gca()\n axes.quiver(x2d[mask], y2d[mask], vx[mask], vy[mask], **kwargs)\n\n def plot(self, grid=None, size=256, limits=None, square=False, center=None, weight=None, weight_stat=\"mean\", figsize=None,\n aspect=\"auto\", f=\"identity\", axes=None, xlabel=None, ylabel=None,\n group_by=None, group_limits=None, group_colors='jet', group_labels=None, group_count=None,\n vmin=None, vmax=None,\n cmap=\"afmhot\",\n **kwargs):\n \"\"\"Plot the subspace using sane defaults to get a quick look at the data.\n\n :param grid: A 2d numpy array with the counts, if None it will be calculated using limits provided and Subspace.histogram\n :param size: Passed to Subspace.histogram\n :param limits: Limits for the subspace in the form [[xmin, xmax], [ymin, ymax]], if None it will be calculated using Subspace.limits_sigma\n :param square: argument passed to Subspace.limits_sigma\n :param Executor executor: responsible for executing the tasks\n :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size\n :param aspect: Passed to matplotlib's axes.set_aspect\n :param xlabel: String for label on x axis (may contain latex)\n :param ylabel: Same for y axis\n :param kwargs: extra argument passed to axes.imshow, useful for setting the colormap for instance, e.g. 
cmap='afmhot'\n :return: matplotlib.image.AxesImage\n\n \"\"\"\n import pylab\n f = _parse_f(f)\n limits = self.limits(limits)\n if limits is None:\n limits = self.limits_sigma()\n # if grid is None:\n if group_limits is None and group_by:\n group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)\n # grid = self.histogram(limits=limits, size=size, weight=weight, group_limits=group_limits, group_by=group_by)\n if figsize is not None:\n pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')\n if axes is None:\n axes = pylab.gca()\n fig = pylab.gcf()\n # if xlabel:\n pylab.xlabel(xlabel or self.expressions[0])\n # if ylabel:\n pylab.ylabel(ylabel or self.expressions[1])\n # axes.set_aspect(aspect)\n rgba8 = self.image_rgba(grid=grid, size=size, limits=limits, square=square, center=center, weight=weight, weight_stat=weight_stat,\n f=f, axes=axes,\n group_by=group_by, group_limits=group_limits, group_colors=group_colors, group_count=group_count,\n vmin=vmin, vmax=vmax,\n cmap=cmap)\n import matplotlib\n if group_by:\n if isinstance(group_colors, six.string_types):\n group_colors = matplotlib.cm.get_cmap(group_colors)\n if isinstance(group_colors, matplotlib.colors.Colormap):\n group_count = group_limits[2]\n colors = [group_colors(k / float(group_count - 1.)) for k in range(group_count)]\n else:\n colors = [matplotlib.colors.colorConverter.to_rgba(k) for k in group_colors]\n colormap = matplotlib.colors.ListedColormap(colors)\n gmin, gmax, group_count = group_limits # [:2]\n delta = (gmax - gmin) / (group_count - 1.)\n norm = matplotlib.colors.Normalize(gmin - delta / 2, gmax + delta / 2)\n sm = matplotlib.cm.ScalarMappable(norm, colormap)\n sm.set_array(1) # make matplotlib happy (strange behavious)\n colorbar = fig.colorbar(sm)\n if group_labels:\n colorbar.set_ticks(np.arange(gmin, gmax + delta / 2, delta))\n colorbar.set_ticklabels(group_labels)\n else:\n colorbar.set_ticks(np.arange(gmin, gmax + delta / 2, delta))\n colorbar.set_ticklabels(map(lambda x: \"%f\" % x, np.arange(gmin, gmax + delta / 2, delta)))\n colorbar.ax.set_ylabel(group_by)\n # matplotlib.colorbar.ColorbarBase(axes, norm=norm, cmap=colormap)\n im = axes.imshow(rgba8, extent=np.array(limits).flatten(), origin=\"lower\", aspect=aspect, **kwargs)\n else:\n norm = matplotlib.colors.Normalize(0, 23)\n sm = matplotlib.cm.ScalarMappable(norm, cmap)\n sm.set_array(1) # make matplotlib happy (strange behavious)\n colorbar = fig.colorbar(sm)\n im = axes.imshow(rgba8, extent=np.array(limits).flatten(), origin=\"lower\", aspect=aspect, **kwargs)\n colorbar = None\n return im, colorbar\n\n def plot1d(self, grid=None, size=64, limits=None, weight=None, figsize=None, f=\"identity\", axes=None, xlabel=None, ylabel=None, **kwargs):\n \"\"\"Plot the subspace using sane defaults to get a quick look at the data.\n\n :param grid: A 2d numpy array with the counts, if None it will be calculated using limits provided and Subspace.histogram\n :param size: Passed to Subspace.histogram\n :param limits: Limits for the subspace in the form [[xmin, xmax], [ymin, ymax]], if None it will be calculated using Subspace.limits_sigma\n :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size\n :param xlabel: String for label on x axis (may contain latex)\n :param ylabel: Same for y axis\n :param kwargs: extra argument passed to ...,\n\n \"\"\"\n import pylab\n f = _parse_f(f)\n limits = self.limits(limits)\n assert self.dimension == 1, \"can only plot 1d, not %s\" % self.dimension\n if limits is 
None:\n limits = self.limits_sigma()\n if grid is None:\n grid = self.histogram(limits=limits, size=size, weight=weight)\n if figsize is not None:\n pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')\n if axes is None:\n axes = pylab.gca()\n # if xlabel:\n pylab.xlabel(xlabel or self.expressions[0])\n # if ylabel:\n # pylab.ylabel(ylabel or self.expressions[1])\n pylab.ylabel(\"counts\" or ylabel)\n # axes.set_aspect(aspect)\n N = len(grid)\n xmin, xmax = limits[0]\n return pylab.plot(np.arange(N) / (N - 1.0) * (xmax - xmin) + xmin, f(grid,), drawstyle=\"steps\", **kwargs)\n # pylab.ylim(-1, 6)\n\n def plot_histogram_bq(self, f=\"identity\", size=64, limits=None, color=\"red\", bq_cleanup=True):\n import vaex.ext.bqplot\n limits = self.limits(limits)\n plot = vaex.ext.bqplot.BqplotHistogram(self, color, size, limits)\n if not hasattr(self, \"_bqplot\"):\n self._bqplot = {}\n self._bqplot[\"cleanups\"] = []\n else:\n if bq_cleanup:\n for cleanup in self._bqplot[\"cleanups\"]:\n cleanup()\n self._bqplot[\"cleanups\"] = []\n\n def cleanup(callback=plot.callback):\n self.df.signal_selection_changed.disconnect(callback=callback)\n self._bqplot[\"cleanups\"].append(cleanup)\n\n return plot\n\n def plot_bq(self, grid=None, size=256, limits=None, square=False, center=None, weight=None, figsize=None,\n aspect=\"auto\", f=\"identity\", fig=None, axes=None, xlabel=None, ylabel=None, title=None,\n group_by=None, group_limits=None, group_colors='jet', group_labels=None, group_count=None,\n cmap=\"afmhot\", scales=None, tool_select=False, bq_cleanup=True,\n **kwargs):\n import vaex.ext.bqplot\n import bqplot.interacts\n import bqplot.pyplot as p\n import ipywidgets as widgets\n import bqplot as bq\n f = _parse_f(f)\n limits = self.limits(limits)\n import vaex.ext.bqplot\n vaex.ext.bqplot.patch()\n if not hasattr(self, \"_bqplot\"):\n self._bqplot = {}\n self._bqplot[\"cleanups\"] = []\n else:\n if bq_cleanup:\n for cleanup in self._bqplot[\"cleanups\"]:\n cleanup()\n self._bqplot[\"cleanups\"] = []\n if limits is None:\n limits = self.limits_sigma()\n # if fig is None:\n if scales is None:\n x_scale = bq.LinearScale(min=limits[0][0], max=limits[0][1])\n y_scale = bq.LinearScale(min=limits[1][0], max=limits[1][1])\n scales = {'x': x_scale, 'y': y_scale}\n else:\n x_scale = scales[\"x\"]\n y_scale = scales[\"y\"]\n if 1:\n fig = p.figure() # actually, bqplot doesn't return it\n fig = p.current_figure()\n fig.fig_color = \"black\" # TODO, take the color from the colormap\n fig.padding_y = 0\n # if we don't do this, bqplot may flip some axes... 
report this bug\n x = np.arange(10)\n y = x**2\n p.plot(x, y, scales=scales)\n # p.xlim(*limits[0])\n # p.ylim(*limits[1])\n # if grid is None:\n if group_limits is None and group_by:\n group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)\n # fig = p.\n # if xlabel:\n fig.axes[0].label = xlabel or self.expressions[0]\n # if ylabel:\n fig.axes[1].label = ylabel or self.expressions[1]\n if title:\n fig.title = title\n # axes.set_aspect(aspect)\n rgba8 = self.image_rgba(grid=grid, size=size, limits=limits, square=square, center=center, weight=weight,\n f=f, axes=axes,\n group_by=group_by, group_limits=group_limits, group_colors=group_colors, group_count=group_count,\n cmap=cmap)\n # x_scale = p._context[\"scales\"][\"x\"]\n # y_scale = p._context[\"scales\"][\"y\"]\n src = \"http://localhost:8888/kernelspecs/python2/logo-64x64.png\"\n import bqplot.marks\n im = vaex.ext.bqplot.Image(src=src, scales=scales, x=0, y=0, width=1, height=1)\n if 0:\n size = 20\n x_data = np.arange(size)\n line = bq.Lines(x=x_data, y=np.random.randn(size), scales={'x': x_scale, 'y': y_scale},\n stroke_width=3, colors=['red'])\n\n ax_x = bq.Axis(scale=x_scale, tick_format='0.2f', grid_lines='solid')\n ax_y = bq.Axis(scale=y_scale, orientation='vertical', tick_format='0.2f', grid_lines='solid')\n panzoom = bq.PanZoom(scales={'x': [x_scale], 'y': [y_scale]})\n lasso = bqplot.interacts.LassoSelector()\n brush = bqplot.interacts.BrushSelector(x_scale=x_scale, y_scale=y_scale, color=\"green\")\n fig = bq.Figure(marks=[line, im], axes=[ax_x, ax_y], min_width=100, min_height=100, interaction=panzoom)\n else:\n fig.marks = list(fig.marks) + [im]\n\n def make_image(executor, limits):\n # print \"make image\" * 100\n self.executor = executor\n if self.df.has_selection():\n sub = self.selected()\n else:\n sub = self\n return sub.image_rgba(limits=limits, size=size, f=f)\n progress = widgets.FloatProgress(value=0.0, min=0.0, max=1.0, step=0.01)\n updater = vaex.ext.bqplot.DebouncedThreadedUpdater(self, size, im, make_image, progress_widget=progress)\n\n def update_image():\n limits = [x_scale.min, x_scale.max], [y_scale.min, y_scale.max]\n # print limits\n # print \"update...\", limits\n # vxbq.debounced_threaded_update(self.df, im, make_image2, limits=limits)\n updater.update(limits)\n\n def update(*args):\n update_image()\n y_scale.observe(update, \"min\")\n y_scale.observe(update, \"max\")\n x_scale.observe(update, \"min\")\n x_scale.observe(update, \"max\")\n update_image()\n # fig = kwargs.pop('figure', p.current_figure())\n tools = []\n tool_actions = []\n panzoom = bq.PanZoom(scales={'x': [x_scale], 'y': [y_scale]})\n tool_actions_map = {u\"m\": panzoom}\n tool_actions.append(u\"m\")\n\n fig.interaction = panzoom\n if tool_select:\n brush = bqplot.interacts.BrushSelector(x_scale=x_scale, y_scale=y_scale, color=\"green\")\n tool_actions_map[\"b\"] = brush\n tool_actions.append(\"b\")\n\n def update_selection(*args):\n def f():\n if brush.selected:\n (x1, y1), (x2, y2) = brush.selected\n ex1, ex2 = self.expressions\n mode = modes_names[modes_labels.index(button_selection_mode.value)]\n self.df.select_rectangle(ex1, ex2, limits=[[x1, x2], [y1, y2]], mode=mode)\n else:\n self.df.select_nothing()\n updater.update_select(f)\n brush.observe(update_selection, \"selected\")\n # fig.interaction = brush\n # callback = self.df.signal_selection_changed.connect(lambda df: update_image())\n callback = self.df.signal_selection_changed.connect(lambda df: updater.update_direct_safe())\n\n def cleanup(callback=callback):\n 
self.df.signal_selection_changed.disconnect(callback=callback)\n self._bqplot[\"cleanups\"].append(cleanup)\n\n button_select_nothing = widgets.Button(icon=\"fa-trash-o\")\n\n def select_nothing(button):\n self.df.select_nothing()\n button_select_nothing.on_click(select_nothing)\n tools.append(button_select_nothing)\n modes_names = \"replace and or xor subtract\".split()\n modes_labels = \"= & | ^ -\".split()\n button_selection_mode = widgets.ToggleButtons(description='', options=modes_labels)\n tools.append(button_selection_mode)\n\n def change_interact(*args):\n # print \"change\", args\n fig.interaction = tool_actions_map[button_action.value]\n # tool_actions = [\"m\", \"b\"]\n # tool_actions = [(\"m\", \"m\"), (\"b\", \"b\")]\n button_action = widgets.ToggleButtons(description='', options=tool_actions, icons=[\"fa-arrows\", \"fa-pencil-square-o\"])\n button_action.observe(change_interact, \"value\")\n tools.insert(0, button_action)\n button_action.value = \"m\" # tool_actions[-1]\n if len(tools) == 1:\n tools = []\n tools = widgets.HBox(tools)\n\n box_layout = widgets.Layout(display='flex',\n flex_flow='column',\n # border='solid',\n width='100%', height=\"100%\")\n fig.fig_margin = {'bottom': 40, 'left': 60, 'right': 10, 'top': 40}\n # fig.min_height = 700\n # fig.min_width = 400\n fig.layout = box_layout\n return widgets.VBox([fig, progress, tools])\n\n def figlarge(self, size=(10, 10)):\n import pylab\n pylab.figure(num=None, figsize=size, dpi=80, facecolor='w', edgecolor='k')\n\n # def bounded(self):\n # return self.bounded_by_minmax()\n\n def bounded_by(self, limits):\n \"\"\"Returns a bounded subspace (SubspaceBounded) with limits as given by limits\n\n :param limits: sequence of [(min, max), ..., (min, max)] values\n :rtype: SubspaceBounded\n \"\"\"\n return SubspaceBounded(self, np.array(limits))\n\n def bounded_by_minmax(self):\n \"\"\"Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.minmax()\n\n :rtype: SubspaceBounded\n \"\"\"\n bounds = self.minmax()\n return SubspaceBounded(self, bounds)\n\n bounded = bounded_by_minmax\n\n def bounded_by_sigmas(self, sigmas=3, square=False):\n \"\"\"Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.limits_sigma()\n\n :rtype: SubspaceBounded\n \"\"\"\n bounds = self.limits_sigma(sigmas=sigmas, square=square)\n return SubspaceBounded(self, bounds)\n\n def minmax(self):\n \"\"\"Return a sequence of [(min, max), ..., (min, max)] corresponding to each expression in this subspace ignoring NaN.\n \"\"\"\n raise NotImplementedError\n\n def mean(self):\n \"\"\"Return a sequence of [mean, ... , mean] corresponding to the mean of each expression in this subspace ignoring NaN.\n \"\"\"\n raise NotImplementedError\n\n def var(self, means=None):\n \"\"\"Return a sequence of [var, ... , var] corresponding to the variance of each expression in this subspace ignoring NaN.\n \"\"\"\n raise NotImplementedError\n\n def sum(self):\n \"\"\"Return a sequence of [sum, ... 
, sum] corresponding to the sum of values of each expression in this subspace ignoring NaN.\"\"\"\n raise NotImplementedError\n\n def histogram(self, limits, size=256, weight=None):\n \"\"\"Return a grid of shape (size, ..., size) corresponding to the dimensionality of this subspace containing the counts in each element\n\n The type of the grid of np.float64\n\n \"\"\"\n raise NotImplementedError\n\n def limits_sigma(self, sigmas=3, square=False):\n raise NotImplementedError\n\n def row(self, index):\n return np.array([self.df.evaluate(expression, i1=index, i2=index + 1)[0] for expression in self.expressions])\n\n\nclass SubspaceLocal(Subspace):\n \"\"\"Subclass of subspace which implemented methods that can be run locally.\n \"\"\"\n\n def _toarray(self, list):\n return np.array(list)\n\n @property\n def pre(self):\n self.executor.pre\n\n @property\n def post(self):\n self.executor.post\n\n def _task(self, task, progressbar=False):\n \"\"\"Helper function for returning tasks results, result when immediate is True, otherwise the task itself, which is a promise\"\"\"\n if self.delay:\n # should return a task or a promise nesting it\n return self.executor.schedule(task)\n else:\n import vaex.utils\n callback = None\n try:\n if progressbar == True:\n def update(fraction):\n bar.update(fraction)\n return True\n bar = vaex.utils.progressbar(task.name)\n callback = self.executor.signal_progress.connect(update)\n elif progressbar:\n callback = self.executor.signal_progress.connect(progressbar)\n result = self.executor.run(task)\n if progressbar == True:\n bar.finish()\n sys.stdout.write('\\n')\n return result\n finally:\n if callback:\n self.executor.signal_progress.disconnect(callback)\n\n def minmax(self, progressbar=False):\n def min_max_reduce(minmax1, minmax2):\n if minmax1 is None:\n return minmax2\n if minmax2 is None:\n return minmax1\n result = []\n for d in range(self.dimension):\n min1, max1 = minmax1[d]\n min2, max2 = minmax2[d]\n result.append((min(min1, min2), max(max1, max2)))\n return result\n\n def min_max_map(thread_index, i1, i2, *blocks):\n if self.is_masked or self.df.filtered:\n mask = self.df.evaluate_selection_mask(\"default\" if self.is_masked else None, i1=i1, i2=i2)\n blocks = [block[mask] for block in blocks]\n is_empty = all(~mask)\n if is_empty:\n return None\n # with lock:\n # print blocks\n # with lock:\n # print thread_index, i1, i2, blocks\n blocks = [_asfloat(block) for block in blocks]\n return [vaex.vaexfast.find_nan_min_max(block) for block in blocks]\n if 0: # TODO: implement using statisticNd and benchmark\n minmaxes = np.zeros((len(blocks), 2), dtype=float)\n minmaxes[:, 0] = np.inf\n minmaxes[:, 1] = -np.inf\n for i, block in enumerate(blocks):\n vaex.vaexfast.statisticNd([], block, minmaxes[i, :], [], [], 2)\n # minmaxes[~np.isfinite(minmaxes)] = np.nan\n return minmaxes\n task = TaskMapReduceLegacy(self.df, self.expressions, min_max_map, min_max_reduce, self._toarray, info=True, name=\"minmax\")\n return self._task(task, progressbar=progressbar)\n\n def mean(self):\n return self._moment(1)\n\n def _moment(self, moment=1):\n def mean_reduce(means_and_counts1, means_and_counts2):\n means_and_counts = []\n for (mean1, count1), (mean2, count2) in zip(means_and_counts1, means_and_counts2):\n means_and_counts.append([np.nansum([mean1 * count1, mean2 * count2]) / (count1 + count2), count1 + count2])\n return means_and_counts\n\n def remove_counts(means_and_counts):\n return self._toarray(means_and_counts)[:, 0]\n\n def mean_map(thread_index, i1, i2, 
*blocks):\n if self.is_masked or self.df.filtered:\n mask = self.df.evaluate_selection_mask(\"default\" if self.is_masked else None, i1=i1, i2=i2)\n return [(np.nanmean(block[mask]**moment), np.count_nonzero(~np.isnan(block[mask]))) for block in blocks]\n else:\n return [(np.nanmean(block**moment), np.count_nonzero(~np.isnan(block))) for block in blocks]\n task = TaskMapReduceLegacy(self.df, self.expressions, mean_map, mean_reduce, remove_counts, info=True)\n return self._task(task)\n\n def var(self, means=None):\n # variances are linear, use the mean to reduce\n def vars_reduce(vars_and_counts1, vars_and_counts2):\n vars_and_counts = []\n for (var1, count1), (var2, count2) in zip(vars_and_counts1, vars_and_counts2):\n vars_and_counts.append([np.nansum([var1 * count1, var2 * count2]) / (count1 + count2), count1 + count2])\n return vars_and_counts\n\n def remove_counts(vars_and_counts):\n return self._toarray(vars_and_counts)[:, 0]\n if self.is_masked or self.df.filtered:\n def var_map(thread_index, i1, i2, *blocks):\n mask = self.df.evaluate_selection_mask(\"default\" if self.is_masked else None, i1=i1, i2=i2)\n if means is not None:\n return [(np.nanmean((block[mask] - mean)**2), np.count_nonzero(~np.isnan(block[mask]))) for block, mean in zip(blocks, means)]\n else:\n return [(np.nanmean(block[mask]**2), np.count_nonzero(~np.isnan(block[mask]))) for block in blocks]\n task = TaskMapReduceLegacy(self.df, self.expressions, var_map, vars_reduce, remove_counts, info=True)\n else:\n def var_map(*blocks):\n if means is not None:\n return [(np.nanmean((block - mean)**2), np.count_nonzero(~np.isnan(block))) for block, mean in zip(blocks, means)]\n else:\n return [(np.nanmean(block**2), np.count_nonzero(~np.isnan(block))) for block in blocks]\n task = TaskMapReduceLegacy(self.df, self.expressions, var_map, vars_reduce, remove_counts)\n return self._task(task)\n\n def correlation(self, means=None, vars=None):\n if self.dimension != 2:\n raise ValueError(\"correlation is only defined for 2d subspaces, not %dd\" % self.dimension)\n\n def do_correlation(means, vars):\n meanx, meany = means\n sigmax, sigmay = vars[0]**0.5, vars[1]**0.5\n\n def remove_counts_and_normalize(covar_and_count):\n covar, counts = covar_and_count\n return covar / counts / (sigmax * sigmay)\n\n def covars_reduce(covar_and_count1, covar_and_count2):\n if covar_and_count1 is None:\n return covar_and_count2\n if covar_and_count2 is None:\n return covar_and_count1\n else:\n covar1, count1 = covar_and_count1\n covar2, count2 = covar_and_count2\n return [np.nansum([covar1, covar2]), count1 + count2]\n\n mask = self.df.mask\n\n def covar_map(thread_index, i1, i2, *blocks):\n # return [(np.nanmean((block[mask[i1:i2]]-mean)**2), np.count_nonzero(~np.isnan(block[mask[i1:i2]]))) for block, mean in zip(blocks, means)]\n blockx, blocky = blocks\n if self.is_masked:\n blockx, blocky = blockx[mask[i1:i2]], blocky[mask[i1:i2]]\n counts = np.count_nonzero(~(np.isnan(blockx) | np.isnan(blocky)))\n if counts == 0:\n return None\n else:\n return np.nansum((blockx - meanx) * (blocky - meany)), counts\n\n task = TaskMapReduceLegacy(self.df, self.expressions, covar_map, covars_reduce, remove_counts_and_normalize, info=True)\n return self._task(task)\n if means is None:\n if self.delay:\n means_wrapper = [None]\n\n def do_vars(means):\n means_wrapper[0] = means\n return self.var(means)\n\n def do_correlation_wrapper(vars):\n return do_correlation(means_wrapper[0], vars)\n return self.mean().then(do_vars).then(do_correlation_wrapper)\n else:\n means = 
self.mean()\n vars = self.var(means=means)\n return do_correlation(means, vars)\n else:\n if vars is None:\n if self.delay:\n def do_correlation_wrapper(vars):\n return do_correlation(means, vars)\n return self.vars(means=means).then(do_correlation_wrapper)\n else:\n vars = self.var(means)\n return do_correlation(means, vars)\n else:\n if means is None:\n means = self.mean()\n if vars is None:\n vars = self.var(means=means)\n return do_correlation(means, vars)\n\n def sum(self):\n def nansum(x): return np.nansum(x, dtype=np.float64)\n # TODO: we can speed up significantly using our own nansum, probably the same for var and mean\n nansum = vaex.vaexfast.nansum\n if self.is_masked or self.df.filtered:\n task = TaskMapReduceLegacy(self.df,\n self.expressions, lambda thread_index, i1, i2, *blocks: [nansum(block[self.df.evaluate_selection_mask(\"default\" if self.is_masked else None, i1=i1, i2=i2)])\n for block in blocks],\n lambda a, b: np.array(a) + np.array(b), self._toarray, info=True)\n else:\n task = TaskMapReduceLegacy(self.df, self.expressions, lambda *blocks: [nansum(block) for block in blocks], lambda a, b: np.array(a) + np.array(b), self._toarray)\n return self._task(task)\n\n def histogram(self, limits, size=256, weight=None, progressbar=False, group_by=None, group_limits=None):\n expressions = self.expressions\n if group_by:\n expressions = list(expressions) + [group_by]\n limits = list(limits) + [group_limits[:2]] # [[group_limits[0] - 0,5, group_limits[1]+0.5]]\n # assert group_limits[2] == 1\n size = (group_limits[2],) + (size,) * (len(expressions) - 1)\n task = TaskHistogram(self.df, self, expressions, size, limits, masked=self.is_masked, weight=weight)\n return self._task(task, progressbar=progressbar)\n\n def bin_mean(self, expression, limits, size=256, progressbar=False, group_by=None, group_limits=None):\n # todo, fix progressbar into two...\n counts = self.histogram(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits)\n weighted = self.histogram(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits,\n weight=expression)\n mean = weighted / counts\n mean[counts == 0] = np.nan\n return mean\n\n def bin_mean_cyclic(self, expression, max_value, limits, size=256, progressbar=False, group_by=None, group_limits=None):\n # todo, fix progressbar into two...\n meanx = self.bin_mean(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits,\n expression=\"cos((%s)/%r*2*pi)\" % (expression, max_value))\n meany = self.bin_mean(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits,\n expression=\"sin((%s)/%r*2*pi)\" % (expression, max_value))\n angles = np.arctan2(meany, meanx)\n values = ((angles + 2 * np.pi) % (2 * np.pi)) / (2 * np.pi) * max_value\n length = np.sqrt(meanx**2 + meany**2)\n length[~np.isfinite(meanx)] = np.nan\n return values, length\n\n def mutual_information(self, limits=None, grid=None, size=256):\n if limits is None:\n limits_done = Task.fulfilled(self.minmax())\n else:\n limits_done = Task.fulfilled(limits)\n if grid is None:\n if limits is None:\n histogram_done = limits_done.then(lambda limits: self.histogram(limits, size=size))\n else:\n histogram_done = Task.fulfilled(self.histogram(limits, size=size))\n else:\n histogram_done = Task.fulfilled(grid)\n mutual_information_promise = histogram_done.then(vaex.kld.mutual_information)\n return mutual_information_promise if self.delay else 
mutual_information_promise.get()\n\n def limits_percentage(self, percentage=99.73, square=False):\n import scipy.ndimage\n limits = []\n for expr in self.expressions:\n subspace = self.df(expr)\n if self.is_selected():\n subspace = subspace.selected()\n limits_minmax = subspace.minmax()\n vmin, vmax = limits_minmax[0]\n size = 1024 * 16\n counts = subspace.histogram(size=size, limits=limits_minmax)\n cumcounts = np.concatenate([[0], np.cumsum(counts)])\n cumcounts /= cumcounts.max()\n # TODO: this is crude.. see the details!\n f = (1 - percentage / 100.) / 2\n x = np.linspace(vmin, vmax, size + 1)\n l = scipy.interp([f, 1 - f], cumcounts, x)\n limits.append(l)\n return limits\n\n def limits_sigma(self, sigmas=3, square=False):\n if self.delay:\n means_wrapper = [None]\n\n def do_vars(means):\n means_wrapper[0] = means\n return self.var(means)\n\n def do_limits(vars):\n stds = vars**0.5\n means = means_wrapper[0]\n if square:\n stds = np.repeat(stds.mean(), len(stds))\n return np.array(list(zip(means - sigmas * stds, means + sigmas * stds)))\n return self.mean().then(do_vars).then(do_limits)\n else:\n means = self.mean()\n stds = self.var(means=means)**0.5\n if square:\n stds = np.repeat(stds.mean(), len(stds))\n return np.array(list(zip(means - sigmas * stds, means + sigmas * stds)))\n\n def _not_needed_current(self):\n index = self.df.get_current_row()\n\n def find(thread_index, i1, i2, *blocks):\n if (index >= i1) and (index < i2):\n return [block[index - i1] for block in blocks]\n else:\n return None\n task = TaskMapReduceLegacy(self.df, self.expressions, find, lambda a, b: a if b is None else b, info=True)\n return self._task(task)\n\n def nearest(self, point, metric=None):\n metric = metric or [1.] * len(point)\n\n def nearest_in_block(thread_index, i1, i2, *blocks):\n if self.is_masked:\n mask = self.df.evaluate_selection_mask(\"default\", i1=i1, i2=i2)\n if mask.sum() == 0:\n return None\n blocks = [block[mask] for block in blocks]\n distance_squared = np.sum([(blocks[i] - point[i])**2. * metric[i] for i in range(self.dimension)], axis=0)\n min_index_global = min_index = np.argmin(distance_squared)\n if self.is_masked: # we skipped some indices, so correct for that\n min_index_global = np.argmin((np.cumsum(mask) - 1 - min_index)**2)\n # with lock:\n # print i1, i2, min_index, distance_squared, [block[min_index] for block in blocks]\n return min_index_global.item() + i1, distance_squared[min_index].item()**0.5, [block[min_index].item() for block in blocks]\n\n def nearest_reduce(a, b):\n if a is None:\n return b\n if b is None:\n return a\n if a[1] < b[1]:\n return a\n else:\n return b\n if self.is_masked:\n pass\n task = TaskMapReduceLegacy(self.df,\n self.expressions,\n nearest_in_block,\n nearest_reduce, info=True)\n return self._task(task)\n"
] |
[
[
"numpy.ma.array",
"numpy.arange",
"numpy.array"
],
[
"numpy.arange"
],
[
"numpy.sqrt",
"numpy.linspace",
"numpy.cumsum",
"numpy.arctan2",
"numpy.all",
"numpy.zeros_like",
"numpy.argmin",
"numpy.random.randn",
"numpy.nanmean",
"numpy.swapaxes",
"numpy.clip",
"numpy.arange",
"numpy.nansum",
"matplotlib.cm.ScalarMappable",
"matplotlib.colors.colorConverter.to_rgba",
"numpy.log1p",
"numpy.zeros",
"numpy.isnan",
"matplotlib.colors.colorConverter.to_rgb",
"matplotlib.colors.ListedColormap",
"numpy.array",
"numpy.meshgrid",
"numpy.sum",
"numpy.isfinite",
"matplotlib.colors.Normalize",
"matplotlib.cm.get_cmap"
]
] |
ALexanderpu/CUDAC-PerformanceEvaluation
|
[
"1106792a41781b490685941d53bcf5bf43f4ca32"
] |
[
"SparkCCM.py"
] |
[
"# running under python 2.7 \n__author__ = \"Bo Pu\"\n\nimport sys\nimport ConfigParser\nimport pandas as pd\nfrom pyspark.sql import SparkSession\nimport json\nimport numpy as np\nimport os\n\n# for single L; which will be not used \n# read parameter combinations config and fill into the objects\nclass Sample:\n def __init__(self, _observations, _targets, _e, _tau, _l, _samples, _multil, _genoutput):\n self.observations = _observations\n self.targets = _targets\n self.e = _e\n self.tau = _tau\n self.samples = _samples\n self.l = _l\n self.multil = _multil\n self.genoutput = _genoutput\n\ndef ccm(LArr, EArr, TauArr, num_samples, time_series, x, y, scriptPath, generateOutput):\n observations, targets = time_series[x].tolist(), time_series[y].tolist()\n paras = []\n for l in LArr:\n for e in EArr:\n for tau in TauArr:\n s = Sample(observations, targets, e, tau, l, num_samples, 0, generateOutput)\n para = json.dumps(vars(s))\n #print para\n paras.append(para)\n # start the spark context \n spark = SparkSession.builder.appName(\"PySparkCCM\").getOrCreate()\n paraRdd = spark.sparkContext.parallelize(paras)\n piped = paraRdd.pipe(scriptPath)\n result = piped.collect()\n spark.stop()\n return result\n\n\n# for multi Ls in one task\nclass SampleMultiL:\n def __init__(self, _observations, _targets, _e, _tau, _samples, _lstart, _lend, _linterval, _multil, _genoutput, _outputpath, _gpu):\n self.observations = _observations\n self.targets = _targets\n self.e = _e\n self.tau = _tau\n self.samples = _samples\n self.lstart = _lstart\n self.lend = _lend\n self.linterval = _linterval\n self.multil = _multil\n self.genoutput = _genoutput\n self.outputpath = _outputpath\n self.gpu = _gpu\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"please input the local path of ccm.cfg\")\n sys.exit()\n\n # read input cfg file: the first argument is the file path\n cfgfile = sys.argv[1]\n \n config = ConfigParser.RawConfigParser()\n config.read(cfgfile)\n try:\n input_path = config.get('paths', 'input')\n output_path = config.get('paths', 'output')\n script_path = config.get('paths', 'sparkccmlib')\n\n E = config.get('parameters', 'E')\n Tau = config.get('parameters', 'tau')\n EArr = map(int, E.split(\",\"))\n TauArr = map(int, Tau.split(\",\"))\n\n num_samples = config.getint('parameters', 'num_samples')\n \n LStart = config.getint('parameters', 'LStart')\n LEnd = config.getint('parameters', 'LEnd')\n LInterval = config.getint('parameters', 'LInterval')\n\n xname = config.get('inputs', 'x')\n yname = config.get('inputs', 'y')\n\n time_series = pd.read_csv(input_path)\n observations, targets = time_series[xname].tolist(), time_series[yname].tolist()\n\n GenerateOutputCSV = config.getint('options', 'GenerateOutputCSV')\n GPUAcceleration = config.getint('options', 'GPUAcceleration')\n print(\"GPUAcceleration: \" + str(GPUAcceleration))\n # generate para rdd to separate the tasks to different workers\n paras = []\n for e in EArr:\n for tau in TauArr:\n s = SampleMultiL(observations, targets, e, tau, num_samples, LStart, LEnd, LInterval, 1, GenerateOutputCSV, output_path, GPUAcceleration)\n para = json.dumps(vars(s))\n #print para\n paras.append(para)\n # start the spark context \n \n print(\"size: \" + str(len(paras)))\n \n \n spark = SparkSession.builder.appName(\"PySparkCCMMultiL\").getOrCreate()\n paraRdd = spark.sparkContext.parallelize(paras)\n piped = paraRdd.pipe(script_path)\n result = piped.collect()\n\n for ele in result:\n print(ele)\n\n spark.stop()\n\n # output path in the result\n # with 
open(\"outputcsvpath.out\", \"w\") as f:\n # for record in result:\n # f.write(record)\n except:\n print(\"parsing config file error\")"
] |
[
[
"pandas.read_csv"
]
] |
justinpayan/StackOverflowNER-NS
|
[
"8459cee99582e5bddf94fb1dff4fcad5fc93fd54"
] |
[
"regularizers.py"
] |
[
"import abc\nimport math\nimport torch\nfrom torch.optim import Optimizer, SGD\nfrom settings import args, FILL_VAL, TOKENS_WEIGHT\nfrom utils import get_losses, get_model_dir\nfrom parallel import DataParallelCriterion\nfrom torch.nn import CrossEntropyLoss, MSELoss\nimport pickle as pkl\nimport os\nfrom torch.nn.functional import softmax\n\n\nclass Regularizer(abc.ABC):\n def __init__(self, model, parallel_model, dataloaders, task, prev_task=None):\n self.model = model\n self.parallel_model = parallel_model\n self.dataloaders = dataloaders\n self.task = task\n self.prev_task = prev_task\n @abc.abstractmethod\n def task_start_do(self):\n return NotImplemented\n @abc.abstractmethod\n def task_end_do(self):\n return NotImplemented\n def save_reg_params(self):\n model_dir = get_model_dir([self.task])\n reg_params_path = os.path.join(model_dir, \"reg_params.pkl\")\n with open(reg_params_path, 'wb') as f:\n pkl.dump(self.model.reg_params,f)\n def load_reg_params(self):\n if self.prev_task:\n model_dir = get_model_dir([self.prev_task])\n reg_params_path = os.path.join(model_dir, \"reg_params.pkl\")\n with open(reg_params_path, 'rb') as f:\n self.model.reg_params = pkl.load(f)\n input()\n\n\nclass MAS(Regularizer):\n def task_start_do(self,freeze_layers=[]):\n #self.load_reg_params()\n task_start_do(self.model, freeze_layers)\n def task_end_do(self):\n updater = Omega_update(self.model.parameters(), lr=0.0001, momentum=0.9)\n compute_importance(self.model, self.parallel_model, updater, self.dataloaders)\n accumulate_reg_params(self.model)\n self.save_reg_params()\n\nclass EWC(Regularizer):\n def task_start_do(self,freeze_layers=[]):\n #self.load_reg_params()\n task_start_do(self.model, freeze_layers)\n def task_end_do(self):\n updater = Omega_update(self.model.parameters(), lr=0.0001, momentum=0.9)\n compute_importance(self.model, self.parallel_model, updater, self.dataloaders, loss_type=\"ewc\")\n accumulate_reg_params(self.model)\n self.save_reg_params()\n\n\nREG_TYPES = {\n \"mas\": MAS,\n \"ewc\": EWC,\n}\nargs.REG_TYPE_KEYS = REG_TYPE_KEYS = list(REG_TYPES.keys())\n\n\ndef task_start_do(model, freeze_layers=[]):\n if not hasattr(model,\"reg_params\"):\n initialize_reg_params(model,freeze_layers)\n else:\n clean_omega_sum(model,freeze_layers)\n\n\ndef initialize_reg_params(model,freeze_layers=[]):\n \"\"\"initialize an omega for each parameter to zero\"\"\"\n reg_params={}\n for name, param in model.named_parameters():\n if not name in freeze_layers:\n # print('initializing param',name)\n omega=torch.FloatTensor(param.size()).zero_()\n omega=omega.cuda()\n init_val=param.data.clone()\n init_val=init_val.cuda()\n reg_param={}\n reg_param['omega'] = omega\n reg_param['omega_sum'] = omega\n #initialize the initial value to that before starting training\n reg_param['init_val'] = init_val\n reg_params[param]=reg_param\n if 'data_count' not in reg_params:\n reg_params['data_count'] = 0\n reg_params['lambda'] = args.reg_lambda\n model.reg_params = reg_params\n\n\ndef clean_omega_sum(model,freeze_layers=[]):\n for name, param in model.named_parameters():\n if not name in freeze_layers:\n omega=torch.FloatTensor(param.size()).zero_()\n omega=omega.cuda()\n reg_param = model.reg_params.get(param)\n reg_param['omega_sum'] = omega\n model.reg_params[param]=reg_param\n model.reg_params['data_count'] = 0\n\n\nclass Weight_Regularized_AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n Parameters:\n lr (float): learning rate. 
Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,\n correct_bias=correct_bias)\n super(Weight_Regularized_AdamW, self).__init__(params, defaults)\n\n def step(self, reg_params, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n reg_lambda=reg_params.get('lambda')\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n step_size = group['lr']\n if group['correct_bias']: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state['step']\n bias_correction2 = 1.0 - beta2 ** state['step']\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n #Regularize PART CODE GOES HERE\n if p in reg_params:\n\n reg_param=reg_params.get(p)\n #get omega for this parameter\n omega=reg_param.get('omega')\n #initial value when the training start\n init_val=reg_param.get('init_val')\n curr_weight_val=p.data\n\n #get the difference\n weight_dif=curr_weight_val.add(-1,init_val)\n #compute the MAS penalty\n regulizer=weight_dif.mul(2*reg_lambda*omega)\n del weight_dif\n del curr_weight_val\n del omega\n del init_val\n #add the MAS regulizer to the gradient\n # grad.add_(regulizer)\n p.data.add_(-group['lr'], regulizer)\n del regulizer\n #Regularize PART CODE ENDS\n if group['weight_decay'] > 0.0:\n p.data.add_(-group['lr'] * group['weight_decay'], p.data)\n\n return loss\n\n# update omega for one task; use in compute_importance\nclass Omega_update(SGD):\n \"\"\"\n Update the paramerter importance using the gradient of the function output norm. To be used at deployment time.\n reg_params:parameters omega to be updated\n batch_index,batch_size:used to keep a running average over the seen samples\n \"\"\"\n def __init__(self, params, lr=0.001, momentum=0, dampening=0, weight_decay=0, nesterov=False):\n\n super(Omega_update, self).__init__(params,lr,momentum,dampening,weight_decay,nesterov)\n\n def __setstate__(self, state):\n super(Omega_update, self).__setstate__(state)\n\n def step(self, reg_params, batch_size, closure=None):\n \"\"\"\n Performs a single parameters importance update setp\n \"\"\"\n #print('************************DOING A STEP************************')\n reg_params['data_count'] += batch_size\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n #if the parameter has an omega to be updated\n for p in group['params']:\n\n #print('************************ONE PARAM************************')\n\n if p.grad is None:\n continue\n\n if p in reg_params:\n\n #HERE MAS IMPOERANCE UPDATE GOES\n #get the gradient\n unreg_dp = p.grad.data.clone()\n reg_param = reg_params.get(p)\n #get parameter omega\n omega = reg_param.get('omega_sum')\n if args.seq_train_type == \"ewc\":\n omega = omega.add((unreg_dp)**2)\n else:\n omega = omega.add(unreg_dp.abs_())\n reg_param['omega_sum'] = omega\n reg_params[p] = reg_param\n #HERE MAS IMPOERANCE UPDATE ENDS\n\n return loss#HAS NOTHING TO DO\n\n# update omega for one task\ndef compute_importance(model, parallel_model, updater, dataloaders, loss_type=\"l2\"):\n \"\"\"Mimic the depoloyment setup where the model is applied on some samples and those are used to update the importance params\n Uses the L2norm of the function output. 
This is what we MAS uses as default\n \"\"\"\n # model.eval() # Set model to training mode so we get the gradient\n # train_loss_fct = DataParallelCriterion(CrossEntropyLoss(ignore_index=FILL_VAL), args.device_ids)\n\n softmax = torch.nn.Softmax(dim=-1)\n if loss_type == \"l2\":\n # loss_fct = DataParallelCriterion(torch.nn.MSELoss(reduction='mean'), args.device_ids)\n loss_fct = torch.nn.MSELoss(reduction='mean')\n elif loss_type == \"l1\":\n # loss_fct = DataParallelCriterion(torch.nn.L1Loss(reduction='mean'), args.device_ids)\n loss_fct = torch.nn.L1Loss(reduction='mean')\n elif loss_type == \"ewc\":\n CELoss = CrossEntropyLoss(ignore_index=FILL_VAL, reduction='mean', weight=TOKEN_WEIGHT)\n loss_fct = CELoss\n # loss_fct = DataParallelCriterion(CELoss, args.device_ids)\n\n # Iterate over data.\n for dataloader in dataloaders:\n for cq, len_cq, cqa, len_cqa, Y, _, _ in dataloader:\n # get the inputs\n n_inputs = sum(len(_cq) for _cq in cq)\n for i in range(len(cqa)):\n cq[i] = (cq[i].to(args.device_ids[i]),)\n len_cq[i] = len_cq[i].to(args.device_ids[i])\n cqa[i] = (cqa[i].to(args.device_ids[i]),)\n len_cqa[i] = len_cqa[i].to(args.device_ids[i])\n Y[i] = Y[i].to(args.device_ids[i])\n\n # zero the parameter gradients\n updater.zero_grad()\n\n # forward\n if loss_type != \"ewc\":\n # logits = parallel_model(cq)\n logits = model(cq)\n logits = [logit[range(len(logit)), len_cq[i]-1, :] for i, logit in enumerate(logits)]\n #logits = [softmax(logit, dim=-1) for logit in logits]\n target_zeros = [torch.zeros(logit.size()).to(args.device_ids[i]) for i, logit in enumerate(logits)]\n logits = [softmax(logit) for logit in logits]\n\n if loss_type == \"l2\":\n targets = loss_fct(logits, target_zeros)\n elif loss_type == \"l1\":\n targets = loss_fct(logits, target_zeros)\n else:\n # targets, _ = get_losses(parallel_model, cqa, Y, None, None, loss_fct)\n targets, _ = get_losses(model, cqa, Y, None, None, loss_fct)\n\n\n targets /= n_inputs \n\n #compute the gradients\n targets.backward()\n\n #update the parameters importance\n updater.step(model.reg_params, n_inputs)\n\n# omega of task1 + omega of task2 ...\n# new_omega=omega_sum/data_count; omega=new_omega+prev_omega\ndef accumulate_reg_params(model, freeze_layers=[]):\n \"\"\"accumelate the newly computed omega with the previously stroed one from the old previous tasks\"\"\"\n for name, param in model.named_parameters():\n if not name in freeze_layers:\n if param in model.reg_params:\n reg_param=model.reg_params.get(param)\n # print('restoring previous omega',name)\n prev_omega=reg_param.get('omega')\n new_omega=reg_param.get('omega_sum') / model.reg_params[\"data_count\"]\n acc_omega=torch.add(prev_omega,new_omega)\n\n del reg_param['omega_sum']\n reg_param['omega'] = acc_omega\n\n model.reg_params[param]=reg_param\n del prev_omega\n del new_omega\n del acc_omega\n else:\n if param in model.reg_params:\n reg_param=model.reg_params.get(param)\n # print('removing unused omega',name)\n del reg_param['omega']\n del model.reg_params[param]\n\n\nclass Weight_Regularized_SGD(SGD):\n r\"\"\"Implements SGD training with importance params regulization. 
IT inherents stochastic gradient descent (optionally with momentum).\n Nesterov momentum is based on the formula from\n\n \"\"\"\n\n def __init__(self, params, lr=0.001, momentum=0, dampening=0, weight_decay=0, nesterov=False):\n super(Weight_Regularized_SGD, self).__init__(params, lr,momentum,dampening,weight_decay,nesterov)\n\n\n def __setstate__(self, state):\n super(Weight_Regularized_SGD, self).__setstate__(state)\n\n\n def step(self, reg_params,closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n reg_lambda=reg_params.get('lambda')\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n\n #MAS PART CODE GOES HERE\n #if this param has an omega to use for regulization\n if p in reg_params:\n\n reg_param=reg_params.get(p)\n #get omega for this parameter\n omega=reg_param.get('omega')\n #initial value when the training start\n init_val=reg_param.get('init_val')\n\n curr_wegiht_val=p.data\n #move the tensors to cuda\n init_val=init_val.cuda()\n omega=omega.cuda()\n\n #get the difference\n weight_dif=curr_wegiht_val.add(-1,init_val)\n #compute the MAS penalty\n regulizer=weight_dif.mul(2*reg_lambda*omega)\n del weight_dif\n del curr_wegiht_val\n del omega\n del init_val\n #add the MAS regulizer to the gradient\n d_p.add_(regulizer)\n del regulizer\n #MAS PARAT CODE ENDS\n if weight_decay != 0:\n d_p.add_(weight_decay,p.data.sign())\n\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = d_p.clone()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n p.data.add_(-group['lr'], d_p)\n\n return loss\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.CrossEntropyLoss",
"torch.add",
"torch.zeros_like",
"torch.nn.L1Loss",
"torch.nn.MSELoss"
]
] |
divyanshugit/Machine-Learning-Lab-EC792B
|
[
"2c0ceeef67dcbf9dd1135d0b4616d9f94205fd66"
] |
[
"kNN/kNN.py"
] |
[
"import numpy as np\nfrom math import sqrt\n\nclass KNN():\n \"\"\" K Nearest Neighbors classifier.\n Parameters:\n -----------\n k: int\n The number of closest neighbors that will determine the class of the \n sample that we wish to predict.\n \"\"\"\n def __init__(self, k=5):\n self.k = k\n\n def euclidean_distance(self, x1, x2):\n \"\"\"\n Calculate the euclidean distance between two rows.\n \"\"\" \n distance = 0.0\n \n for i in range(len(x1)-1):\n distance += (x1[i] - x2[i])**2\n \n return sqrt(distance)\n def _vote(self, neighbor_labels):\n \"\"\" Return the most common class among the neighbor samples \"\"\"\n counts = np.bincount(neighbor_labels.astype('int'))\n return counts.argmax()\n\n def predict(self, X_test, X_train, y_train):\n y_pred = np.empty(X_test.shape[0])\n # Determine the class of each sample\n for i, test_sample in enumerate(X_test):\n # Sort the training samples by their distance to the test sample and get the K nearest\n idx = np.argsort([self.euclidean_distance(test_sample, x) for x in X_train])[:self.k]\n # Extract the labels of the K nearest neighboring training samples\n k_nearest_neighbors = np.array([y_train[i] for i in idx])\n # Label sample as the most common class label\n y_pred[i] = self._vote(k_nearest_neighbors)\n\n return y_pred\n\nX = np.random.rand(100, 2)\ny = np.random.randint(0, 2, size=100)\nX_train = X[:80]\ny_train = y[:80]\nX_test = X[80:]\ny_test = y[80:]\nprint(\"Actual Value:\",y_test)\nKNN = KNN()\nprint(\"Pridicted Value:\",KNN.predict(X_test, X_train, y_train))\n\n# Returns\n#Actual Value: [0 1 1 1 0 0 0 0 1 1 0 1 1 1 0 1 0 1 0 0]\n#Pridicted Value: [0. 1. 1. 0. 0. 0. 0. 0. 1. 0. 0. 1. 1. 1. 1. 1. 0. 0. 1. 1.]"
] |
[
[
"numpy.array",
"numpy.random.rand",
"numpy.empty",
"numpy.random.randint"
]
] |
chen0040/keras-language-translator-web-api
|
[
"06dc1d106e2293abaadd506992988a4a66b5eb78"
] |
[
"translator_train/eng_to_fra_glove_translator_train.py"
] |
[
"from keras.models import Model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers import Dense, Input, Embedding\nfrom keras.preprocessing.sequence import pad_sequences\nfrom collections import Counter\nimport nltk\nimport numpy as np\nimport os\nimport sys\nimport zipfile\nimport urllib.request\n\nBATCH_SIZE = 64\nNUM_EPOCHS = 100\nHIDDEN_UNITS = 256\nNUM_SAMPLES = 10000\nMAX_VOCAB_SIZE = 10000\nGLOVE_EMBEDDING_SIZE = 100\nDATA_PATH = 'data/fra.txt'\nWEIGHT_FILE_PATH = 'models/eng-to-fra/eng-to-fra-glove-weights.h5'\nARCHITECTURE_FILE_PATH = 'models/eng-to-fra/eng-to-fra-glove-architecture.json'\n\ntarget_counter = Counter()\n\nGLOVE_MODEL = \"very_large_data/glove.6B.\" + str(GLOVE_EMBEDDING_SIZE) + \"d.txt\"\nWHITELIST = 'abcdefghijklmnopqrstuvwxyz1234567890?.,'\n\n\ndef in_white_list(_word):\n for char in _word:\n if char in WHITELIST:\n return True\n\n return False\n\n\ndef reporthook(block_num, block_size, total_size):\n read_so_far = block_num * block_size\n if total_size > 0:\n percent = read_so_far * 1e2 / total_size\n s = \"\\r%5.1f%% %*d / %d\" % (\n percent, len(str(total_size)), read_so_far, total_size)\n sys.stderr.write(s)\n if read_so_far >= total_size: # near the end\n sys.stderr.write(\"\\n\")\n else: # total size is unknown\n sys.stderr.write(\"read %d\\n\" % (read_so_far,))\n\n\ndef download_glove():\n if not os.path.exists(GLOVE_MODEL):\n\n glove_zip = 'very_large_data/glove.6B.zip'\n\n if not os.path.exists('very_large_data'):\n os.makedirs('very_large_data')\n\n if not os.path.exists(glove_zip):\n print('glove file does not exist, downloading from internet')\n urllib.request.urlretrieve(url='http://nlp.stanford.edu/data/glove.6B.zip', filename=glove_zip,\n reporthook=reporthook)\n\n print('unzipping glove file')\n zip_ref = zipfile.ZipFile(glove_zip, 'r')\n zip_ref.extractall('very_large_data')\n zip_ref.close()\n\n\ndef load_glove():\n download_glove()\n _word2em = {}\n file = open(GLOVE_MODEL, mode='rt', encoding='utf8')\n for line in file:\n words = line.strip().split()\n word = words[0]\n embeds = np.array(words[1:], dtype=np.float32)\n _word2em[word] = embeds\n file.close()\n return _word2em\n\nword2em = load_glove()\n\nlines = open(DATA_PATH, 'rt', encoding='utf8').read().split('\\n')\nfor line in lines[: min(NUM_SAMPLES, len(lines)-1)]:\n input_text, target_text = line.split('\\t')\n input_words = [w for w in nltk.word_tokenize(input_text.lower())]\n target_text = 'START ' + target_text.lower() + ' END'\n target_words = [w for w in nltk.word_tokenize(target_text)]\n for w in target_words:\n target_counter[w] += 1\n\ntarget_word2idx = dict()\nfor idx, word in enumerate(target_counter.most_common(MAX_VOCAB_SIZE)):\n target_word2idx[word[0]] = idx + 1\n\ntarget_word2idx['UNK'] = 0\n\ntarget_idx2word = dict([(idx, word) for word, idx in target_word2idx.items()])\n\nnum_decoder_tokens = len(target_idx2word)\n\nnp.save('models/eng-to-fra/eng-to-fra-glove-target-word2idx.npy', target_word2idx)\nnp.save('models/eng-to-fra/eng-to-fra-glove-target-idx2word.npy', target_idx2word)\n\nunknown_emb = np.random.randn(GLOVE_EMBEDDING_SIZE)\n\nnp.save('models/eng-to-fra/eng-to-fra-glove-unknown-emb', unknown_emb)\n\nencoder_input_data = []\n\nencoder_max_seq_length = 0\ndecoder_max_seq_length = 0\n\nlines = open(DATA_PATH, 'rt', encoding='utf8').read().split('\\n')\nfor line in lines[: min(NUM_SAMPLES, len(lines)-1)]:\n input_text, target_text = line.split('\\t')\n target_text = 'START ' + target_text.lower() + ' END'\n 
input_words = [w for w in nltk.word_tokenize(input_text.lower())]\n target_words = [w for w in nltk.word_tokenize(target_text)]\n encoder_input_emb = []\n for w in input_words:\n emb = unknown_emb\n if w in word2em:\n emb = word2em[w]\n encoder_input_emb.append(emb)\n\n encoder_input_data.append(encoder_input_emb)\n encoder_max_seq_length = max(len(encoder_input_emb), encoder_max_seq_length)\n decoder_max_seq_length = max(len(target_words), decoder_max_seq_length)\n\nencoder_input_data = pad_sequences(encoder_input_data, encoder_max_seq_length)\n\ndecoder_target_data = np.zeros(shape=(NUM_SAMPLES, decoder_max_seq_length, num_decoder_tokens))\ndecoder_input_data = np.zeros(shape=(NUM_SAMPLES, decoder_max_seq_length, num_decoder_tokens))\nlines = open(DATA_PATH, 'rt', encoding='utf8').read().split('\\n')\nfor lineIdx, line in enumerate(lines[: min(NUM_SAMPLES, len(lines)-1)]):\n _, target_text = line.split('\\t')\n target_text = 'START ' + target_text.lower() + ' END'\n target_words = [w for w in nltk.word_tokenize(target_text)]\n for idx, w in enumerate(target_words):\n w2idx = 0 # default [UNK]\n if w in target_word2idx:\n w2idx = target_word2idx[w]\n decoder_input_data[lineIdx, idx, w2idx] = 1\n if idx > 0:\n decoder_target_data[lineIdx, idx-1, w2idx] = 1\n\ncontext = dict()\ncontext['num_decoder_tokens'] = num_decoder_tokens\ncontext['encoder_max_seq_length'] = encoder_max_seq_length\ncontext['decoder_max_seq_length'] = decoder_max_seq_length\n\nnp.save('models/eng-to-fra/eng-to-fra-glove-context.npy', context)\n\nencoder_inputs = Input(shape=(None, GLOVE_EMBEDDING_SIZE), name='encoder_inputs')\nencoder_lstm = LSTM(units=HIDDEN_UNITS, return_state=True, name='encoder_lstm')\nencoder_outputs, encoder_state_h, encoder_state_c = encoder_lstm(encoder_inputs)\nencoder_states = [encoder_state_h, encoder_state_c]\n\ndecoder_inputs = Input(shape=(None, num_decoder_tokens), name='decoder_inputs')\ndecoder_lstm = LSTM(units=HIDDEN_UNITS, return_state=True, return_sequences=True, name='decoder_lstm')\ndecoder_outputs, decoder_state_h, decoder_state_c = decoder_lstm(decoder_inputs,\n initial_state=encoder_states)\ndecoder_dense = Dense(units=num_decoder_tokens, activation='softmax', name='decoder_dense')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\njson = model.to_json()\nopen(ARCHITECTURE_FILE_PATH, 'w').write(json)\n\ncheckpoint = ModelCheckpoint(filepath=WEIGHT_FILE_PATH, save_best_only=True)\nmodel.fit([encoder_input_data, decoder_input_data], decoder_target_data, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS,\n verbose=1, validation_split=0.2, callbacks=[checkpoint])\n\nmodel.save_weights(WEIGHT_FILE_PATH)\n\n\n\n\n\n\n\n"
] |
[
[
"numpy.array",
"numpy.random.randn",
"numpy.zeros",
"numpy.save"
]
] |
jesbu1/spinningup
|
[
"fd54d9e06febc7ff5696a63d1e84e2c16d38e486",
"fd54d9e06febc7ff5696a63d1e84e2c16d38e486",
"fd54d9e06febc7ff5696a63d1e84e2c16d38e486"
] |
[
"gym/quick_script.py",
"spinup/algos/pytorch/superpos_sac/superpos_sac.py",
"spinup/__init__.py"
] |
[
"import gym\nimport numpy as np\nenv = gym.make('SawyerPush-v0')\nfor _ in range(100):\n env.reset()\n for i in range(150):\n env.render()\n env.step(np.random.uniform(0, 1, size=(4,)))\n",
"from copy import deepcopy\nimport itertools\nimport numpy as np\nimport torch\nfrom torch.optim import Adam\nimport torch.nn as nn\nimport gym\nimport time\nimport spinup.algos.pytorch.superpos_sac.core as core\nfrom spinup.utils.logx import EpochLogger\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nTASK_HORIZON = 150\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for SAC agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)\n self.obs2_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)\n self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)\n self.rew_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.act_buf[self.ptr] = act\n self.rew_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr+1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n batch = dict(obs=self.obs_buf[idxs],\n obs2=self.obs2_buf[idxs],\n act=self.act_buf[idxs],\n rew=self.rew_buf[idxs],\n done=self.done_buf[idxs])\n return {k: torch.as_tensor(v, dtype=torch.float32).cuda() for k,v in batch.items()}\n\nclass MultiTaskReplayBuffer(ReplayBuffer):\n \"\"\"\n Maintains ``num_tasks`` separate replay buffers of size ``size`` each\n \"\"\"\n def __init__(self, obs_dim, act_dim, size, num_tasks):\n self.obs_buf = torch.zeros(core.multi_task_combined_shape(num_tasks, size, obs_dim), dtype=torch.float32)\n self.obs2_buf = torch.zeros(core.multi_task_combined_shape(num_tasks, size, obs_dim), dtype=torch.float32)\n self.act_buf = torch.zeros(core.multi_task_combined_shape(num_tasks, size, act_dim), dtype=torch.float32)\n self.rew_buf = torch.zeros(core.combined_shape(num_tasks, size), dtype=torch.float32)\n self.done_buf = torch.zeros(core.combined_shape(num_tasks, size), dtype=torch.float32)\n self.ptr = torch.zeros(core.combined_shape(num_tasks), dtype=torch.long)\n self.size, self.max_size = 0, size\n self.num_tasks = num_tasks\n\n def store(self, obs, act, rew, next_obs, done, task):\n self.obs_buf[task, self.ptr[task]] = torch.from_numpy(obs)\n self.obs2_buf[task, self.ptr[task]] = torch.from_numpy(next_obs)\n self.act_buf[task, self.ptr[task]] = torch.from_numpy(act)\n self.rew_buf[task, self.ptr[task]] = rew\n self.done_buf[task, self.ptr[task]] = done\n self.ptr[task] = (self.ptr[task] + 1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n \n def batched_store(self, obs, act, rew, next_obs, done):\n self.obs_buf[:, self.ptr] = torch.from_numpy(obs)\n self.obs2_buf[:, self.ptr] = torch.from_numpy(next_obs)\n self.act_buf[:, self.ptr] = torch.from_numpy(act)\n self.rew_buf[:, self.ptr] = torch.from_numpy(rew)\n self.done_buf[:, self.ptr] = torch.from_numpy(done)\n self.ptr = torch.fmod(self.ptr + 1, self.max_size)\n self.size = min(self.size+1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n # Returns a (batch_size * num_tasks) x dim dict of tensors\n idxs = np.random.randint(0, self.size, size=(self.num_tasks, batch_size))\n process_buffers = lambda buf: torch.cat([buf[i, idxs[i]] for i in range(self.num_tasks)], dim=0)\n batch = dict(obs=process_buffers(self.obs_buf),\n 
obs2=process_buffers(self.obs2_buf),\n act=process_buffers(self.act_buf),\n rew=process_buffers(self.rew_buf),\n done=process_buffers(self.done_buf))\n #if separate_by_task:\n return {k: v.cuda() for k,v in batch.items()}\n #return {k: v.view(self.num_tasks * batch_size, -1).cuda() for k,v in batch.items()}\n\ndef superpos_sac(env_fn, num_tasks, psp_type, actor_critic=core.MLPActorCritic, ac_kwargs=dict(), seed=0, \n steps_per_epoch=4000, epochs=100, replay_size=int(1e6), gamma=0.99, \n polyak=0.995, lr=1e-3, target_entropy=None, batch_size=128, start_steps=10000, \n update_after=1000, update_every=50, num_test_episodes=10, max_ep_len=1000, \n logger_kwargs=dict(), save_freq=50):\n \"\"\"\n Soft Actor-Critic (SAC)\n\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n\n\n num_tasks: The number of tasks for the env in env_fn\n\n actor_critic: The constructor method for a PyTorch Module with an ``act`` \n method, a ``pi`` module, a ``q1`` module, and a ``q2`` module.\n The ``act`` method and ``pi`` module should accept batches of \n observations as inputs, and ``q1`` and ``q2`` should accept a batch \n of observations and a batch of actions as inputs. When called, \n ``act``, ``q1``, and ``q2`` should return:\n\n =========== ================ ======================================\n Call Output Shape Description\n =========== ================ ======================================\n ``act`` (batch, act_dim) | Numpy array of actions for each \n | observation.\n ``q1`` (batch,) | Tensor containing one current estimate\n | of Q* for the provided observations\n | and actions. (Critical: make sure to\n | flatten this!)\n ``q2`` (batch,) | Tensor containing the other current \n | estimate of Q* for the provided observations\n | and actions. (Critical: make sure to\n | flatten this!)\n =========== ================ ======================================\n\n Calling ``pi`` should return:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``a`` (batch, act_dim) | Tensor containing actions from policy\n | given observations.\n ``logp_pi`` (batch,) | Tensor containing log probabilities of\n | actions in ``a``. Importantly: gradients\n | should be able to flow back into ``a``.\n =========== ================ ======================================\n\n ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object \n you provided to SAC.\n\n seed (int): Seed for random number generators.\n\n steps_per_epoch (int): Number of steps of interaction (state-action pairs) \n for the agent and the environment in each epoch.\n\n epochs (int): Number of epochs to run and train agent.\n\n replay_size (int): Maximum length of replay buffer.\n\n gamma (float): Discount factor. (Always between 0 and 1.)\n\n polyak (float): Interpolation factor in polyak averaging for target \n networks. Target networks are updated towards main networks \n according to:\n\n .. math:: \\\\theta_{\\\\text{targ}} \\\\leftarrow \n \\\\rho \\\\theta_{\\\\text{targ}} + (1-\\\\rho) \\\\theta\n\n where :math:`\\\\rho` is polyak. (Always between 0 and 1, usually \n close to 1.)\n\n lr (float): Learning rate (used for both policy and value learning).\n\n alpha (float): Entropy regularization coefficient. 
(Equivalent to \n inverse of reward scale in the original SAC paper.)\n\n batch_size (int): Minibatch size for SGD.\n\n start_steps (int): Number of steps for uniform-random action selection,\n before running real policy. Helps exploration.\n\n update_after (int): Number of env interactions to collect before\n starting to do gradient descent updates. Ensures replay buffer\n is full enough for useful updates.\n\n update_every (int): Number of env interactions that should elapse\n between gradient descent updates. Note: Regardless of how long \n you wait between updates, the ratio of env steps to gradient steps \n is locked to 1.\n\n num_test_episodes (int): Number of episodes to test the deterministic\n policy at the end of each epoch.\n\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n\n logger_kwargs (dict): Keyword args for EpochLogger.\n\n save_freq (int): How often (in terms of gap between epochs) to save\n the current policy and value function.\n\n \"\"\"\n BATCHED = True\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n torch.manual_seed(seed)\n np.random.seed(seed)\n \n if not BATCHED:\n env, test_env = env_fn(), env_fn()\n #from metaworld.benchmarks import MT40\n #test_env = MT40.get_train_tasks()\n\n else:\n # Creating vectorized batch of envs\n envs = []\n for i in range(num_tasks):\n env = env_fn()\n env.set_task(i)\n envs.append(env)\n obs_dim = env.observation_space.shape\n act_dim = env.action_space.shape[0]\n\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n act_limit = env.action_space.high[0]\n\n # Create actor-critic module and target networks\n ac = actor_critic(num_tasks, env.observation_space, env.action_space, psp_type, **ac_kwargs).cuda()\n ac_targ = deepcopy(ac).cuda()\n\n # Freeze target networks with respect to optimizers (only update via polyak averaging)\n for p in ac_targ.parameters():\n p.requires_grad = False\n\n # List of parameters for both Q-networks (save this for convenience)\n q_params = itertools.chain(ac.q1.parameters(), ac.q2.parameters())\n\n # Experience buffer\n replay_buffer = MultiTaskReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size, num_tasks=num_tasks)\n\n # Learned Log_Alpha\n log_alpha = torch.zeros((num_tasks, 1), requires_grad=True, device=\"cuda\")\n\n # Alpha Optimizer\n alpha_optimizer = Adam([log_alpha], lr=lr)\n\n # Target Entropy\n if target_entropy:\n target_entropy = target_entropy\n else:\n target_entropy = -np.prod(env.action_space.shape).item()\n\n # Count variables (protip: try to get a feel for how different size networks behave!)\n if psp_type == 'Proposed':\n var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.q1, ac.q2, ac.context_gen])\n logger.log('\\nNumber of parameters: \\t pi: %d, \\t q1: %d, \\t q2: %d \\t context_gen: %d\\n'%var_counts)\n else:\n var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.q1, ac.q2])\n logger.log('\\nNumber of parameters: \\t pi: %d, \\t q1: %d, \\t q2: %d\\n'%var_counts)\n\n # Set up function for computing SAC Q-losses\n def compute_loss_q(data):\n o, a, r, o2, d = data['obs'], data['act'], data['rew'], data['obs2'], data['done']\n\n q1 = ac.q1(o,a)\n q2 = ac.q2(o,a)\n\n # Bellman backup for Q functions\n with torch.no_grad():\n # Target actions come from *current* policy\n if psp_type == 'Proposed':\n context_map = ac.context_gen(o)\n a2, logp_a2 = ac.pi(o2, context=context_map['Pi'])\n else:\n a2, logp_a2 = ac.pi(o2)\n\n # Target Q-values\n q1_pi_targ = 
ac_targ.q1(o2, a2)\n q2_pi_targ = ac_targ.q2(o2, a2)\n q_pi_targ = torch.min(q1_pi_targ, q2_pi_targ)\n\n log_alpha_corrected_task = torch.matmul(o[..., -num_tasks:], log_alpha)\n\n backup = r + gamma * (1 - d) * (q_pi_targ - log_alpha_corrected_task.exp() * logp_a2)\n #backup = r + gamma * (1 - d) * (q_pi_targ - logp_a2)\n\n # MSE loss against Bellman backup\n loss_q1 = ((q1 - backup)**2).mean()\n loss_q2 = ((q2 - backup)**2).mean()\n loss_q = loss_q1 + loss_q2\n\n # Useful info for logging\n q_info = dict(Q1Vals=q1.cpu().detach().numpy(),\n Q2Vals=q2.cpu().detach().numpy())\n\n return loss_q, q_info\n\n # Set up function for computing SAC pi loss\n def compute_loss_pi(data):\n o = data['obs']\n if psp_type == 'Proposed':\n context_map = ac.context_gen(o)\n pi, logp_pi = ac.pi(o, context=context_map['Pi'])\n else:\n pi, logp_pi = ac.pi(o)\n q1_pi = ac.q1(o, pi)\n q2_pi = ac.q2(o, pi)\n q_pi = torch.min(q1_pi, q2_pi)\n\n log_alpha_corrected_task = torch.matmul(o[..., -num_tasks:].detach(), log_alpha)\n # Compute alpha loss\n loss_alpha = -(log_alpha_corrected_task * (logp_pi + target_entropy).detach()).mean()\n \n # Entropy-regularized policy loss\n loss_pi = (log_alpha_corrected_task.detach().exp() * logp_pi - q_pi).mean()\n\n # Useful info for logging\n pi_info = dict(LogPi=logp_pi.cpu().detach().numpy())\n\n return loss_pi, loss_alpha, pi_info\n\n # Set up optimizers for policy and q-function\n if psp_type == 'Proposed':\n pi_optimizer = Adam(list(ac.pi.parameters()) + list(ac.context_gen.parameters()), lr=lr)\n q_optimizer = Adam(q_params, lr=lr)\n else:\n pi_optimizer = Adam(ac.pi.parameters(), lr=lr)\n q_optimizer = Adam(q_params, lr=lr)\n\n # Set up model saving\n logger.setup_pytorch_saver({\n 'ac': ac, \n 'log_alpha': log_alpha, \n 'optim_pi': pi_optimizer, \n 'optim_q': q_optimizer, \n 'optim_alpha': alpha_optimizer,\n 'replay_buffer': replay_buffer})\n\n def update(data):\n # First run one gradient descent step for Q1 and Q2\n q_optimizer.zero_grad()\n loss_q, q_info = compute_loss_q(data)\n loss_q.backward()\n q_optimizer.step()\n\n # Record things\n logger.store(LossQ=loss_q.item(), **q_info)\n\n # Freeze Q-networks so you don't waste computational effort \n # computing gradients for them during the policy learning step.\n for p in q_params:\n p.requires_grad = False\n\n # Next run one gradient descent step for pi and alpha.\n loss_pi, loss_alpha, pi_info = compute_loss_pi(data)\n alpha_optimizer.zero_grad()\n loss_alpha.backward()\n alpha_optimizer.step()\n pi_optimizer.zero_grad()\n loss_pi.backward()\n pi_optimizer.step()\n\n # Unfreeze Q-networks so you can optimize it at next DDPG step.\n for p in q_params:\n p.requires_grad = True\n\n # Record things\n logger.store(LossPi=loss_pi.item(), **pi_info)\n logger.store(LossAlpha=loss_alpha.item())\n\n # Finally, update target networks by polyak averaging.\n with torch.no_grad():\n for p, p_targ in zip(ac.parameters(), ac_targ.parameters()):\n # NB: We use an in-place operations \"mul_\", \"add_\" to update target\n # params, as opposed to \"mul\" and \"add\", which would make new tensors.\n p_targ.data.mul_(polyak)\n p_targ.data.add_((1 - polyak) * p.data)\n\n def get_action(o, deterministic=False):\n return ac.act(torch.as_tensor(o, dtype=torch.float32).cuda(), \n deterministic)\n \n def get_batched_action(o, deterministic=False):\n return ac.batched_act(torch.as_tensor(np.array(o), dtype=torch.float32).cuda(), \n deterministic)\n\n def test_agent():\n for j in range(num_test_episodes):\n o, d, ep_ret, ep_len, success, 
goalDist, reachDist = test_env.reset(), False, 0, 0, False, None, None\n while not(d or (ep_len == max_ep_len)):\n # Take deterministic actions at test time \n o, r, d, info = test_env.step(get_action(o, True))\n ep_ret += r\n ep_len += 1\n if 'success' in info:\n success = info['success'] or success\n if 'goalDist' in info and info['goalDist'] is not None:\n goalDist = info['goalDist']\n if 'reachDist' in info and info['reachDist'] is not None:\n reachDist = info['reachDist']\n if goalDist != None:\n logger.store(TestGoalDist=goalDist)\n if reachDist != None:\n logger.store(TestReachDist=reachDist)\n logger.store(TestEpRet=ep_ret, TestEpLen=ep_len, TestSuccess=success)\n\n # Prepare for interaction with environment\n total_steps = 0\n start_time = time.time()\n writer = SummaryWriter(logger.output_dir)\n\n\n # Main loop: collect experience in env and update/log each epoch\n for epoch in range(epochs):\n steps_before = total_steps\n while (total_steps - steps_before) < steps_per_epoch:\n if BATCHED:\n obs = []\n ep_rets = []\n ep_lens = []\n successes = []\n for (i, env) in enumerate(envs):\n o, ep_ret, ep_len, success = env.reset(task=i), 0, 0, False\n obs.append(o)\n ep_rets.append(ep_ret)\n ep_lens.append(ep_len)\n successes.append(success)\n dones = [False for i in range(num_tasks)] \n for step in range(TASK_HORIZON):\n # Until start_steps have elapsed, randomly sample actions\n # from a uniform distribution for better exploration. Afterwards, \n # use the learned policy. \n if total_steps > start_steps:\n action = get_batched_action(obs)\n else:\n action = [env.action_space.sample() for env in envs]\n\n\n # Step the env\n r_s = []\n obs_2 = []\n infos = []\n for (i, env) in enumerate(envs):\n o2, r, d, info = env.step(action[i])\n obs_2.append(o2)\n r_s.append(r)\n infos.append(info)\n ep_rets[i] += r\n ep_lens[i] += 1\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n dones[i] = False if ep_lens[i]==max_ep_len else d\n\n # Store experience to replay buffer\n replay_buffer.batched_store(\n np.array(obs, np.float32).reshape(num_tasks, 1, -1), \n np.array(action, np.float32).reshape(num_tasks, 1, -1), \n np.array(r_s, np.float32).reshape(num_tasks, 1), \n np.array(obs_2, np.float32).reshape(num_tasks, 1, -1), \n np.array(dones, np.float32).reshape(num_tasks, 1),\n )\n\n # Super critical, easy to overlook step: make sure to update \n # most recent observation!\n obs = obs_2\n\n # End of trajectory handling\n for (i, env) in enumerate(envs):\n if 'success' in infos[i]:\n successes[i] = infos[i]['success'] or successes[i]\n if dones[i] or (ep_lens[i] == max_ep_len):\n logger.store(EpRet=ep_rets[i], EpLen=ep_lens[i], EpSuccess=successes[i])\n logger.store(**{'SuccessTask%d' % i: successes[i]})\n obs[i], ep_rets[i], ep_lens[i], successes[i] = env.reset(task=i), 0, 0, False\n total_steps += (1 * num_tasks)\n\n # Update handling\n if total_steps >= update_after:\n batch = replay_buffer.sample_batch(2 * batch_size) # Ratio of 2 training steps per time step\n update(data=batch)\n \"\"\"\n else:\n for task in range(num_tasks):\n o, ep_ret, ep_len, success = env.reset(task=task), 0, 0, False\n for step in range(TASK_HORIZON):\n # Until start_steps have elapsed, randomly sample actions\n # from a uniform distribution for better exploration. Afterwards, \n # use the learned policy. 
\n if total_steps > start_steps:\n a = get_action(o)\n else:\n a = env.action_space.sample()\n\n # Step the env\n o2, r, d, info = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n d = False if ep_len==max_ep_len else d\n\n # Store experience to replay buffer\n replay_buffer.store(o, a, r, o2, d, task)\n\n # Super critical, easy to overlook step: make sure to update \n # most recent observation!\n o = o2\n\n # End of trajectory handling\n if 'success' in info:\n success = info['success'] or success\n if d or (ep_len == max_ep_len):\n logger.store(EpRet=ep_ret, EpLen=ep_len, EpSuccess=success)\n o, ep_ret, ep_len, success = env.reset(task=task), 0, 0, False\n\n total_steps += 1\n\n \"\"\"\n\n # End of epoch handling\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs):\n if BATCHED:\n logger.save_state({'envs' : envs, 'epoch': epoch, 'total_steps':total_steps}, None)\n else:\n logger.save_state({'env' : env, 'epoch': epoch, 'total_steps':total_steps}, None)\n\n # Test the performance of the deterministic version of the agent.\n #test_agent()\n\n # Log info about epoch\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('EpSuccess', average_only=True)\n if BATCHED:\n for i in range(num_tasks):\n logger.log_tabular('SuccessTask%d' % i, average_only=True)\n #logger.log_tabular('TestEpRet', with_min_and_max=True)\n #logger.log_tabular('TestEpLen', average_only=True)\n #if 'TestGoalDist' in logger.epoch_dict:\n # logger.log_tabular('TestGoalDist', with_min_and_max=True)\n #if 'TestReachDist' in logger.epoch_dict:\n # logger.log_tabular('TestReachDist', with_min_and_max=True)\n #if 'TestSuccess' in logger.epoch_dict:\n # logger.log_tabular('TestSuccess', average_only=True)\n logger.log_tabular('TotalEnvInteracts', total_steps)\n logger.log_tabular('Q1Vals', with_min_and_max=True)\n logger.log_tabular('Q2Vals', with_min_and_max=True)\n logger.log_tabular('LogPi', with_min_and_max=True)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('LossAlpha', average_only=True)\n logger.log_tabular('LossQ', average_only=True)\n logger.log_tabular('Time', time.time()-start_time)\n logger.dump_tabular()\n # write context distribution info\n def write_context_info(module_list: nn.ModuleList, name):\n for module in module_list:\n if hasattr(module, \"o\"):\n for task in range(num_tasks):\n writer.add_histogram(str(task) + \"/\" + name, module.o[task].cpu().detach().cpu().numpy(), global_step=total_steps)\n if epoch % 10 == 0:\n write_context_info(ac.pi.net, \"pi\")\n write_context_info(ac.q1.q, \"q1\")\n write_context_info(ac.q2.q, \"q2\")\n writer.close()\n\n\n\nif __name__ == '__main__':\n TASK_HORIZON = 150\n PATHS_PER_TASK = 3\n NUM_TASKS = 10\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='MT10Helper-v0')\n parser.add_argument('--hid', type=int, default=256)\n parser.add_argument('--l', type=int, default=5)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--epochs', type=int, default=900)\n parser.add_argument('--batch_size', type=int, default=128) # real is 128 x 10\n parser.add_argument('--lr', type=float, default=3e-4) # real is 128 x 10\n 
parser.add_argument('--num_tasks', type=int, default=NUM_TASKS)\n    #eg = ExperimentGrid(name='superpos_sac-MT10_with_bias_%s_context_q_%s' % (args.psp_type, hidden_sizes_name))\n    parser.add_argument('--psp_type', type=str, default='Rand')\n    args = parser.parse_args()\n\n    from spinup.utils.run_utils import setup_logger_kwargs\n    exp_name = 'superpos_sac-MT10_with_bias_%s_context_q_%s' % (args.psp_type, str(tuple([args.hid] * args.l)))\n    logger_kwargs = setup_logger_kwargs(exp_name, args.seed)\n\n    torch.set_num_threads(torch.get_num_threads())\n\n\n    steps_per_epoch = TASK_HORIZON * PATHS_PER_TASK * NUM_TASKS\n\n    superpos_sac(lambda : gym.make(args.env), num_tasks=args.num_tasks, actor_critic=core.MLPActorCritic, psp_type=args.psp_type,\n        seed=args.seed, steps_per_epoch=steps_per_epoch, epochs=args.epochs, lr=args.lr, batch_size=args.batch_size, update_after=TASK_HORIZON * NUM_TASKS,\n        num_test_episodes=NUM_TASKS * 10,\n        start_steps=1000, max_ep_len=TASK_HORIZON,\n        ac_kwargs=dict(hidden_sizes=[args.hid]*args.l, activation=torch.nn.ReLU), \n        logger_kwargs=logger_kwargs)",
"# Disable TF deprecation warnings.\n# Syntax from tf1 is not expected to be compatible with tf2.\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n# Algorithms\nfrom spinup.algos.tf1.ddpg.ddpg import ddpg as ddpg_tf1\nfrom spinup.algos.tf1.ppo.ppo import ppo as ppo_tf1\nfrom spinup.algos.tf1.sac.sac import sac as sac_tf1\nfrom spinup.algos.tf1.td3.td3 import td3 as td3_tf1\nfrom spinup.algos.tf1.trpo.trpo import trpo as trpo_tf1\nfrom spinup.algos.tf1.vpg.vpg import vpg as vpg_tf1\n\nfrom spinup.algos.pytorch.ddpg.ddpg import ddpg as ddpg_pytorch\nfrom spinup.algos.pytorch.ppo.ppo import ppo as ppo_pytorch\nfrom spinup.algos.pytorch.sac.sac import sac as sac_pytorch\nfrom spinup.algos.pytorch.td3.td3 import td3 as td3_pytorch\nfrom spinup.algos.pytorch.trpo.trpo import trpo as trpo_pytorch\nfrom spinup.algos.pytorch.vpg.vpg import vpg as vpg_pytorch\nfrom spinup.algos.pytorch.superpos_sac.superpos_sac import superpos_sac as psp_sac_pytorch\n\n# Loggers\nfrom spinup.utils.logx import Logger, EpochLogger\n\n# Version\nfrom spinup.version import __version__"
] |
[
[
"numpy.random.uniform"
],
[
"torch.optim.Adam",
"torch.fmod",
"numpy.random.seed",
"torch.zeros",
"torch.manual_seed",
"torch.min",
"torch.from_numpy",
"torch.matmul",
"torch.as_tensor",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"numpy.prod",
"torch.get_num_threads",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"tensorflow.compat.v1.logging.set_verbosity"
]
] |
NICALab/Inducing-Functions-through-RL
|
[
"e2171ff5e14bb272353e7df5156104ad2a85a3ae"
] |
[
"scripts/plot.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport argparse\nfrom collections import defaultdict\nimport seaborn as sns\nimport pandas as pd\n\ntask_default_list = ['task_b_2021',\n 'task_b_vision_only_2021',\n 'task_b_sequence_ext_use_pred_20',\n 'task_b_sequence_ext_use_pred_60',\n 'task_b_sequence_ext_use_pred_80',\n 'task_b_random_ext_8',\n 'task_b_random_ext_10',\n 'task_b_random_ext_12',\n 'task_b_sequence_ext_use_pred_perm1',\n 'task_b_sequence_ext_use_pred_perm2',\n 'task_b_sequence_ext_use_pred_perm3',\n 'cifar10_2021',\n 'cifar10_sequence_ext_use_pred_2021',\n 'cifar10_vision_only_2021'\n ]\n\n\ndef moving_average(x, w):\n return np.convolve(x, np.ones(w), 'valid') / w\n\n\ndef plot_one(exp_names, csv_slices, feature, env_name):\n # plot features for every experiments\n fig = plt.figure(figsize=(8, 8))\n fig.canvas.set_window_title(feature)\n for csv_slice in csv_slices:\n plt.plot(moving_average(csv_slice[feature].to_numpy(), 100))\n plt.legend(exp_names)\n plt.title(env_name, fontsize=17)\n plt.xlabel(\"iteration\", fontsize=15)\n plt.xticks(fontsize=13)\n plt.ylabel(feature, fontsize=15)\n plt.yticks(fontsize=13)\n\n # make dataframe for multiple experiments\n task_list = []\n for task in task_default_list:\n if any(task in s for s in exp_names):\n task_list.append(task)\n num_df = len(task_list)\n df = []\n for i in range(num_df):\n feature_vals_list = []\n feature_vals_len_list = []\n print(i)\n for j, exp_name in enumerate(exp_names):\n if task_list[i] in exp_name:\n print(task_list[i], exp_name)\n csv_slice = csv_slices[j]\n feature_vals = moving_average(csv_slice[feature].to_numpy(), 100)\n max_len = min([2500, len(feature_vals)])\n feature_vals_list.append(feature_vals[:max_len])\n feature_vals_len_list.append(range(max_len))\n\n feature_vals_array = np.concatenate(feature_vals_list, axis=0)\n feature_vals_len_array = np.concatenate(feature_vals_len_list, axis=0)\n df_i = pd.DataFrame({'iteration': feature_vals_len_array,\n task_list[i]: feature_vals_array})\n df.append(df_i)\n\n fig = plt.figure(figsize=(8, 8))\n fig.canvas.set_window_title(feature)\n for i in range(num_df):\n sns.lineplot(data=df[i], x='iteration', y=task_list[i])\n plt.legend(task_list)\n plt.title(env_name, fontsize=17)\n plt.xlabel(\"iteration\", fontsize=15)\n plt.xticks(fontsize=13)\n plt.ylabel(feature, fontsize=15)\n plt.yticks(fontsize=13)\n\n\n\n\ndef plot_data(args):\n path = args.file\n features = args.f\n style = args.s\n\n plt.style.use(style)\n features = features[0].split(\",\")\n\n for feature in features:\n path = path.rstrip('/').rstrip('\\\\')\n env_name = path.split('/')[-1]\n method = env_name.split('-')[0]\n env_name = env_name.replace(method + '-', '')\n csv_paths = glob.glob(f\"{path}/**/progress.csv\")\n exp_names = [csv_path.split(\"/\")[-2] for csv_path in csv_paths]\n\n assert len(csv_paths) > 0, \"There is no csv files\"\n\n csv_slices = []\n for csv_path in csv_paths:\n csv = pd.read_csv(csv_path)\n csv_slices.append(csv.loc[:, [feature]])\n del csv\n\n plot_one(exp_names, csv_slices, feature, env_name)\n plt.show()\n\n\nif __name__ == \"__main__\":\n # To run, refer README.md\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str,\n help='path to the task directory')\n parser.add_argument('--f', type=str, nargs='+',\n help='List of features to plot')\n parser.add_argument('--s', type=str, default='ggplot',\n help='Style of plots, Look at 
(https://matplotlib.org/3.1.1/gallery/style_sheets/style_sheets_reference.html)')\n args = parser.parse_args()\n plot_data(args)"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"pandas.read_csv",
"matplotlib.pyplot.title",
"pandas.DataFrame",
"numpy.ones",
"numpy.concatenate",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
]
] |
gamaievsky/DescripteursHarmoniquesAudio
|
[
"551e253058502049a91803da8b0412b5ffb1bd60",
"551e253058502049a91803da8b0412b5ffb1bd60"
] |
[
"Comparison.py",
"spectral_clustering_audio.py"
] |
[
"# Representations abstraites\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport params\n\n# # Ouverture des listes\n# with open ('liste1', 'rb') as fp:\n# l1x = pickle.load(fp)\n# with open ('liste2', 'rb') as fp:\n# l1y = pickle.load(fp)\n# with open ('liste1v', 'rb') as fp:\n# l2x = pickle.load(fp)\n# with open ('liste2v', 'rb') as fp:\n# l2y = pickle.load(fp)\n\n#Affichage\ndef Affichage(l1x,l1y,l2x,l2y):\n color = params.color_abstr\n\n plt.figure()\n ax = plt.subplot()\n\n plt.plot(l1x, l1y, 'b'+'--')\n plt.plot(l1x, l1y, 'b'+'o',label = 'Piano')\n for i in range(len(l1x)):\n ax.annotate(' {}'.format(i+1), (l1x[i], l1y[i]), color='black')\n plt.plot(l2x, l2y, 'r'+'--')\n plt.plot(l2x, l2y, 'r'+'o', label = 'Violon')\n for i in range(len(l2x)):\n ax.annotate(' {}'.format(i+1), (l2x[i], l2y[i]), color='black')\n\n\n d1, d2 = 'diffConcordance', 'crossConcordance'\n plt.xlabel(d1[0].upper() + d1[1:])\n plt.ylabel(d2[0].upper() + d2[1:])\n plt.title('Cadence ' + ' (' + d1[0].upper() + d1[1:] + ', ' + d2[0].upper() + d2[1:] + ')')\n plt.legend(frameon=True, framealpha=0.75)\n plt.show()\n\n\npts1 = [np.array((l1x[t],l1y[t])) for t in range(len(l1x))]\npts2 = [np.array((l2x[t],l2y[t])) for t in range(len(l1x))]\n\n# #distance euclidienne\n# def dist(x,y):\n# return np.sqrt(np.sum((x-y)**2))\n#\n#\n# def distance(pts1,pts2,type = 'diff'):\n# distance = 0\n# if type == 'stat':\n# for t in range(len(pts1)):\n# distance += dist(pts1[t], pts2[t])\n# return distance\n# else :\n# pts1_diff = [pts1[t+1]-pts1[t] for t in range(len(pts1)-1)]\n# pts2_diff = [pts2[t+1]-pts2[t] for t in range(len(pts2)-1)]\n# for t in range(len(pts1_diff)):\n# distance += dist(pts1_diff[t], pts2_diff[t])\n# return distance\n\n\n# print(distance(pts1,pts2,'stat'))\n\npoints = np.asarray([pts1, pts2])\n\n# Fonction qui calcule l'éloignement de courbes nomalisées, correspondant à différents timbres \ndef dispersion(points,type = 'diff'):\n if type == 'stat':\n return np.linalg.norm(np.std(points,axis = 0), axis = 1)\n else :\n points_diff = np.zeros((points.shape[0],points.shape[1]-1,points.shape[2]))\n for i in range(points.shape[1]-1):\n points_diff[:,i] = points[:,i+1]-points[:,i]\n return np.linalg.norm(np.std(points_diff,axis = 0), axis = 1)\n\n\n\nprint(dispersion(points))\n\n\n\n\n\n# Affichage(l1x,l1y,l2x,l2y)\n",
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\n======================\nLaplacian segmentation\n======================\n\nThis notebook implements the laplacian segmentation method of\n`McFee and Ellis, 2014 <http://bmcfee.github.io/papers/ismir2014_spectral.pdf>`_,\nwith a couple of minor stability improvements.\nThis implementation is available at https://librosa.github.io/librosa/auto_examples/plot_segmentation.html\n\nAdditional functions have been added to the core segmentation:\n - unsupervised determination of the number of clusters suitable for the running task\n - different feature packages: spectral, cepstral and chroma.\n - a cosine distance between the different clusters that is plot together with cluster segmentation\n - a set of parameters reported in params.py file necessary for tuning the segmentation model.\n\nusage:\npython3 spectral_clustering_audio.py audiofilename.wav [.mp3]\n\nInput:\n - name of audio file to be analyzed\n\nOutput:\n - Segmentation and grouping of the different musical sections synchronized on user-chosen onsets\n - Optional plots of similarity and recurrence matrix\n - Optional timestamps text file with parameters and time boundaries\n\"\"\"\n\n# Code source by Marie Tahon (2018) adapted from Brian McFee (2014)\n# License: ISC\n\n\n###################################\n# Imports\n# - numpy for basic functionality\n# - scipy for graph Laplacian\n# - matplotlib for visualization\n# - sklearn.cluster for K-Means, for metrics and scaling.\n# - warnings to delete warning message for scipy package\n\n\n\nfrom __future__ import division\nimport numpy as np\nimport scipy\nimport warnings\nwarnings.filterwarnings(action=\"ignore\", module=\"scipy\", message=\"^internal gelsd\")\nimport sys, os\nimport argparse\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\nimport sklearn.cluster\nfrom sklearn.preprocessing import scale\nimport sklearn.metrics\nimport sklearn.utils\n\n\nimport librosa\nimport librosa.display\n\nimport cluster_rotate\nimport params\n\nplt.rcParams.update({'font.size': 8})\n\nBINS_PER_OCTAVE = params.BINS_PER_OCTAVE\nN_OCTAVES = params.N_OCTAVES\nNFFT = int(params.NFFT)\nSTEP = int(params.STEP)\n\n\n\n#######################################\ndef detect_onsets(y, sr, M):\n\t#detect onsets\n\toenv = librosa.onset.onset_strength(S=M, sr=sr)\n\t# Detect events without backtracking\n\tonset_raw = librosa.onset.onset_detect(onset_envelope=oenv, backtrack=False)\n\t## Backtrack the events using the onset envelope\n\tonset_bt = librosa.onset.onset_backtrack(onset_raw, oenv)\n\t# we fix_frames to include non-beat frames 0 and C.shape[1] (final frame)\n\tonset_frames = librosa.util.fix_frames(onset_raw, x_min=0, x_max=M.shape[1]-1)\n\tonset_times = librosa.frames_to_time(onset_frames, sr=sr, hop_length = STEP)\n\t# To reduce dimensionality, we'll beat-synchronous the CQT\n\tMsync = librosa.util.sync(M, onset_raw, aggregate=np.median)\n\n\tif params.onset_plot:\n\t\tplt.figure(figsize=(12, 4))\n\t\tplt.plot(oenv, label='Onset strength')\n\t\tplt.vlines(onset_raw, 0, oenv.max(), label='Raw onsets')\n\t\tplt.vlines(onset_bt, 0, oenv.max(), label='Backtracked', color='r')\n\t\tplt.legend(frameon=True, framealpha=0.75)\n\t\tplt.tight_layout()\n\n\t\tplt.figure(figsize=(12, 4))\n\t\tplt.subplot(2,1,1)\n\t\tplt.title('CQT spectrogram')\n\t\tlibrosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length= STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')\n\t\tplt.tight_layout()\n\n\t\tplt.subplot(2,1,2)\n\t\tplt.title('CQT 
spectrogram synchronized on onsets')\n\t\tlibrosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=onset_times)\n\t\tplt.tight_layout()\n\n\treturn onset_raw, onset_times, Msync\n\n\n\n##############################################\ndef detect_beats(y, sr, M):\n\ttempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length = STEP, trim=False)\n\tprint('Detected tempo: {0:.2f} bpm'.format(tempo))\n\tbeat_period = np.diff(librosa.frames_to_time(beats, sr=sr, hop_length= STEP))\n\tprint('mean beat period: {0:.2f} ; std beat period: {1:.2f}'.format(60/np.mean(beat_period), np.std(beat_period)))\n\n\tbeats_frames = librosa.util.fix_frames(beats, x_min=0, x_max=M.shape[1]-1)\n\tbeat_times = librosa.frames_to_time(beats_frames, sr=sr, hop_length = STEP)\n\n\tMsync = librosa.util.sync(M, beats_frames, aggregate=np.median)\n\tif params.onset_plot:\n\t\tplt.figure(figsize=(12, 4))\n\t\tplt.subplot(2,1,1)\n\t\tplt.title('CQT spectrogram')\n\t\tlibrosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length=STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')\n\t\tplt.tight_layout()\n\t\t# For plotting purposes, we'll need the timing of the beats\n\t\t# we fix_frames to include non-beat frames 0 and C.shape[1] (final frame)\n\n\t\tplt.subplot(2,1,2)\n\t\tplt.title('CQT spectrogram synchronized on beats')\n\t\tlibrosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=beat_times)\n\t\tplt.tight_layout()\n\treturn beats_frames, beat_times, Msync\n\n##############################################\ndef no_onsets(sr, M):\n\n\tonsets = np.arange(0, M.shape[1])\n\tonset_times = librosa.samples_to_time(onsets, sr=sr/STEP)\n\n\tif params.onset_plot:\n\t\tplt.figure(figsize=(12, 4))\n\t\tplt.title('CQT spectrogram')\n\t\tlibrosa.display.specshow(M, y_axis='cqt_hz', sr=sr, bins_per_octave=BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)\n\t\tplt.tight_layout()\n\n\treturn onsets, onset_times, M\n\n\ndef get_manual_beats(sr, M, filename):\n\twith open(filename, 'r') as f:\n\t\tdata = f.readlines()\n\ttimes = np.array([float(x.strip()) for x in data[1:]])\n\tframes = np.array([int(x * sr / STEP) for x in times])\n\tonsets = librosa.util.fix_frames(frames, x_min=0, x_max=M.shape[1]-1)\n\tonset_times = librosa.frames_to_time(onsets, sr=sr, hop_length = STEP)\n\n\tMsync = librosa.util.sync(M, onsets, aggregate=np.median)\n\n\tif params.onset_plot:\n\t\tplt.figure(figsize=(12, 4))\n\t\tplt.subplot(2,1,1)\n\t\tplt.title('CQT spectrogram')\n\t\tlibrosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length=STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')\n\t\tplt.tight_layout()\n\n\t\tplt.subplot(2,1,2)\n\t\tplt.title('CQT spectrogram synchronized on beats')\n\t\tlibrosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=onset_times)\n\t\tplt.tight_layout()\n\n\treturn onsets, onset_times, Msync\n\n\ndef extract_onsets(y, sr, manual_opt):\n\tmethod = params.onset\n\t#compute the CQT transform C: np.array((252, Tmax*sr/STEP))\n\tC = librosa.amplitude_to_db(librosa.core.magphase(librosa.cqt(y=y, sr=sr, bins_per_octave=BINS_PER_OCTAVE, n_bins=N_OCTAVES * BINS_PER_OCTAVE, hop_length = STEP))[0], ref=np.max)\n\t#to reduce dimensionality, we'll onset-synchronous the CQT\n\t#onset is a vector of onset indexes np.array((N+1,)) including 0\n\t#onset_times is a vector of onset times np.array((N+1,)) including 0\n\t#Csync is the CQT transform synchronized on onsets np.array((252, 
N))\n\tif method == 'no':\n\t\tonset, onset_times, Csync = no_onsets(sr, C)\n\telif method == 'onset':\n\t\tonset, onset_times, Csync = detect_onsets(y, sr, C)\n\telif method == 'beat':\n\t\tonset, onset_times, Csync = detect_beats(y, sr, C)\n\telif method == 'manual':\n\t\tonset, onset_times, Csync = get_manual_beats(sr, C, manual_opt)\n\telse:\n\t\tprint('onset parameter is not well-defined')\n\t\tsys.exit()\n\n\treturn onset, onset_times, Csync\n\n\n\ndef build_weighted_rec_matrix(M):\n\t# Let's build a weighted recurrence affinity matrix using onset-synchronous CQT\n\n\t# the similarity matrix is filtered to prevent linkage errors and fill the gaps\n\t# the filter corresponds to a width=3 time window and a majority vote.\n\tR = librosa.segment.recurrence_matrix(M, width=3, mode='affinity',sym=True)\n\n\t# Enhance diagonals with a median filter\n\tdf = librosa.segment.timelag_filter(scipy.ndimage.median_filter)\n\tRf = df(R, size=(1, 7))\n\treturn Rf\n\n\n\ndef build_seq_matrix(M, x):\n\t#build the sequence matrix using feature-similarity\n\t#Rpath[i, i+/-1] = \\exp(- |M[i] - C[i+/-1]|^2 / sigma^2)`\n\n\t#synchronize features with onsets\n\tMsync = librosa.util.sync(M, x, aggregate=np.median)\n\t#Msync = M #pas de syncrhonisation\n\n\t#normalize (rescale) features between 0 and 1\n\tMsync_normed = scale(Msync)\n\n\t#constant scaling\n\tpath_distance = np.sum(np.diff(Msync_normed, axis=1)**2, axis=0)\n\t#sigma is the median distance between successive beats/onsets.\n\tsigma = np.median(path_distance)\n\tpath_sim = np.exp(-path_distance / sigma)\n\n\t#local scaling from A Spectral Clustering Approach to Speaker Diarization, Huazhong Ning, Ming Liu, Hao Tang, Thomas Huang\n\tR_path = np.diag(path_sim, k=1) + np.diag(path_sim, k=-1)\n\treturn R_path\n\n\ndef build_laplacian_and_evec(Rf, R_path, opt, onsets):\n\n\t# And compute the balanced combination A of the two similarity matrices Rf and R_path\n\tdeg_path = np.sum(R_path, axis=1)\n\tdeg_rec = np.sum(Rf, axis=1)\n\tmu = deg_path.dot(deg_path + deg_rec) / np.sum((deg_path + deg_rec)**2)\n\tprint('Optimal weight value (mu): {0:.2f}'.format(mu))\n\n\tA = mu * Rf + (1 - mu) * R_path\n\n\t# Plot the resulting graphs\n\tif opt: plot_similarity(Rf, R_path, A, onsets)\n\n\t# L: symetrized normalized Laplacian\n\tL = scipy.sparse.csgraph.laplacian(A, normed=True)\n\n\t# and its spectral decomposition (Find eigenvalues w and optionally eigenvectors v of matrix L)\n\tevals, evecs = np.linalg.eigh(L)\n\tprint('L shape:', L.shape)\n\n\t# We can clean this up further with a median filter.\n\t# This can help smooth over small discontinuities\n\tevecs = scipy.ndimage.median_filter(evecs, size=(9, 1))\n\n\t# cumulative normalization is needed for symmetric normalize laplacian eigenvectors\n\tCnorm = np.cumsum(evecs**2, axis=1)**0.5\n\n\treturn Cnorm, evals, evecs\n\n\n\n\n\n################################################\ndef compute_nb_clusters(method, evals, evecs, Tmax):\n\n\tif method == 'fixed':\n\t\tc = params.cluster_nb # list\n\telif method == 'max':\n\t\tnc = []\n\t\tfor it in range(params.cluster_max):\n\t\t\tnc.append(cluster_rotate.cluster_rotate(evecs/Cnorm, evals, range(1,10), 1, False))\n\t\tc = [int(np.mean(nc))+1]\n\telif method == 'evals':\n\t\tind = np.where(1- evals > 0.75)[0]\n\t\t#print(ind)\n\t\treturn [len(ind)+1 ]\n\telif method in ['silhouette', 'davies_bouldin', 'calinski_harabaz']:\n\t\tlist_k = range(2,50,2)\n\t\tCnorm = np.cumsum(e**2, axis=1)**0.5 #eigenvectors in input\n\t\tfor k in list_k:\n\t\t\tprint('nb of clusters:', 
k)\n\t\t\tX = e[:, :k] / Cnorm[:, k-1:k]\n\t\t\t# Let's use these k components to cluster beats into segments\n\t\t\t# (Algorithm 1)\n\t\t\tKM = sklearn.cluster.KMeans(n_clusters=k)\n\t\t\tseg_ids = KM.fit_predict(X)\n\t\t\tscore = []\n\t\t\tif method == 'silhouette':\n\t\t\t\tscore.append(sklearn.metrics.silhouette_score(X, seg_ids, metric='euclidean')) #max (proche de 1)\n\t\t\telif method == 'davies_bouldin':\n\t\t\t\tscore.append(davies_bouldin_score(X, seg_ids)) #min\n\t\t\telif method == 'calinski_harabaz':\n\t\t\t\tscore.append(sklearn.metrics.calinski_harabaz_score(X, seg_ids)) #max\n\n\t\tif method == 'silhouette':\n\t\t\treturn list_k[np.argmax(score)]\n\t\telif method == 'davies_bouldin':\n\t\t\treturn list_k[np.argmin(score)]\n\t\telif method == 'calinski_harabaz':\n\t\t\treturn list_k[np.argmax(score)]\n\n\telse:\n\t\tprint('method for finding the right number of clusters is unknown')\n\t\tsys.exit()\n\n\tprint('nb of clusters:', c)\n\n\treturn c\n\n\n\ndef davies_bouldin_score(X, labels):\n\t\"\"\"Computes the Davies-Bouldin score.\n\tThe score is defined as the ratio of within-cluster distances to\n\tbetween-cluster distances.\n\tRead more in the :ref:`User Guide <davies-bouldin_index>`.\n\tParameters\n\t----------\n\tX : array-like, shape (``n_samples``, ``n_features``)\n\tList of ``n_features``-dimensional data points. Each row corresponds\n\tto a single data point.\n\tlabels : array-like, shape (``n_samples``,)\n\tPredicted labels for each sample.\n\tReturns\n\t-------\n\tscore: float\n\tThe resulting Davies-Bouldin score.\n\tReferences\n\t----------\n\t.. [1] `Davies, David L.; Bouldin, Donald W. (1979).\n\t\"A Cluster Separation Measure\". IEEE Transactions on\n\tPattern Analysis and Machine Intelligence. PAMI-1 (2): 224-227`_\n\t\"\"\"\n\tX, labels = sklearn.utils.check_X_y(X, labels)\n\tle = sklearn.preprocessing.LabelEncoder()\n\tlabels = le.fit_transform(labels)\n\tn_samples, _ = X.shape\n\tn_labels = len(le.classes_)\n\tif not 1 < n_labels < n_samples:\n\t\traise ValueError(\"Number of labels is %d. 
Valid values are 2 to n_samples - 1 (inclusive)\" % n_labels)\n\n\tintra_dists = np.zeros(n_labels)\n\tcentroids = np.zeros((n_labels, len(X[0])), dtype=np.float)\n\tfor k in range(n_labels):\n\t\tcluster_k = sklearn.utils.safe_indexing(X, labels == k)\n\t\tcentroid = cluster_k.mean(axis=0)\n\t\tcentroids[k] = centroid\n\t\tintra_dists[k] = np.average(sklearn.metrics.pairwise.pairwise_distances(cluster_k, [centroid]))\n\n\tcentroid_distances = sklearn.metrics.pairwise.pairwise_distances(centroids)\n\n\n\tif np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):\n\t\treturn 0.0\n\n\tscore = (intra_dists[:, None] + intra_dists) / centroid_distances\n\tscore[score == np.inf] = np.nan\n\n\treturn np.mean(np.nanmax(score, axis=1))\n\n\n\ndef plot_similarity(Rf, R_path, A, onset_times):\n\n\tplt.figure(figsize=(12, 4))\n\tplt.subplot(1, 3, 1)\n\tlibrosa.display.specshow(Rf, cmap='inferno_r', y_axis='time', y_coords=onset_times)\n\tplt.title('Long-range recurrence similarity (Rrec)')\n\tplt.subplot(1, 3, 2)\n\tlibrosa.display.specshow(R_path, cmap='inferno_r')\n\tplt.title('Local path similarity (Rloc)')\n\tplt.subplot(1, 3, 3)\n\tlibrosa.display.specshow(A, cmap='inferno_r')\n\tplt.title('Combined graph (A = m Rrec + (1-m) Rloc)')\n\tplt.tight_layout()\n\n\n\ndef plot_structure(Rf, X, seg_ids, k, onset_times):\n\n\tfig_s = plt.figure(figsize=(12, 4))\n\tcolors = plt.get_cmap('Paired', k)\n\n\tax_s1 = fig_s.add_subplot(1, 3, 2)\n\tlibrosa.display.specshow(Rf, cmap='inferno_r')\n\tax_s1.set_title('Long-range recurrence similarity (Rrec)')\n\tax_s2 =fig_s.add_subplot(1, 3, 1)\n\tlibrosa.display.specshow(X, y_axis='time', y_coords=onset_times)\n\tax_s2.set_title('Structure components (Eigen vectors)')\n\tax_s3 = fig_s.add_subplot(1, 3, 3)\n\tlibrosa.display.specshow(np.atleast_2d(seg_ids).T, cmap=colors)\n\tax_s3.set_title('Estimated segments')\n\tplt.colorbar(ticks=range(k))\n\tplt.tight_layout()\n\n\n#################################################\ndef compute_musical_density(C, onset_times, w, alpha):\n\tN = C.shape[1]\n\tdensity = []\n\tfor n in range(N):\n\t\tt1 = np.min([onset_times[-1], onset_times[n] + w])\n\t\tt2 = np.min([onset_times[-1] -w, onset_times[n]])\n\t\tidw = np.where((onset_times < t1) & (onset_times >= t2))\n\t\t#if n + w < :\n\t\tthreshold_chroma = np.max(C[:,idw])\n\t\t#else:\n\t\t\t#threshold_chroma = np.mean(C[:, N - w : N])\n\t\tidx = np.where(C[:,n] > alpha * threshold_chroma)\n\t\tdensity.append(len(idx[0]))\n\n\treturn density\n\n\n\ndef plot_features(X, onsets, onset_times):\n\n\tXsync = librosa.util.sync(X, onsets, aggregate=np.median)\n\n\t#print(X.shape, Xsync.shape)\n\t#print(onset_times)\n\n\tif params.feat[0] == 'chroma':\n\t\tfig_c = plt.figure(figsize=(12, 6))\n\t\tax0_c = fig_c.add_subplot(3,1,1)\n\t\tax0_c.set_title('onset-synchronous chroma (12)')\n\t\t#ax0_c.pcolor(distance, cmap = 'plasma')\n\t\tlibrosa.display.specshow(Xsync[:12,:], y_axis='chroma', x_axis='time', x_coords=onset_times, cmap = 'OrRd')\n\t\t#plt.colorbar()\n\n\t\tax1_c = fig_c.add_subplot(3,1,2, sharex = ax0_c)\n\t\tax1_c.set_title('onset-synchronous delta chroma (12)')\n\t\tlibrosa.display.specshow(np.abs(Xsync[12:,:]), y_axis='chroma', x_axis='time', x_coords=onset_times, cmap = 'OrRd')\n\t\t#plt.colorbar()\n\n\t\tdensity = compute_musical_density(Xsync[:12,:], onset_times, params.norm_density_win, params.alpha)\n\t\tprint(len(onset_times), len(density))\n\t\tax2_c = fig_c.add_subplot(3,1,3, sharex = ax0_c)\n\t\tax2_c.set_title('musical density')\n\t\tax2_c.plot(onset_times, 
density)\n\t\tplt.tight_layout()\n\n\telif params.feat[0] == 'cepstral':\n\t\tfig_s = plt.figure(figsize=(12, 6))\n\t\tax0_s = fig_s.add_subplot(3,1,1)\n\t\tax0_s.set_title('onset-synchronous MFCC (20)')\n\t\tlibrosa.display.specshow(Xsync[:21,:], x_axis='time', x_coords=onset_times)\n\t\t#plt.colorbar()\n\t\t#plt.tight_layout()\n\n\t\tax1_s = fig_s.add_subplot(3,1,2, sharex = ax0_s)\n\t\tax1_s.set_title('onset-synchronous delta MFCC (20)')\n\t\tlibrosa.display.specshow(np.abs(Xsync[20:,:]), x_axis='time', x_coords=onset_times)\n\t\t#plt.colorbar()\n\n\t\tdensity = compute_musical_density(Xsync[:21,:], onset_times, params.norm_density_win, params.alpha)\n\t\tax2_s = fig_s.add_subplot(3,1,2, sharex = ax0_s)\n\t\tax2_s.set_title('musical density')\n\t\tax2_s.plot(onset_times, density)\n\t\tplt.tight_layout()\n\telse:\n\t\tprint('these parameters can not be plot')\n\n\n\ndef load_wav_percu(filename, start, duration, opt_percussive_part):\n\ty, sr = librosa.load(filename, offset=start, duration = duration)\n\n\tif opt_percussive_part:\n\t#separate harmonics and percussives into two wavforms\n\t\ty_harmo, yo = librosa.effects.hpss(y)\n\t\tlibrosa.output.write_wav(filename + '_harmo.wav', y_harmo, sr)\n\t\tlibrosa.output.write_wav(filename + '_percu.wav', y_percu, sr)\n\t\treturn yo, sr\n\telse:\n\t\treturn y, sr\n\n\n\n\n\n\n################################################\ndef feature_extraction(y, sr, opt_tuning):\n\n\tif opt_tuning:\n\t\t#extraction of tuning\n\t\tA440 = librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)\n\t\tprint('Deviation from A440 is : {0:.2f}'.format(A440))\n\telse:\n\t\tA440 = 0.0\n\n\tprint('Features for local similarity: ', ' '.join(params.feat))\n\tfull = []\n\tidx_chroma = 0\n\n\tif 'cepstral' in params.feat:\n\t\tmfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc = 20, n_fft = NFFT, hop_length = STEP)\n\t\tmfcc_delta = librosa.feature.delta(mfcc)\n\t\tfcep = np.concatenate((mfcc, mfcc_delta), axis=0)\n\t\tfull.append(fcep)\n\n\tif 'chroma' in params.feat:\n\t\tchroma = librosa.feature.chroma_cqt(y=y, sr=sr, n_chroma = 12, n_octaves = N_OCTAVES, hop_length = STEP, norm = None, tuning= A440)\n\t\tchroma_delta = librosa.feature.delta(chroma)\n\t\tfchr = np.concatenate((chroma, chroma_delta), axis=0)\n\t\tidx_chroma = len(full)\n\t\tfull.append(fchr)\n\n\tif 'spectral' in params.feat:\n\t\tcentroid = librosa.feature.spectral_centroid(y=y, sr=sr, n_fft = NFFT, hop_length = STEP)\n\t\tcontrast = librosa.feature.spectral_contrast(y=y, sr=sr, n_fft = NFFT, n_bands=6, hop_length = STEP)\n\t\tflatness = librosa.feature.spectral_flatness(y=y, n_fft = NFFT, hop_length = STEP)\n\t\trolloff05 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.05)\n\t\trolloff25 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.25)\n\t\trolloff50 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.50)\n\t\trolloff75 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.75)\n\t\trolloff95 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.95)\n\t\tspec = np.concatenate((centroid, contrast, flatness, rolloff05,rolloff25,rolloff50,rolloff75,rolloff95), axis=0)\n\t\tspec_delta = librosa.feature.delta(spec)\n\t\tfspec = np.concatenate((spec, spec_delta), axis = 0)\n\t\tfull.append(fspec)\n\n\tfull = np.array(full)[0]\n\n\tprint('feature shape', full.shape)\n\treturn full, 
idx_chroma\n\ndef extract_time_boundaries(cluster_ids, onsets, nb_frames, sr):\n\n\t# Locate segment boundaries from the label sequence\n\tbound_beats = 1 + np.flatnonzero(cluster_ids[:-1] != cluster_ids[1:])\n\n\t# Count beat 0 as a boundary\n\tbound_beats = librosa.util.fix_frames(bound_beats, x_min=0)\n\n\t# Compute the segment label for each boundary\n\tbound_labels = list(cluster_ids[bound_beats])\n\n\t# Convert beat indices to frames\n\tbound_frames = onsets[bound_beats]\n\n\t# Make sure we cover to the end of the track\n\tbound_frames = librosa.util.fix_frames(bound_frames, x_min=None, x_max=nb_frames-1)\n\tbound_times = librosa.frames_to_time(bound_frames, sr=sr, hop_length = STEP)\n\n\treturn bound_times, bound_labels\n\n\n\n##################################\ndef extract_cosine_distance_clusters(center_clusters, distance_ref, type_dist = 'cos'):\n\tdistance = []\n\n\tfor center in center_clusters:\n\t\tif type_dist == 'cos':\n\t\t\tdistance.append( scipy.spatial.distance.cosine( center, distance_ref) )\n\t\telif type_dist == 'eucl':\n\t\t\tdistance.append(np.sqrt( np.sum( (center - distance_ref)**2) ))\n\n\treturn distance\n\n\ndef extract_distance_between_clusters(center_clusters, type_dist = 'cos'):\n\tdistance = np.zeros((center_clusters.shape))\n\n\tfor i, center_i in enumerate(center_clusters):\n\t\tfor j, center_j in enumerate(center_clusters):\n\t\t\tif type_dist == 'cos':\n\t\t\t\tdistance[i,j] = scipy.spatial.distance.cosine( center_i, center_j)\n\t\t\telif type_dist == 'eucl':\n\t\t\t\tdistance[i,j] = np.sqrt( np.sum( (center_i - center_j)**2) )\n\n\tx = range(i+1)\n\ty = range(j+1)\n\txloc = [c + 0.5 for c in x]\n\tcx = [str(c) for c in x]\n\t#print(cx)\n\tfig_d, ax_d = plt.subplots(figsize=(5, 4))\n\tp_d = ax_d.pcolor(distance, cmap = 'inferno_r')\n\tcb = fig_d.colorbar(p_d)\n\tax_d.xaxis.set_ticks(xloc)\n\tax_d.xaxis.set_ticklabels(cx)\n\tax_d.yaxis.set_ticks(xloc)\n\tax_d.yaxis.set_ticklabels(cx)\n\tax_d.set_title('Distance between clusters')\n\tax_d.set_xlabel('clusters numbers')\n\tplt.tight_layout()\n\n\treturn distance\n\n\n\ndef extract_ref_signal(X, onset_times):\n\tind = np.where((onset_times >= params.begin_ref) & (onset_times < params.end_ref))\n\treturn X[ind,:]\n\n\n\ndef main():\n\n\n\tparser = argparse.ArgumentParser(description='Segmentation and clustering of musical sections with spectral clustering (Laplacian matrix and eigen values)')\n\tparser.add_argument('filename', type=str, help='name of audio file')\n\tparser.add_argument('manual_onset', nargs='?', type=str, help='name of the file containing manual annotations for onset timestamps (with method=manual)')\n\n\targs = parser.parse_args()\n\n\t#==================\n\t# Signal processing\n\t#==================\n\n\t#extract waveform from audio signal of given duration and begining. 
If onset_percu is True, extract only percussive part of the signal.\n\ty, sr = load_wav_percu(args.filename, params.begin, params.duration, params.onset_percu)\n\tprint('signal shape:', y.shape, ' sr=', sr, 'win duration=%.2f' %(NFFT / sr))\n\n\t#extract acoustic feature from audio signal feat is a matrix np.array((nb features, Tmax*sr/STEP))\n\tfeat, idx_chroma = feature_extraction(y, sr, params.opt_tuning)\n\n\t#extract onset indexes and times + onset-synchronous CQT transform on onsets.\n\tonsets, onset_times, Csync = extract_onsets(y, sr, args.manual_onset)\n\n\t#if 'chroma' in params.feat:\n\t#\tcompute_musical_density(Csync, onset_times, idx_chroma, params.norm_density_win, params.alpha, sr)\n\n\tif params.plot_features: plot_features(feat, onsets, onset_times)\n\n\t#================\n\t# Affinity matrix\n\t#================\n\n\t#compute a non-negative affinity matrix using onset-synchronous CQT (with Gaussian kernel)\n\t#represent local consistency of timbral (CQT) features\n\tRf = build_weighted_rec_matrix(Csync)\n\n\t#compute a non-negative affinity matrix using onset-synchronous feature matrix (with Gaussian kernel)\n\t#represent long-range repeating forms of harmonic features\n\tR_path = build_seq_matrix(feat, onsets)\n\n\t#compute Laplacian (sequence augmented affinity matrix) as a linear combination of Rf and Rpath and extract eigenvalues and vectors.\n\tCnorm, evals, evecs = build_laplacian_and_evec(Rf, R_path, params.plot_simi, onset_times)\n\n\n\t#===========\n\t# Clustering\n\t#===========\n\n\t#determine number of clusters kl is a list of potential numbers of cluster.\n\tkl = compute_nb_clusters(params.cluster_method, evals, evecs, y.shape[0]*sr)\n\tN_CLUST = len(kl)\n\n\n\t#=================\n\t# Start plotting\n\t#=================\n\timport matplotlib.patches as patches\n\tfig_f = plt.figure(figsize = (12, 3+2*N_CLUST))\n\t#fig.subplots_adjust(hspace=.5)\n\n\t#plot onset-synchronous CQT\n\thr = [1] * (N_CLUST +1)\n\thr[0] = 2\n\tgs = gridspec.GridSpec(1 + N_CLUST,1, height_ratios=hr)\n\tax_f0 = fig_f.add_subplot(gs[0])\n\tlibrosa.display.specshow(Csync, y_axis='cqt_hz', sr=sr, hop_length = STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)\n\t#librosa.display.specshow(feat, y_axis='chroma', x_axis='time') #ou\n\tax_f0.set_title('CQT spectrogram synchronized {0}'.format(params.onset))\n\n\tfor it, k in enumerate(kl):\n\t\t#limit the number of clusters per second\n\t\tif k > params.cluster_nb_max*sr*y.shape[0]:\n\t\t\tk = params.cluster_nb_max*sr*y.shape[0]\n\t\tprint('nb of clusters: {} for it {}/{}'.format(k, it, N_CLUST))\n\n\t\t#for k clusters, use the first k normalized eigenvectors.\n\t\t#X can be interpretable as an onset-synchronous matrix containing relevant feature information for local and log-range structure segmentation\n\t\tX = evecs[:, :k] / Cnorm[:, k-1:k]\n\n\t\t#onsets are grouped into k clusters, each cluster having its own acoustic characteristics\n\t\tKM = sklearn.cluster.KMeans(n_clusters=k)\n\t\t#seg_ids is a np.array((label)) label being a number corresponding to one cluster seg_ids[i] is the label of onset i\n\t\tseg_ids = KM.fit_predict(X)\n\n\t\t#if needed compute the cosine distance between each cluster and a reference taken at the very begining of th signal\n\t\t#KM.cluster_centers_ : array, [n_clusters, n_features]\n\t\tif params.cluster_dist:\n\t\t\tref_signal = extract_ref_signal(X, onset_times)\n\t\t\tdistance_cosine_cluster = extract_cosine_distance_clusters( KM.cluster_centers_, np.mean(X[:10*NFFT,:], 
axis=0))\n\t\telse:\n\t\t\tdistance_cosine_cluster = None\n\n\t\tif params.plot_dist:\n\t\t\tdistance_between_clusters = extract_distance_between_clusters( KM.cluster_centers_ )\n\n\n\t\t# and plot the resulting structure representation\n\t\tif params.plot_struct: plot_structure(Rf, X, seg_ids, k, onset_times)\n\n\t\tbound_times, bound_labels = extract_time_boundaries(seg_ids, onsets, feat.shape[1], sr)\n\t\tfreqs = librosa.cqt_frequencies(n_bins=Csync.shape[0], fmin=librosa.note_to_hz('C1'), bins_per_octave=BINS_PER_OCTAVE)\n\n\t\ttimestamps_name = os.path.splitext(args.filename)[0] + '_timestamps.txt'\n\n\t\t#=============\n\t\t# Plot results\n\t\t#=============\n\n\n\t\tcmap = plt.get_cmap('Paired', k)\n\t\t#write header of text file with parameters.\n\t\tif params.timestamps:\n\t\t\tf = open(timestamps_name, 'a')\n\t\t\tf.write('WIN = {0:.2f} sec, NFFT = {1}, STEP = {2}, begin = {3}, duration = {4}\\n'.format(NFFT / sr, NFFT, STEP, params.begin, params.duration))\n\t\t\tf.write('Nb of clusters: {0} obtained with method {1} and features {2}\\n'.format(k, params.cluster_method, '-'.join(params.feat)))\n\n\t\t#plot onset-synchronous CQT\n\t\t#if it == 0:\n\n\n\t\t#plot segmentation and clusters grouping (+ cosine distance.)\n\t\t#also write obtained boundaries in the text file.\n\t\tax_f1 = fig_f.add_subplot(gs[it + 1], sharex = ax_f0)\n\t\tfor interval, label in zip(zip(bound_times, bound_times[1:]), bound_labels):\n\t\t\tif params.timestamps: f.write('{0:.2f} \\t {1:.2f} \\t {2} \\n'.format(interval[0], interval[1], label))\n\t\t\tif params.cluster_dist: ax_f1.plot([interval[0], interval[1]],[distance_cosine_cluster[label], distance_cosine_cluster[label]], 'k')\n\t\t\tax_f1.add_patch(patches.Rectangle((interval[0], 0), interval[1] - interval[0], 1, facecolor=cmap(label), alpha=1))\n\t\t\tax_f1.text(interval[0]+(interval[1]-interval[0])/2, 0.9, label, fontsize=8)\n\t\tif params.timestamps: f.close()\n\n\t\t#plt.subplots_adjust(hspace=.0)\n\tplt.tight_layout()\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()\n\n\n\ntitle = 'Palestrina'\n# Palestrina, AccordsMajeurs, AccordsMineur, Majeur3et4notes, Majeur3et4notes, Accords3Notes, DispoMajeurMineur, Tension\n# Cadence3V, Cadence4VMaj, Cadence4Vmin,\naudio = load('/Users/manuel/Dropbox (TMG)/Thèse/code/DescripteursHarmoniquesAudio/'+title+'.wav')\nmain(audio)\n"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.asarray",
"matplotlib.pyplot.plot",
"numpy.std",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.diag",
"matplotlib.pyplot.legend",
"numpy.nanmax",
"matplotlib.pyplot.get_cmap",
"numpy.cumsum",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.concatenate",
"numpy.mean",
"numpy.argmin",
"matplotlib.pyplot.rcParams.update",
"numpy.exp",
"numpy.where",
"matplotlib.pyplot.tight_layout",
"numpy.allclose",
"numpy.arange",
"scipy.ndimage.median_filter",
"numpy.flatnonzero",
"numpy.std",
"matplotlib.pyplot.subplot",
"numpy.diff",
"matplotlib.gridspec.GridSpec",
"numpy.argmax",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.median",
"numpy.atleast_2d",
"numpy.linalg.eigh",
"sklearn.preprocessing.scale",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.array",
"numpy.abs",
"scipy.spatial.distance.cosine",
"scipy.sparse.csgraph.laplacian",
"matplotlib.pyplot.subplots"
]
] |
DzAvril/tvm
|
[
"89fa6d3363926a6770084c10f9dee2cf78129903"
] |
[
"apps/deploy_tflite_cpp/build_input.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Creates a simple TVM modules.\"\"\"\n\nimport argparse\nimport os\nimport logging\nfrom PIL import Image\nimport numpy as np\n\n\ndef preprocess_image(image_file):\n resized_image = Image.open(image_file).resize((224, 224))\n image_data = np.asarray(resized_image).astype(\"float32\")\n # after expand_dims, we have format NCHW\n image_data = np.expand_dims(image_data, axis=0)\n image_data[:, :, :, 0] = 2.0 / 255.0 * image_data[:, :, :, 0] - 1\n image_data[:, :, :, 1] = 2.0 / 255.0 * image_data[:, :, :, 1] - 1\n image_data[:, :, :, 2] = 2.0 / 255.0 * image_data[:, :, :, 2] - 1\n return image_data\n\n\ndef build_inputs():\n x = preprocess_image(\"lib/cat.png\")\n print(\"x\", x.shape)\n with open(\"lib/input.bin\", \"wb\") as fp:\n fp.write(x.astype(np.float32).tobytes())\n\n\nif __name__ == \"__main__\":\n build_inputs()\n"
] |
[
[
"numpy.asarray",
"numpy.expand_dims"
]
] |
LucasFidon/trustworthy-ai-fetal-brain-segmentation
|
[
"84959da54d8c2fb156da2b06cca30fa31a1c926d",
"84959da54d8c2fb156da2b06cca30fa31a1c926d"
] |
[
"docker/third-party/nnUNet/nnunet/dataset_conversion/Task172_CovidSegChallengeAutoCorrect.py",
"docker/third-party/nnUNet/nnunet/network_architecture/generic_UNet.py"
] |
[
"import os\nimport pickle\nfrom scipy.ndimage.measurements import label\nimport numpy as np\nimport SimpleITK as sitk\nfrom collections import OrderedDict\nfrom lungmask import mask\nfrom batchgenerators.utilities.file_and_folder_operations import *\nfrom nnunet.paths import nnUNet_raw_data\n\nMAIN_DATA_FOLDER = '/data'\nMAIN_WORKSPACE_FOLDER = '/workspace'\nNNUNET_FOLDER = os.path.join(MAIN_WORKSPACE_FOLDER, 'nnUNet', 'nnunet')\nNNUNET_INFERENCE_FOLDER = os.path.join(NNUNET_FOLDER, 'inference')\n\n# Challenge data\nDATA_FOLDER = os.path.join(MAIN_DATA_FOLDER, 'COVID-19-20', 'COVID-19-20_v2')\nTRAIN_DATA_FOLDER = join(DATA_FOLDER, 'Train')\nVALID_DATA_FOLDER = join(DATA_FOLDER, 'Validation')\n\nJUN_DATASET_CT_FOLDER = os.path.join(MAIN_DATA_FOLDER, 'covid_benchmark', 'COVID-19-CT-Seg_20cases')\nJUN_DATASET_LESIONS_FOLDER = os.path.join(MAIN_DATA_FOLDER, 'covid_benchmark', 'Infection_Mask')\n\n# Guotai data:\n# only binary seg\n# non HU intensity\nGUOTAI_DATASET_FOLDER = os.path.join(MAIN_DATA_FOLDER, 'UESTC-COVID-19')\nGUOTAI_DATASET_PART1 = os.path.join( # 70 cases labelled by non-experts\n GUOTAI_DATASET_FOLDER,\n 'UESTC-COVID-19-20201109T135232Z-001',\n 'UESTC-COVID-19',\n 'part1',\n)\nGUOTAI_DATASET_PART2 = os.path.join( # 50 cases labelled by experts\n GUOTAI_DATASET_FOLDER,\n 'UESTC-COVID-19-20201109T135232Z-001',\n 'UESTC-COVID-19',\n 'part2',\n)\nGUOTAI_HU_MIN = -1400 # strange value... could it be -1000? nop it is correct\nGUOTAI_HU_MAX = 100\n\n\n# iCovid data\nICOVID_DATASET_FOLDER = os.path.join(MAIN_DATA_FOLDER, 'icovid_raw_data')\nLABELS_ICOVID = {\n # Basic lesion classes\n 'ggo': 1,\n 'consolidation': 2,\n 'crazy_paving_pattern': 3,\n 'linear_opacity': 2,\n # Super classes\n 'combined_pattern': 4,\n 'reversed_halo_sign': 4,\n 'other_abnormal_tissue': 5,\n 'lung': 6,\n 'background': 0,\n}\nPATIENT_ID_TO_EXCLUDE = [\n '1363112652', # moderate artefact and I can't see some of the lesions segmented\n '1366125607', # artefact and suspicious seg (completed and reviewed by same person)\n '1413717420', # strong breathing artefact and suspicious seg\n '1812933091', # BART: to exclude pat12. nothing seg\n '1868609820', # BART: to exclude pat13. 
nothing seg\n '2602703662', # can't see most of the lesions; noisy seg\n '2762004157', # mainly other abn and comb pattern; noisy seg\n '2969709397', # lots of other abn; mix other abn other lesions; can't see some of the lesions\n '3375944345', # no lesion\n '5925215067', # not annotated completely (very partial)\n '7414742831', # can't see the lesions; seg seem noisy\n # '7957238453', # suspicious: lesion in only one slice\n '8874887577', # mainly combined pattern; some suspicious seg\n]\n\n\n# PREPROCESSING PARAMS\nMIN_HU = -1000 # air\nMAX_HU = 100 # max for Guotai's data\nMASK_MARGIN = [5, 15, 15]\nMIN_NUM_VOXEL_PER_COMP = 100000\nLABELS = {\n 'lung': 1,\n 'lesion': 2,\n 'unsure': 3, # when we are not sure if a voxel belongs to the lesion or to healthy lung tissues\n 'background': 0,\n}\n\n\ndef get_patient_name_from_file_name(file_name):\n name = file_name.replace('_ct.nii.gz', '').replace('_seg.nii.gz', '').replace('.nii.gz', '')\n return name\n\n\ndef predict_lesion_with_model_ensemble_task171(preprocessed_ct_sitk):\n \"\"\"\n Compute the automatic segmentations using the ensemble 3d_lowres\n trained on task171 with 16GB and batch size=2\n :param preprocessed_ct:\n :return: seg proba\n \"\"\"\n def post_process_softmax_pred(softmax_np):\n seg_pred = np.argmax(softmax_np, axis=0)\n seg_lung = seg_pred > 0\n # Keep only the two largest connected components\n structure = np.ones((3, 3, 3), dtype=np.int)\n labeled, ncomp = label(seg_lung, structure)\n size_comp = [\n np.sum(labeled == l) for l in range(1, ncomp + 1)\n ]\n first_largest_comp = np.argmax(size_comp)\n label_first = first_largest_comp + 1\n size_comp[first_largest_comp] = -1\n second_largest_comp = np.argmax(size_comp)\n label_second = second_largest_comp + 1\n # To avoid cases where the two lungs are in the same component\n # and the second largest component is outside the lungs\n # we set a minimum size for the second largest component\n if size_comp[second_largest_comp] < MIN_NUM_VOXEL_PER_COMP:\n label_second = -1\n for i in range(1, ncomp + 1):\n if i not in [label_first, label_second]:\n # set to background with proba 1\n # the voxels of the foreground that are not in the\n # two main connected components of the foreground\n softmax_np[:, labeled == i] = 0.\n softmax_np[0, labeled == i] = 1.\n return softmax_np\n\n tmp_folder = 'tmp_autoseg'\n if not os.path.exists(tmp_folder):\n os.mkdir(tmp_folder)\n\n # Save the CT in a tmp folder\n tmp_folder_ct = os.path.join(tmp_folder, 'ct')\n if not os.path.exists(tmp_folder_ct):\n os.mkdir(tmp_folder_ct)\n save_img_path = os.path.join(tmp_folder_ct, 'ct_0000.nii.gz')\n sitk.WriteImage(preprocessed_ct_sitk, save_img_path)\n\n out_folder_list = []\n\n # Run the individual model predictions\n for fold in range(5):\n output_folder = os.path.join(tmp_folder, 'out_fold%d' % fold)\n out_folder_list.append(output_folder)\n if os.path.exists(output_folder):\n os.system('rm -r %s' % output_folder)\n options = '-t 171 -f %d -m 3d_lowres -tr nnUNetTrainerV2 -p nnUNetPlansv2.1_16GB --save_npz' % fold\n cmd = '%s/predict_simple.py -i %s -o %s %s' % (NNUNET_INFERENCE_FOLDER, tmp_folder_ct, output_folder, options)\n print('\\n%s\\n' % cmd)\n os.system(cmd)\n\n # Compute the mean of the softmax predictions\n output_folder_ens = os.path.join(tmp_folder, 'out_ensemble')\n cmd = '%s/ensemble_predictions.py -f %s %s %s %s %s -o %s --npz' % \\\n (NNUNET_INFERENCE_FOLDER, out_folder_list[0], out_folder_list[1], out_folder_list[2], out_folder_list[3], out_folder_list[4], output_folder_ens)\n 
print('\\n%s\\n' % cmd)\n os.system(cmd)\n\n # Load the softmax proba ensemble prediction\n softmax_path = os.path.join(output_folder_ens, 'ct.npz')\n softmax_cropped = np.load(softmax_path)['softmax'][None][0,...]\n pkl_path = os.path.join(output_folder_ens, 'ct.pkl')\n with open(pkl_path, 'rb') as f:\n prop = pickle.load(f)\n ori_img_shape = prop['original_size_of_raw_data']\n shape = (softmax_cropped.shape[0], ori_img_shape[0], ori_img_shape[1], ori_img_shape[2])\n softmax_full = np.zeros(shape)\n softmax_full[0, ...] = 1 # initialize to background\n crop_coord = np.array(prop['crop_bbox'])\n softmax_full[:, crop_coord[0,0]:crop_coord[0,1], crop_coord[1,0]:crop_coord[1,1], crop_coord[2,0]:crop_coord[2,1]] = softmax_cropped\n\n # Apply the post-processing\n softmax_full = post_process_softmax_pred(softmax_full)\n\n # Delete all the temporary files\n if os.path.exists(tmp_folder):\n os.system('rm -r %s' % tmp_folder)\n\n return softmax_full\n\ndef preprocess(img_path, seg_path=None, mode='challenge', crop=False):\n def mask_img(img_np, lung_mask_np, do_crop=False):\n x, y, z = np.where(lung_mask_np > 0)\n x_min = max(0, np.min(x) - MASK_MARGIN[0])\n x_max = min(img_np.shape[0], np.max(x) + MASK_MARGIN[0])\n y_min = max(0, np.min(y) - MASK_MARGIN[1])\n y_max = min(img_np.shape[1], np.max(y) + MASK_MARGIN[1])\n z_min = max(0, np.min(z) - MASK_MARGIN[2])\n z_max = min(img_np.shape[2], np.max(z) + MASK_MARGIN[2])\n if do_crop:\n img_np = img_np[x_min:x_max, y_min:y_max, z_min:z_max]\n else:\n img_np[:x_min, :, :] = 0\n img_np[x_max:, :, :] = 0\n img_np[:, :y_min, :] = 0\n img_np[:, y_max:, :] = 0\n img_np[:, :, :z_min] = 0\n img_np[:, :, z_max:] = 0\n return img_np\n\n def postprocess_auto_lung_seg(lung_seg_np):\n # Binarize the lung segmentation\n lung_seg_np[lung_seg_np > 1] = 1\n # Keep only the two largest connected components\n structure = np.ones((3, 3, 3), dtype=np.int)\n labeled, ncomp = label(lung_seg_np, structure)\n size_comp = [\n np.sum(labeled == l) for l in range(1, ncomp + 1)\n ]\n first_largest_comp = np.argmax(size_comp)\n label_first = first_largest_comp + 1\n size_comp[first_largest_comp] = -1\n second_largest_comp = np.argmax(size_comp)\n label_second = second_largest_comp + 1\n # To avoid cases where the two lungs are in the same component\n # and the second largest component is outside the lungs\n # we set a minimum size for the second largest component\n if size_comp[second_largest_comp] < MIN_NUM_VOXEL_PER_COMP:\n label_second = -1\n for i in range(1, ncomp + 1):\n if i in [label_first, label_second]:\n labeled[labeled == i] = 1\n else:\n labeled[labeled == i] = 0\n return labeled\n\n def update_labels_seg_task171(ct_sitk, seg_np, mode='normal'):\n new_seg = np.zeros_like(seg_np)\n pred_proba_seg_t171 = predict_lesion_with_model_ensemble_task171(ct_sitk)\n pred_seg_t171 = np.argmax(pred_proba_seg_t171, axis=0)\n\n # Make the initial segmentation\n new_seg[pred_seg_t171 > 0] = LABELS['lung']\n if mode == 'icovid':\n for l in [1, 2, 3, 4]:\n new_seg[seg_np == l] = LABELS['lesion'] # all lesion types together\n new_seg[seg_np == LABELS_ICOVID['other_abnormal_tissue']] = LABELS['unsure']\n else:\n new_seg[seg_np > 0] = LABELS['lesion']\n\n # Look at the voxels with disagreement between manual and auto seg\n # and mark them as 'unsure' when appropriate\n max_proba = np.max(pred_proba_seg_t171, axis=0)\n # We mark a voxel as 'unsure' iff there is a disagreement\n # and the ensemble has a maximum probability of at least 0.75\n disagreement = np.logical_and(new_seg != 
pred_seg_t171, max_proba >= 0.75)\n new_seg[disagreement] = LABELS['unsure']\n\n return new_seg\n\n def convert_to_sitk(img_np, ref_img_sitk):\n img_sitk = sitk.GetImageFromArray(img_np)\n img_sitk.SetOrigin(ref_img_sitk.GetOrigin())\n img_sitk.SetSpacing(ref_img_sitk.GetSpacing())\n img_sitk.SetDirection(ref_img_sitk.GetDirection())\n return img_sitk\n\n img = sitk.ReadImage(img_path)\n img_np = sitk.GetArrayFromImage(img)\n if mode == 'guotai':\n # Convert the CT intensities back to HU\n # This has to be done before inference of the lung mask\n img_np = GUOTAI_HU_MIN + (GUOTAI_HU_MAX - GUOTAI_HU_MIN) * img_np\n img = convert_to_sitk(img_np, img)\n\n # Create the lung mask\n if mode == 'icovid':\n assert seg_path is not None, 'Segmentation is required for iCovid data'\n seg = sitk.ReadImage(seg_path)\n seg_np = sitk.GetArrayFromImage(seg)\n lung_mask_np = np.zeros_like(seg_np)\n lung_mask_np[seg_np > 0] = 1\n else:\n lung_mask_np = mask.apply(img)\n # binarize the mask and keep only the two largest connected components\n lung_mask_np = postprocess_auto_lung_seg(lung_mask_np)\n\n # Clip the HU intensity\n img_np[img_np < MIN_HU] = MIN_HU\n img_np[img_np > MAX_HU] = MAX_HU\n\n # Mask the image outside a box containing the lung\n img_np = mask_img(img_np, lung_mask_np, do_crop=crop)\n\n # Convert back to SITK image\n img_pre = convert_to_sitk(img_np, img)\n\n # Seg pre-processing (if available)\n if seg_path is not None:\n seg = sitk.ReadImage(seg_path)\n seg_np = sitk.GetArrayFromImage(seg)\n if crop:\n seg_np = mask_img(seg_np, lung_mask_np, do_crop=crop)\n # Add lung and unsure as extra labels for the segmentation\n seg_np = update_labels_seg_task171(img_pre, seg_np, mode=mode)\n if mode == 'guotai':\n seg_pre = convert_to_sitk(seg_np, img) # need to use img header for Guotai's data\n else:\n seg_pre = convert_to_sitk(seg_np, seg)\n else:\n seg_pre = None\n\n return img_pre, seg_pre\n\n\nif __name__ == '__main__':\n task_id = 172\n task_name = \"CovidSegChallengeAutoCorrect\"\n\n foldername = \"Task%d_%s\" % (task_id, task_name)\n\n out_base = join(nnUNet_raw_data, foldername)\n imagestr = join(out_base, \"imagesTr\")\n imagesval = join(out_base, \"imagesVal\")\n labelstr = join(out_base, \"labelsTr\")\n maybe_mkdir_p(imagestr)\n maybe_mkdir_p(imagesval)\n maybe_mkdir_p(labelstr)\n\n train_patient_names = []\n valid_patient_names = []\n\n # Training data (Challenge data)\n for f_n in os.listdir(TRAIN_DATA_FOLDER):\n patient_name = get_patient_name_from_file_name(f_n)\n if patient_name in train_patient_names:\n continue\n print('\\nPreprocces', patient_name)\n train_patient_names.append(patient_name)\n img = join(TRAIN_DATA_FOLDER, '%s_ct.nii.gz' % patient_name)\n seg = join(TRAIN_DATA_FOLDER, '%s_seg.nii.gz' % patient_name)\n assert all([\n isfile(img),\n isfile(seg),\n ]), '%s: some files were not found' % patient_name\n\n save_img = join(imagestr, patient_name + \"_0000.nii.gz\")\n save_seg = join(labelstr, patient_name + \".nii.gz\")\n if os.path.exists(save_img) and os.path.exists(save_seg):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, seg_pre = preprocess(img, seg, crop=True)\n sitk.WriteImage(img_pre, save_img)\n sitk.WriteImage(seg_pre, save_seg)\n print('Found %d training cases in %s' % (len(train_patient_names), TRAIN_DATA_FOLDER))\n\n # Jun dataset\n jun_patient_names = []\n for f_n in os.listdir(JUN_DATASET_CT_FOLDER):\n if not 'coronacases' in f_n: # remove data with low quality\n continue\n patient_name = 
get_patient_name_from_file_name(f_n)\n print('Preprocces', patient_name)\n if patient_name in train_patient_names:\n continue\n jun_patient_names.append(patient_name)\n img = join(JUN_DATASET_CT_FOLDER, '%s.nii.gz' % patient_name)\n seg = join(JUN_DATASET_LESIONS_FOLDER, '%s.nii.gz' % patient_name)\n assert all([\n isfile(img),\n isfile(seg),\n ]), '%s: some files were not found' % patient_name\n\n save_img = join(imagestr, patient_name + \"_0000.nii.gz\")\n save_seg = join(labelstr, patient_name + \".nii.gz\")\n if os.path.exists(save_img) and os.path.exists(save_seg):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, seg_pre = preprocess(img, seg, mode='jun', crop=True)\n sitk.WriteImage(img_pre, save_img)\n sitk.WriteImage(seg_pre, save_seg)\n train_patient_names += jun_patient_names\n print('Found %d training cases in %s' % (len(jun_patient_names), JUN_DATASET_CT_FOLDER))\n\n # Guotai data (expert)\n guotai_pat_names = []\n img_folder = os.path.join(GUOTAI_DATASET_PART2, 'image')\n seg_folder = os.path.join(GUOTAI_DATASET_PART2, 'label')\n for f_n in os.listdir(img_folder):\n patient_name = get_patient_name_from_file_name(f_n) + '_part2'\n if patient_name in train_patient_names:\n continue\n print('Preprocces', patient_name)\n guotai_pat_names.append(patient_name)\n img = join(img_folder, f_n)\n seg = join(seg_folder, f_n)\n assert all([\n isfile(img),\n isfile(seg),\n ]), '%s: some files were not found' % patient_name\n\n save_img = join(imagestr, patient_name + \"_0000.nii.gz\")\n save_seg = join(labelstr, patient_name + \".nii.gz\")\n if os.path.exists(save_img) and os.path.exists(save_seg):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, seg_pre = preprocess(img, seg, mode='guotai', crop=True)\n sitk.WriteImage(img_pre, save_img)\n sitk.WriteImage(seg_pre, save_seg)\n train_patient_names += guotai_pat_names\n print('Found %d training cases in %s' % (len(guotai_pat_names), GUOTAI_DATASET_PART2))\n\n # Guotai data (non-expert)\n guotai_pat_names = []\n img_folder = os.path.join(GUOTAI_DATASET_PART1, 'image')\n seg_folder = os.path.join(GUOTAI_DATASET_PART1, 'label')\n for f_n in os.listdir(img_folder):\n patient_name = get_patient_name_from_file_name(f_n) + '_part1'\n if patient_name in train_patient_names:\n continue\n print('Preprocces', patient_name)\n guotai_pat_names.append(patient_name)\n img = join(img_folder, f_n)\n seg = join(seg_folder, f_n)\n assert all([\n isfile(img),\n isfile(seg),\n ]), '%s: some files were not found' % patient_name\n\n save_img = join(imagestr, patient_name + \"_0000.nii.gz\")\n save_seg = join(labelstr, patient_name + \".nii.gz\")\n if os.path.exists(save_img) and os.path.exists(save_seg):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, seg_pre = preprocess(img, seg, mode='guotai', crop=True)\n sitk.WriteImage(img_pre, save_img)\n sitk.WriteImage(seg_pre, save_seg)\n train_patient_names += guotai_pat_names\n print('Found %d training cases in %s' % (len(guotai_pat_names), GUOTAI_DATASET_PART1))\n\n # iCovid data\n icovid_patient_names = []\n for f_n in os.listdir(ICOVID_DATASET_FOLDER):\n patient_name = f_n\n if patient_name in PATIENT_ID_TO_EXCLUDE:\n print(patient_name, 'excluded')\n continue\n print('Preprocces', patient_name)\n icovid_patient_names.append(patient_name)\n img = join(ICOVID_DATASET_FOLDER, patient_name, 'ct.nii.gz')\n seg = join(ICOVID_DATASET_FOLDER, patient_name, 'lesions_seg.nii.gz')\n assert all([\n isfile(img),\n 
isfile(seg),\n ]), '%s: some files were not found' % patient_name\n\n save_img = join(imagestr, patient_name + \"_0000.nii.gz\")\n save_seg = join(labelstr, patient_name + \".nii.gz\")\n if os.path.exists(save_img) and os.path.exists(save_seg):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, seg_pre = preprocess(img, seg, mode='icovid', crop=True)\n sitk.WriteImage(img_pre, save_img)\n sitk.WriteImage(seg_pre, save_seg)\n train_patient_names += icovid_patient_names\n print('Found %d training cases in %s' % (len(icovid_patient_names), ICOVID_DATASET_FOLDER))\n\n print('')\n print('A total of %s training cases were found' % len(train_patient_names))\n print('')\n\n # Validation data\n for f_n in os.listdir(VALID_DATA_FOLDER):\n patient_name = get_patient_name_from_file_name(f_n)\n if patient_name in valid_patient_names:\n continue\n valid_patient_names.append(patient_name)\n img = join(VALID_DATA_FOLDER, '%s_ct.nii.gz' % patient_name)\n assert isfile(img), '%s: CT file was not found' % patient_name\n\n save_img = join(imagesval, patient_name + \"_0000.nii.gz\")\n if os.path.exists(save_img):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, _ = preprocess(img)\n sitk.WriteImage(img_pre, save_img)\n print('Found %d validation cases' % len(valid_patient_names))\n\n # Dataset json file\n json_dict = OrderedDict()\n json_dict['name'] = task_name\n json_dict['description'] = \"nothing\"\n json_dict['tensorImageSize'] = \"4D\"\n json_dict['reference'] = \"no reference\"\n json_dict['licence'] = \"no license\"\n json_dict['release'] = \"0.0\"\n json_dict['modality'] = {\n \"0\": \"CT\",\n }\n json_dict['labels'] = {\n \"0\": \"background\",\n \"1\": \"lung\",\n \"2\": \"lesion\",\n \"3\": \"unsure\",\n }\n json_dict['numTraining'] = len(train_patient_names)\n json_dict['numTest'] = 0\n json_dict['training'] = [{\n 'image': \"./imagesTr/%s.nii.gz\" % i,\n \"label\": \"./labelsTr/%s.nii.gz\" % i}\n for i in train_patient_names]\n json_dict['test'] = []\n save_json(json_dict, join(out_base, \"dataset.json\"))",
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom copy import deepcopy\nfrom nnunet.utilities.nd_softmax import softmax_helper\nfrom torch import nn\nimport torch\nimport numpy as np\nfrom nnunet.network_architecture.initialization import InitWeights_He\nfrom nnunet.network_architecture.neural_network import SegmentationNetwork\nimport torch.nn.functional\n\n\nclass ConvDropoutNormNonlin(nn.Module):\n \"\"\"\n fixes a bug in ConvDropoutNormNonlin where lrelu was used regardless of nonlin. Bad.\n \"\"\"\n\n def __init__(self, input_channels, output_channels,\n conv_op=nn.Conv2d, conv_kwargs=None,\n norm_op=nn.BatchNorm2d, norm_op_kwargs=None,\n dropout_op=nn.Dropout2d, dropout_op_kwargs=None,\n nonlin=nn.LeakyReLU, nonlin_kwargs=None):\n super(ConvDropoutNormNonlin, self).__init__()\n if nonlin_kwargs is None:\n nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n if dropout_op_kwargs is None:\n dropout_op_kwargs = {'p': 0.5, 'inplace': True}\n if norm_op_kwargs is None:\n norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}\n if conv_kwargs is None:\n conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}\n\n self.nonlin_kwargs = nonlin_kwargs\n self.nonlin = nonlin\n self.dropout_op = dropout_op\n self.dropout_op_kwargs = dropout_op_kwargs\n self.norm_op_kwargs = norm_op_kwargs\n self.conv_kwargs = conv_kwargs\n self.conv_op = conv_op\n self.norm_op = norm_op\n\n self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)\n if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[\n 'p'] > 0:\n self.dropout = self.dropout_op(**self.dropout_op_kwargs)\n else:\n self.dropout = None\n self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)\n self.lrelu = self.nonlin(**self.nonlin_kwargs)\n\n def forward(self, x):\n x = self.conv(x)\n if self.dropout is not None:\n x = self.dropout(x)\n return self.lrelu(self.instnorm(x))\n\n\nclass ConvDropoutNonlinNorm(ConvDropoutNormNonlin):\n def forward(self, x):\n x = self.conv(x)\n if self.dropout is not None:\n x = self.dropout(x)\n return self.instnorm(self.lrelu(x))\n\n\nclass StackedConvLayers(nn.Module):\n def __init__(self, input_feature_channels, output_feature_channels, num_convs,\n conv_op=nn.Conv2d, conv_kwargs=None,\n norm_op=nn.BatchNorm2d, norm_op_kwargs=None,\n dropout_op=nn.Dropout2d, dropout_op_kwargs=None,\n nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin):\n '''\n stacks ConvDropoutNormLReLU layers. initial_stride will only be applied to first layer in the stack. 
The other parameters affect all layers\n :param input_feature_channels:\n :param output_feature_channels:\n :param num_convs:\n :param dilation:\n :param kernel_size:\n :param padding:\n :param dropout:\n :param initial_stride:\n :param conv_op:\n :param norm_op:\n :param dropout_op:\n :param inplace:\n :param neg_slope:\n :param norm_affine:\n :param conv_bias:\n '''\n self.input_channels = input_feature_channels\n self.output_channels = output_feature_channels\n\n if nonlin_kwargs is None:\n nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n if dropout_op_kwargs is None:\n dropout_op_kwargs = {'p': 0.5, 'inplace': True}\n if norm_op_kwargs is None:\n norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}\n if conv_kwargs is None:\n conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}\n\n self.nonlin_kwargs = nonlin_kwargs\n self.nonlin = nonlin\n self.dropout_op = dropout_op\n self.dropout_op_kwargs = dropout_op_kwargs\n self.norm_op_kwargs = norm_op_kwargs\n self.conv_kwargs = conv_kwargs\n self.conv_op = conv_op\n self.norm_op = norm_op\n\n if first_stride is not None:\n self.conv_kwargs_first_conv = deepcopy(conv_kwargs)\n self.conv_kwargs_first_conv['stride'] = first_stride\n else:\n self.conv_kwargs_first_conv = conv_kwargs\n\n super(StackedConvLayers, self).__init__()\n self.blocks = nn.Sequential(\n *([basic_block(input_feature_channels, output_feature_channels, self.conv_op,\n self.conv_kwargs_first_conv,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,\n self.nonlin, self.nonlin_kwargs)] +\n [basic_block(output_feature_channels, output_feature_channels, self.conv_op,\n self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,\n self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))\n\n def forward(self, x):\n return self.blocks(x)\n\n\ndef print_module_training_status(module):\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Conv3d) or isinstance(module, nn.Dropout3d) or \\\n isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout) or isinstance(module, nn.InstanceNorm3d) \\\n or isinstance(module, nn.InstanceNorm2d) or isinstance(module, nn.InstanceNorm1d) \\\n or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or isinstance(module,\n nn.BatchNorm1d):\n print(str(module), module.training)\n\n\nclass Upsample(nn.Module):\n def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False):\n super(Upsample, self).__init__()\n self.align_corners = align_corners\n self.mode = mode\n self.scale_factor = scale_factor\n self.size = size\n\n def forward(self, x):\n return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode,\n align_corners=self.align_corners)\n\n\nclass Generic_UNet(SegmentationNetwork):\n DEFAULT_BATCH_SIZE_3D = 2\n DEFAULT_PATCH_SIZE_3D = (64, 192, 160)\n SPACING_FACTOR_BETWEEN_STAGES = 2\n BASE_NUM_FEATURES_3D = 30\n MAX_NUMPOOL_3D = 999\n MAX_NUM_FILTERS_3D = 320\n\n DEFAULT_PATCH_SIZE_2D = (256, 256)\n BASE_NUM_FEATURES_2D = 30\n DEFAULT_BATCH_SIZE_2D = 50\n MAX_NUMPOOL_2D = 999\n MAX_FILTERS_2D = 480\n\n use_this_for_batch_size_computation_2D = 19739648\n use_this_for_batch_size_computation_3D = 520000000 # 505789440\n\n def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,\n feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,\n norm_op=nn.BatchNorm2d, norm_op_kwargs=None,\n 
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,\n nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,\n final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,\n conv_kernel_sizes=None,\n upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,\n max_num_features=None, basic_block=ConvDropoutNormNonlin,\n seg_output_use_bias=False):\n \"\"\"\n basically more flexible than v1, architecture is the same\n\n Does this look complicated? Nah bro. Functionality > usability\n\n This does everything you need, including world peace.\n\n Questions? -> [email protected]\n \"\"\"\n super(Generic_UNet, self).__init__()\n self.convolutional_upsampling = convolutional_upsampling\n self.convolutional_pooling = convolutional_pooling\n self.upscale_logits = upscale_logits\n if nonlin_kwargs is None:\n nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n if dropout_op_kwargs is None:\n dropout_op_kwargs = {'p': 0.5, 'inplace': True}\n if norm_op_kwargs is None:\n norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}\n\n self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}\n\n self.nonlin = nonlin\n self.nonlin_kwargs = nonlin_kwargs\n self.dropout_op_kwargs = dropout_op_kwargs\n self.norm_op_kwargs = norm_op_kwargs\n self.weightInitializer = weightInitializer\n self.conv_op = conv_op\n self.norm_op = norm_op\n self.dropout_op = dropout_op\n self.num_classes = num_classes\n self.final_nonlin = final_nonlin\n self._deep_supervision = deep_supervision\n self.do_ds = deep_supervision\n\n if conv_op == nn.Conv2d:\n upsample_mode = 'bilinear'\n pool_op = nn.MaxPool2d\n transpconv = nn.ConvTranspose2d\n if pool_op_kernel_sizes is None:\n pool_op_kernel_sizes = [(2, 2)] * num_pool\n if conv_kernel_sizes is None:\n conv_kernel_sizes = [(3, 3)] * (num_pool + 1)\n elif conv_op == nn.Conv3d:\n upsample_mode = 'trilinear'\n pool_op = nn.MaxPool3d\n transpconv = nn.ConvTranspose3d\n if pool_op_kernel_sizes is None:\n pool_op_kernel_sizes = [(2, 2, 2)] * num_pool\n if conv_kernel_sizes is None:\n conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)\n else:\n raise ValueError(\"unknown convolution dimensionality, conv op: %s\" % str(conv_op))\n\n self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64)\n self.pool_op_kernel_sizes = pool_op_kernel_sizes\n self.conv_kernel_sizes = conv_kernel_sizes\n\n self.conv_pad_sizes = []\n for krnl in self.conv_kernel_sizes:\n self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])\n\n if max_num_features is None:\n if self.conv_op == nn.Conv3d:\n self.max_num_features = self.MAX_NUM_FILTERS_3D\n else:\n self.max_num_features = self.MAX_FILTERS_2D\n else:\n self.max_num_features = max_num_features\n\n self.conv_blocks_context = []\n self.conv_blocks_localization = []\n self.td = []\n self.tu = []\n self.seg_outputs = []\n\n output_features = base_num_features\n input_features = input_channels\n\n for d in range(num_pool):\n # determine the first stride\n if d != 0 and self.convolutional_pooling:\n first_stride = pool_op_kernel_sizes[d - 1]\n else:\n first_stride = None\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[d]\n # add convolutions\n self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,\n self.conv_op, self.conv_kwargs, self.norm_op,\n self.norm_op_kwargs, self.dropout_op,\n 
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,\n first_stride, basic_block=basic_block))\n if not self.convolutional_pooling:\n self.td.append(pool_op(pool_op_kernel_sizes[d]))\n input_features = output_features\n output_features = int(np.round(output_features * feat_map_mul_on_downscale))\n\n output_features = min(output_features, self.max_num_features)\n\n # now the bottleneck.\n # determine the first stride\n if self.convolutional_pooling:\n first_stride = pool_op_kernel_sizes[-1]\n else:\n first_stride = None\n\n # the output of the last conv must match the number of features from the skip connection if we are not using\n # convolutional upsampling. If we use convolutional upsampling then the reduction in feature maps will be\n # done by the transposed conv\n if self.convolutional_upsampling:\n final_num_features = output_features\n else:\n final_num_features = self.conv_blocks_context[-1].output_channels\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]\n self.conv_blocks_context.append(nn.Sequential(\n StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,\n self.nonlin_kwargs, first_stride, basic_block=basic_block),\n StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,\n self.nonlin_kwargs, basic_block=basic_block)))\n\n # if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here\n if not dropout_in_localization:\n old_dropout_p = self.dropout_op_kwargs['p']\n self.dropout_op_kwargs['p'] = 0.0\n\n # now lets build the localization pathway\n for u in range(num_pool):\n nfeatures_from_down = final_num_features\n nfeatures_from_skip = self.conv_blocks_context[\n -(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2\n n_features_after_tu_and_concat = nfeatures_from_skip * 2\n\n # the first conv reduces the number of features to match those of skip\n # the following convs work on that number of features\n # if not convolutional upsampling then the final conv reduces the num of features again\n if u != num_pool - 1 and not self.convolutional_upsampling:\n final_num_features = self.conv_blocks_context[-(3 + u)].output_channels\n else:\n final_num_features = nfeatures_from_skip\n\n if not self.convolutional_upsampling:\n self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))\n else:\n self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],\n pool_op_kernel_sizes[-(u + 1)], bias=False))\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]\n self.conv_blocks_localization.append(nn.Sequential(\n StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,\n self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,\n self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),\n StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,\n self.nonlin, self.nonlin_kwargs, basic_block=basic_block)\n ))\n\n for ds 
in range(len(self.conv_blocks_localization)):\n # Last convolution operation(s) to obtain the labels score maps before softmax/argmax\n self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,\n 1, 1, 0, 1, 1, seg_output_use_bias))\n\n self.upscale_logits_ops = []\n cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]\n for usl in range(num_pool - 1):\n if self.upscale_logits:\n self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),\n mode=upsample_mode))\n else:\n self.upscale_logits_ops.append(lambda x: x)\n\n if not dropout_in_localization:\n self.dropout_op_kwargs['p'] = old_dropout_p\n\n # register all modules properly\n self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)\n self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)\n self.td = nn.ModuleList(self.td)\n self.tu = nn.ModuleList(self.tu)\n self.seg_outputs = nn.ModuleList(self.seg_outputs)\n if self.upscale_logits:\n self.upscale_logits_ops = nn.ModuleList(\n self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here\n\n if self.weightInitializer is not None:\n self.apply(self.weightInitializer)\n # self.apply(print_module_training_status)\n\n def forward(self, x):\n skips = []\n seg_outputs = []\n # Encoder\n for d in range(len(self.conv_blocks_context) - 1):\n x = self.conv_blocks_context[d](x)\n skips.append(x)\n if not self.convolutional_pooling:\n x = self.td[d](x)\n\n x = self.conv_blocks_context[-1](x)\n # Decoder\n for u in range(len(self.tu)):\n x = self.tu[u](x)\n x = torch.cat((x, skips[-(u + 1)]), dim=1)\n x = self.conv_blocks_localization[u](x)\n seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))\n\n if self._deep_supervision and self.do_ds:\n return tuple([seg_outputs[-1]] + [i(j) for i, j in\n zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])\n else:\n return seg_outputs[-1]\n\n def embedding(self, x):\n # return the last embedding map (before last conv + softmax)\n skips = []\n # Encoder\n for d in range(len(self.conv_blocks_context) - 1):\n x = self.conv_blocks_context[d](x)\n skips.append(x)\n if not self.convolutional_pooling:\n x = self.td[d](x)\n x = self.conv_blocks_context[-1](x)\n # Decoder\n for u in range(len(self.tu)):\n x = self.tu[u](x)\n x = torch.cat((x, skips[-(u + 1)]), dim=1)\n x = self.conv_blocks_localization[u](x)\n return x\n\n @staticmethod\n def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,\n num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,\n conv_per_stage=2):\n \"\"\"\n This only applies for num_conv_per_stage and convolutional_upsampling=True\n not real vram consumption. 
just a constant term to which the vram consumption will be approx proportional\n (+ offset for parameter storage)\n :param deep_supervision:\n :param patch_size:\n :param num_pool_per_axis:\n :param base_num_features:\n :param max_num_features:\n :param num_modalities:\n :param num_classes:\n :param pool_op_kernel_sizes:\n :return:\n \"\"\"\n if not isinstance(num_pool_per_axis, np.ndarray):\n num_pool_per_axis = np.array(num_pool_per_axis)\n\n npool = len(pool_op_kernel_sizes)\n\n map_size = np.array(patch_size)\n tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +\n num_modalities * np.prod(map_size, dtype=np.int64) +\n num_classes * np.prod(map_size, dtype=np.int64))\n\n num_feat = base_num_features\n\n for p in range(npool):\n for pi in range(len(num_pool_per_axis)):\n map_size[pi] /= pool_op_kernel_sizes[p][pi]\n num_feat = min(num_feat * 2, max_num_features)\n num_blocks = (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv\n tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat\n if deep_supervision and p < (npool - 2):\n tmp += np.prod(map_size, dtype=np.int64) * num_classes\n # print(p, map_size, num_feat, tmp)\n return tmp\n"
] |
[
[
"numpy.sum",
"numpy.logical_and",
"numpy.min",
"scipy.ndimage.measurements.label",
"numpy.ones",
"numpy.max",
"numpy.argmax",
"numpy.zeros_like",
"numpy.load",
"numpy.array",
"numpy.zeros",
"numpy.where"
],
[
"torch.cat",
"torch.nn.ModuleList",
"numpy.round",
"numpy.prod",
"torch.nn.functional.interpolate",
"numpy.array",
"numpy.vstack"
]
] |
SudoHead/cs231n.github.io
|
[
"652285518ff5ed8c02503bac6cb24aaea0d6ff75"
] |
[
"assignments/2019/assignment1/cs231n/data_utils.py"
] |
[
"from __future__ import print_function\n\nfrom builtins import range\nfrom six.moves import cPickle as pickle\nimport numpy as np\nimport os\n\n# scipy.misc.imread is deprecated, so use imageio.imread\nfrom scipy.misc import imread\nimport platform\n\ndef load_pickle(f):\n version = platform.python_version_tuple()\n if version[0] == '2':\n return pickle.load(f)\n elif version[0] == '3':\n return pickle.load(f, encoding='latin1')\n raise ValueError(\"invalid python version: {}\".format(version))\n\ndef load_CIFAR_batch(filename):\n \"\"\" load single batch of cifar \"\"\"\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y\n\ndef load_CIFAR10(ROOT):\n \"\"\" load all of cifar \"\"\"\n xs = []\n ys = []\n for b in range(1,6):\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte\n\n\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000,\n subtract_mean=True):\n \"\"\"\n Load the CIFAR-10 dataset from disk and perform preprocessing to prepare\n it for classifiers. These are the same steps as we used for the SVM, but\n condensed to a single function.\n \"\"\"\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = list(range(num_training, num_training + num_validation))\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = list(range(num_training))\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = list(range(num_test))\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n if subtract_mean:\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Transpose so that channels come first\n X_train = X_train.transpose(0, 3, 1, 2).copy()\n X_val = X_val.transpose(0, 3, 1, 2).copy()\n X_test = X_test.transpose(0, 3, 1, 2).copy()\n\n # Package data into a dictionary\n return {\n 'X_train': X_train, 'y_train': y_train,\n 'X_val': X_val, 'y_val': y_val,\n 'X_test': X_test, 'y_test': y_test,\n }\n\n\ndef load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):\n \"\"\"\n Load TinyImageNet. 
Each of TinyImageNet-100-A, TinyImageNet-100-B, and\n TinyImageNet-200 have the same directory structure, so this can be used\n to load any of them.\n\n Inputs:\n - path: String giving path to the directory to load.\n - dtype: numpy datatype used to load the data.\n - subtract_mean: Whether to subtract the mean training image.\n\n Returns: A dictionary with the following entries:\n - class_names: A list where class_names[i] is a list of strings giving the\n WordNet names for class i in the loaded dataset.\n - X_train: (N_tr, 3, 64, 64) array of training images\n - y_train: (N_tr,) array of training labels\n - X_val: (N_val, 3, 64, 64) array of validation images\n - y_val: (N_val,) array of validation labels\n - X_test: (N_test, 3, 64, 64) array of testing images.\n - y_test: (N_test,) array of test labels; if test labels are not available\n (such as in student code) then y_test will be None.\n - mean_image: (3, 64, 64) array giving mean training image\n \"\"\"\n # First load wnids\n with open(os.path.join(path, 'wnids.txt'), 'r') as f:\n wnids = [x.strip() for x in f]\n\n # Map wnids to integer labels\n wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}\n\n # Use words.txt to get names for each class\n with open(os.path.join(path, 'words.txt'), 'r') as f:\n wnid_to_words = dict(line.split('\\t') for line in f)\n for wnid, words in wnid_to_words.items():\n wnid_to_words[wnid] = [w.strip() for w in words.split(',')]\n class_names = [wnid_to_words[wnid] for wnid in wnids]\n\n # Next load training data.\n X_train = []\n y_train = []\n for i, wnid in enumerate(wnids):\n if (i + 1) % 20 == 0:\n print('loading training data for synset %d / %d'\n % (i + 1, len(wnids)))\n # To figure out the filenames we need to open the boxes file\n boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)\n with open(boxes_file, 'r') as f:\n filenames = [x.split('\\t')[0] for x in f]\n num_images = len(filenames)\n\n X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)\n y_train_block = wnid_to_label[wnid] * \\\n np.ones(num_images, dtype=np.int64)\n for j, img_file in enumerate(filenames):\n img_file = os.path.join(path, 'train', wnid, 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n ## grayscale file\n img.shape = (64, 64, 1)\n X_train_block[j] = img.transpose(2, 0, 1)\n X_train.append(X_train_block)\n y_train.append(y_train_block)\n\n # We need to concatenate all training data\n X_train = np.concatenate(X_train, axis=0)\n y_train = np.concatenate(y_train, axis=0)\n\n # Next load validation data\n with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:\n img_files = []\n val_wnids = []\n for line in f:\n img_file, wnid = line.split('\\t')[:2]\n img_files.append(img_file)\n val_wnids.append(wnid)\n num_val = len(img_files)\n y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])\n X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)\n for i, img_file in enumerate(img_files):\n img_file = os.path.join(path, 'val', 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n img.shape = (64, 64, 1)\n X_val[i] = img.transpose(2, 0, 1)\n\n # Next load test images\n # Students won't have test labels, so we need to iterate over files in the\n # images directory.\n img_files = os.listdir(os.path.join(path, 'test', 'images'))\n X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)\n for i, img_file in enumerate(img_files):\n img_file = os.path.join(path, 'test', 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n img.shape = 
(64, 64, 1)\n X_test[i] = img.transpose(2, 0, 1)\n\n y_test = None\n y_test_file = os.path.join(path, 'test', 'test_annotations.txt')\n if os.path.isfile(y_test_file):\n with open(y_test_file, 'r') as f:\n img_file_to_wnid = {}\n for line in f:\n line = line.split('\\t')\n img_file_to_wnid[line[0]] = line[1]\n y_test = [wnid_to_label[img_file_to_wnid[img_file]]\n for img_file in img_files]\n y_test = np.array(y_test)\n\n mean_image = X_train.mean(axis=0)\n if subtract_mean:\n X_train -= mean_image[None]\n X_val -= mean_image[None]\n X_test -= mean_image[None]\n\n return {\n 'class_names': class_names,\n 'X_train': X_train,\n 'y_train': y_train,\n 'X_val': X_val,\n 'y_val': y_val,\n 'X_test': X_test,\n 'y_test': y_test,\n 'class_names': class_names,\n 'mean_image': mean_image,\n }\n\n\ndef load_models(models_dir):\n \"\"\"\n Load saved models from disk. This will attempt to unpickle all files in a\n directory; any files that give errors on unpickling (such as README.txt)\n will be skipped.\n\n Inputs:\n - models_dir: String giving the path to a directory containing model files.\n Each model file is a pickled dictionary with a 'model' field.\n\n Returns:\n A dictionary mapping model file names to models.\n \"\"\"\n models = {}\n for model_file in os.listdir(models_dir):\n with open(os.path.join(models_dir, model_file), 'rb') as f:\n try:\n models[model_file] = load_pickle(f)['model']\n except pickle.UnpicklingError:\n continue\n return models\n\n\ndef load_imagenet_val(num=None):\n \"\"\"Load a handful of validation images from ImageNet.\n\n Inputs:\n - num: Number of images to load (max of 25)\n\n Returns:\n - X: numpy array with shape [num, 224, 224, 3]\n - y: numpy array of integer image labels, shape [num]\n - class_names: dict mapping integer label to class name\n \"\"\"\n imagenet_fn = 'cs231n/datasets/imagenet_val_25.npz'\n if not os.path.isfile(imagenet_fn):\n print('file %s not found' % imagenet_fn)\n print('Run the following:')\n print('cd cs231n/datasets')\n print('bash get_imagenet_val.sh')\n assert False, 'Need to download imagenet_val_25.npz'\n f = np.load(imagenet_fn)\n X = f['X']\n y = f['y']\n class_names = f['label_map'].item()\n if num is not None:\n X = X[:num]\n y = y[:num]\n return X, y, class_names\n"
] |
[
[
"numpy.ones",
"numpy.concatenate",
"numpy.mean",
"scipy.misc.imread",
"numpy.load",
"numpy.array",
"numpy.zeros"
]
] |
WildbookOrg/wbia-deprecate-tpl-brambox
|
[
"9aa6a69f706d0653a65520c696a7cd66715b6a37"
] |
[
"brambox/boxes/statistics/pr.py"
] |
[
"# -*- coding: utf-8 -*-\n#\n# Copyright EAVISE\n# Author: Maarten Vandersteegen\n# Author: Tanguy Ophoff\n#\n# Functions for generating PR-curve values and calculating average precision\n#\n\nimport math\nfrom statistics import mean\nimport numpy as np\nimport scipy.interpolate\n\nfrom .util import *\n\n__all__ = ['pr', 'ap']\n\n\ndef pr(detections, ground_truth, overlap_threshold=0.5):\n \"\"\" Compute a list of precision recall values that can be plotted into a graph.\n\n Args:\n detections (dict): Detection objects per image\n ground_truth (dict): Annotation objects per image\n overlap_threshold (Number, optional): Minimum iou threshold for true positive; Default **0.5**\n\n Returns:\n tuple: **[precision_values]**, **[recall_values]**\n \"\"\"\n tps, fps, num_annotations = match_detections(\n detections, ground_truth, overlap_threshold\n )\n\n precision = []\n recall = []\n for tp, fp in zip(tps, fps):\n recall.append(tp / num_annotations)\n precision.append(tp / (fp + tp))\n\n return precision, recall\n\n\ndef ap(precision, recall, num_of_samples=100):\n \"\"\" Compute the average precision from a given pr-curve.\n The average precision is defined as the area under the curve.\n\n Args:\n precision (list): Precision values\n recall (list): Recall values\n num_of_samples (int, optional): Number of samples to take from the curve to measure the average precision; Default **100**\n\n Returns:\n Number: average precision\n \"\"\"\n if len(precision) > 1 and len(recall) > 1:\n p = np.array(precision)\n r = np.array(recall)\n p_start = p[np.argmin(r)]\n samples = np.arange(0.0, 1.0, 1.0 / num_of_samples)\n interpolated = scipy.interpolate.interp1d(\n r, p, fill_value=(p_start, 0.0), bounds_error=False\n )(samples)\n avg = sum(interpolated) / len(interpolated)\n elif len(precision) > 0 and len(recall) > 0:\n # 1 point on PR: AP is box between (0,0) and (p,r)\n avg = precision[0] * recall[0]\n else:\n avg = float('nan')\n\n return avg\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.argmin"
]
] |
Utsav-Patel/The-Imitation-Game
|
[
"09dfaffdf917c1adfb1d8cd3e09a216b9a014e52",
"09dfaffdf917c1adfb1d8cd3e09a216b9a014e52"
] |
[
"models/project2/dense/20x20/model1.py",
"src/Maze.py"
] |
[
"import pickle\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nfrom tensorflow.keras.utils import to_categorical\n\nfrom constants import CHECKPOINT_FILEPATH, PROJECT2_DATA_PATH, PROJECT2_VALIDATION_PATH\nfrom model_architectures import create_model_project2_dense_20x20\nfrom DataGenerator import DataGenerator\n\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.compat.v1.Session(config=config)\n\n\ndef prepare_dataset(path):\n open_file = open(path, \"rb\")\n loaded_list = pickle.load(open_file)\n open_file.close()\n\n print(\"Successfully loaded data from pickle file\", path)\n\n input_list = list()\n output_list = list()\n\n for dct in loaded_list:\n input_list.append({'input': dct['input'], 'sensed': dct['sensed'], 'current_pos': dct['current_pos']})\n output_list.append(dct['output'])\n\n # input_numpy = np.array(input_list)\n # print(input_numpy.shape)\n # # input_numpy = input_numpy.reshape(input_numpy.shape[0], -1)\n\n output_numpy = np.array(output_list)\n output_numpy = output_numpy.reshape(output_numpy.shape[0])\n output_numpy = to_categorical(output_numpy)\n\n return input_list, output_numpy\n\n\n# print(\"Input shape\", input_numpy.shape)\n# print(\"Output shape\", output_numpy.shape)\n# print('Starting training')\n\nX_train, y_train = prepare_dataset(PROJECT2_DATA_PATH)\nX_val, y_val = prepare_dataset(PROJECT2_VALIDATION_PATH)\n\nX_val, X_test, y_val, y_test = train_test_split(X_val, y_val, test_size=0.50, random_state=81)\n\n# print(\"X train shape\", X_train.shape)\n# print(\"y train shape\", y_train.shape)\n# print(\"X validation shape\", X_val.shape)\n# print(\"y validation shape\", y_val.shape)\n# print(\"X test shape\", X_test.shape)\n# print(\"y test shape\", y_test.shape)\n\ntraining_generator = DataGenerator(X_train, y_train)\nvalidation_generator = DataGenerator(X_val, y_val)\ntesting_generator = DataGenerator(X_test, y_test)\n\nmodel = create_model_project2_dense_20x20()\nmodel.summary()\n\nmodel_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=CHECKPOINT_FILEPATH,\n verbose=1,\n save_weights_only=True,\n monitor='val_accuracy',\n save_best_only=False,\n save_freq='epoch'\n)\n\nhistory = model.fit(training_generator, epochs=20, validation_data=validation_generator, use_multiprocessing=True,\n workers=75, callbacks=[model_checkpoint_callback])\n\nprint(history.history)\nmodel.evaluate(testing_generator, verbose=2)\n",
"import numpy as np\n\nfrom constants import UNVISITED_NUMBER\nfrom src.Node import Node\n\n\nclass Maze:\n def __init__(self, num_cols: int, num_rows: int):\n self.num_cols = num_cols\n self.num_rows = num_rows\n\n self.maze_numpy = np.zeros((num_rows, num_cols)) + UNVISITED_NUMBER\n self.num_times_cell_visited = np.zeros((num_rows, num_cols))\n\n self.maze = list()\n for row_num in range(self.num_rows):\n lst = list()\n for column_num in range(self.num_cols):\n lst.append(Node())\n self.maze.append(lst)\n\n def __str__(self):\n return 'NUmber of columns: ' + str(self.num_cols) + '\\nNumber of rows: ' + str(self.num_rows) \\\n + '\\nMaze: ' + str(self.maze)\n\n def reset(self):\n self.maze_numpy.fill(UNVISITED_NUMBER)\n self.num_times_cell_visited.fill(0)\n for row in range(self.num_rows):\n for col in range(self.num_cols):\n self.maze[row][col].reset()\n\n def reset_except_h(self):\n self.maze_numpy.fill(UNVISITED_NUMBER)\n self.num_times_cell_visited.fill(0)\n for row in range(self.num_rows):\n for col in range(self.num_cols):\n self.maze[row][col].reset_except_h()\n"
] |
[
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.compat.v1.ConfigProto",
"sklearn.model_selection.train_test_split",
"tensorflow.compat.v1.Session",
"numpy.array",
"tensorflow.keras.utils.to_categorical"
],
[
"numpy.zeros"
]
] |
Hotpotfish/pysc2
|
[
"3d7f7ffc01a50ab69d435b65c892cd0bc11265a8",
"3d7f7ffc01a50ab69d435b65c892cd0bc11265a8"
] |
[
"pysc2/agents/myAgent/myAgent_7/net/lenet.py",
"pysc2/agents/myAgent/myAgent_5/macro_operation.py"
] |
[
"import tensorflow as tf\n\n\nclass Lenet():\n\n def __init__(self, mu, sigma, learning_rate, action_dim, parameterdim, statedim, name):\n self.mu = mu\n self.sigma = sigma\n self.learning_rate = learning_rate\n\n self.action_dim = action_dim\n self.parameterdim = parameterdim\n self.statedim = statedim\n\n self.name = name\n\n self._build_graph()\n\n def _build_graph(self):\n self._setup_placeholders_graph()\n self._build_network_graph(self.name)\n self._compute_loss_graph()\n # self._compute_acc_graph()\n self._create_train_op_graph()\n self.merged_summary = tf.summary.merge_all()\n\n def _setup_placeholders_graph(self):\n self.action_input = tf.placeholder(\"float\", shape=[None, self.action_dim + self.parameterdim], name=self.name + '_' + 'action_input')\n self.y_input = tf.placeholder(\"float\", shape=[None, 1 + self.parameterdim], name=self.name + '_' + 'y_input')\n self.state_input = tf.placeholder(\"float\", shape=self.statedim, name=self.name + '_' + 'state_input')\n\n def _cnn_layer(self, scope_name, W_name, b_name, x, filter_shape, conv_strides, padding_tag='VALID'):\n with tf.variable_scope(scope_name):\n conv_W = tf.get_variable(W_name,\n dtype=tf.float32,\n initializer=tf.truncated_normal(shape=filter_shape, mean=self.mu,\n stddev=self.sigma))\n conv_b = tf.get_variable(b_name,\n dtype=tf.float32,\n initializer=tf.zeros(filter_shape[3]))\n conv = tf.nn.conv2d(x, conv_W,\n strides=conv_strides,\n padding=padding_tag) + conv_b\n\n return conv\n\n def _pooling_layer(self, scope_name, x, pool_ksize, pool_strides, padding_tag='VALID'):\n with tf.variable_scope(scope_name):\n pool = tf.nn.avg_pool(x, pool_ksize, pool_strides, padding=padding_tag)\n return pool\n\n def _fully_connected_layer(self, scope_name, W_name, b_name, x, W_shape):\n with tf.variable_scope(scope_name):\n x = tf.reshape(x, [-1, W_shape[0]])\n w = tf.get_variable(W_name,\n dtype=tf.float32,\n initializer=tf.truncated_normal(shape=W_shape, mean=self.mu,\n stddev=self.sigma))\n b = tf.get_variable(b_name,\n dtype=tf.float32,\n initializer=tf.zeros(W_shape[1]))\n\n r = tf.add(tf.matmul(x, w), b)\n\n return r\n\n def _build_network_graph(self, scope_name):\n with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):\n # 28 * 28 * 6\n self.conv1 = self._cnn_layer('layer_1_conv', 'conv_w', 'conv_b', self.state_input, (5, 5, self.statedim[3], 6), [1, 1, 1, 1])\n # 14 * 14 * 6\n self.pool1 = self._pooling_layer('layer_1_pooling', self.conv1, [1, 2, 2, 1], [1, 2, 2, 1])\n\n # 10 * 10 * 16\n self.conv2 = self._cnn_layer('layer_2_conv', 'conv_w', 'conv_b', self.pool1, (5, 5, 6, 16), [1, 1, 1, 1])\n\n # 5 * 5 * 16\n self.pool2 = self._pooling_layer('layer_2_pooling', self.conv2, [1, 2, 2, 1], [1, 2, 2, 1])\n\n # w.shape=[5 * 5 * 16, 120]\n self.fc1 = self._fully_connected_layer('full_connected1', 'full_connected_w', 'full_connected_b',\n self.pool2, (self.pool2._shape[1] * self.pool2._shape[2] * self.pool2._shape[3], 120))\n\n # w.shape=[120, 84]\n self.fc2 = self._fully_connected_layer('full_connected2', 'full_connected_w',\n 'full_connected_b',\n self.fc1, (120, 84))\n # w.shape=[84, 10]\n self.logits = self._fully_connected_layer('full_connected3', 'full_connected_w', 'full_connected_b',\n self.fc2, (84, self.action_dim + self.parameterdim))\n\n self.Q_value = tf.nn.softmax(self.logits)\n tf.summary.histogram(\"Q_value\", self.Q_value)\n\n def _compute_loss_graph(self):\n with tf.name_scope(self.name + \"_loss_function\"):\n self.Q_action = tf.reduce_sum(tf.multiply(self.Q_value, self.action_input))\n self.loss = 
tf.reduce_mean(tf.square(self.y_input - self.Q_action))\n # tf.summary.scalar(self.name + \"_loss_function\", self.loss)\n\n def _compute_acc_graph(self):\n with tf.name_scope(self.name + \"_acc_function\"):\n self.accuracy = \\\n tf.metrics.accuracy(labels=tf.argmax(self.y, axis=1), predictions=tf.argmax(self.y_predicted, axis=1))[\n 1]\n tf.summary.scalar(\"accuracy\", self.accuracy)\n\n def _create_train_op_graph(self):\n self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n",
"import random\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\nfrom pysc2.lib import units\nfrom queue import Queue\nimport numpy as np\n\n\n\nmapSzie = 128\n\n\n# def plt_soldiers(soldiers, color):\n# for i in range(len(soldiers)):\n# plt.scatter(soldiers[i].x, soldiers[i].y, c=color, cmap='coolwarm')\n\n\ndef automatic_formation(obs):\n soldiers = get_my_units_by_type(obs, units.Terran.Marine)\n # plt_soldiers(soldiers, 1000.0)\n # plt.show()\n\n soldier_count = len(soldiers)\n\n combat_teams = []\n\n loop = 0\n\n while soldier_count != 0:\n\n # 战斗小组人数\n combat_team_count = random.randint(1, soldier_count)\n\n combat_team = []\n\n # 筛选人员\n for i in range(combat_team_count):\n random_index = random.randint(0, len(soldiers) - 1)\n\n soldier = soldiers.pop(random_index)\n\n combat_team.append(soldier)\n\n print('soldier:' + str(soldier.tag) + ' ')\n\n # plt_soldiers(combat_team, float(loop * 100))\n combat_teams.append(combat_team)\n\n print('are in combat_team_' + str(loop))\n print('-------------------------------')\n loop += 1\n soldier_count -= combat_team_count\n # plt.show()\n\n print()\n print()\n print()\n\n return combat_teams\n\n\ndef chooseARandomPlace(input_x, input_y):\n offset = 20\n add_y = random.randint(-offset, offset)\n add_x = random.randint(-offset, offset)\n\n if input_x + add_x >= mapSzie:\n\n outx = mapSzie\n\n elif input_x + add_x < 0:\n outx = 0\n\n else:\n outx = input_x + add_x\n\n if input_y + add_y >= mapSzie:\n\n outy = mapSzie\n\n elif input_y + add_y < 0:\n outy = 0\n\n else:\n outy = input_y + add_y\n\n return (outx, outy)\n\n\ndef get_my_units_by_type(obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.alliance == features.PlayerRelative.SELF]\n\n\ndef get_enemy_units_by_type(obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.alliance == features.PlayerRelative.ENEMY]\n\n\ndef get_my_completed_units_by_type(obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.build_progress == 100\n and unit.alliance == features.PlayerRelative.SELF]\n\n\ndef get_enemy_completed_units_by_type(obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.build_progress == 100\n and unit.alliance == features.PlayerRelative.ENEMY]\n\n\ndef find_any_enemy(obs):\n return [unit for unit in obs.observation.raw_units\n if unit.alliance == features.PlayerRelative.ENEMY]\n\n\ndef get_distances(obs, units, xy):\n units_xy = [(unit.x, unit.y) for unit in units]\n return np.linalg.norm(np.array(units_xy) - np.array(xy), axis=1)\n\n\ndef harvest_minerals(obs):\n scvs = get_my_units_by_type(obs, units.Terran.SCV)\n idle_scvs = [scv for scv in scvs if scv.order_length == 0]\n if len(idle_scvs) > 0:\n mineral_patches = [unit for unit in obs.observation.raw_units\n if unit.unit_type in [\n units.Neutral.BattleStationMineralField,\n units.Neutral.BattleStationMineralField750,\n units.Neutral.LabMineralField,\n units.Neutral.LabMineralField750,\n units.Neutral.MineralField,\n units.Neutral.MineralField750,\n units.Neutral.PurifierMineralField,\n units.Neutral.PurifierMineralField750,\n units.Neutral.PurifierRichMineralField,\n units.Neutral.PurifierRichMineralField750,\n units.Neutral.RichMineralField,\n units.Neutral.RichMineralField750\n ]]\n if len(mineral_patches) == 0:\n return actions.RAW_FUNCTIONS.no_op()\n scv = 
random.choice(idle_scvs)\n distances = get_distances(obs, mineral_patches, (scv.x, scv.y))\n mineral_patch = mineral_patches[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Harvest_Gather_unit(\n \"now\", scv.tag, mineral_patch.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef harvest_VespeneGeyser(obs):\n scvs = get_my_units_by_type(obs, units.Terran.SCV)\n # idle_scvs = [scv for scv in scvs if scv.order_length == 0]\n VespeneGeyser_patches = get_my_completed_units_by_type(obs, units.Terran.Refinery) \\\n + get_my_completed_units_by_type(obs, units.Terran.RefineryRich)\n if len(scvs) > 0 and len(VespeneGeyser_patches) > 0:\n for i in range(len(VespeneGeyser_patches)):\n if VespeneGeyser_patches[i].assigned_harvesters < VespeneGeyser_patches[i].ideal_harvesters:\n scv = random.choice(scvs)\n return actions.RAW_FUNCTIONS.Harvest_Gather_unit(\n \"now\", scv.tag, VespeneGeyser_patches[i].tag)\n\n # scv = random.choice(scvs)\n # distances = get_distances(obs, VespeneGeyser_patches, (scv.x, scv.y))\n # VespeneGeyser_patch = VespeneGeyser_patches[np.argmin(distances)]\n # return actions.RAW_FUNCTIONS.Harvest_Gather_unit(\n # \"now\", scv.tag, VespeneGeyser_patch.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef build_refinery(obs):\n commandCenters = get_my_units_by_type(obs, units.Terran.CommandCenter)\n if len(commandCenters) > 0:\n commandCenter = commandCenters[random.randint(0, len(commandCenters) - 1)]\n scvs = get_my_units_by_type(obs, units.Terran.SCV)\n if (obs.observation.player.minerals >= 75 and len(scvs) > 0):\n VespeneGeyser_patches = [unit for unit in obs.observation.raw_units\n if unit.unit_type in [\n units.Neutral.ProtossVespeneGeyser,\n units.Neutral.PurifierVespeneGeyser,\n units.Neutral.RichVespeneGeyser,\n units.Neutral.ShakurasVespeneGeyser,\n units.Neutral.VespeneGeyser,\n ]]\n if len(VespeneGeyser_patches) == 0:\n return actions.RAW_FUNCTIONS.no_op()\n\n refineries = get_my_units_by_type(obs, units.Terran.Refinery)\n\n if len(refineries) == 0:\n scv = random.choice(scvs)\n distances = get_distances(obs, VespeneGeyser_patches, (commandCenter.x, commandCenter.y))\n VespeneGeyser_patch = VespeneGeyser_patches[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Refinery_pt(\n \"now\", scv.tag, VespeneGeyser_patch.tag)\n elif len(refineries) < len(commandCenters) * 2:\n for i in range(len(refineries)):\n for j in range(len(VespeneGeyser_patches)):\n if refineries[i].x == VespeneGeyser_patches[j].x and \\\n refineries[i].y == VespeneGeyser_patches[j].y:\n VespeneGeyser_patches.pop(j)\n j -= 1\n break\n scv = random.choice(scvs)\n distances = get_distances(obs, VespeneGeyser_patches, (commandCenter.x, commandCenter.y))\n VespeneGeyser_patch = VespeneGeyser_patches[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Refinery_pt(\n \"now\", scv.tag, VespeneGeyser_patch.tag)\n\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef build_supply_depot(obs):\n commandCenters = get_my_units_by_type(obs, units.Terran.CommandCenter)\n if len(commandCenters) > 0:\n commandCenter = commandCenters[random.randint(0, len(commandCenters) - 1)]\n scvs = get_my_units_by_type(obs, units.Terran.SCV)\n if (obs.observation.player.minerals >= 100 and len(scvs) > 0 and obs.observation.player.food_cap < 200):\n supply_depot_xy = chooseARandomPlace(commandCenter.x, commandCenter.y)\n distances = get_distances(obs, scvs, supply_depot_xy)\n scv = scvs[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_SupplyDepot_pt(\"now\", scv.tag, supply_depot_xy)\n return 
actions.RAW_FUNCTIONS.no_op()\n\n\ndef build_barracks(obs):\n commandCenters = get_my_units_by_type(obs, units.Terran.CommandCenter)\n if len(commandCenters) > 0:\n completed_supply_depots = get_my_completed_units_by_type(\n obs, units.Terran.SupplyDepot)\n\n commandCenter = commandCenters[random.randint(0, len(commandCenters) - 1)]\n scvs = get_my_units_by_type(obs, units.Terran.SCV)\n if (len(completed_supply_depots) > 0 and\n obs.observation.player.minerals >= 150 and len(scvs) > 0):\n barracks_xy = chooseARandomPlace(commandCenter.x, commandCenter.y)\n distances = get_distances(obs, scvs, barracks_xy)\n scv = scvs[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Barracks_pt(\n \"now\", scv.tag, barracks_xy)\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef train_scv(obs):\n completed_commandCenters = get_my_completed_units_by_type(obs, units.Terran.CommandCenter)\n free_supply = (obs.observation.player.food_cap - obs.observation.player.food_used)\n if (len(completed_commandCenters) > 0 and obs.observation.player.minerals >= 50 and free_supply > 0):\n commandCenters = get_my_units_by_type(obs, units.Terran.CommandCenter)\n commandCenter = commandCenters[random.randint(0, len(commandCenters) - 1)]\n if commandCenter.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_SCV_quick(\"now\", commandCenter.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef train_marine(obs):\n completed_barrackses = get_my_completed_units_by_type(\n obs, units.Terran.Barracks)\n free_supply = (obs.observation.player.food_cap -\n obs.observation.player.food_used)\n if (len(completed_barrackses) > 0 and obs.observation.player.minerals >= 100\n and free_supply > 0):\n barracks = get_my_units_by_type(obs, units.Terran.Barracks)\n barrack = barracks[random.randint(0, len(barracks) - 1)]\n if barrack.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_Marine_quick(\"now\", barrack.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n\n#\n# def attack(obs):\n# marines = get_my_units_by_type(obs, units.Terran.Marine)\n# if len(marines) > 0:\n# enmies = find_any_enemy(obs)\n# attack_orders = []\n# if len(enmies) > 0:\n# for i in range(len(marines)):\n# marine_xy = (marines[i].x, marines[i].y)\n# distances = get_distances(obs, enmies, marine_xy)\n# enmy = enmies[np.argmin(distances)]\n# attack_orders.append(actions.RAW_FUNCTIONS.Attack_unit(\"now\", marines[i].tag, enmy.tag))\n# return attack_orders\n#\n# else:\n# for i in range(len(marines)):\n# random_x = random.randint(0, mapSzie - 1)\n# random_y = random.randint(0, mapSzie - 1)\n# attack_orders.append(actions.RAW_FUNCTIONS.Move_pt(\"queued\", marines[i].tag, (random_x, random_y)))\n# return attack_orders\n#\n# return actions.RAW_FUNCTIONS.no_op()\n\ndef attack(obs):\n combat_teams = automatic_formation(obs)\n if len(combat_teams) > 0:\n enmies = find_any_enemy(obs)\n attack_orders = []\n if len(enmies) > 0:\n for i in range(len(combat_teams)):\n marine_xy = (combat_teams[i][0].x, combat_teams[i][0].y)\n distances = get_distances(obs, enmies, marine_xy)\n enmy = enmies[np.argmin(distances)]\n for j in range(len(combat_teams[i])):\n attack_orders.append(actions.RAW_FUNCTIONS.Attack_unit(\"now\", combat_teams[i][j].tag, enmy.tag))\n return attack_orders\n\n else:\n for i in range(len(combat_teams)):\n random_x = random.randint(0, mapSzie - 1)\n random_y = random.randint(0, mapSzie - 1)\n for j in range(len(combat_teams[i])):\n attack_orders.append(\n actions.RAW_FUNCTIONS.Move_pt(\"queued\", combat_teams[i][j].tag, (random_x, random_y)))\n return 
attack_orders\n\n return actions.RAW_FUNCTIONS.no_op()\n"
] |
[
[
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.multiply",
"tensorflow.nn.conv2d",
"tensorflow.truncated_normal",
"tensorflow.zeros",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.summary.merge_all",
"tensorflow.nn.avg_pool",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.variable_scope",
"tensorflow.train.AdamOptimizer",
"tensorflow.argmax",
"tensorflow.summary.scalar",
"tensorflow.summary.histogram"
],
[
"numpy.array",
"numpy.argmin"
]
] |
zhuboli/alf
|
[
"b357565638c9336ebd88cecb9766a17d72d5d0c3",
"38a3621337a030f74bb3944d7695e7642e777e10",
"b357565638c9336ebd88cecb9766a17d72d5d0c3",
"b357565638c9336ebd88cecb9766a17d72d5d0c3",
"b357565638c9336ebd88cecb9766a17d72d5d0c3",
"b357565638c9336ebd88cecb9766a17d72d5d0c3"
] |
[
"alf/environments/suite_carla.py",
"alf/environments/mario_wrappers.py",
"alf/utils/datagen.py",
"alf/utils/conditional_ops.py",
"alf/device_ctx.py",
"alf/networks/preprocessors_test.py"
] |
[
"# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"CarlaEnvironment suite.\n\nTo use this, there are two ways:\n\n1. Run the code within docker image horizonrobotics/alf:0.0.3-carla\n Both `Docker <https://docs.docker.com/engine/install/ubuntu/>`_ and\n `Nvidia-Docker2 <https://github.com/NVIDIA/nvidia-docker>`_ need to be installed.\n\n2. Install carla:\n\n.. code-block:: bash\n\n wget https://carla-releases.s3.eu-west-3.amazonaws.com/Linux/CARLA_0.9.9.tar.gz\n mkdir carla\n tar zxf CARLA_0.9.9.tar.gz -C carla\n cd carla/Import\n wget https://carla-releases.s3.eu-west-3.amazonaws.com/Linux/AdditionalMaps_0.9.9.tar.gz\n cd ..\n ./ImportAssert.sh\n easy_install PythonAPI/carla/dist/carla-0.9.9-py3.7-linux-x86_64.egg\n\nMake sure you are using python3.7\n\n\"\"\"\n\nfrom collections import OrderedDict\nfrom absl import logging\nimport gin\nimport math\nimport numpy as np\nimport os\nimport random\nimport subprocess\nimport sys\nimport time\nimport torch\n\ntry:\n import carla\nexcept ImportError:\n carla = None\n\nimport alf\nimport alf.data_structures as ds\nfrom alf.utils import common\nfrom .suite_socialbot import _get_unused_port\nfrom .alf_environment import AlfEnvironment\nfrom .carla_sensors import (CameraSensor, CollisionSensor, GnssSensor,\n IMUSensor, LaneInvasionSensor, NavigationSensor,\n RadarSensor, World, MINIMUM_RENDER_WIDTH,\n MINIMUM_RENDER_HEIGHT)\n\n\ndef is_available():\n return carla is not None\n\n\ndef geo_distance(loc1, loc2):\n \"\"\"\n Args:\n loc1 (np.array): [latitude, longitude, altitude]. 
The units for altitude\n is meter.\n loc2 (np.array):\n Returns:\n float: distance in meters\n \"\"\"\n earth_radius = 6371 * 1000\n d2r = math.pi / 180\n\n d = loc1 - loc2\n dlat = d[0] * d2r\n dlon = d[1] * d2r\n lat1 = loc1[0] * d2r\n lat2 = loc2[0] * d2r\n a = np.sin(\n 0.5 * dlat)**2 + np.sin(0.5 * dlon)**2 * np.cos(lat1) * np.cos(lat2)\n c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))\n c = earth_radius * c\n return np.sqrt(c * c + d[2] * d[2])\n\n\ndef _calculate_relative_position(self_transform, location):\n \"\"\"\n Args:\n self_transform (carla.Transform): transform of self actor\n location (np.ndarray): shape is [3] or [N, 3]\n Returns:\n np.ndarray: shape is same as location\n \"\"\"\n trans = self_transform\n self_loc = trans.location\n yaw = math.radians(trans.rotation.yaw)\n\n self_loc = np.array([self_loc.x, self_loc.y, self_loc.z])\n cos, sin = np.cos(yaw), np.sin(yaw)\n rot = np.array([[cos, -sin, 0.], [sin, cos, 0.], [0., 0., 1.]])\n return np.matmul(location - self_loc, rot).astype(np.float32)\n\n\ndef _calculate_relative_velocity(self_transform, velocity):\n \"\"\"\n Args:\n self_transform (carla.Transform): transform of self actor\n velocity (np.ndarray): shape is [3] or [N, 3]\n Returns:\n np.ndarray: shape is same as location\n \"\"\"\n trans = self_transform\n yaw = math.radians(trans.rotation.yaw)\n\n cos, sin = np.cos(yaw), np.sin(yaw)\n rot = np.array([[cos, -sin, 0.], [sin, cos, 0.], [0., 0., 1.]])\n return np.matmul(velocity, rot).astype(np.float32)\n\n\ndef _to_numpy_loc(loc: carla.Location):\n return np.array([loc.x, loc.y, loc.z])\n\n\[email protected](blacklist=['actor', 'alf_world'])\nclass Player(object):\n \"\"\"Player is a vehicle with some sensors.\n\n An episode terminates if it reaches one of the following situations:\n 1. the vehicle arrives at the goal.\n 2. the time exceeds ``route_length / min_speed``.\n 3. it get stuck because of a collision.\n\n At each step, the reward is given based on the following components:\n 1. Arriving goal: ``success_reward``\n 2. Moving in the navigation direction: the number of meters moved\n This moving reward can be either dense of sparse depending on the argument\n ``sparse_reward``.\n 3. Negative reward caused by collision: ``-min(max_collision_reward, max(epside_reward, 0))``\n\n Currently, the player has the these sensors: ``CollisionSensor``, ``GnssSensor``,\n ``IMUSensor``, ``CameraSensor``, ``LaneInvasionSensor`` , ``RadarSensor``,\n ``NavigationSensor``. 
See the documentation for these class for the definition\n the data generated by these sensors.\n \"\"\"\n\n # over all reward\n REWARD_OVERALL = 0\n\n # distance in meter for moving along route\n # If using sparse reward (`sparse_reward` is True), this reward is only given\n # about every `sparse_reward_interval` meters\n # If not using sparse reward, this reward is given every steps.\n REWARD_DISTANCE = 1\n\n # 0/1 valued indicating whether there is collision\n REWARD_COLLISION = 2\n\n # 0/1 valued indicating reaching goal\n REWARD_SUCCESS = 3\n\n # dimension of the reward vector\n REWARD_DIMENSION = 4\n\n def __init__(self,\n actor,\n alf_world,\n success_reward=100.,\n success_distance_thresh=5.0,\n max_collision_penalty=100.,\n max_stuck_at_collision_seconds=5.0,\n stuck_at_collision_distance=1.0,\n sparse_reward=False,\n sparse_reward_interval=10.,\n allow_negative_distance_reward=True,\n min_speed=5.,\n with_gnss_sensor=True,\n with_imu_sensor=True,\n with_camera_sensor=True,\n with_radar_sensor=True):\n \"\"\"\n Args:\n actor (carla.Actor): the carla actor object\n alf_world (Wolrd): the world containing the player\n success_reward (float): the reward for arriving the goal location.\n success_distance_thresh (float): success is achieved if the current\n location is with such distance of the goal\n max_collision_penalty (float): the maximum penalty (i.e. negative reward)\n for collision. We don't want the collision penalty to be too large\n if the player cannot even get enough positive moving reward. So the\n panalty is capped at ``max(0., episode_reward))``. Note that this\n reward is only given once at the first step of contiguous collisions.\n max_stuck_at_collision_seconds (float): the episode will end and is\n considerred as failure if the car is stuck at the collision for\n so many seconds,\n stuck_at_collision_distance (float): the car is considerred as being\n stuck at the collision if it is within such distance of the first\n collision location.\n sparse_reward (bool): If False, the distance reward is given at every\n step based on how much it moves along the navigation route. If\n True, the distance reward is only given after moving ``sparse_reward_distance``.\n sparse_reward_interval (float): the sparse reward is given after\n approximately every such distance along the route has been driven.\n allow_negative_distance_reward (True): whether to allow negative distance\n reward. If True, the agent will receive positive reward for moving\n ahead along the route, and negative rewad for moving back along\n the route. If False, the agent still receives positive reward for\n moving ahead along the route, but will not receive negative rewad\n for moving back along the route. Instead, the negative distance\n will be accumulated to the future distance reward. This may ease\n the learning if the right behavior is to temporarily go back along\n the route in order, for examle, to avoid obstacle.\n min_speed (float): unit is m/s. 
Failure if initial_distance / min_speed\n seconds passed\n with_gnss_sensor (bool): whether to use ``GnssSensor``.\n with_imu_sensor (bool): whether to use ``IMUSensor``.\n with_camera_sensor (bool): whether to use ``CameraSensor``.\n with_radar_sensor (bool): whether to use ``RadarSensor``.\n \"\"\"\n self._actor = actor\n self._alf_world = alf_world\n self._observation_sensors = {}\n\n self._collision_sensor = CollisionSensor(actor)\n self._observation_sensors['collision'] = self._collision_sensor\n\n if with_gnss_sensor:\n self._gnss_sensor = GnssSensor(actor)\n self._observation_sensors['gnss'] = self._gnss_sensor\n else:\n self._gnss_sensor = None\n\n if with_imu_sensor:\n self._imu_sensor = IMUSensor(actor)\n self._observation_sensors['imu'] = self._imu_sensor\n else:\n self._imu_sensor = None\n\n if with_camera_sensor:\n self._camera_sensor = CameraSensor(actor)\n self._observation_sensors['camera'] = self._camera_sensor\n else:\n self._camera_sensor = None\n\n self._lane_invasion_sensor = LaneInvasionSensor(actor)\n\n if with_radar_sensor:\n self._radar_sensor = RadarSensor(actor)\n self._observation_sensors['radar'] = self._radar_sensor\n else:\n self._radar_sensor = None\n\n self._navigation = NavigationSensor(actor, alf_world)\n self._observation_sensors['navigation'] = self._navigation\n\n self._success_reward = success_reward\n self._success_distance_thresh = success_distance_thresh\n self._min_speed = min_speed\n self._delta_seconds = actor.get_world().get_settings(\n ).fixed_delta_seconds\n self._max_collision_penalty = max_collision_penalty\n self._max_stuck_at_collision_frames = max_stuck_at_collision_seconds / self._delta_seconds\n self._stuck_at_collision_distance = stuck_at_collision_distance\n self._sparse_reward = sparse_reward\n self._sparse_reward_index_interval = int(\n max(1, sparse_reward_interval // self._alf_world.route_resolution))\n self._allow_negative_distance_reward = allow_negative_distance_reward\n\n self._observation_spec = dict()\n self._observation_desc = dict()\n for sensor_name, sensor in self._observation_sensors.items():\n self._observation_spec[sensor_name] = sensor.observation_spec()\n self._observation_desc[sensor_name] = sensor.observation_desc()\n self._observation_spec['goal'] = alf.TensorSpec([3])\n self._observation_spec['velocity'] = alf.TensorSpec([3])\n\n # UE4 coordinate system is right handed:\n # https://forums.unrealengine.com/development-discussion/c-gameplay-programming/103787-ue4-coordinate-system-not-right-handed\n self._observation_desc['goal'] = (\n \"Target location relative to the vehicle coordinate system in \"\n \"meters. X axis: front, Y axis: right, Z axis: up. 
Only the \"\n \"rotation around Z axis is taken into account when calculating the \"\n \"vehicle's coordinate system.\")\n self._observation_desc['navigation'] = (\n 'Relative positions of the future waypoints in the route')\n self._observation_desc[\n 'velocity'] = \"3D Velocity relative to self coordinate in m/s\"\n self._info_spec = OrderedDict(\n success=alf.TensorSpec(()), collision=alf.TensorSpec(()))\n\n self._control = carla.VehicleControl()\n self.reset()\n\n # for rendering\n self._surface = None\n self._font = None\n self._clock = None\n\n def reset(self):\n \"\"\"Reset the player location and goal.\n\n Use ``carla.Client.apply_batch_sync()`` to actually reset.\n\n Returns:\n list[carla.command]:\n \"\"\"\n\n wp = random.choice(self._alf_world.get_waypoints())\n goal_loc = wp.transform.location\n self._goal_location = np.array([goal_loc.x, goal_loc.y, goal_loc.z],\n dtype=np.float32)\n\n forbidden_locations = []\n for v in self._alf_world.get_actors():\n if v.id == self._actor.id:\n continue\n forbidden_locations.append(\n self._alf_world.get_actor_location(v.id))\n\n # find a waypoint far enough from other vehicles\n ok = False\n i = 0\n while not ok and i < 100:\n wp = random.choice(self._alf_world.get_waypoints())\n loc = wp.transform.location\n ok = True\n for other_loc in forbidden_locations:\n if loc.distance(other_loc) < 10.:\n ok = False\n break\n i += 1\n assert ok, \"Fail to find new position\"\n # loc.z + 0.27531 to avoid Z-collision, see Carla documentation for\n # carla.Map.get_spawn_points(). The value used by carla is slightly\n # smaller: 0.27530714869499207\n loc = carla.Location(loc.x, loc.y, loc.z + 0.3)\n\n commands = [\n carla.command.ApplyTransform(\n self._actor, carla.Transform(loc, wp.transform.rotation)),\n carla.command.ApplyVelocity(self._actor, carla.Vector3D()),\n carla.command.ApplyAngularVelocity(self._actor, carla.Vector3D())\n ]\n\n self._max_frame = None\n self._done = False\n self._prev_location = loc\n self._prev_action = np.zeros(\n self.action_spec().shape, dtype=np.float32)\n self._alf_world.update_actor_location(self._actor.id, loc)\n\n self._route_length = self._navigation.set_destination(goal_loc)\n\n self._prev_collision = False # whether there is collision in the previous frame\n self._collision = False # whether there is colliion in the current frame\n self._collision_loc = None # the location of the car when it starts to have collition\n\n # The intermediate goal for sparse reward\n self._intermediate_goal_index = min(self._sparse_reward_index_interval,\n self._navigation.num_waypoints - 1)\n\n # The location of the car when the intermediate goal is set\n self._intermediate_start = _to_numpy_loc(loc)\n\n self._episode_reward = 0.\n self._unrecorded_distance_reward = 0.\n self._is_first_step = True\n\n return commands\n\n def destroy(self):\n \"\"\"Get the commands for destroying the player.\n\n Use carla.Client.apply_batch_sync() to actually destroy the sensor.\n\n Returns:\n list[carla.command]:\n \"\"\"\n commands = []\n for sensor in self._observation_sensors.values():\n commands.extend(sensor.destroy())\n commands.extend(self._lane_invasion_sensor.destroy())\n commands.append(carla.command.DestroyActor(self._actor))\n if self._surface is not None:\n import pygame\n pygame.quit()\n\n return commands\n\n def observation_spec(self):\n \"\"\"Get the observation spec.\n\n Returns:\n nested TensorSpec:\n \"\"\"\n return self._observation_spec\n\n def observation_desc(self):\n \"\"\"Get the description about the observation.\n\n 
Returns:\n nested str: each str corresponds to one TensorSpec from\n ``observatin_spec()``.\n \"\"\"\n return self._observation_desc\n\n def action_spec(self):\n \"\"\"Get the action spec.\n\n The action is a 4-D vector of [throttle, steer, brake, reverse], where\n throttle is in [-1.0, 1.0] (negative value is same as zero), steer is in\n [-1.0, 1.0], brake is in [-1.0, 1.0] (negative value is same as zero),\n and reverse is interpreted as a boolean value with values greater than\n 0.5 corrsponding to True.\n\n Returns:\n nested BoundedTensorSpec:\n \"\"\"\n return alf.BoundedTensorSpec([4],\n minimum=[-1., -1., -1., 0.],\n maximum=[1., 1., 1., 1.])\n\n def info_spec(self):\n \"\"\"Get the info spec.\"\"\"\n return self._info_spec\n\n def action_desc(self):\n \"\"\"Get the description about the action.\n\n Returns:\n nested str: each str corresponds to one TensorSpec from\n ``action_spec()``.\n \"\"\"\n return (\n \"4-D vector of [throttle, steer, brake, reverse], where \"\n \"throttle is in [-1.0, 1.0] (negative value is same as zero), \"\n \"steer is in [-1.0, 1.0], brake is in [-1.0, 1.0] (negative value \"\n \"is same as zero), and reverse is interpreted as a boolean value \"\n \"with values greater than 0.5 corrsponding to True.\")\n\n def reward_spec(self):\n \"\"\"Get the reward spec.\"\"\"\n return alf.TensorSpec([Player.REWARD_DIMENSION])\n\n def _get_goal(self):\n return _calculate_relative_position(self._actor.get_transform(),\n self._goal_location)\n\n def get_current_time_step(self, current_frame):\n \"\"\"Get the current time step for the player.\n\n Args:\n current_frame (int): current simulation frame no.\n Returns:\n TimeStep: all elements are ``np.ndarray`` or ``np.number``.\n \"\"\"\n obs = dict()\n for sensor_name, sensor in self._observation_sensors.items():\n obs[sensor_name] = sensor.get_current_observation(current_frame)\n obs['goal'] = self._get_goal()\n self._alf_world.update_actor_location(self._actor.id,\n self._actor.get_location())\n v = self._actor.get_velocity()\n obs['velocity'] = _calculate_relative_velocity(\n self._actor.get_transform(), _to_numpy_loc(v))\n self._current_distance = np.linalg.norm(obs['goal'])\n\n prev_loc = _to_numpy_loc(self._prev_location)\n curr_loc = _to_numpy_loc(self._actor.get_location())\n\n reward_vector = np.zeros(Player.REWARD_DIMENSION, np.float32)\n reward = 0.\n discount = 1.0\n info = OrderedDict(success=np.float32(0.0), collision=np.float32(0.0))\n\n # When the previous episode ends because of stucking at a collision with\n # another vehicle, it may get an additional collision event in the new frame\n # because the relocation of the car may happen after the simulation of the\n # moving. So we ignore the collision at the first step.\n self._collision = not np.all(\n obs['collision'] == 0) and not self._is_first_step\n if self._collision and not self._prev_collision:\n # We only report the first collision event among contiguous collision\n # events.\n info['collision'] = np.float32(1.0)\n logging.info(\"actor=%d frame=%d COLLISION\" % (self._actor.id,\n current_frame))\n self._collision_loc = curr_loc\n self._collision_frame = current_frame\n # We don't want the collision penalty to be too large if the player\n # cannot even get enough positive moving reward. 
So we cap the penalty\n # at ``max(0., self._episode_reward)``\n reward -= min(self._max_collision_penalty,\n max(0., self._episode_reward))\n reward_vector[Player.REWARD_COLLISION] = 1.\n\n if self._max_frame is None:\n step_type = ds.StepType.FIRST\n max_frames = math.ceil(\n self._route_length / self._min_speed / self._delta_seconds)\n self._max_frame = current_frame + max_frames\n elif (self._current_distance < self._success_distance_thresh\n and self._actor.get_velocity() == carla.Location(0., 0., 0.)):\n # TODO: include waypoint orientation as success critiria\n step_type = ds.StepType.LAST\n reward += self._success_reward\n reward_vector[Player.REWARD_SUCCESS] = 1.\n discount = 0.0\n info['success'] = np.float32(1.0)\n logging.info(\n \"actor=%d frame=%d SUCCESS\" % (self._actor.id, current_frame))\n elif current_frame >= self._max_frame:\n logging.info(\"actor=%d frame=%d FAILURE: out of time\" %\n (self._actor.id, current_frame))\n step_type = ds.StepType.LAST\n elif (self._collision_loc is not None\n and current_frame - self._collision_frame >\n self._max_stuck_at_collision_frames\n and np.linalg.norm(curr_loc - self._collision_loc) <\n self._stuck_at_collision_distance):\n logging.info(\"actor=%d frame=%d FAILURE: stuck at collision\" %\n (self._actor.id, current_frame))\n step_type = ds.StepType.LAST\n else:\n step_type = ds.StepType.MID\n\n if self._sparse_reward:\n current_index = self._navigation.get_next_waypoint_index()\n if step_type == ds.StepType.LAST and info['success'] == 1.0:\n # Since the episode is finished, we need to incorporate the final\n # progress towards the goal as reward to encourage stopping near the goal.\n distance_reward = (\n np.linalg.norm(self._intermediate_start -\n self._goal_location) -\n np.linalg.norm(curr_loc - self._goal_location))\n elif self._intermediate_goal_index < current_index:\n # This means that the car has passed the intermediate goal.\n # And we give it a reward which is equal to the distance it\n # travels.\n intermediate_goal = self._navigation.get_waypoint(\n self._intermediate_goal_index)\n distance_reward = np.linalg.norm(intermediate_goal -\n self._intermediate_start)\n self._intermediate_start = intermediate_goal\n self._intermediate_goal_index = min(\n self._intermediate_goal_index +\n self._sparse_reward_index_interval,\n self._navigation.num_waypoints - 1)\n else:\n goal0 = obs['navigation'][2] # This is about 10m ahead\n distance_reward = (np.linalg.norm(prev_loc - goal0) -\n np.linalg.norm(curr_loc - goal0))\n\n reward_vector[Player.REWARD_DISTANCE] = distance_reward\n if not self._allow_negative_distance_reward:\n distance_reward += self._unrecorded_distance_reward\n if distance_reward < 0:\n self._unrecorded_distance_reward = distance_reward\n distance_reward = 0\n else:\n self._unrecorded_distance_reward = 0\n reward += distance_reward\n\n obs['navigation'] = _calculate_relative_position(\n self._actor.get_transform(), obs['navigation'])\n\n self._done = step_type == ds.StepType.LAST\n self._episode_reward += reward\n\n reward_vector[Player.REWARD_OVERALL] = reward\n\n self._current_time_step = ds.TimeStep(\n step_type=step_type,\n reward=reward_vector,\n discount=np.float32(discount),\n observation=obs,\n prev_action=self._prev_action,\n env_info=info)\n return self._current_time_step\n\n def act(self, action):\n \"\"\"Generate the carla command for taking the given action.\n\n Use ``carla.Client.apply_batch_sync()`` to actually destroy the sensor.\n\n Args:\n action (nested np.ndarray):\n Returns:\n 
list[carla.command]:\n \"\"\"\n self._prev_collision = self._collision\n self._prev_location = self._actor.get_location()\n self._is_first_step = False\n if self._done:\n return self.reset()\n self._control.throttle = max(float(action[0]), 0.0)\n self._control.steer = float(action[1])\n self._control.brake = max(float(action[2]), 0.0)\n self._control.reverse = bool(action[3] > 0.5)\n self._prev_action = action\n\n return [carla.command.ApplyVehicleControl(self._actor, self._control)]\n\n def render(self, mode):\n \"\"\"Render the simulation.\n\n Args:\n mode (str): one of ['rgb_array', 'human']\n Returns:\n one of the following:\n - None: if mode is 'human'\n - np.ndarray: the image of shape [height, width, channeles] if\n mode is 'rgb_array'\n \"\"\"\n import pygame\n if self._surface is None:\n pygame.init()\n pygame.font.init()\n self._clock = pygame.time.Clock()\n if self._camera_sensor:\n height, width = self._camera_sensor.observation_spec(\n ).shape[1:3]\n height = max(height, MINIMUM_RENDER_HEIGHT)\n width = max(width, MINIMUM_RENDER_WIDTH)\n else:\n height = MINIMUM_RENDER_HEIGHT\n width = MINIMUM_RENDER_WIDTH\n if mode == 'human':\n self._surface = pygame.display.set_mode(\n (width, height), pygame.HWSURFACE | pygame.DOUBLEBUF)\n else:\n self._surface = pygame.Surface((width, height))\n\n if mode == 'human':\n self._clock.tick_busy_loop(1000)\n\n if self._camera_sensor:\n self._camera_sensor.render(self._surface)\n obs = self._current_time_step.observation\n np_precision = np.get_printoptions()['precision']\n np.set_printoptions(precision=1)\n info_text = [\n 'FPS: %6.2f' % self._clock.get_fps(),\n 'GPS: (%7.4f, %8.4f, %5.2f)' % tuple(obs['gnss'].tolist()),\n 'Goal: (%7.1f, %8.1f, %5.1f)' % tuple(obs['goal'].tolist()),\n 'Ahead: (%7.1f, %8.1f, %5.1f)' % tuple(\n obs['navigation'][2].tolist()),\n 'Distance: %7.2f' % np.linalg.norm(obs['goal']),\n 'Velocity: (%4.1f, %4.1f, %4.1f) km/h' % tuple(\n (3.6 * obs['velocity']).tolist()),\n 'Acceleration: (%4.1f, %4.1f, %4.1f)' % tuple(\n obs['imu'][0:3].tolist()),\n 'Compass: %5.1f' % math.degrees(float(obs['imu'][6])),\n 'Throttle: %4.2f' % self._control.throttle,\n 'Brake: %4.2f' % self._control.brake,\n 'Steer: %4.2f' % self._control.steer,\n 'Reverse: %4s' % self._control.reverse,\n 'Reward: (%s)' % self._current_time_step.reward,\n ]\n np.set_printoptions(precision=np_precision)\n self._draw_text(info_text)\n\n if mode == 'human':\n pygame.display.flip()\n elif mode == 'rgb_array':\n # (x, y, c) => (y, x, c)\n return np.transpose(\n pygame.surfarray.array3d(self._surface), (1, 0, 2))\n else:\n raise ValueError(\"Unsupported render mode: %s\" % mode)\n\n def _draw_text(self, texts):\n import os\n import pygame\n if self._font is None:\n font_name = 'courier' if os.name == 'nt' else 'mono'\n fonts = [x for x in pygame.font.get_fonts() if font_name in x]\n default_font = 'ubuntumono'\n mono = default_font if default_font in fonts else fonts[0]\n mono = pygame.font.match_font(mono)\n self._font = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)\n info_surface = pygame.Surface((240, 240))\n info_surface.set_alpha(100)\n self._surface.blit(info_surface, (0, 0))\n v_offset = 4\n for item in texts:\n surface = self._font.render(item, True, (255, 255, 255))\n self._surface.blit(surface, (8, v_offset))\n v_offset += 18\n\n\ndef _exec(command):\n stream = os.popen(command)\n ret = stream.read()\n stream.close()\n return ret\n\n\ngin.constant('CarlaEnvironment.REWARD_DIMENSION', Player.REWARD_DIMENSION)\n\n\[email protected]\nclass 
CarlaServer(object):\n \"\"\"CarlaServer for doing the simulation.\"\"\"\n\n def __init__(self,\n rpc_port=2000,\n streaming_port=2001,\n docker_image=\"horizonrobotics/alf:0.0.3-carla\",\n quality_level=\"Low\",\n carla_root=\"/home/carla\",\n use_opengl=True):\n \"\"\"\n\n Args:\n rpc_port (int): port for RPC\n streaming_port (int): port for data streaming\n docker_image (str): If provided, will use the docker image to start\n the Carla server. Some valid images are \"carlasim/carla:0.9.9\"\n and \"horionrobotics/alf:0.0.3-carla\"\n quality_level (str): one of ['Low', 'Epic']. See the explanation at\n `<https://carla.readthedocs.io/en/latest/adv_rendering_options/#graphics-quality>`_\n carla_root (str): directorcy where CarlaUE4.sh is in. The default\n value is correct for using docker image. If not using docker\n image, make sure you provide the correct path. This is the directory\n where you unzipped the file you downloaded from\n `<https://github.com/carla-simulator/carla/releases/tag/0.9.9>`_.\n use_opengl (bool): the default graphics engine of Carla is Vulkan,\n which is supposed to be better than OpenGL. However, Vulkan is not\n always available. It may not be installed or the nvidia driver does\n not support vulkan.\n \"\"\"\n assert quality_level in ['Low', 'Epic'], \"Unknown quality level\"\n use_docker = (not alf.utils.common.is_inside_docker_container()\n and docker_image)\n opengl = \"-opengl\" if use_opengl else \"\"\n if use_docker:\n dev = os.environ.get('CUDA_VISIBLE_DEVICES')\n if not dev:\n dev = 'all'\n command = (\"docker run -d \"\n \"-p {rpc_port}:{rpc_port} \"\n \"-p {streaming_port}:{streaming_port} \"\n \"-u carla \"\n \"--rm --gpus device=\" + dev + \" \" + docker_image +\n \" {carla_root}/CarlaUE4.sh \"\n \"--carla-rpc-port={rpc_port} \"\n \"--carla-streaming-port={streaming_port} \"\n \"--quality-level={quality_level} {opengl}\")\n else:\n assert os.path.exists(carla_root + \"/CarlaUE4.sh\"), (\n \"%s/CarlaUE4.sh \"\n \"does not exist. 
Please provide correct value for `carla_root`\"\n % carla_root)\n # We do not use CarlaUE4.sh here in order to get the actual Carla\n # server processs so that we can kill it.\n command = (\n \"{carla_root}/CarlaUE4/Binaries/Linux/CarlaUE4-Linux-Shipping \"\n \"CarlaUE4 \" # perhaps most system does not have vulkan support, so we use opengl\n \"-carla-rpc-port={rpc_port} \"\n \"-carla-streaming-port={streaming_port} \"\n \"-quality-level={quality_level} {opengl}\")\n\n command = command.format(\n rpc_port=rpc_port,\n streaming_port=streaming_port,\n quality_level=quality_level,\n carla_root=carla_root,\n opengl=opengl)\n\n logging.info(\"Starting Carla server: %s\" % command)\n self._container_id = None\n self._process = None\n if use_docker:\n self._container_id = _exec(command)\n assert self._container_id, \"Fail to start container\"\n logging.info(\"Starting carla in container %s\" % self._container_id)\n else:\n new_env = os.environ.copy()\n new_env['SDL_VIDEODRIVER'] = 'offscreen'\n self._process = subprocess.Popen(\n command.split(),\n stdout=sys.stdout,\n stderr=sys.stderr,\n env=new_env)\n\n def stop(self):\n \"\"\"Stop the carla server.\"\"\"\n if self._container_id:\n command = \"docker kill %s\" % self._container_id\n logging.info(\"Stopping Carla server: %s\" % command)\n _exec(command)\n self._container_id = None\n if self._process:\n self._process.kill()\n self._process.communicate()\n self._process = None\n\n def __del__(self):\n self.stop()\n\n\[email protected]\nclass CarlaEnvironment(AlfEnvironment):\n \"\"\"Carla simulation environment.\n\n In order to use it, you need to either download a valid docker image or\n a Carla package.\n \"\"\"\n\n def __init__(self,\n batch_size,\n map_name,\n vehicle_filter='vehicle.*',\n walker_filter='walker.pedestrian.*',\n num_other_vehicles=0,\n num_walkers=0,\n percentage_walkers_running=0.1,\n percentage_walkers_crossing=0.1,\n global_distance_to_leading_vehicle=2.0,\n use_hybrid_physics_mode=True,\n safe=True,\n step_time=0.05):\n \"\"\"\n Args:\n batch_size (int): the number of learning vehicles.\n map_name (str): the name of the map (e.g. \"Town01\")\n vehicle_filter (str): the filter for getting vehicle blueprints.\n walker_filter (str): the filter for getting walker blueprints.\n num_other_vehicles (int): the number of autopilot vehicles\n num_walkers (int): the number of walkers\n global_distance_to_leading_vehicle (str): the autopiloted vehicles\n will try to keep such distance from other vehicles.\n percentage_walkers_running (float): percent of running walkers\n percentage_walkers_crossing (float): percent of walkers walking\n across the road.\n use_hybrid_physics_mode (bool): If true, the autopiloted vehicle will\n not use physics for simulation if it is far from other vehicles.\n safe (bool): avoid spawning vehicles prone to accidents.\n step_time (float): how many seconds does each step of simulation represents.\n \"\"\"\n super().__init__()\n\n with _get_unused_port(2000, n=2) as (rpc_port, streaming_port):\n self._server = CarlaServer(rpc_port, streaming_port)\n\n self._batch_size = batch_size\n self._num_other_vehicles = num_other_vehicles\n self._num_walkers = num_walkers\n self._percentage_walkers_running = percentage_walkers_running\n self._percentage_walkers_crossing = percentage_walkers_crossing\n\n self._world = None\n try:\n for i in range(20):\n try:\n logging.info(\n \"Waiting for server to start. 
Try %d\" % (i + 1))\n self._client = carla.Client(\"localhost\", rpc_port)\n self._world = self._client.load_world(map_name)\n break\n except RuntimeError:\n continue\n finally:\n if self._world is None:\n self._server.stop()\n assert self._world is not None, \"Fail to start server.\"\n\n logging.info(\"Server started.\")\n\n self._traffic_manager = None\n if self._num_other_vehicles + self._num_walkers > 0:\n with _get_unused_port(8000, n=1) as tm_port:\n self._traffic_manager = self._client.get_trafficmanager(\n tm_port)\n self._traffic_manager.set_hybrid_physics_mode(\n use_hybrid_physics_mode)\n self._traffic_manager.set_global_distance_to_leading_vehicle(\n global_distance_to_leading_vehicle)\n\n self._client.set_timeout(20)\n self._alf_world = World(self._world)\n self._safe = safe\n self._vehicle_filter = vehicle_filter\n self._walker_filter = walker_filter\n\n settings = self._world.get_settings()\n settings.synchronous_mode = True\n settings.fixed_delta_seconds = step_time\n\n self._world.apply_settings(settings)\n self._map_name = map_name\n\n self._spawn_vehicles()\n self._spawn_walkers()\n\n self._observation_spec = self._players[0].observation_spec()\n self._action_spec = self._players[0].action_spec()\n self._env_info_spec = self._players[0].info_spec()\n self._reward_spec = self._players[0].reward_spec()\n\n # metadata property is required by video recording\n self.metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 1 / step_time\n }\n\n def _spawn_vehicles(self):\n blueprints = self._world.get_blueprint_library().filter(\n self._vehicle_filter)\n assert len(\n blueprints) > 0, \"Cannot find vehicle '%s'\" % self._vehicle_filter\n if self._safe:\n blueprints = [\n x for x in blueprints\n if int(x.get_attribute('number_of_wheels')) == 4\n ]\n blueprints = [x for x in blueprints if not x.id.endswith('isetta')]\n blueprints = [\n x for x in blueprints if not x.id.endswith('carlacola')\n ]\n blueprints = [\n x for x in blueprints if not x.id.endswith('cybertruck')\n ]\n blueprints = [x for x in blueprints if not x.id.endswith('t2')]\n assert len(\n blueprints\n ) > 0, \"Cannot find safe vehicle '%s'\" % self._vehicle_filter\n\n spawn_points = self._world.get_map().get_spawn_points()\n number_of_spawn_points = len(spawn_points)\n\n num_vehicles = self._batch_size + self._num_other_vehicles\n if num_vehicles <= number_of_spawn_points:\n random.shuffle(spawn_points)\n else:\n raise ValueError(\n \"requested %d vehicles, but could only find %d spawn points\" %\n (self._batch_size, number_of_spawn_points))\n\n commands = []\n for i, transform in enumerate(spawn_points[:num_vehicles]):\n blueprint = random.choice(blueprints)\n if blueprint.has_attribute('color'):\n color = random.choice(\n blueprint.get_attribute('color').recommended_values)\n blueprint.set_attribute('color', color)\n if blueprint.has_attribute('driver_id'):\n driver_id = random.choice(\n blueprint.get_attribute('driver_id').recommended_values)\n blueprint.set_attribute('driver_id', driver_id)\n if i < self._batch_size:\n blueprint.set_attribute('role_name', 'hero')\n else:\n blueprint.set_attribute('role_name', 'autopilot')\n command = carla.command.SpawnActor(blueprint, transform)\n if i >= self._batch_size:\n # managed by traffic manager\n command = command.then(\n carla.command.SetAutopilot(\n carla.command.FutureActor, True,\n self._traffic_manager.get_port()))\n commands.append(command)\n\n self._players = []\n self._other_vehicles = []\n responses = 
self._client.apply_batch_sync(commands, True)\n for i, response in enumerate(responses):\n if response.error:\n logging.error(response.error)\n continue\n vehicle = self._world.get_actor(response.actor_id)\n if i < self._batch_size:\n self._players.append(Player(vehicle, self._alf_world))\n else:\n self._other_vehicles.append(vehicle)\n self._alf_world.add_actor(vehicle)\n self._alf_world.update_actor_location(vehicle.id,\n spawn_points[i].location)\n\n assert len(self._players) + len(\n self._other_vehicles) == num_vehicles, (\n \"Fail to create %s vehicles\" % num_vehicles)\n\n def _spawn_walkers(self):\n walker_blueprints = self._world.get_blueprint_library().filter(\n self._walker_filter)\n\n # 1. take all the random locations to spawn\n spawn_points = []\n for _ in range(self._num_walkers):\n spawn_point = carla.Transform()\n loc = self._world.get_random_location_from_navigation()\n if loc != None:\n spawn_point.location = loc\n spawn_points.append(spawn_point)\n\n # 2. we spawn the walker object\n commands = []\n walker_speeds = []\n for spawn_point in spawn_points:\n walker_bp = random.choice(walker_blueprints)\n # set as not invincible\n if walker_bp.has_attribute('is_invincible'):\n walker_bp.set_attribute('is_invincible', 'false')\n # set the max speed\n if walker_bp.has_attribute('speed'):\n if (random.random() > self._percentage_walkers_running):\n # walking\n walker_speeds.append(\n walker_bp.get_attribute('speed').recommended_values[1])\n else:\n # running\n walker_speeds.append(\n walker_bp.get_attribute('speed').recommended_values[2])\n else:\n logging.info(\"Walker has no speed\")\n walker_speeds.append(0.0)\n commands.append(carla.command.SpawnActor(walker_bp, spawn_point))\n responses = self._client.apply_batch_sync(commands, True)\n walker_speeds2 = []\n self._walkers = []\n for response, walker_speed, spawn_point in zip(\n responses, walker_speeds, spawn_points):\n if response.error:\n logging.error(\n \"%s: %s\" % (response.error, spawn_point.location))\n continue\n walker = self._world.get_actor(response.actor_id)\n self._walkers.append({\"walker\": walker})\n walker_speeds2.append(walker_speed)\n walker_speeds = walker_speeds2\n\n # 3. we spawn the walker controller\n commands = []\n walker_controller_bp = self._world.get_blueprint_library().find(\n 'controller.ai.walker')\n for walker in self._walkers:\n commands.append(\n carla.command.SpawnActor(walker_controller_bp,\n carla.Transform(),\n walker[\"walker\"].id))\n responses = self._client.apply_batch_sync(commands, True)\n for response, walker in zip(responses, self._walkers):\n if response.error:\n logging.error(response.error)\n continue\n walker[\"controller\"] = self._world.get_actor(response.actor_id)\n\n # wait for a tick to ensure client receives the last transform of the walkers we have just created\n self._world.tick()\n\n # 5. 
initialize each controller and set target to walk to (list is [controler, actor, controller, actor ...])\n # set how many pedestrians can cross the road\n self._world.set_pedestrians_cross_factor(\n self._percentage_walkers_crossing)\n for walker, walker_speed in zip(self._walkers, walker_speeds):\n # start walker\n walker['controller'].start()\n # set walk to random point\n location = self._world.get_random_location_from_navigation()\n walker['controller'].go_to_location(location)\n # max speed\n walker['controller'].set_max_speed(float(walker_speed))\n self._alf_world.add_actor(walker['walker'])\n self._alf_world.update_actor_location(walker['walker'].id,\n location)\n\n def _clear(self):\n if self._world is None:\n return\n if self._players:\n commands = []\n for player in self._players:\n commands.extend(player.destroy())\n for response in self._client.apply_batch_sync(commands, True):\n if response.error:\n logging.error(response.error)\n self._players.clear()\n commands = []\n for vehicle in self._other_vehicles:\n commands.append(carla.command.DestroyActor(vehicle))\n for walker in self._walkers:\n walker['controller'].stop()\n commands.append(carla.command.DestroyActor(walker['controller']))\n commands.append(carla.command.DestroyActor(walker['walker']))\n\n if commands:\n for response in self._client.apply_batch_sync(commands, True):\n if response.error:\n logging.error(response.error)\n self._other_vehicles.clear()\n self._walkers.clear()\n\n @property\n def batched(self):\n return True\n\n @property\n def batch_size(self):\n return self._batch_size\n\n def env_info_spec(self):\n return self._env_info_spec\n\n def observation_spec(self):\n return self._observation_spec\n\n def observation_desc(self):\n return self._players[0].observation_desc()\n\n def action_spec(self):\n return self._action_spec\n\n def action_desc(self):\n return self._players[0].action_desc()\n\n def reward_spec(self):\n return self._reward_spec\n\n def close(self):\n self._clear()\n self._server.stop()\n\n def __del__(self):\n self.close()\n\n @property\n def players(self):\n \"\"\"Get all the players in the environment.\n\n Returns:\n list[Player]:\n \"\"\"\n return self._players\n\n def render(self, mode):\n return self._players[0].render(mode)\n\n def _step(self, action):\n action = alf.nest.map_structure(lambda x: x.cpu().numpy(), action)\n commands = []\n for player, act in zip(self._players, action):\n commands.extend(player.act(act))\n for response in self._client.apply_batch_sync(commands):\n if response.error:\n logging.error(response.error)\n self._current_frame = self._world.tick()\n for vehicle in self._other_vehicles:\n self._alf_world.update_actor_location(vehicle.id,\n vehicle.get_location())\n for walker in self._walkers:\n actor = walker['walker']\n self._alf_world.update_actor_location(actor.id,\n actor.get_location())\n\n return self._get_current_time_step()\n\n def _get_current_time_step(self):\n time_step = [\n player.get_current_time_step(self._current_frame)\n for player in self._players\n ]\n time_step = alf.nest.map_structure(lambda *a: np.stack(a), *time_step)\n time_step = alf.nest.map_structure(torch.as_tensor, time_step)\n\n common.check_numerics(time_step)\n\n return time_step._replace(env_id=torch.arange(self._batch_size))\n\n def _reset(self):\n commands = []\n for player in self._players:\n commands.extend(player.reset())\n for response in self._client.apply_batch_sync(commands):\n if response.error:\n logging.error(response.error)\n self._current_frame = self._world.tick()\n 
return self._get_current_time_step()\n\n\[email protected](whitelist=['wrappers'])\ndef load(map_name, batch_size, wrappers=[]):\n \"\"\"Load CarlaEnvironment\n\n Args:\n map_name (str): name of the map. Currently available maps are:\n 'Town01, Town02', 'Town03', 'Town04', 'Town05', 'Town06', 'Town07',\n and 'Town10HD'\n batch_size (int): the number of vehicles in the simulation.\n wrappers (list[AlfEnvironmentBaseWrapper]): environment wrappers\n Returns:\n CarlaEnvironment\n \"\"\"\n env = CarlaEnvironment(batch_size, map_name)\n for wrapper in wrappers:\n env = wrapper(env)\n return env\n\n\nload.batched = True\n",
"# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import deque\nimport itertools\nfrom copy import copy\nimport numpy as np\nfrom PIL import Image\nimport gym\nfrom gym import spaces\n\n# See https://github.com/openai/large-scale-curiosity/blob/ \\\n# 0c3d179fd61ee46233199d0891c40fbe7964d3aa/wrappers.py#L155-L238\n\n\nclass MarioXReward(gym.Wrapper):\n \"\"\"\n Wrap mario environment and use X-axis coordinate increment as reward\n\n ```\n max_x = 0 if initial or upgrade_to_new_level\n current_x = xscrollHi * 256 + xscrollLo\n reward = current_x - max_x if current_x > max_x else 0\n max_x = current_x if current_x > max_x else max_x\n ```\n \"\"\"\n\n def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.current_level = [0, 0]\n self.visited_levels = set()\n self.visited_levels.add(tuple(self.current_level))\n self.current_max_x = 0.\n\n def reset(self):\n ob = self.env.reset()\n self.current_level = [0, 0]\n self.visited_levels = set()\n self.visited_levels.add(tuple(self.current_level))\n self.current_max_x = 0.\n return ob\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n levellow, levelhigh, xscrollHi, xscrollLo = \\\n info[\"levelLo\"], info[\"levelHi\"], \\\n info[\"xscrollHi\"], info[\"xscrollLo\"]\n currentx = xscrollHi * 256 + xscrollLo\n new_level = [levellow, levelhigh]\n if new_level != self.current_level:\n self.current_level = new_level\n self.current_max_x = 0.\n reward = 0.\n self.visited_levels.add(tuple(self.current_level))\n else:\n if currentx > self.current_max_x:\n delta = currentx - self.current_max_x\n self.current_max_x = currentx\n reward = delta\n else:\n reward = 0.\n if done:\n info[\"levels\"] = copy(self.visited_levels)\n info[\"retro_episode\"] = dict(levels=copy(self.visited_levels))\n\n return ob, reward, done, info\n\n\nclass LimitedDiscreteActions(gym.ActionWrapper):\n \"\"\"\n Wrap mario environment and make it use discrete actions.\n Map available button combinations to discrete actions\n eg:\n 0 -> None\n 1 -> UP\n 2 -> DOWN\n ...\n k -> A\n ...\n m -> A + LEFT\n ...\n n -> B + UP\n ...\n \"\"\"\n\n BUTTONS = {\"A\", \"B\"}\n SHOULDERS = {\"L\", \"R\"}\n\n def __init__(self, env, all_buttons):\n gym.ActionWrapper.__init__(self, env)\n # 'B', None, 'SELECT', 'START', 'UP', 'DOWN', 'LEFT', 'RIGHT', 'A'\n self._num_buttons = len(all_buttons)\n button_keys = {\n i\n for i, b in enumerate(all_buttons) if b in self.BUTTONS\n }\n buttons = [(), *zip(button_keys),\n *itertools.combinations(button_keys, 2)]\n # 'UP', 'DOWN', 'LEFT', 'RIGHT'\n arrows = [(), (4, ), (5, ), (6, ), (7, )]\n acts = []\n acts += arrows\n acts += buttons[1:]\n acts += [a + b for a in arrows[-2:] for b in buttons[1:]]\n self._actions = acts\n self.action_space = gym.spaces.Discrete(len(self._actions))\n\n def action(self, a):\n mask = np.zeros(self._num_buttons)\n for i in self._actions[a]:\n mask[i] = 1\n return mask\n\n\nclass ProcessFrame84(gym.ObservationWrapper):\n 
\"\"\"\n Resize frame from original resolution to 84x84 or\n resize to 84x110 and then crop to 84x84\n \"\"\"\n\n def __init__(self, env, crop=True):\n self.crop = crop\n super(ProcessFrame84, self).__init__(env)\n self.observation_space = gym.spaces.Box(\n low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)\n\n def observation(self, obs):\n return ProcessFrame84.process(obs, crop=self.crop)\n\n @staticmethod\n def process(frame, crop=True):\n if frame.size == 210 * 160 * 3:\n img = np.reshape(frame, [210, 160, 3]).astype(np.float32)\n elif frame.size == 250 * 160 * 3:\n img = np.reshape(frame, [250, 160, 3]).astype(np.float32)\n elif frame.size == 224 * 240 * 3: # mario resolution\n img = np.reshape(frame, [224, 240, 3]).astype(np.float32)\n else:\n assert False, \"Unknown resolution.\" + str(frame.size)\n img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114\n size = (84, 110 if crop else 84)\n resized_screen = np.array(\n Image.fromarray(img).resize(size, resample=Image.BILINEAR),\n dtype=np.uint8)\n x_t = resized_screen[18:102, :] if crop else resized_screen\n x_t = np.reshape(x_t, [84, 84, 1])\n return x_t.astype(np.uint8)\n\n\nclass FrameFormat(gym.Wrapper):\n \"\"\"\n Format frame to specified data_format\n\n Args:\n data_format: Data format for frame\n `channels_first` for CHW and `channels_last` for HWC\n \"\"\"\n\n def __init__(self, env, data_format='channels_last'):\n gym.Wrapper.__init__(self, env)\n data_format = data_format.lower()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('The `data_format` argument must be one of '\n '\"channels_first\", \"channels_last\". Received: ' +\n str(data_format))\n self._transpose = False\n obs_shape = env.observation_space.shape\n if data_format == 'channels_first':\n self._transpose = True\n obs_shape = (obs_shape[-1], ) + (obs_shape[:-1])\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=obs_shape,\n dtype=env.observation_space.dtype)\n\n def reset(self):\n ob = self.env.reset()\n return self._get_ob(ob)\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n ob = self._get_ob(ob)\n return ob, reward, done, info\n\n def _get_ob(self, ob):\n import numpy as np\n if self._transpose:\n return np.transpose(ob, (2, 0, 1))\n return ob\n",
"# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Adapted from the following:\n\nhttps://github.com/neale/HyperGAN/blob/master/datagen.py\n\"\"\"\n\nimport torch\nimport torchvision\nfrom torchvision import datasets, transforms\n\n\nclass TestDataSet(torch.utils.data.Dataset):\n def __init__(self, input_dim=3, output_dim=1, size=1000, weight=None):\n self._features = torch.randn(size, input_dim)\n if weight is None:\n self._weight = torch.rand(input_dim, output_dim) + 5.\n else:\n self._weight = weight\n noise = torch.randn(size, output_dim)\n self._values = self._features @ self._weight + noise\n\n def __getitem__(self, index):\n return self._features[index], self._values[index]\n\n def __len__(self):\n return len(self._features)\n\n\ndef load_test(train_bs=50, test_bs=10, num_workers=0):\n input_dim = 3\n output_dim = 1\n weight = torch.rand(input_dim, output_dim) + 5.\n trainset = TestDataSet(\n input_dim=input_dim, output_dim=output_dim, size=1000, weight=weight)\n testset = TestDataSet(\n input_dim=input_dim, output_dim=output_dim, size=500, weight=weight)\n\n train_loader = torch.utils.data.DataLoader(\n trainset, batch_size=train_bs, shuffle=True, num_workers=num_workers)\n\n test_loader = torch.utils.data.DataLoader(\n trainset, batch_size=test_bs, shuffle=True, num_workers=num_workers)\n\n return train_loader, test_loader\n\n\ndef load_mnist(train_bs=100, test_bs=100, num_workers=0):\n kwargs = {\n 'num_workers': num_workers,\n 'pin_memory': False,\n 'drop_last': False\n }\n path = 'data_m/'\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n path,\n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ])),\n batch_size=train_bs,\n shuffle=True,\n **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n path,\n train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ])),\n batch_size=test_bs,\n shuffle=False,\n **kwargs)\n return train_loader, test_loader\n\n\ndef load_notmnist(train_bs=100, test_bs=100, num_workers=0):\n kwargs = {\n 'num_workers': num_workers,\n 'pin_memory': False,\n 'drop_last': False\n }\n path = 'data_nm/'\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n path,\n train=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ])),\n batch_size=train_bs,\n shuffle=True,\n **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n path,\n train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ])),\n batch_size=test_bs,\n shuffle=False,\n **kwargs)\n return train_loader, test_loader\n\n\ndef load_cifar(train_bs=32, test_bs=100):\n path = 'data_c/'\n kwargs = {'num_workers': 1, 'pin_memory': False, 'drop_last': True}\n transform_train = transforms.Compose([\n 
transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n ])\n trainset = torchvision.datasets.CIFAR10(\n root=path, train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=train_bs, shuffle=True, **kwargs)\n testset = torchvision.datasets.CIFAR10(\n root=path, train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=test_bs, shuffle=False, **kwargs)\n return trainloader, testloader\n\n\ndef load_cifar_hidden(train_bs=32, test_bs=100, c_idx=[0, 1, 2, 3, 4]):\n path = './data_c'\n kwargs = {'num_workers': 2, 'pin_memory': False, 'drop_last': True}\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n ])\n\n def get_classes(target, labels):\n label_indices = []\n for i in range(len(target)):\n if target[i][1] in labels:\n label_indices.append(i)\n return label_indices\n\n trainset = torchvision.datasets.CIFAR10(\n root=path, train=True, download=False, transform=transform_train)\n train_hidden = torch.utils.data.Subset(trainset,\n get_classes(trainset, c_idx))\n trainloader = torch.utils.data.DataLoader(\n train_hidden, batch_size=train_bs, shuffle=True, **kwargs)\n\n testset = torchvision.datasets.CIFAR10(\n root=path, train=False, download=False, transform=transform_test)\n test_hidden = torch.utils.data.Subset(testset, get_classes(testset, c_idx))\n testloader = torch.utils.data.DataLoader(\n test_hidden, batch_size=test_bs, shuffle=False, **kwargs)\n return trainloader, testloader\n",
"# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Conditional operations.\"\"\"\n\nimport torch\n\nimport alf\nimport alf.utils.common as common\n\n\ndef _gather_nest(nest, indices):\n return alf.nest.map_structure(lambda t: t[indices], nest)\n\n\ndef select_from_mask(data, mask):\n \"\"\"Select the items from data based on mask.\n\n data[i,...] will be selected to form a new tensor if mask[i] is True or\n non-zero\n\n Args:\n data (nested Tensor): source tensor\n mask (Tensor): 1D Tensor mask.shape[0] should be same as data.shape[0]\n Returns:\n nested Tensor with the same structure as data\n \"\"\"\n gather_indices = torch.where(mask)[0]\n return _gather_nest(data, gather_indices)\n\n\ndef conditional_update(target, cond, func, *args, **kwargs):\n \"\"\"Update target according to cond mask\n\n Compute result as an update of `target` based on `cond`. To be specific,\n result[row] is func(*args[row], **kwargs[row]) if cond[row] is True,\n otherwise result[row] will be target[row]. Note that target will not be\n changed.\n\n If you simply want to do some conditional computation without actually\n returning any results. You can use conditional_update in the following way:\n ```\n # func needs to return an empty tuple ()\n conditional_update((), cond, func, *args, **kwargs)\n ```\n\n Args:\n target (nested Tensor): target to be updated\n func (Callable): a function with arguments (*args, **kwargs) and returning\n a nest with same structure as target\n cond (Tensor): 1d bool Tensor with shape[0] == target.shape[0]\n Returns:\n nest with the same structure and shape as target.\n \"\"\"\n # the return of torch.where() is a tuple (indices, )\n gather_indices = torch.where(cond)[0]\n\n def _update_subset():\n selected_args = _gather_nest(args, gather_indices)\n selected_kwargs = _gather_nest(kwargs, gather_indices)\n updates = func(*selected_args, **selected_kwargs)\n\n def _update(tgt, updt):\n scatter_indices = common.expand_dims_as(gather_indices, updt)\n scatter_indices = scatter_indices.expand_as(updt)\n return tgt.scatter(0, scatter_indices, updt)\n\n return alf.nest.map_structure(_update, target, updates)\n\n total = cond.shape[0]\n n = gather_indices.shape[0]\n if n == 0:\n return target\n elif n == total:\n return func(*args, **kwargs)\n else:\n return _update_subset()\n",
"# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\n_devece_ddtype_tensor_map = {\n 'cpu': {\n torch.float32: torch.FloatTensor,\n torch.float64: torch.DoubleTensor,\n torch.float16: torch.HalfTensor,\n torch.uint8: torch.ByteTensor,\n torch.int8: torch.CharTensor,\n torch.int16: torch.ShortTensor,\n torch.int32: torch.IntTensor,\n torch.int64: torch.LongTensor,\n torch.bool: torch.BoolTensor,\n },\n 'cuda': {\n torch.float32: torch.cuda.FloatTensor,\n torch.float64: torch.cuda.DoubleTensor,\n torch.float16: torch.cuda.HalfTensor,\n torch.uint8: torch.cuda.ByteTensor,\n torch.int8: torch.cuda.CharTensor,\n torch.int16: torch.cuda.ShortTensor,\n torch.int32: torch.cuda.IntTensor,\n torch.int64: torch.cuda.LongTensor,\n torch.bool: torch.cuda.BoolTensor,\n }\n}\n\n\ndef set_default_device(device_name):\n \"\"\"Set the default device.\n\n Cannot find a native torch function for setting default device. We have to\n hack our own.\n\n Args:\n device_name (str): one of (\"cpu\", \"cuda\")\n \"\"\"\n torch.set_default_tensor_type(\n _devece_ddtype_tensor_map[device_name][torch.get_default_dtype()])\n\n\ndef get_default_device():\n return torch._C._get_default_device()\n\n\nclass device(object):\n \"\"\"Specifies the device for tensors created in this context.\"\"\"\n\n def __init__(self, device_name):\n \"\"\"Create the context with default device with name `device_name`\n\n Args:\n device_name (str): one of (\"cpu\", \"cuda\")\n \"\"\"\n self._device_name = device_name\n\n def __enter__(self):\n self._prev_device_name = get_default_device()\n if self._prev_device_name != self._device_name:\n set_default_device(self._device_name)\n\n def __exit__(self, type, value, traceback):\n if self._prev_device_name != self._device_name:\n set_default_device(self._prev_device_name)\n",
"# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom absl.testing import parameterized\nimport functools\n\nimport torch\nimport torch.nn as nn\n\nimport alf\nfrom alf.nest.utils import NestConcat\nfrom alf.networks.encoding_networks import EncodingNetwork\nfrom alf.networks.encoding_networks import LSTMEncodingNetwork\nfrom alf.networks.encoding_networks import ParallelEncodingNetwork\nfrom alf.networks.preprocessors import EmbeddingPreprocessor\nfrom alf.tensor_specs import TensorSpec\nfrom alf.utils import common\n\n\nclass TestInputpreprocessor(parameterized.TestCase, alf.test.TestCase):\n input_spec = TensorSpec((10, ))\n preproc = EmbeddingPreprocessor(\n input_tensor_spec=input_spec, embedding_dim=10)\n\n shared_preproc = preproc.copy().singleton()\n\n @parameterized.parameters((False, preproc), (True, preproc),\n (False, shared_preproc), (True, shared_preproc))\n def test_input_preprocessor(self, lstm, preproc):\n def _check_with_shared_param(net1, net2, shared_subnet=None):\n net1_params = set(net1.parameters())\n net2_params = set(net2.parameters())\n # check that net1 and net2 share paramsters with shared_subnet\n if shared_subnet is not None:\n shared_params = set(shared_subnet.parameters())\n for p in shared_params:\n self.assertTrue((p in net1_params) and (p in net2_params))\n\n # for the rest part, net1 and net2 do not share parameters\n for p1, p2 in zip(net1_params, net2_params):\n if shared_subnet is None or p1 not in shared_params:\n self.assertTrue(p1 is not p2)\n\n # 1) test input_preprocessor copy and each copy has its own parameters\n input_preprocessor = preproc\n input_preprocessor_copy = input_preprocessor.copy()\n\n if not preproc._singleton_instance:\n _check_with_shared_param(input_preprocessor,\n input_preprocessor_copy)\n elif preproc._singleton_instance:\n _check_with_shared_param(input_preprocessor,\n input_preprocessor_copy,\n input_preprocessor)\n\n if lstm:\n network_ctor = functools.partial(\n LSTMEncodingNetwork,\n hidden_size=(1, ),\n post_fc_layer_params=(2, 2))\n else:\n network_ctor = functools.partial(\n EncodingNetwork, fc_layer_params=(10, 10))\n\n net = network_ctor(\n input_tensor_spec=[\n TestInputpreprocessor.input_spec,\n TestInputpreprocessor.input_spec\n ],\n input_preprocessors=[input_preprocessor, torch.relu],\n preprocessing_combiner=NestConcat(dim=1))\n\n # 2) test copied network has its own parameters, including\n # parameters from input preprocessors\n copied_net = net.copy()\n if not preproc._singleton_instance:\n _check_with_shared_param(net, copied_net)\n else:\n _check_with_shared_param(net, copied_net, input_preprocessor)\n\n # 3) test for each replica of the NaiveParallelNetwork has its own\n # parameters, including parameters from input preprocessors\n replicas = 2\n p_net = alf.networks.network.NaiveParallelNetwork(net, replicas)\n if not preproc._singleton_instance:\n _check_with_shared_param(p_net._networks[0], p_net._networks[1])\n else:\n 
_check_with_shared_param(p_net._networks[0], p_net._networks[1],\n input_preprocessor)\n\n # 4) test network forward\n batch_size = 6\n batch = TestInputpreprocessor.input_spec.zeros(\n outer_dims=(batch_size, ))\n\n if lstm:\n state = [(torch.zeros((batch_size, 1)), ) * 2]\n p_state = [(torch.zeros((batch_size, replicas, 1)), ) * 2]\n else:\n state = ()\n p_state = ()\n\n net([batch, batch], state)\n p_net([batch, batch], p_state)\n\n @parameterized.parameters(preproc, shared_preproc)\n def test_input_preprocessor_state(self, input_preprocessor):\n batch_size = 6\n batch = TestInputpreprocessor.input_spec.zeros(\n outer_dims=(batch_size, ))\n\n input_preprocessor(batch)\n self.assertRaises(\n AssertionError, input_preprocessor, inputs=batch, state=batch)\n\n\nif __name__ == '__main__':\n alf.test.main()\n"
] |
[
[
"numpy.sqrt",
"numpy.get_printoptions",
"numpy.set_printoptions",
"numpy.matmul",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.all",
"numpy.stack",
"numpy.float32",
"torch.arange",
"numpy.array",
"numpy.zeros"
],
[
"numpy.reshape",
"numpy.zeros",
"numpy.transpose"
],
[
"torch.randn",
"torch.utils.data.DataLoader",
"torch.rand"
],
[
"torch.where"
],
[
"torch.get_default_dtype",
"torch._C._get_default_device"
],
[
"torch.zeros"
]
] |
rougier/JCGT-2014a
|
[
"78793d05a145af79d9cacf87a6e1ffaaea501394"
] |
[
"demo-continuous.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (C) 2013 Nicolas P. Rougier. All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# \n# THIS SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER ''AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO\n# EVENT SHALL NICOLAS P. ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n# The views and conclusions contained in the software and documentation are\n# those of the authors and should not be interpreted as representing official\n# policies, either expressed or implied, of Nicolas P. Rougier.\n# -----------------------------------------------------------------------------\nimport numpy as np\nimport OpenGL.GL as gl\nfrom transforms import ortho\n\n# -------------------------------------\ndef on_display():\n gl.glClearColor(1,1,1,1)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n collection.draw(uniforms= {'u_projection': u_projection,\n 'u_model' : u_model,\n 'u_view' : u_view})\n glut.glutSwapBuffers()\n\n# -------------------------------------\ndef on_reshape(width, height):\n gl.glViewport(0, 0, width, height)\n u_projection[...] 
= ortho(0,width,0,height,-1,1)\n collection.scale = min(width, height)\n\n# -------------------------------------\ndef on_keyboard(key, x, y):\n if key == '\\033': sys.exit()\n\n# -------------------------------------\ndef on_special( key, x, y ):\n if key == glut.GLUT_KEY_LEFT:\n collection.dash_phase += 0.05\n elif key == glut.GLUT_KEY_RIGHT:\n collection.dash_phase -= 0.05\n glut.glutPostRedisplay()\n\n\n# -------------------------------------\nif __name__ == '__main__':\n import sys\n import OpenGL.GLUT as glut\n\n from curves import curve3_bezier, curve4_bezier\n from dash_lines_2D import DashLines\n\n glut.glutInit(sys.argv)\n # HiDPI support for retina display\n # This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/\n if sys.platform == 'darwin':\n import ctypes\n from OpenGL import platform\n try:\n glutInitDisplayString = platform.createBaseFunction( \n 'glutInitDisplayString', dll=platform.GLUT, resultType=None, \n argTypes=[ctypes.c_char_p],\n doc='glutInitDisplayString( ) -> None', \n argNames=() )\n text = ctypes.c_char_p(\"rgba stencil double samples=8 hidpi\")\n glutInitDisplayString(text)\n except:\n pass\n\n glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH)\n glut.glutInitWindowSize(1000, 1000)\n glut.glutCreateWindow(\"Dashed & antialiased bezier curve [Arrow keys change offset]\")\n glut.glutDisplayFunc(on_display)\n glut.glutReshapeFunc(on_reshape)\n glut.glutKeyboardFunc(on_keyboard)\n glut.glutSpecialFunc(on_special)\n\n # Some init\n gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA )\n gl.glDisable( gl.GL_DEPTH_TEST )\n gl.glEnable( gl.GL_BLEND )\n gl.glClearColor(1.0,1.0,1.0,1.0)\n u_projection = np.eye(4).astype( np.float32 )\n u_view = np.eye(4).astype( np.float32 )\n u_model = np.eye(4).astype( np.float32 )\n\n collection = DashLines()\n\n # ---------------------------------\n points = np.array([[.1, .6], [.5, 1.], [.9, .6]])\n vertices = curve3_bezier(*points)\n\n closed = False\n collection.append(vertices, color=(0,0,0,1), linewidth=104,\n dash_pattern = 'solid', linecaps=('>','<'), closed=closed)\n collection.append(vertices, color=(1,1,1,1), linewidth=102,\n dash_pattern = 'solid', linecaps=('>','<'), closed=closed)\n collection.append(vertices, color=(0.75,0.75,1.00,1.00), linewidth=100,\n dash_pattern = 'dashed', dash_caps=('>','<'),\n linecaps=('>','<'), closed=closed)\n\n\n # ---------------------------------\n vertices = curve3_bezier(*(points + [0, -0.4]))\n collection.append(vertices, color=(0,0,0,1), linewidth=104,\n dash_pattern = 'solid', linecaps=('=','='), closed=closed)\n collection.append(vertices, color=(1,1,1,1), linewidth=102,\n dash_pattern = 'solid', linecaps=('=','='), closed=closed)\n collection.append( vertices, color=(0.75,0.75,1.00,1.0),\n linewidth=100, linecaps = ('|','|'), closed=closed,\n dash_pattern = 'custom', dash_caps=('|','|') )\n\n # ---------------------------------\n vertices = curve3_bezier(*(points + [0, -0.2]))\n collection.append(vertices, color=(0,0,0,1), linewidth=104,\n dash_pattern = 'solid', linecaps=('o','o'), closed=closed)\n collection.append(vertices, color=(1,1,1,1), linewidth=102,\n dash_pattern = 'solid', linecaps=('o','o'), closed=closed)\n collection.append( vertices, color=(0.75,0.75,1.00,1.0),\n linewidth=100, linecaps = ('o','o'), closed=closed,\n dash_pattern = 'densely dotted', dash_caps=('o','o') )\n\n\n glut.glutMainLoop()\n"
] |
[
[
"numpy.eye",
"numpy.array"
]
] |
rhambach/TEMareels
|
[
"92a907f483baeb919dd485895c56454f0b552c76"
] |
[
"tools/remove_stripes.py"
] |
[
"\"\"\"\n IMPLEMENTATION:\n - crude method for removing periodic noise in images recorded \n on Tietz CMOS slave camera in wq-mode\n - integrates over several lines (e.g. 10x4096) of noise and \n substracts signal from each line in sector\n \n Copyright (c) 2013, pwachsmuth, rhambach\n This file is part of the TEMareels package and released\n under the MIT-Licence. See LICENCE file for details.\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pylab as plt\n\ndef remove_stripes(image, intwidth=10, intstart=[0,1025,2900,3800], \n sector_width=1024, mask = None, verbosity = 0):\n \n image = np.asarray(image)\n old_image = image.copy(); \n \n offset = 0;\n for j in range(0,4):\n ref_line = old_image[:,intstart[j]:intstart[j]+intwidth].sum(axis=1)*(1./(intwidth));\n #ref_line[ref_line > thresh] = 0;\n imax = ref_line.argmax();\n if mask is not None:\n ref_line[mask] = 0;\n for i in range(offset,offset+sector_width):\n image[:,i] = image[:,i]-ref_line;\n offset += sector_width;\n #print offset \n image[:,0:5]= image[:,-5:] = 0; \n if verbosity > 0:\n plt.title(\"Remove Stripes: difference between old and new image\");\n plt.imshow(image - old_image, aspect='auto')\n plt.show();\n return image;\n\n \n # -- main ----------------------------------------\nif __name__ == '__main__':\n import TEMareels.tools.tifffile as tiff\n from TEMareels.tools import tvips\n\n\n image_file = '../tests/wqmap.tif';\n image = tiff.imread(image_file).astype(float);\n binning = 8;\n intstart= np.array([0,1025,2900,3800])/binning;\n \n img = remove_stripes(image, intwidth=100/binning, \n intstart=intstart, sector_width=1024/binning, verbosity=1);\n \n #outfile = \"script_test_.tif\";\n #tvips.write_tiff(img, outfile);\n \n \n \n"
] |
[
[
"matplotlib.pylab.show",
"numpy.asarray",
"matplotlib.pylab.title",
"matplotlib.pylab.imshow",
"numpy.array"
]
] |
pyronear/pyro-dataset
|
[
"b6445f6051058f20f2fc821040ec3705dc60464c"
] |
[
"test/test_datasets.py"
] |
[
"# Copyright (C) 2021, Pyronear contributors.\n\n# This program is licensed under the GNU Affero General Public License version 3.\n# See LICENSE or go to <https://www.gnu.org/licenses/agpl-3.0.txt> for full license details.\n\nimport unittest\nimport tempfile\nfrom pathlib import Path\nimport json\nfrom PIL.Image import Image\nimport pandas as pd\nimport random\nimport requests\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision.transforms import transforms\nfrom torchvision.datasets import VisionDataset\n\nfrom pyrodataset.wildfire import WildFireDataset, WildFireSplitter, computeSubSet\n\n\ndef generate_wildfire_dataset_fixture():\n random.seed(42)\n df = pd.DataFrame(columns=['imgFile', 'fire_id', 'fire'])\n for i in range(974):\n df = df.append({'imgFile': str(i).zfill(4) + '.jpg', 'fire_id': float(random.randint(1, 100)),\n 'fire': float(random.randint(0, 1))}, ignore_index=True)\n\n return df\n\n\ndef generate_wildfire_subsampler_dataset_fixture():\n df = pd.DataFrame(columns=['exploitable', 'fire', 'sequence', 'clf_confidence',\n 'loc_confidence', 'x', 'y', 't', 'stateStart',\n 'stateEnd', 'imgFile', 'fire_id', 'fBase'])\n for b in range(10):\n x = random.uniform(200, 500)\n y = random.uniform(200, 500)\n t = random.uniform(0, 100)\n start = random.randint(0, 200)\n end = random.randint(start + 11, 400)\n base = str(b) + '.mp4'\n imgsNb = random.sample(range(start, end), 10)\n imgsNb.sort()\n imgs = [str(b) + '_frame' + str(i) + '.png' for i in imgsNb]\n fire_id = float(random.randint(1, 100))\n fire = float(random.randint(0, 1))\n for i in range(10):\n df = df.append({'exploitable': True, 'fire': fire, 'sequence': 0,\n 'clf_confidence': 0, 'loc_confidence': 0, 'x': x, 'y': y, 't': t, 'stateStart': start,\n 'stateEnd': end, 'imgFile': imgs[i], 'fire_id': fire_id,\n 'fBase': base}, ignore_index=True)\n\n return df\n\n\ndef get_wildfire_image():\n\n #download image\n url = 'https://media.springernature.com/w580h326/nature-cms/uploads/collections/' \\\n 'Wildfire-and-ecosystems-Hero-d62e7fbbf36ce6915d4e3efef069ee0e.jpg'\n response = requests.get(url)\n # save image\n file = open(\"test//0003.jpg\", \"wb\")\n file.write(response.content)\n file.close()\n\n\nclass WildFireDatasetTester(unittest.TestCase):\n\n def setUp(self):\n self.path_to_frames = Path(__file__).parent\n self.path_to_frames_str = str(self.path_to_frames)\n self.wildfire_path = Path(__file__).parent / 'wildfire_dataset.csv'\n self.wildfire_df = generate_wildfire_dataset_fixture()\n self.wildfire_df.to_csv(self.wildfire_path)\n get_wildfire_image()\n\n def test_wildfire_correctly_init_from_path(self):\n\n for path_to_frames in [self.path_to_frames, self.path_to_frames_str]:\n wildfire = WildFireDataset(\n metadata=self.wildfire_path,\n path_to_frames=path_to_frames\n )\n\n self.assertEqual(len(wildfire), 974)\n self.assertEqual(len(wildfire[3]), 2)\n\n def test_wildfire_correctly_init_from_dataframe(self):\n for path_to_frames in [self.path_to_frames, self.path_to_frames_str]:\n wildfire = WildFireDataset(\n metadata=self.wildfire_df,\n path_to_frames=path_to_frames\n )\n\n self.assertEqual(len(wildfire), 974)\n self.assertEqual(len(wildfire[3]), 2)\n\n # try to get one image of wildfire (item 3 is authorized image fixture)\n observation_3, metadata_3 = wildfire[3]\n self.assertIsInstance(observation_3, Image) # image correctly loaded ?\n self.assertEqual(observation_3.size, (580, 326))\n # metadata correctly loaded ?\n self.assertTrue(torch.equal(metadata_3, 
torch.tensor([self.wildfire_df.loc[3]['fire']])))\n\n def test_wildfire_correctly_init_with_multiple_targets(self):\n wildfire = WildFireDataset(\n metadata=self.wildfire_df,\n path_to_frames=self.path_to_frames,\n transform=transforms.ToTensor(),\n target_names=['fire', 'fire_id']\n )\n\n self.assertEqual(len(wildfire), 974)\n\n # try to get one image of wildfire (item 3 is authorized image fixture)\n observation_3, metadata_3 = wildfire[3]\n self.assertIsInstance(observation_3, torch.Tensor) # image correctly loaded ?\n self.assertEqual(observation_3.size(), torch.Size([3, 326, 580]))\n self.assertTrue(torch.equal(metadata_3, torch.tensor([self.wildfire_df.loc[3]['fire'],\n self.wildfire_df.loc[3]['fire_id']]))) # metadata correctly loaded ?\n\n def test_invalid_csv_path_raises_exception(self):\n with self.assertRaises(ValueError):\n WildFireDataset(\n metadata='bad_path.csv',\n path_to_frames=self.path_to_frames\n )\n\n def test_wildfire_correctly_init_with_transform(self):\n wildfire = WildFireDataset(\n metadata=self.wildfire_path,\n path_to_frames=self.path_to_frames,\n transform=transforms.Compose([transforms.Resize((100, 66)), transforms.ToTensor()])\n )\n\n observation_3, _ = wildfire[3]\n self.assertEqual(observation_3.size(), torch.Size((3, 100, 66)))\n\n def test_dataloader_can_be_init_with_wildfire(self):\n wildfire = WildFireDataset(metadata=self.wildfire_path, path_to_frames=self.path_to_frames)\n DataLoader(wildfire, batch_size=64)\n\n\nclass WildFireSubSamplerTester(unittest.TestCase):\n\n def setUp(self):\n self.path_to_frames = Path(__file__).parent\n self.wildfire_path = Path(__file__).parent / 'wildfire_dataset.csv'\n self.wildfire_df = generate_wildfire_subsampler_dataset_fixture()\n self.wildfire_df.to_csv(self.wildfire_path)\n\n def test_good_size_after_subsamping(self):\n self.assertEqual(len(self.wildfire_df), 100)\n metadataSS = computeSubSet(self.wildfire_df, 2)\n\n self.assertEqual(len(metadataSS), 20)\n\n def test_metadata_changes_each_time(self):\n metadataSS_1 = computeSubSet(self.wildfire_df, 2, seed=1)\n metadataSS_2 = computeSubSet(self.wildfire_df, 2, seed=2)\n\n self.assertEqual(len(metadataSS_1), 20)\n self.assertEqual(len(metadataSS_2), 20)\n self.assertFalse(metadataSS_1['imgFile'].values.tolist() == metadataSS_2['imgFile'].values.tolist())\n\n def test_metadata_does_not_changes_with_same_seed(self):\n metadataSS_1 = computeSubSet(self.wildfire_df, 2, seed=1)\n metadataSS_2 = computeSubSet(self.wildfire_df, 2, seed=1)\n\n self.assertEqual(len(metadataSS_1), 20)\n self.assertEqual(len(metadataSS_2), 20)\n self.assertTrue(metadataSS_1['imgFile'].values.tolist() == metadataSS_2['imgFile'].values.tolist())\n\n def test_increase_not_fire_semples(self):\n metadataSS = computeSubSet(self.wildfire_path, 2, 1)\n\n self.assertGreater(len(metadataSS), 20)\n\n def test_invalid_csv_path_raises_exception(self):\n with self.assertRaises(ValueError):\n computeSubSet(\n metadata='bad_path.csv',\n frame_per_seq=2\n )\n\n\nclass WildFireDatasetSplitter(unittest.TestCase):\n\n def setUp(self):\n self.path_to_frames = Path(__file__).parent\n\n self.wildfire_df = generate_wildfire_dataset_fixture()\n\n self.wildfire = WildFireDataset(metadata=self.wildfire_df, path_to_frames=self.path_to_frames)\n\n def test_consistent_ratios_good_init(self):\n ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}\n splitter = WildFireSplitter(ratios)\n self.assertEqual(ratios, splitter.ratios)\n\n def test_inconsistent_ratios_raise_exception(self):\n ratios = {'train': 0.9, 'val': 0.2, 
'test': 0.1} # sum > 1\n with self.assertRaises(ValueError):\n WildFireSplitter(ratios)\n\n def test_splitting_with_test_to_zero(self):\n ratios = {'train': 0.8, 'val': 0.2, 'test': 0}\n\n splitter = WildFireSplitter(ratios, seed=42)\n splitter.fit(self.wildfire)\n\n for (set_, ratio_) in splitter.ratios_.items():\n self.assertAlmostEqual(ratio_, ratios[set_], places=1)\n\n def test_splitting_gives_good_splits_size(self):\n n_samples_expected = {'train': 688, 'val': 147, 'test': 139}\n ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}\n\n splitter = WildFireSplitter(ratios, seed=42)\n splitter.fit(self.wildfire)\n\n self.assertEqual(splitter.n_samples_, n_samples_expected)\n for (set_, ratio_) in splitter.ratios_.items():\n self.assertAlmostEqual(ratio_, ratios[set_], places=1)\n\n def test_splitting_working_with_transforms(self):\n ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}\n transforms_expected = {'train': transforms.RandomCrop(10), 'val': None, 'test': None}\n\n splitter = WildFireSplitter(ratios, transforms=transforms_expected)\n splitter.fit(self.wildfire)\n\n for (set_, transform_expected) in transforms_expected.items():\n self.assertIs(getattr(splitter, set_).transform, transform_expected)\n\n def test_splitting_with_unavailable_algorithm_raise_exception(self):\n ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}\n\n splitter = WildFireSplitter(ratios, algorithm='wtf')\n with self.assertRaises(ValueError):\n splitter.fit(self.wildfire)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"torch.Size",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"torch.tensor"
]
] |
alesanmed/as-route
|
[
"fc7fcb65496188f7c7e12626e2169f5315e4e3d1"
] |
[
"heuristic/Constructive.py"
] |
[
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport heuristic.utils as utils\nimport random\n\nfrom heuristic.Graph import TSP_Graph\nfrom heuristic.Solution import Solution\n\ndef random_solution(graph, customers_list):\n if not isinstance(graph, TSP_Graph):\n utils.raise_value_error(graph, TSP_Graph, type(graph))\n \n if not isinstance(customers_list, list):\n utils.raise_value_error(\"customers_list\", list, type(customers_list))\n \n customers = np.empty((len(customers_list),), \n dtype=[('id', 'i4'), ('ws', 'i8'), ('we', 'i8'), ('t', 'i8')])\n \n for i, customer in enumerate(customers_list):\n depot_pos = graph.get_customer_index(0) \n c_pos = customer.get_row()\n customers[i] = (customer.get_row(), \n customer.get_window_start(),\n customer.get_window_end(),\n graph.get_value(depot_pos, c_pos))\n \n # Almacen siempre el primero, su ventana empieza en 0 y el tiempo hasta si\n # mismo es 0\n \n customers = customers[np.argsort(customers, order=('we', 't'))]\n \n depot = np.where(customers['id'] == 0)[0][0]\n \n customers = np.concatenate(([customers[depot]],\n customers[0:depot],\n customers[depot+1:]))\n ###############\n ## Provisional, se quitara la inversion del orden, es para forzar una solucion\n ## inicial invalida\n ##\n ## customers = customers[np.argsort(customers, order=('ws', 't'))[::-1]]\n ## first = customers[0]\n ## last = customers[-1]\n ## customers = np.concatenate(([last], customers[1:-1], [first]))\n ###############\n \n solution = Solution(len(customers_list))\n \n solution.set_graph(graph)\n solution.set_solution(customers['id'])\n \n \n start_time = int(customers['ws'][1] - customers['t'][1])\n if start_time < 0:\n start_time = 0\n\n solution.set_start_time(start_time)\n \n curr_time = start_time\n for i, c_row in enumerate(solution.get_solution()):\n customer = next((c for c in customers_list if c.get_row() == c_row), None)\n time_visited = curr_time + customers['t'][i]\n\n if time_visited < customer.get_window_start():\n time_visited = customer.get_window_start()\n \n customer.set_time_visited(int(time_visited))\n curr_time = time_visited\n \n solution.set_customer_list(customers_list)\n \n solution.compute_validity()\n \n return solution\n \ndef perturbation(solution, level):\n solution_new = Solution(solution.get_solution().size, solution=solution) \n \n min_index = solution_new.get_solution().size * 10000\n for i in range(level):\n index_origin = random.randint(1, solution_new.get_solution().size - 1)\n index_new = random.randint(1, solution_new.get_solution().size - 1)\n \n curr_min = min(index_origin, index_new)\n \n if curr_min < min_index:\n min_index = curr_min\n \n solution_new.one_shift(index_origin, index_new)\n \n solution_new.recompute_validity(min_index)\n solution_new.compute_validity()\n \n return solution_new\n \ndef local1shift(solution):\n customers_validity = solution.get_valid_customers()\n \n valid_customers = np.where(customers_validity == 1)[0]\n violated_customers = np.where(customers_validity == 0)[0]\n \n better_solution = None\n min_index = customers_validity.size * 10000\n\n # Backward movement of violated customers \n for i in violated_customers:\n index_origin = i\n for j in range(i - 1, 0, -1):\n index_new = j\n \n if not solution.is_arc_valid(i, j):\n break\n \n solution_new = Solution(solution.get_solution().size, solution=solution) \n solution_new.one_shift(index_origin, index_new)\n min_ = min(index_origin, index_new)\n\n if min_ < min_index: \n min_index = min_\n \n if solution_new.get_constructive_obj() > 
solution.get_constructive_obj():\n better_solution = solution_new\n break\n \n if better_solution is not None:\n break\n \n if better_solution is None:\n # Forward movement of non-violated customers\n for i in valid_customers:\n # Depot can't be moved\n if solution.get_solution()[i] == 0:\n continue\n\n index_origin = i\n for j in range(i + 1, customers_validity.size):\n index_new = j\n \n if not solution.is_arc_valid(j, i):\n break\n \n solution_new = Solution(solution.get_solution().size, solution=solution) \n solution_new.one_shift(index_origin, index_new)\n min_ = min(index_origin, index_new)\n\n if min_ < min_index: \n min_index = min_\n \n if solution_new.get_constructive_obj() > solution.get_constructive_obj():\n better_solution = solution_new\n break\n \n if better_solution is not None:\n break\n \n if better_solution is None:\n # Backward movement of non-violated customers\n for i in valid_customers:\n # Depot can't be moved\n if solution.get_solution()[i] == 0:\n continue\n\n index_origin = i\n for j in range(i - 1, 0, -1):\n index_new = j\n \n if not solution.is_arc_valid(i, j):\n break\n \n solution_new = Solution(solution.get_solution().size, solution=solution) \n solution_new.one_shift(index_origin, index_new)\n min_ = min(index_origin, index_new)\n\n if min_ < min_index: \n min_index = min_\n \n if solution_new.get_constructive_obj() > solution.get_constructive_obj():\n better_solution = solution_new\n break\n \n if better_solution is not None:\n break\n \n if better_solution is None:\n # Forward movement of violated customers\n for i in violated_customers:\n index_origin = i\n \n for j in range(i + 1, customers_validity.size):\n index_new = j\n \n if not solution.is_arc_valid(j, i):\n break\n \n solution_new = Solution(solution.get_solution().size, solution=solution) \n solution_new.one_shift(index_origin, index_new)\n min_ = min(index_origin, index_new)\n\n if min_ < min_index: \n min_index = min_\n \n if solution_new.get_constructive_obj() > solution.get_constructive_obj():\n better_solution = solution_new\n break\n \n if better_solution is not None:\n break\n \n if better_solution is None:\n better_solution = solution\n\n better_solution.recompute_validity(min_index)\n better_solution.compute_validity()\n\n return better_solution"
] |
[
[
"numpy.concatenate",
"numpy.argsort",
"numpy.where"
]
] |
arthur801031/3d-multi-resolution-rcnn
|
[
"8e5454a72f8daa174bf3eabfa5964152f04ab287",
"8e5454a72f8daa174bf3eabfa5964152f04ab287",
"8e5454a72f8daa174bf3eabfa5964152f04ab287"
] |
[
"mmdet/models/backbones/unet3d.py",
"mmdet/core/bbox/bbox_target.py",
"mmdet/core/bbox/samplers/base_sampler.py"
] |
[
"# based on implementation: https://github.com/usuyama/pytorch-unet/blob/master/pytorch_unet.py\n\nfrom ..registry import BACKBONES\n\nimport torch\nimport torch.nn as nn\n\ndef double_conv(in_channels, out_channels):\n return nn.Sequential(\n nn.Conv3d(in_channels, out_channels, 3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(out_channels, out_channels, 3, padding=1),\n nn.ReLU(inplace=True)\n ) \n\[email protected]_module\nclass UNet3D(nn.Module):\n\n def __init__(self):\n super(UNet3D, self).__init__()\n \n self.dconv_down1 = double_conv(3, 16)\n self.dconv_down2 = double_conv(16, 32)\n self.dconv_down3 = double_conv(32, 64)\n self.dconv_down4 = double_conv(64, 128) \n\n self.maxpool = nn.MaxPool3d(2)\n # self.upsample = nn.functional.interpolate(scale_factor=2, mode='trilinear', align_corners=True) \n \n self.dconv_up3 = double_conv(64 + 128, 64)\n self.dconv_up2 = double_conv(32 + 64, 32)\n self.dconv_up1 = double_conv(32 + 16, 16)\n \n # self.conv_last = nn.Conv2d(64, n_class, 1)\n \n def init_weights(self, pretrained=None):\n pass\n\n def forward(self, x):\n conv1 = self.dconv_down1(x)\n x = self.maxpool(conv1)\n\n conv2 = self.dconv_down2(x)\n x = self.maxpool(conv2)\n \n conv3 = self.dconv_down3(x)\n x = self.maxpool(conv3) \n \n x = self.dconv_down4(x)\n \n x = nn.functional.interpolate(x, scale_factor=2, mode='trilinear', align_corners=True) \n x = torch.cat([x, conv3], dim=1)\n \n x = self.dconv_up3(x)\n x = nn.functional.interpolate(x, scale_factor=2, mode='trilinear', align_corners=True) \n x = torch.cat([x, conv2], dim=1) \n\n x = self.dconv_up2(x)\n x = nn.functional.interpolate(x, scale_factor=2, mode='trilinear', align_corners=True) \n x = torch.cat([x, conv1], dim=1) \n \n x = self.dconv_up1(x)\n \n return x",
"import torch\n\nfrom .transforms import bbox2delta, bbox2delta3d\nfrom ..utils import multi_apply\n\n\ndef bbox_target(pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0],\n concat=True):\n labels, label_weights, bbox_targets, bbox_weights = multi_apply(\n bbox_target_single,\n pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg=cfg,\n reg_classes=reg_classes,\n target_means=target_means,\n target_stds=target_stds)\n\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n bbox_weights = torch.cat(bbox_weights, 0)\n return labels, label_weights, bbox_targets, bbox_weights\n\ndef bbox_target_3d(pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n concat=True):\n labels, label_weights, bbox_targets, bbox_weights = multi_apply(\n bbox_target_single_3d,\n pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg=cfg,\n reg_classes=reg_classes,\n target_means=target_means,\n target_stds=target_stds)\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n bbox_weights = torch.cat(bbox_weights, 0)\n return labels, label_weights, bbox_targets, bbox_weights\n\ndef bbox_target_3d_parcel(pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n pos_gt_bregions_list,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n concat=True):\n labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights = multi_apply(\n bbox_target_single_3d_parcel,\n pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n pos_gt_bregions_list,\n cfg=cfg,\n reg_classes=reg_classes,\n target_means=target_means,\n target_stds=target_stds)\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n bbox_weights = torch.cat(bbox_weights, 0)\n bregions = torch.cat(bregions, 0)\n bregion_weights = torch.cat(bregion_weights, 0)\n return labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights\n\ndef bbox_target_single(pos_bboxes,\n neg_bboxes,\n pos_gt_bboxes,\n pos_gt_labels,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0]):\n num_pos = pos_bboxes.size(0)\n num_neg = neg_bboxes.size(0)\n num_samples = num_pos + num_neg\n labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)\n label_weights = pos_bboxes.new_zeros(num_samples)\n bbox_targets = pos_bboxes.new_zeros(num_samples, 4)\n bbox_weights = pos_bboxes.new_zeros(num_samples, 4)\n if num_pos > 0:\n labels[:num_pos] = pos_gt_labels\n pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n label_weights[:num_pos] = pos_weight\n pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means,\n target_stds)\n bbox_targets[:num_pos, :] = pos_bbox_targets\n bbox_weights[:num_pos, :] = 1\n if num_neg > 0:\n label_weights[-num_neg:] = 1.0\n\n return labels, label_weights, bbox_targets, bbox_weights\n\ndef bbox_target_single_3d(pos_bboxes,\n neg_bboxes,\n pos_gt_bboxes,\n pos_gt_labels,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, 
.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]):\n num_pos = pos_bboxes.size(0)\n num_neg = neg_bboxes.size(0)\n num_samples = num_pos + num_neg\n labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)\n label_weights = pos_bboxes.new_zeros(num_samples)\n bbox_targets = pos_bboxes.new_zeros(num_samples, 6)\n bbox_weights = pos_bboxes.new_zeros(num_samples, 6)\n if num_pos > 0:\n labels[:num_pos] = pos_gt_labels\n pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n label_weights[:num_pos] = pos_weight\n pos_bbox_targets = bbox2delta3d(pos_bboxes, pos_gt_bboxes, target_means,\n target_stds)\n bbox_targets[:num_pos, :] = pos_bbox_targets\n bbox_weights[:num_pos, :] = 1\n if num_neg > 0:\n label_weights[-num_neg:] = 1.0\n\n # if torch.isnan(bbox_targets).any().item() == 1:\n # breakpoint()\n return labels, label_weights, bbox_targets, bbox_weights\n\ndef bbox_target_single_3d_parcel(pos_bboxes,\n neg_bboxes,\n pos_gt_bboxes,\n pos_gt_labels,\n pos_gt_bregions,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]):\n num_pos = pos_bboxes.size(0)\n num_neg = neg_bboxes.size(0)\n num_samples = num_pos + num_neg\n labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)\n bregions = pos_bboxes.new_zeros(num_samples, dtype=torch.long)\n label_weights = pos_bboxes.new_zeros(num_samples)\n bregion_weights = pos_bboxes.new_zeros(num_samples)\n bbox_targets = pos_bboxes.new_zeros(num_samples, 6)\n bbox_weights = pos_bboxes.new_zeros(num_samples, 6)\n if num_pos > 0:\n labels[:num_pos] = pos_gt_labels\n bregions[:num_pos] = pos_gt_bregions\n pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n label_weights[:num_pos] = pos_weight\n bregion_weights[:num_pos] = pos_weight\n pos_bbox_targets = bbox2delta3d(pos_bboxes, pos_gt_bboxes, target_means,\n target_stds)\n bbox_targets[:num_pos, :] = pos_bbox_targets\n bbox_weights[:num_pos, :] = 1\n if num_neg > 0:\n label_weights[-num_neg:] = 1.0\n bregion_weights[-num_neg:] = 1.0\n\n # if torch.isnan(bbox_targets).any().item() == 1:\n # breakpoint()\n return labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights\n\n\ndef expand_target(bbox_targets, bbox_weights, labels, num_classes):\n breakpoint()\n bbox_targets_expand = bbox_targets.new_zeros((bbox_targets.size(0),\n 4 * num_classes))\n bbox_weights_expand = bbox_weights.new_zeros((bbox_weights.size(0),\n 4 * num_classes))\n for i in torch.nonzero(labels > 0).squeeze(-1):\n start, end = labels[i] * 4, (labels[i] + 1) * 4\n bbox_targets_expand[i, start:end] = bbox_targets[i, :]\n bbox_weights_expand[i, start:end] = bbox_weights[i, :]\n return bbox_targets_expand, bbox_weights_expand\n",
"from abc import ABCMeta, abstractmethod\n\nimport torch\n\nfrom .sampling_result import SamplingResult\n\n\nclass BaseSampler(metaclass=ABCMeta):\n\n def __init__(self,\n num,\n pos_fraction,\n neg_pos_ub=-1,\n add_gt_as_proposals=True,\n **kwargs):\n self.num = num\n self.pos_fraction = pos_fraction\n self.neg_pos_ub = neg_pos_ub\n self.add_gt_as_proposals = add_gt_as_proposals\n self.pos_sampler = self\n self.neg_sampler = self\n\n @abstractmethod\n def _sample_pos(self, assign_result, num_expected, **kwargs):\n pass\n\n @abstractmethod\n def _sample_neg(self, assign_result, num_expected, **kwargs):\n pass\n\n def sample(self,\n assign_result,\n bboxes,\n gt_bboxes,\n gt_labels=None,\n **kwargs):\n \"\"\"Sample positive and negative bboxes.\n\n This is a simple implementation of bbox sampling given candidates,\n assigning results and ground truth bboxes.\n\n Args:\n assign_result (:obj:`AssignResult`): Bbox assigning results.\n bboxes (Tensor): Boxes to be sampled from.\n gt_bboxes (Tensor): Ground truth bboxes.\n gt_labels (Tensor, optional): Class labels of ground truth bboxes.\n\n Returns:\n :obj:`SamplingResult`: Sampling result.\n \"\"\"\n if isinstance(gt_bboxes, list) and len(gt_bboxes) == 1:\n gt_bboxes = gt_bboxes[0]\n if isinstance(gt_labels, list) and len(gt_labels) == 1:\n gt_labels = gt_labels[0]\n\n # scores = None\n if bboxes.shape[1] >= 6:\n # if bboxes.shape[1] == 7:\n # # sampling proposals for bbox head\n # scores = bboxes[:, 6]\n bboxes = bboxes[:, :6]\n elif bboxes.shape[1] >= 4:\n bboxes = bboxes[:, :4]\n\n gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)\n if self.add_gt_as_proposals:\n bboxes = torch.cat([gt_bboxes, bboxes], dim=0)\n assign_result.add_gt_(gt_labels)\n gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)\n gt_flags = torch.cat([gt_ones, gt_flags])\n\n num_expected_pos = int(self.num * self.pos_fraction)\n pos_inds = self.pos_sampler._sample_pos(\n assign_result, num_expected_pos, bboxes=bboxes, **kwargs)\n # We found that sampled indices have duplicated items occasionally.\n # (may be a bug of PyTorch)\n pos_inds = pos_inds.unique()\n num_sampled_pos = pos_inds.numel()\n num_expected_neg = self.num - num_sampled_pos\n if self.neg_pos_ub >= 0:\n _pos = max(1, num_sampled_pos)\n neg_upper_bound = int(self.neg_pos_ub * _pos)\n if num_expected_neg > neg_upper_bound:\n num_expected_neg = neg_upper_bound\n neg_inds = self.neg_sampler._sample_neg(\n assign_result, num_expected_neg, bboxes=bboxes, **kwargs)\n\n # Hard negative mining: half of neg_inds will be chosen based on the highest scores\n # sampling proposals for bbox head\n # if scores is not None:\n # num_expected_neg_first_half = int(round(num_expected_neg / 2))\n # _, topk_inds = scores.topk(num_expected_neg_first_half)\n # topk_inds = topk_inds + gt_bboxes.shape[0] # account for gt_bboxes in the front, so push back index by number of gt_bboxes\n # neg_inds = torch.cat((topk_inds, neg_inds))\n # neg_inds = torch.unique(neg_inds.cpu(), sorted=False).to(neg_inds.device)\n # # topk_inds begin at the back so flipping is required\n # neg_inds = torch.flip(neg_inds, [0])\n # neg_inds = neg_inds[:num_expected_neg]\n # else:\n # neg_inds = neg_inds.unique()\n neg_inds = neg_inds.unique()\n\n return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,\n assign_result, gt_flags)\n"
] |
[
[
"torch.cat",
"torch.nn.MaxPool3d",
"torch.nn.Conv3d",
"torch.nn.functional.interpolate",
"torch.nn.ReLU"
],
[
"torch.nonzero",
"torch.cat"
],
[
"torch.cat"
]
] |
DefTruth/tensorpack
|
[
"df82c65a29883984a04a75885e0475df19ca4f19"
] |
[
"examples/FasterRCNN/predict.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport argparse\nimport itertools\nimport numpy as np\nimport os\nimport shutil\nimport tensorflow as tf\nimport cv2\nimport six\nimport tqdm\n\nassert six.PY3, \"This example requires Python 3!\"\n\nimport tensorpack.utils.viz as tpviz\nfrom tensorpack.predict import MultiTowerOfflinePredictor, OfflinePredictor, PredictConfig\nfrom tensorpack.tfutils import get_model_loader, get_tf_version_tuple\nfrom tensorpack.utils import fs, logger\n\nfrom dataset import DatasetRegistry, register_coco\nfrom config import config as cfg\nfrom config import finalize_configs\nfrom data import get_eval_dataflow, get_train_dataflow\nfrom eval import DetectionResult, multithread_predict_dataflow, predict_image\nfrom modeling.generalized_rcnn import ResNetC4Model, ResNetFPNModel\nfrom viz import draw_annotation, draw_final_outputs, draw_predictions, draw_proposal_recall\n\n\ndef do_visualize(model, model_path, nr_visualize=100, output_dir='output'):\n \"\"\"\n Visualize some intermediate results (proposals, raw predictions) inside the pipeline.\n \"\"\"\n df = get_train_dataflow()\n df.reset_state()\n\n pred = OfflinePredictor(PredictConfig(\n model=model,\n session_init=get_model_loader(model_path),\n input_names=['image', 'gt_boxes', 'gt_labels'],\n output_names=[\n 'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),\n 'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),\n 'fastrcnn_all_scores',\n 'output/boxes',\n 'output/scores',\n 'output/labels',\n ]))\n\n if os.path.isdir(output_dir):\n shutil.rmtree(output_dir)\n fs.mkdir_p(output_dir)\n with tqdm.tqdm(total=nr_visualize) as pbar:\n for idx, dp in itertools.islice(enumerate(df), nr_visualize):\n img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']\n\n rpn_boxes, rpn_scores, all_scores, \\\n final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)\n\n # draw groundtruth boxes\n gt_viz = draw_annotation(img, gt_boxes, gt_labels)\n # draw best proposals for each groundtruth, to show recall\n proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)\n # draw the scores for the above proposals\n score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])\n\n results = [DetectionResult(*args) for args in\n zip(final_boxes, final_scores, final_labels,\n [None] * len(final_labels))]\n final_viz = draw_final_outputs(img, results)\n\n viz = tpviz.stack_patches([\n gt_viz, proposal_viz,\n score_viz, final_viz], 2, 2)\n\n if os.environ.get('DISPLAY', None):\n tpviz.interactive_imshow(viz)\n cv2.imwrite(\"{}/{:03d}.png\".format(output_dir, idx), viz)\n pbar.update()\n\n\ndef do_evaluate(pred_config, output_file):\n num_tower = max(cfg.TRAIN.NUM_GPUS, 1)\n graph_funcs = MultiTowerOfflinePredictor(\n pred_config, list(range(num_tower))).get_predictors()\n\n for dataset in cfg.DATA.VAL:\n logger.info(\"Evaluating {} ...\".format(dataset))\n dataflows = [\n get_eval_dataflow(dataset, shard=k, num_shards=num_tower)\n for k in range(num_tower)]\n all_results = multithread_predict_dataflow(dataflows, graph_funcs)\n output = output_file + '-' + dataset\n DatasetRegistry.get(dataset).eval_inference_results(all_results, output)\n\n\ndef do_predict(pred_func, input_file):\n img = cv2.imread(input_file, cv2.IMREAD_COLOR)\n results = predict_image(img, pred_func)\n final = draw_final_outputs(img, results)\n viz = np.concatenate((img, final), axis=1)\n cv2.imwrite(\"output.png\", 
viz)\n logger.info(\"Inference output for {} written to output.png\".format(input_file))\n tpviz.interactive_imshow(viz)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--load', help='load a model for evaluation.', required=True)\n parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')\n parser.add_argument('--evaluate', help=\"Run evaluation. \"\n \"This argument is the path to the output json evaluation file\")\n parser.add_argument('--predict', help=\"Run prediction on a given image. \"\n \"This argument is the path to the input image file\", nargs='+')\n parser.add_argument('--benchmark', action='store_true', help=\"Benchmark the speed of the model + postprocessing\")\n parser.add_argument('--config', help=\"A list of KEY=VALUE to overwrite those defined in config.py\",\n nargs='+')\n\n args = parser.parse_args()\n if args.config:\n cfg.update_args(args.config)\n register_coco(cfg.DATA.BASEDIR) # add COCO datasets to the registry\n MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()\n\n if not tf.test.is_gpu_available():\n from tensorflow.python.framework import test_util\n assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \\\n \"Inference requires either GPU support or MKL support!\"\n assert args.load\n finalize_configs(is_training=False)\n\n if args.predict or args.visualize:\n cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS\n\n if args.visualize:\n do_visualize(MODEL, args.load)\n else:\n predcfg = PredictConfig(\n model=MODEL,\n session_init=get_model_loader(args.load),\n input_names=MODEL.get_inference_tensor_names()[0],\n output_names=MODEL.get_inference_tensor_names()[1])\n if args.predict:\n predictor = OfflinePredictor(predcfg)\n for image_file in args.predict:\n do_predict(predictor, image_file)\n elif args.evaluate:\n assert args.evaluate.endswith('.json'), args.evaluate\n do_evaluate(predcfg, args.evaluate)\n elif args.benchmark:\n df = get_eval_dataflow(cfg.DATA.VAL[0])\n df.reset_state()\n predictor = OfflinePredictor(predcfg)\n for img in tqdm.tqdm(df, total=len(df)):\n # This include post-processing time, which is done on CPU and not optimized\n # To exclude it, modify `predict_image`.\n predict_image(img[0], predictor)\n"
] |
[
[
"numpy.concatenate",
"tensorflow.test.is_gpu_available",
"tensorflow.python.framework.test_util.IsMklEnabled"
]
] |
aitoehigie/britecore_flask
|
[
"eef1873dbe6b2cc21f770bc6dec783007ae4493b"
] |
[
"venv/lib/python3.6/site-packages/pylint/test/functional/undefined_variable.py"
] |
[
"# pylint: disable=missing-docstring, multiple-statements, useless-object-inheritance\n# pylint: disable=too-few-public-methods, no-init, no-self-use,bare-except,broad-except, import-error\nfrom __future__ import print_function\n\nDEFINED = 1\n\nif DEFINED != 1:\n if DEFINED in (unknown, DEFINED): # [undefined-variable]\n DEFINED += 1\n\n\ndef in_method(var):\n \"\"\"method doc\"\"\"\n var = nomoreknown # [undefined-variable]\n assert var\n\n\nDEFINED = {DEFINED: __revision__} # [undefined-variable]\n# +1:[undefined-variable]\nDEFINED[__revision__] = OTHER = \"move this is astroid test\"\n\nOTHER += \"$\"\n\n\ndef bad_default(var, default=unknown2): # [undefined-variable]\n \"\"\"function with defaut arg's value set to an unexistant name\"\"\"\n print(var, default)\n print(xxxx) # [undefined-variable]\n augvar += 1 # [undefined-variable]\n del vardel # [undefined-variable]\n\n\nLMBD = lambda x, y=doesnotexist: x + y # [undefined-variable]\nLMBD2 = lambda x, y: x + z # [undefined-variable]\n\ntry:\n POUET # don't catch me\nexcept NameError:\n POUET = \"something\"\n\ntry:\n POUETT # [used-before-assignment]\nexcept Exception: # pylint:disable = broad-except\n POUETT = \"something\"\n\ntry:\n POUETTT # don't catch me\nexcept: # pylint:disable = bare-except\n POUETTT = \"something\"\n\nprint(POUET, POUETT, POUETTT)\n\n\ntry:\n PLOUF # [used-before-assignment]\nexcept ValueError:\n PLOUF = \"something\"\n\nprint(PLOUF)\n\n\ndef if_branch_test(something):\n \"\"\"hop\"\"\"\n if something == 0:\n if xxx == 1: # [used-before-assignment]\n pass\n else:\n print(xxx)\n xxx = 3\n\n\ndef decorator(arg):\n \"\"\"Decorator with one argument.\"\"\"\n return lambda: list(arg)\n\n\n@decorator(arg=[i * 2 for i in range(15)])\ndef func1():\n \"\"\"A function with a decorator that contains a listcomp.\"\"\"\n\n\n@decorator(arg=(i * 2 for i in range(15)))\ndef func2():\n \"\"\"A function with a decorator that contains a genexpr.\"\"\"\n\n\n@decorator(lambda x: x > 0)\ndef main():\n \"\"\"A function with a decorator that contains a lambda.\"\"\"\n\n\n# Test shared scope.\n\n\ndef test_arguments(arg=TestClass): # [used-before-assignment]\n \"\"\" TestClass isn't defined yet. \"\"\"\n return arg\n\n\nclass TestClass(Ancestor): # [used-before-assignment]\n \"\"\" contains another class, which uses an undefined ancestor. \"\"\"\n\n class MissingAncestor(Ancestor1): # [used-before-assignment]\n \"\"\" no op \"\"\"\n\n def test1(self):\n \"\"\" It should trigger here, because the two classes\n have the same scope.\n \"\"\"\n\n class UsingBeforeDefinition(Empty): # [used-before-assignment]\n \"\"\" uses Empty before definition \"\"\"\n\n class Empty(object):\n \"\"\" no op \"\"\"\n\n return UsingBeforeDefinition\n\n def test(self):\n \"\"\" Ancestor isn't defined yet, but we don't care. \"\"\"\n\n class MissingAncestor1(Ancestor):\n \"\"\" no op \"\"\"\n\n return MissingAncestor1\n\n\nclass Self(object):\n \"\"\" Detect when using the same name inside the class scope. \"\"\"\n\n obj = Self # [undefined-variable]\n\n\nclass Self1(object):\n \"\"\" No error should be raised here. 
\"\"\"\n\n def test(self):\n \"\"\" empty \"\"\"\n return Self1\n\n\nclass Ancestor(object):\n \"\"\" No op \"\"\"\n\n\nclass Ancestor1(object):\n \"\"\" No op \"\"\"\n\n\nNANA = BAT # [undefined-variable]\ndel BAT\n\n\nclass KeywordArgument(object):\n \"\"\"Test keyword arguments.\"\"\"\n\n enable = True\n\n def test(self, is_enabled=enable):\n \"\"\"do nothing.\"\"\"\n\n def test1(self, is_enabled=enabled): # [used-before-assignment]\n \"\"\"enabled is undefined at this point, but it is used before assignment.\"\"\"\n\n def test2(self, is_disabled=disabled): # [undefined-variable]\n \"\"\"disabled is undefined\"\"\"\n\n enabled = True\n\n func = lambda arg=arg: arg * arg # [undefined-variable]\n\n arg2 = 0\n func2 = lambda arg2=arg2: arg2 * arg2\n\n\n# Don't emit if the code is protected by NameError\ntry:\n unicode_1\nexcept NameError:\n pass\n\ntry:\n unicode_2 # [undefined-variable]\nexcept Exception:\n pass\n\ntry:\n unicode_3\nexcept:\n pass\n\ntry:\n unicode_4 # [undefined-variable]\nexcept ValueError:\n pass\n\n# See https://bitbucket.org/logilab/pylint/issue/111/\ntry:\n raise IOError(1, \"a\")\nexcept IOError as err:\n print(err)\n\n\ndef test_conditional_comprehension():\n methods = [\"a\", \"b\", \"_c\", \"_d\"]\n my_methods = sum(1 for method in methods if not method.startswith(\"_\"))\n return my_methods\n\n\nclass MyError(object):\n pass\n\n\nclass MyClass(object):\n class MyError(MyError):\n pass\n\n\ndef dec(inp):\n def inner(func):\n print(inp)\n return func\n\n return inner\n\n\n# Make sure lambdas with expressions\n# referencing parent class do not raise undefined variable\n# because at the time of their calling, the class name will\n# be populated\n# See https://github.com/PyCQA/pylint/issues/704\nclass LambdaClass:\n myattr = 1\n mylambda = lambda: LambdaClass.myattr\n\n\n# Need different classes to make sure\n# consumed variables don't get in the way\nclass LambdaClass2:\n myattr = 1\n # Different base_scope scope but still applies\n mylambda2 = lambda: [LambdaClass2.myattr for _ in [1, 2]]\n\n\nclass LambdaClass3:\n myattr = 1\n # Nested default argument in lambda\n # Should not raise error\n mylambda3 = lambda: lambda a=LambdaClass3: a\n\n\nclass LambdaClass4:\n myattr = 1\n mylambda4 = lambda a=LambdaClass4: lambda: a # [undefined-variable]\n\n\n# Make sure the first lambda does not consume the LambdaClass5 class\n# name although the expression is is valid\n# Consuming the class would cause the subsequent undefined-variable to be masked\nclass LambdaClass5:\n myattr = 1\n mylambda = lambda: LambdaClass5.myattr\n mylambda4 = lambda a=LambdaClass5: lambda: a # [undefined-variable]\n\n\ndef nonlocal_in_ifexp():\n import matplotlib.pyplot as plt\n\n def onclick(event):\n if event:\n nonlocal i\n i += 1\n print(i)\n\n i = 0\n fig = plt.figure()\n fig.canvas.mpl_connect(\"button_press_event\", onclick)\n plt.show(block=True)\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
QuadCtrl/quad-ctrl
|
[
"ed1a6b7ee747a7ab045f9591b4747c6a2fe0a2f4"
] |
[
"gym_pybullet_drones/envs/BaseAviary.py"
] |
[
"import os\nfrom sys import platform\nimport time\nimport collections\nfrom datetime import datetime\nfrom enum import Enum\nimport xml.etree.ElementTree as etxml\nfrom PIL import Image\n# import pkgutil\n# egl = pkgutil.get_loader('eglRenderer')\nimport numpy as np\nimport pybullet as p\nimport pybullet_data\nimport gym\n\nclass DroneModel(Enum):\n \"\"\"Drone models enumeration class.\"\"\"\n\n CF2X = \"cf2x\" # Bitcraze Craziflie 2.0 in the X configuration\n CF2P = \"cf2p\" # Bitcraze Craziflie 2.0 in the + configuration\n HB = \"hb\" # Generic quadrotor (with AscTec Hummingbird inertial properties)\n\n################################################################################\n\nclass Physics(Enum):\n \"\"\"Physics implementations enumeration class.\"\"\"\n\n PYB = \"pyb\" # Base PyBullet physics update\n DYN = \"dyn\" # Update with an explicit model of the dynamics\n PYB_GND = \"pyb_gnd\" # PyBullet physics update with ground effect\n PYB_DRAG = \"pyb_drag\" # PyBullet physics update with drag\n PYB_DW = \"pyb_dw\" # PyBullet physics update with downwash\n PYB_GND_DRAG_DW = \"pyb_gnd_drag_dw\" # PyBullet physics update with ground effect, drag, and downwash\n\n################################################################################\n\nclass ImageType(Enum):\n \"\"\"Camera capture image type enumeration class.\"\"\"\n\n RGB = 0 # Red, green, blue (and alpha)\n DEP = 1 # Depth\n SEG = 2 # Segmentation by object id\n BW = 3 # Black and white\n\n################################################################################\n\nclass BaseAviary(gym.Env):\n \"\"\"Base class for \"drone aviary\" Gym environments.\"\"\"\n\n metadata = {'render.modes': ['human']}\n \n ################################################################################\n\n def __init__(self,\n drone_model: DroneModel=DroneModel.CF2X,\n num_drones: int=1,\n neighbourhood_radius: float=np.inf,\n initial_xyzs=None,\n initial_rpys=None,\n physics: Physics=Physics.PYB,\n freq: int=240,\n aggregate_phy_steps: int=1,\n gui=False,\n record=False,\n obstacles=False,\n user_debug_gui=True,\n vision_attributes=False,\n dynamics_attributes=False\n ):\n \"\"\"Initialization of a generic aviary environment.\n\n Parameters\n ----------\n drone_model : DroneModel, optional\n The desired drone type (detailed in an .urdf file in folder `assets`).\n num_drones : int, optional\n The desired number of drones in the aviary.\n neighbourhood_radius : float, optional\n Radius used to compute the drones' adjacency matrix, in meters.\n initial_xyzs: ndarray | None, optional\n (NUM_DRONES, 3)-shaped array containing the initial XYZ position of the drones.\n initial_rpys: ndarray | None, optional\n (NUM_DRONES, 3)-shaped array containing the initial orientations of the drones (in radians).\n physics : Physics, optional\n The desired implementation of PyBullet physics/custom dynamics.\n freq : int, optional\n The frequency (Hz) at which the physics engine steps.\n aggregate_phy_steps : int, optional\n The number of physics steps within one call to `BaseAviary.step()`.\n gui : bool, optional\n Whether to use PyBullet's GUI.\n record : bool, optional\n Whether to save a video of the simulation in folder `files/videos/`.\n obstacles : bool, optional\n Whether to add obstacles to the simulation.\n user_debug_gui : bool, optional\n Whether to draw the drones' axes and the GUI RPMs sliders.\n vision_attributes : bool, optional\n Whether to allocate the attributes needed by vision-based aviary subclasses.\n dynamics_attributes : bool, 
optional\n Whether to allocate the attributes needed by subclasses accepting thrust and torques inputs.\n\n \"\"\"\n #### Constants #############################################\n self.G = 9.8\n self.RAD2DEG = 180/np.pi\n self.DEG2RAD = np.pi/180\n self.SIM_FREQ = freq\n self.TIMESTEP = 1./self.SIM_FREQ\n self.AGGR_PHY_STEPS = aggregate_phy_steps\n #### Parameters ############################################\n self.NUM_DRONES = num_drones\n self.NEIGHBOURHOOD_RADIUS = neighbourhood_radius\n #### Options ###############################################\n self.DRONE_MODEL = drone_model\n self.GUI = gui\n self.RECORD = record\n self.PHYSICS = physics\n self.OBSTACLES = obstacles\n self.USER_DEBUG = user_debug_gui\n self.URDF = self.DRONE_MODEL.value + \".urdf\"\n #### Load the drone properties from the .urdf file #########\n self.M, \\\n self.L, \\\n self.THRUST2WEIGHT_RATIO, \\\n self.J, \\\n self.J_INV, \\\n self.KF, \\\n self.KM, \\\n self.COLLISION_H,\\\n self.COLLISION_R, \\\n self.COLLISION_Z_OFFSET, \\\n self.MAX_SPEED_KMH, \\\n self.GND_EFF_COEFF, \\\n self.PROP_RADIUS, \\\n self.DRAG_COEFF, \\\n self.DW_COEFF_1, \\\n self.DW_COEFF_2, \\\n self.DW_COEFF_3 = self._parseURDFParameters()\n print(\"[INFO] BaseAviary.__init__() loaded parameters from the drone's .urdf:\\n[INFO] m {:f}, L {:f},\\n[INFO] ixx {:f}, iyy {:f}, izz {:f},\\n[INFO] kf {:f}, km {:f},\\n[INFO] t2w {:f}, max_speed_kmh {:f},\\n[INFO] gnd_eff_coeff {:f}, prop_radius {:f},\\n[INFO] drag_xy_coeff {:f}, drag_z_coeff {:f},\\n[INFO] dw_coeff_1 {:f}, dw_coeff_2 {:f}, dw_coeff_3 {:f}\".format(\n self.M, self.L, self.J[0,0], self.J[1,1], self.J[2,2], self.KF, self.KM, self.THRUST2WEIGHT_RATIO, self.MAX_SPEED_KMH, self.GND_EFF_COEFF, self.PROP_RADIUS, self.DRAG_COEFF[0], self.DRAG_COEFF[2], self.DW_COEFF_1, self.DW_COEFF_2, self.DW_COEFF_3))\n #### Compute constants #####################################\n self.GRAVITY = self.G*self.M\n self.HOVER_RPM = np.sqrt(self.GRAVITY / (4*self.KF))\n self.MAX_RPM = np.sqrt((self.THRUST2WEIGHT_RATIO*self.GRAVITY) / (4*self.KF))\n self.MAX_THRUST = (4*self.KF*self.MAX_RPM**2)\n if self.DRONE_MODEL == DroneModel.CF2X:\n self.MAX_XY_TORQUE = (2*self.L*self.KF*self.MAX_RPM**2)/np.sqrt(2)\n elif self.DRONE_MODEL in [DroneModel.CF2P, DroneModel.HB]:\n self.MAX_XY_TORQUE = (self.L*self.KF*self.MAX_RPM**2)\n self.MAX_Z_TORQUE = (2*self.KM*self.MAX_RPM**2)\n self.GND_EFF_H_CLIP = 0.25 * self.PROP_RADIUS * np.sqrt((15 * self.MAX_RPM**2 * self.KF * self.GND_EFF_COEFF) / self.MAX_THRUST)\n #### Create attributes for vision tasks ####################\n self.VISION_ATTR = vision_attributes\n if self.VISION_ATTR:\n self.IMG_RES = np.array([64, 48])\n self.IMG_FRAME_PER_SEC = 24\n self.IMG_CAPTURE_FREQ = int(self.SIM_FREQ/self.IMG_FRAME_PER_SEC)\n self.rgb = np.zeros(((self.NUM_DRONES, self.IMG_RES[1], self.IMG_RES[0], 4)))\n self.dep = np.ones(((self.NUM_DRONES, self.IMG_RES[1], self.IMG_RES[0])))\n self.seg = np.zeros(((self.NUM_DRONES, self.IMG_RES[1], self.IMG_RES[0])))\n if self.IMG_CAPTURE_FREQ%self.AGGR_PHY_STEPS != 0:\n print(\"[ERROR] in BaseAviary.__init__(), aggregate_phy_steps incompatible with the desired video capture frame rate ({:f}Hz)\".format(self.IMG_FRAME_PER_SEC))\n exit()\n if self.RECORD:\n self.ONBOARD_IMG_PATH = os.path.dirname(os.path.abspath(__file__))+\"/../../files/videos/onboard-\"+datetime.now().strftime(\"%m.%d.%Y_%H.%M.%S\")+\"/\"\n os.makedirs(os.path.dirname(self.ONBOARD_IMG_PATH), exist_ok=True)\n #### Create attributes for dynamics control inputs #########\n 
self.DYNAMICS_ATTR = dynamics_attributes\n if self.DYNAMICS_ATTR:\n if self.DRONE_MODEL == DroneModel.CF2X:\n self.A = np.array([ [1, 1, 1, 1], [1/np.sqrt(2), 1/np.sqrt(2), -1/np.sqrt(2), -1/np.sqrt(2)], [-1/np.sqrt(2), 1/np.sqrt(2), 1/np.sqrt(2), -1/np.sqrt(2)], [-1, 1, -1, 1] ])\n elif self.DRONE_MODEL in [DroneModel.CF2P, DroneModel.HB]:\n self.A = np.array([ [1, 1, 1, 1], [0, 1, 0, -1], [-1, 0, 1, 0], [-1, 1, -1, 1] ])\n self.INV_A = np.linalg.inv(self.A)\n self.B_COEFF = np.array([1/self.KF, 1/(self.KF*self.L), 1/(self.KF*self.L), 1/self.KM])\n #### Connect to PyBullet ###################################\n if self.GUI:\n #### With debug GUI ########################################\n self.CLIENT = p.connect(p.GUI) # p.connect(p.GUI, options=\"--opengl2\")\n for i in [p.COV_ENABLE_RGB_BUFFER_PREVIEW, p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW]:\n p.configureDebugVisualizer(i, 0, physicsClientId=self.CLIENT)\n p.resetDebugVisualizerCamera(cameraDistance=1.45,\n cameraYaw=-30,\n cameraPitch=-30,\n cameraTargetPosition=[0, 0, 0.5],\n physicsClientId=self.CLIENT\n )\n ret = p.getDebugVisualizerCamera(physicsClientId=self.CLIENT)\n print(\"viewMatrix\", ret[2])\n print(\"projectionMatrix\", ret[3])\n if self.USER_DEBUG:\n #### Add input sliders to the GUI ##########################\n self.SLIDERS = -1*np.ones(4)\n for i in range(4):\n self.SLIDERS[i] = p.addUserDebugParameter(\"Propeller \"+str(i)+\" RPM\", 0, self.MAX_RPM, self.HOVER_RPM, physicsClientId=self.CLIENT)\n self.INPUT_SWITCH = p.addUserDebugParameter(\"Use GUI RPM\", 9999, -1, 0, physicsClientId=self.CLIENT)\n else:\n #### Without debug GUI #####################################\n self.CLIENT = p.connect(p.DIRECT)\n #### Uncomment the following line to use EGL Render Plugin #\n #### Instead of TinyRender (CPU-based) in PYB's Direct mode\n # if platform == \"linux\": p.setAdditionalSearchPath(pybullet_data.getDataPath()); plugin = p.loadPlugin(egl.get_filename(), \"_eglRendererPlugin\"); print(\"plugin=\", plugin)\n if self.RECORD:\n #### Set the camera parameters to save frames in DIRECT mode\n self.VID_WIDTH=int(640)\n self.VID_HEIGHT=int(480)\n self.FRAME_PER_SEC = 24\n self.CAPTURE_FREQ = int(self.SIM_FREQ/self.FRAME_PER_SEC)\n self.CAM_VIEW = p.computeViewMatrixFromYawPitchRoll(distance=2.8,\n yaw=-30,\n pitch=-30,\n roll=0,\n cameraTargetPosition=[0, 0, 1],\n upAxisIndex=2,\n physicsClientId=self.CLIENT\n )\n self.CAM_PRO = p.computeProjectionMatrixFOV(fov=60.0,\n aspect=self.VID_WIDTH/self.VID_HEIGHT,\n nearVal=0.1,\n farVal=1000.0\n )\n #### Set initial poses #####################################\n if initial_xyzs is None:\n self.INIT_XYZS = np.vstack([np.array([x*4*self.L for x in range(self.NUM_DRONES)]), \\\n np.array([y*4*self.L for y in range(self.NUM_DRONES)]), \\\n np.ones(self.NUM_DRONES) * (self.COLLISION_H/2-self.COLLISION_Z_OFFSET+.1)]).transpose().reshape(self.NUM_DRONES, 3)\n elif np.array(initial_xyzs).shape == (self.NUM_DRONES, 3):\n self.INIT_XYZS = initial_xyzs\n else:\n print(\"[ERROR] invalid initial_xyzs in BaseAviary.__init__(), try initial_xyzs.reshape(NUM_DRONES,3)\")\n if initial_rpys is None:\n self.INIT_RPYS = np.zeros((self.NUM_DRONES, 3))\n elif np.array(initial_rpys).shape == (self.NUM_DRONES, 3):\n self.INIT_RPYS = initial_rpys\n else:\n print(\"[ERROR] invalid initial_rpys in BaseAviary.__init__(), try initial_rpys.reshape(NUM_DRONES,3)\")\n #### Create action and observation spaces ##################\n self.action_space = self._actionSpace()\n 
self.observation_space = self._observationSpace()\n #### Housekeeping ##########################################\n self._housekeeping()\n #### Update and store the drones kinematic information #####\n self._updateAndStoreKinematicInformation()\n #### Start video recording #################################\n self._startVideoRecording()\n \n ################################################################################\n\n def reset(self):\n \"\"\"Resets the environment.\n\n Returns\n -------\n ndarray | dict[..]\n The initial observation, check the specific implementation of `_computeObs()`\n in each subclass for its format.\n\n \"\"\"\n p.resetSimulation(physicsClientId=self.CLIENT)\n #### Housekeeping ##########################################\n self._housekeeping()\n #### Update and store the drones kinematic information #####\n self._updateAndStoreKinematicInformation()\n #### Start video recording #################################\n self._startVideoRecording()\n #### Return the initial observation ########################\n return self._computeObs()\n \n ################################################################################\n\n def step(self,\n action\n ):\n \"\"\"Advances the environment by one simulation step.\n\n Parameters\n ----------\n action : ndarray | dict[..]\n The input action for one or more drones, translated into RPMs by\n the specific implementation of `_preprocessAction()` in each subclass.\n\n Returns\n -------\n ndarray | dict[..]\n The step's observation, check the specific implementation of `_computeObs()`\n in each subclass for its format.\n float | dict[..]\n The step's reward value(s), check the specific implementation of `_computeReward()`\n in each subclass for its format.\n bool | dict[..]\n Whether the current epoisode is over, check the specific implementation of `_computeDone()`\n in each subclass for its format.\n dict[..]\n Additional information as a dictionary, check the specific implementation of `_computeInfo()`\n in each subclass for its format.\n\n \"\"\"\n #### Save PNG video frames if RECORD=True and GUI=False ####\n if self.RECORD and not self.GUI and self.step_counter%self.CAPTURE_FREQ == 0:\n [w, h, rgb, dep, seg] = p.getCameraImage(width=self.VID_WIDTH,\n height=self.VID_HEIGHT,\n shadow=1,\n viewMatrix=self.CAM_VIEW,\n projectionMatrix=self.CAM_PRO,\n renderer=p.ER_TINY_RENDERER,\n flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX,\n physicsClientId=self.CLIENT\n )\n (Image.fromarray(np.reshape(rgb, (h, w, 4)), 'RGBA')).save(self.IMG_PATH+\"frame_\"+str(self.FRAME_NUM)+\".png\")\n #### Save the depth or segmentation view instead #######\n # dep = ((dep-np.min(dep)) * 255 / (np.max(dep)-np.min(dep))).astype('uint8')\n # (Image.fromarray(np.reshape(dep, (h, w)))).save(self.IMG_PATH+\"frame_\"+str(self.FRAME_NUM)+\".png\")\n # seg = ((seg-np.min(seg)) * 255 / (np.max(seg)-np.min(seg))).astype('uint8')\n # (Image.fromarray(np.reshape(seg, (h, w)))).save(self.IMG_PATH+\"frame_\"+str(self.FRAME_NUM)+\".png\")\n self.FRAME_NUM += 1\n #### Read the GUI's input parameters #######################\n if self.GUI and self.USER_DEBUG:\n current_input_switch = p.readUserDebugParameter(self.INPUT_SWITCH, physicsClientId=self.CLIENT)\n if current_input_switch > self.last_input_switch:\n self.last_input_switch = current_input_switch\n self.USE_GUI_RPM = True if self.USE_GUI_RPM == False else False\n if self.USE_GUI_RPM:\n for i in range(4):\n self.gui_input[i] = p.readUserDebugParameter(int(self.SLIDERS[i]), physicsClientId=self.CLIENT)\n 
clipped_action = np.tile(self.gui_input, (self.NUM_DRONES, 1))\n if self.step_counter%(self.SIM_FREQ/2) == 0:\n self.GUI_INPUT_TEXT = [p.addUserDebugText(\"Using GUI RPM\",\n textPosition=[0, 0, 0],\n textColorRGB=[1, 0, 0],\n lifeTime=1,\n textSize=2,\n parentObjectUniqueId=self.DRONE_IDS[i],\n parentLinkIndex=-1,\n replaceItemUniqueId=int(self.GUI_INPUT_TEXT[i]),\n physicsClientId=self.CLIENT\n ) for i in range(self.NUM_DRONES)]\n #### Save, preprocess, and clip the action to the max. RPM #\n else:\n self._saveLastAction(action)\n clipped_action = np.reshape(self._preprocessAction(action), (self.NUM_DRONES, 4))\n #### Repeat for as many as the aggregate physics steps #####\n for _ in range(self.AGGR_PHY_STEPS):\n #### Update and store the drones kinematic info for certain\n #### Between aggregate steps for certain types of update ###\n if self.AGGR_PHY_STEPS > 1 and self.PHYSICS in [Physics.DYN, Physics.PYB_GND, Physics.PYB_DRAG, Physics.PYB_DW, Physics.PYB_GND_DRAG_DW]:\n self._updateAndStoreKinematicInformation()\n #### Step the simulation using the desired physics update ##\n for i in range (self.NUM_DRONES):\n if self.PHYSICS == Physics.PYB:\n self._physics(clipped_action[i, :], i)\n elif self.PHYSICS == Physics.DYN:\n self._dynamics(clipped_action[i, :], i)\n elif self.PHYSICS == Physics.PYB_GND:\n self._physics(clipped_action[i, :], i)\n self._groundEffect(clipped_action[i, :], i)\n elif self.PHYSICS == Physics.PYB_DRAG:\n self._physics(clipped_action[i, :], i)\n self._drag(self.last_clipped_action[i, :], i)\n elif self.PHYSICS == Physics.PYB_DW:\n self._physics(clipped_action[i, :], i)\n self._downwash(i)\n elif self.PHYSICS == Physics.PYB_GND_DRAG_DW:\n self._physics(clipped_action[i, :], i)\n self._groundEffect(clipped_action[i, :], i)\n self._drag(self.last_clipped_action[i, :], i)\n self._downwash(i)\n #### PyBullet computes the new state, unless Physics.DYN ###\n if self.PHYSICS != Physics.DYN:\n p.stepSimulation(physicsClientId=self.CLIENT)\n #### Save the last applied action (e.g. 
to compute drag) ###\n self.last_clipped_action = clipped_action\n #### Update and store the drones kinematic information #####\n self._updateAndStoreKinematicInformation()\n #### Prepare the return values #############################\n obs = self._computeObs()\n reward = self._computeReward()\n done = self._computeDone()\n info = self._computeInfo()\n #### Advance the step counter ##############################\n self.step_counter = self.step_counter + (1 * self.AGGR_PHY_STEPS)\n return obs, reward, done, info\n \n ################################################################################\n \n def render(self,\n mode='human',\n close=False\n ):\n \"\"\"Prints a textual output of the environment.\n\n Parameters\n ----------\n mode : str, optional\n Unused.\n close : bool, optional\n Unused.\n\n \"\"\"\n if self.first_render_call and not self.GUI:\n print(\"[WARNING] BaseAviary.render() is implemented as text-only, re-initialize the environment using Aviary(gui=True) to use PyBullet's graphical interface\")\n self.first_render_call = False\n print(\"\\n[INFO] BaseAviary.render() ——— it {:04d}\".format(self.step_counter),\n \"——— wall-clock time {:.1f}s,\".format(time.time()-self.RESET_TIME),\n \"simulation time {:.1f}s@{:d}Hz ({:.2f}x)\".format(self.step_counter*self.TIMESTEP, self.SIM_FREQ, (self.step_counter*self.TIMESTEP)/(time.time()-self.RESET_TIME)))\n for i in range (self.NUM_DRONES):\n print(\"[INFO] BaseAviary.render() ——— drone {:d}\".format(i),\n \"——— x {:+06.2f}, y {:+06.2f}, z {:+06.2f}\".format(self.pos[i, 0], self.pos[i, 1], self.pos[i, 2]),\n \"——— velocity {:+06.2f}, {:+06.2f}, {:+06.2f}\".format(self.vel[i, 0], self.vel[i, 1], self.vel[i, 2]),\n \"——— roll {:+06.2f}, pitch {:+06.2f}, yaw {:+06.2f}\".format(self.rpy[i, 0]*self.RAD2DEG, self.rpy[i, 1]*self.RAD2DEG, self.rpy[i, 2]*self.RAD2DEG),\n \"——— angular velocity {:+06.4f}, {:+06.4f}, {:+06.4f} ——— \".format(self.ang_v[i, 0], self.ang_v[i, 1], self.ang_v[i, 2]))\n \n ################################################################################\n\n def close(self):\n \"\"\"Terminates the environment.\n \"\"\"\n if self.RECORD and self.GUI:\n p.stopStateLogging(self.VIDEO_ID, physicsClientId=self.CLIENT)\n p.disconnect(physicsClientId=self.CLIENT)\n \n ################################################################################\n\n def getPyBulletClient(self):\n \"\"\"Returns the PyBullet Client Id.\n\n Returns\n -------\n int:\n The PyBullet Client Id.\n\n \"\"\"\n return self.CLIENT\n \n ################################################################################\n\n def getDroneIds(self):\n \"\"\"Return the Drone Ids.\n\n Returns\n -------\n ndarray:\n (NUM_DRONES,)-shaped array of ints containing the drones' ids.\n\n \"\"\"\n return self.DRONE_IDS\n \n ################################################################################\n\n def _housekeeping(self):\n \"\"\"Housekeeping function.\n\n Allocation and zero-ing of the variables and PyBullet's parameters/objects\n in the `reset()` function.\n\n \"\"\"\n #### Initialize/reset counters and zero-valued variables ###\n self.RESET_TIME = time.time()\n self.step_counter = 0\n self.first_render_call = True\n self.X_AX = -1*np.ones(self.NUM_DRONES)\n self.Y_AX = -1*np.ones(self.NUM_DRONES)\n self.Z_AX = -1*np.ones(self.NUM_DRONES)\n self.GUI_INPUT_TEXT = -1*np.ones(self.NUM_DRONES)\n self.USE_GUI_RPM=False\n self.last_input_switch = 0\n self.last_action = -1*np.ones((self.NUM_DRONES, 4))\n self.last_clipped_action = np.zeros((self.NUM_DRONES, 4))\n 
self.gui_input = np.zeros(4)\n #### Initialize the drones kinemaatic information ##########\n self.pos = np.zeros((self.NUM_DRONES, 3))\n self.quat = np.zeros((self.NUM_DRONES, 4))\n self.rpy = np.zeros((self.NUM_DRONES, 3))\n self.vel = np.zeros((self.NUM_DRONES, 3))\n self.ang_v = np.zeros((self.NUM_DRONES, 3))\n if self.PHYSICS == Physics.DYN:\n self.rpy_rates = np.zeros((self.NUM_DRONES, 3))\n #### Set PyBullet's parameters #############################\n p.setGravity(0, 0, -self.G, physicsClientId=self.CLIENT)\n p.setRealTimeSimulation(0, physicsClientId=self.CLIENT)\n p.setTimeStep(self.TIMESTEP, physicsClientId=self.CLIENT)\n p.setAdditionalSearchPath(pybullet_data.getDataPath(), physicsClientId=self.CLIENT)\n #### Load ground plane, drone and obstacles models #########\n self.PLANE_ID = p.loadURDF(\"plane.urdf\", physicsClientId=self.CLIENT)\n self.DRONE_IDS = np.array([p.loadURDF(os.path.dirname(os.path.abspath(__file__))+\"/../assets/\"+self.URDF,\n self.INIT_XYZS[i,:],\n p.getQuaternionFromEuler(self.INIT_RPYS[i,:]),\n flags = p.URDF_USE_INERTIA_FROM_FILE,\n physicsClientId=self.CLIENT\n ) for i in range(self.NUM_DRONES)])\n for i in range(self.NUM_DRONES):\n #### Show the frame of reference of the drone, note that ###\n #### It severly slows down the GUI #########################\n if self.GUI and self.USER_DEBUG:\n self._showDroneLocalAxes(i)\n #### Disable collisions between drones' and the ground plane\n #### E.g., to start a drone at [0,0,0] #####################\n # p.setCollisionFilterPair(bodyUniqueIdA=self.PLANE_ID, bodyUniqueIdB=self.DRONE_IDS[i], linkIndexA=-1, linkIndexB=-1, enableCollision=0, physicsClientId=self.CLIENT)\n if self.OBSTACLES:\n self._addObstacles()\n \n ################################################################################\n\n def _updateAndStoreKinematicInformation(self):\n \"\"\"Updates and stores the drones kinemaatic information.\n\n This method is meant to limit the number of calls to PyBullet in each step\n and improve performance (at the expense of memory).\n\n \"\"\"\n for i in range (self.NUM_DRONES):\n self.pos[i], self.quat[i] = p.getBasePositionAndOrientation(self.DRONE_IDS[i], physicsClientId=self.CLIENT)\n self.rpy[i] = p.getEulerFromQuaternion(self.quat[i])\n self.vel[i], self.ang_v[i] = p.getBaseVelocity(self.DRONE_IDS[i], physicsClientId=self.CLIENT)\n \n ################################################################################\n\n def _startVideoRecording(self):\n \"\"\"Starts the recording of a video output.\n\n The format of the video output is .mp4, if GUI is True, or .png, otherwise.\n The video is saved under folder `files/videos`.\n\n \"\"\"\n if self.RECORD and self.GUI:\n self.VIDEO_ID = p.startStateLogging(loggingType=p.STATE_LOGGING_VIDEO_MP4,\n fileName=os.path.dirname(os.path.abspath(__file__))+\"/../../files/videos/video-\"+datetime.now().strftime(\"%m.%d.%Y_%H.%M.%S\")+\".mp4\",\n physicsClientId=self.CLIENT\n )\n if self.RECORD and not self.GUI:\n self.FRAME_NUM = 0\n self.IMG_PATH = os.path.dirname(os.path.abspath(__file__))+\"/../../files/videos/video-\"+datetime.now().strftime(\"%m.%d.%Y_%H.%M.%S\")+\"/\"\n os.makedirs(os.path.dirname(self.IMG_PATH), exist_ok=True)\n \n ################################################################################\n\n def _getDroneStateVector(self,\n nth_drone\n ):\n \"\"\"Returns the state vector of the n-th drone.\n\n Parameters\n ----------\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n Returns\n -------\n 
ndarray \n (20,)-shaped array of floats containing the state vector of the n-th drone.\n Check the only line in this method and `_updateAndStoreKinematicInformation()`\n to understand its format.\n\n \"\"\"\n state = np.hstack([self.pos[nth_drone, :], self.quat[nth_drone, :], self.rpy[nth_drone, :],\n self.vel[nth_drone, :], self.ang_v[nth_drone, :], self.last_clipped_action[nth_drone, :]])\n return state.reshape(20,)\n\n ################################################################################\n\n def _getDroneImages(self,\n nth_drone,\n segmentation: bool=True\n ):\n \"\"\"Returns camera captures from the n-th drone POV.\n\n Parameters\n ----------\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n segmentation : bool, optional\n Whehter to compute the compute the segmentation mask.\n It affects performance.\n\n Returns\n -------\n ndarray \n (h, w, 4)-shaped array of uint8's containing the RBG(A) image captured from the n-th drone's POV.\n ndarray\n (h, w)-shaped array of uint8's containing the depth image captured from the n-th drone's POV.\n ndarray\n (h, w)-shaped array of uint8's containing the segmentation image captured from the n-th drone's POV.\n\n \"\"\"\n if self.IMG_RES is None:\n print(\"[ERROR] in BaseAviary._getDroneImages(), remember to set self.IMG_RES to np.array([width, height])\")\n exit()\n rot_mat = np.array(p.getMatrixFromQuaternion(self.quat[nth_drone, :])).reshape(3, 3)\n #### Set target point, camera view and projection matrices #\n target = np.dot(rot_mat,np.array([1000, 0, 0])) + np.array(self.pos[nth_drone, :])\n DRONE_CAM_VIEW = p.computeViewMatrix(cameraEyePosition=self.pos[nth_drone, :]+np.array([0, 0, self.L]),\n cameraTargetPosition=target,\n cameraUpVector=[0, 0, 1],\n physicsClientId=self.CLIENT\n )\n DRONE_CAM_PRO = p.computeProjectionMatrixFOV(fov=60.0,\n aspect=1.0,\n nearVal=self.L,\n farVal=1000.0\n )\n SEG_FLAG = p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX if segmentation else p.ER_NO_SEGMENTATION_MASK\n [w, h, rgb, dep, seg] = p.getCameraImage(width=self.IMG_RES[0],\n height=self.IMG_RES[1],\n shadow=1,\n viewMatrix=DRONE_CAM_VIEW,\n projectionMatrix=DRONE_CAM_PRO,\n flags=SEG_FLAG,\n physicsClientId=self.CLIENT\n )\n rgb = np.reshape(rgb, (h, w, 4))\n dep = np.reshape(dep, (h, w))\n seg = np.reshape(seg, (h, w))\n return rgb, dep, seg\n\n ################################################################################\n\n def _exportImage(self,\n img_type: ImageType,\n img_input,\n path: str,\n frame_num: int=0\n ):\n \"\"\"Returns camera captures from the n-th drone POV.\n\n Parameters\n ----------\n img_type : ImageType\n The image type: RGB(A), depth, segmentation, or B&W (from RGB).\n img_input : ndarray\n (h, w, 4)-shaped array of uint8's for RBG(A) or B&W images.\n (h, w)-shaped array of uint8's for depth or segmentation images.\n path : str\n Path where to save the output as PNG.\n fram_num: int, optional\n Frame number to append to the PNG's filename.\n\n \"\"\"\n if img_type == ImageType.RGB:\n (Image.fromarray(img_input.astype('uint8'), 'RGBA')).save(path+\"frame_\"+str(frame_num)+\".png\")\n elif img_type == ImageType.DEP:\n temp = ((img_input-np.min(img_input)) * 255 / (np.max(img_input)-np.min(img_input))).astype('uint8')\n elif img_type == ImageType.SEG:\n temp = ((img_input-np.min(img_input)) * 255 / (np.max(img_input)-np.min(img_input))).astype('uint8')\n elif img_type == ImageType.BW:\n temp = (np.sum(img_input[:, :, 0:2], axis=2) / 3).astype('uint8')\n else:\n print(\"[ERROR] 
in BaseAviary._exportImage(), unknown ImageType\")\n exit()\n if img_type != ImageType.RGB:\n (Image.fromarray(temp)).save(path+\"frame_\"+str(frame_num)+\".png\")\n\n ################################################################################\n\n def _getAdjacencyMatrix(self):\n \"\"\"Computes the adjacency matrix of a multi-drone system.\n\n Attribute NEIGHBOURHOOD_RADIUS is used to determine neighboring relationships.\n\n Returns\n -------\n ndarray\n (NUM_DRONES, NUM_DRONES)-shaped array of 0's and 1's representing the adjacency matrix \n of the system: adj_mat[i,j] == 1 if (i, j) are neighbors; == 0 otherwise.\n\n \"\"\"\n adjacency_mat = np.identity(self.NUM_DRONES)\n for i in range(self.NUM_DRONES-1):\n for j in range(self.NUM_DRONES-i-1):\n if np.linalg.norm(self.pos[i, :]-self.pos[j+i+1, :]) < self.NEIGHBOURHOOD_RADIUS:\n adjacency_mat[i, j+i+1] = adjacency_mat[j+i+1, i] = 1\n return adjacency_mat\n \n ################################################################################\n \n def _physics(self,\n rpm,\n nth_drone\n ):\n \"\"\"Base PyBullet physics implementation.\n\n Parameters\n ----------\n rpm : ndarray\n (4)-shaped array of ints containing the RPMs values of the 4 motors.\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n forces = np.array(rpm**2)*self.KF\n torques = np.array(rpm**2)*self.KM\n z_torque = (-torques[0] + torques[1] - torques[2] + torques[3])\n for i in range(4):\n p.applyExternalForce(self.DRONE_IDS[nth_drone],\n i,\n forceObj=[0, 0, forces[i]],\n posObj=[0, 0, 0],\n flags=p.LINK_FRAME,\n physicsClientId=self.CLIENT\n )\n p.applyExternalTorque(self.DRONE_IDS[nth_drone],\n 4,\n torqueObj=[0, 0, z_torque],\n flags=p.LINK_FRAME,\n physicsClientId=self.CLIENT\n )\n\n ################################################################################\n\n def _groundEffect(self,\n rpm,\n nth_drone\n ):\n \"\"\"PyBullet implementation of a ground effect model.\n\n Inspired by the analytical model used for comparison in (Shi et al., 2019).\n\n Parameters\n ----------\n rpm : ndarray\n (4)-shaped array of ints containing the RPMs values of the 4 motors.\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n #### Kin. 
info of all links (propellers and center of mass)\n link_states = np.array(p.getLinkStates(self.DRONE_IDS[nth_drone],\n linkIndices=[0, 1, 2, 3, 4],\n computeLinkVelocity=1,\n computeForwardKinematics=1,\n physicsClientId=self.CLIENT\n ))\n #### Simple, per-propeller ground effects ##################\n prop_heights = np.array([link_states[0, 0][2], link_states[1, 0][2], link_states[2, 0][2], link_states[3, 0][2]])\n prop_heights = np.clip(prop_heights, self.GND_EFF_H_CLIP, np.inf)\n gnd_effects = np.array(rpm**2) * self.KF * self.GND_EFF_COEFF * (self.PROP_RADIUS/(4 * prop_heights))**2\n if np.abs(self.rpy[nth_drone,0]) < np.pi/2 and np.abs(self.rpy[nth_drone,1]) < np.pi/2:\n for i in range(4):\n p.applyExternalForce(self.DRONE_IDS[nth_drone],\n i,\n forceObj=[0, 0, gnd_effects[i]],\n posObj=[0, 0, 0],\n flags=p.LINK_FRAME,\n physicsClientId=self.CLIENT\n )\n\n ################################################################################\n\n def _drag(self,\n rpm,\n nth_drone\n ):\n \"\"\"PyBullet implementation of a drag model.\n\n Based on the the system identification in (Forster, 2015).\n\n Parameters\n ----------\n rpm : ndarray\n (4)-shaped array of ints containing the RPMs values of the 4 motors.\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n #### Rotation matrix of the base ###########################\n base_rot = np.array(p.getMatrixFromQuaternion(self.quat[nth_drone, :])).reshape(3, 3)\n #### Simple draft model applied to the base/center of mass #\n drag_factors = -1 * self.DRAG_COEFF * np.sum(np.array(2*np.pi*rpm/60))\n drag = np.dot(base_rot, drag_factors*np.array(self.vel[nth_drone, :]))\n p.applyExternalForce(self.DRONE_IDS[nth_drone],\n 4,\n forceObj=drag,\n posObj=[0, 0, 0],\n flags=p.LINK_FRAME,\n physicsClientId=self.CLIENT\n )\n \n ################################################################################\n\n def _downwash(self,\n nth_drone\n ):\n \"\"\"PyBullet implementation of a ground effect model.\n\n Based on experiments conducted at the Dynamic Systems Lab by SiQi Zhou.\n\n Parameters\n ----------\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n for i in range(self.NUM_DRONES):\n delta_z = self.pos[i, 2] - self.pos[nth_drone, 2]\n delta_xy = np.linalg.norm(np.array(self.pos[i, 0:2]) - np.array(self.pos[nth_drone, 0:2]))\n if delta_z > 0 and delta_xy < 10: # Ignore drones more than 10 meters away\n alpha = self.DW_COEFF_1 * (self.PROP_RADIUS/(4*delta_z))**2\n beta = self.DW_COEFF_2 * delta_z + self.DW_COEFF_3\n downwash = [0, 0, -alpha * np.exp(-.5*(delta_xy/beta)**2)]\n p.applyExternalForce(self.DRONE_IDS[nth_drone],\n 4,\n forceObj=downwash,\n posObj=[0, 0, 0],\n flags=p.LINK_FRAME,\n physicsClientId=self.CLIENT\n )\n\n ################################################################################\n\n def _dynamics(self,\n rpm,\n nth_drone\n ):\n \"\"\"Explicit dynamics implementation.\n\n Based on code written at the Dynamic Systems Lab by James Xu.\n\n Parameters\n ----------\n rpm : ndarray\n (4)-shaped array of ints containing the RPMs values of the 4 motors.\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n #### Current state #########################################\n pos = self.pos[nth_drone,:]\n quat = self.quat[nth_drone,:]\n rpy = self.rpy[nth_drone,:]\n vel = self.vel[nth_drone,:]\n rpy_rates = self.rpy_rates[nth_drone,:]\n rotation = 
np.array(p.getMatrixFromQuaternion(quat)).reshape(3, 3)\n #### Compute forces and torques ############################\n forces = np.array(rpm**2) * self.KF\n thrust = np.array([0, 0, np.sum(forces)])\n thrust_world_frame = np.dot(rotation, thrust)\n force_world_frame = thrust_world_frame - np.array([0, 0, self.GRAVITY])\n z_torques = np.array(rpm**2)*self.KM\n z_torque = (-z_torques[0] + z_torques[1] - z_torques[2] + z_torques[3])\n if self.DRONE_MODEL==DroneModel.CF2X:\n x_torque = (forces[0] + forces[1] - forces[2] - forces[3]) * (self.L/np.sqrt(2))\n y_torque = (- forces[0] + forces[1] + forces[2] - forces[3]) * (self.L/np.sqrt(2))\n elif self.DRONE_MODEL==DroneModel.CF2P or self.DRONE_MODEL==DroneModel.HB:\n x_torque = (forces[1] - forces[3]) * self.L\n y_torque = (-forces[0] + forces[2]) * self.L\n torques = np.array([x_torque, y_torque, z_torque])\n torques = torques - np.cross(rpy_rates, np.dot(self.J, rpy_rates))\n rpy_rates_deriv = np.dot(self.J_INV, torques)\n no_pybullet_dyn_accs = force_world_frame / self.M\n #### Update state ##########################################\n vel = vel + self.TIMESTEP * no_pybullet_dyn_accs\n rpy_rates = rpy_rates + self.TIMESTEP * rpy_rates_deriv\n pos = pos + self.TIMESTEP * vel\n rpy = rpy + self.TIMESTEP * rpy_rates\n #### Set PyBullet's state ##################################\n p.resetBasePositionAndOrientation(self.DRONE_IDS[nth_drone],\n pos,\n p.getQuaternionFromEuler(rpy),\n physicsClientId=self.CLIENT\n )\n #### Note: the base's velocity only stored and not used ####\n p.resetBaseVelocity(self.DRONE_IDS[nth_drone],\n vel,\n [-1, -1, -1], # ang_vel not computed by DYN\n physicsClientId=self.CLIENT\n )\n #### Store the roll, pitch, yaw rates for the next step ####\n self.rpy_rates[nth_drone,:] = rpy_rates\n \n ################################################################################\n\n def _normalizedActionToRPM(self,\n action\n ):\n \"\"\"De-normalizes the [-1, 1] range to the [0, MAX_RPM] range.\n\n Parameters\n ----------\n action : ndarray\n (4)-shaped array of ints containing an input in the [-1, 1] range.\n\n Returns\n -------\n ndarray\n (4)-shaped array of ints containing RPMs for the 4 motors in the [0, MAX_RPM] range.\n\n \"\"\"\n if np.any(np.abs(action)) > 1:\n print(\"\\n[ERROR] it\", self.step_counter, \"in BaseAviary._normalizedActionToRPM(), out-of-bound action\")\n return np.where(action <= 0, (action+1)*self.HOVER_RPM, action*self.MAX_RPM) # Non-linear mapping: -1 -> 0, 0 -> HOVER_RPM, 1 -> MAX_RPM\n \n ################################################################################\n\n def _saveLastAction(self,\n action\n ):\n \"\"\"Stores the most recent action into attribute `self.last_action`.\n\n The last action can be used to compute aerodynamic effects.\n The method disambiguates between array and dict inputs \n (for single or multi-agent aviaries, respectively).\n\n Parameters\n ----------\n action : ndarray | dict\n (4)-shaped array of ints (or dictionary of arrays) containing the current RPMs input.\n\n \"\"\"\n if isinstance(action, collections.abc.Mapping):\n for k, v in action.items(): \n res_v = np.resize(v, (1, 4)) # Resize, possibly with repetition, to cope with different action spaces in RL subclasses\n self.last_action[int(k), :] = res_v\n else: \n res_action = np.resize(action, (1, 4)) # Resize, possibly with repetition, to cope with different action spaces in RL subclasses\n self.last_action = np.reshape(res_action, (self.NUM_DRONES, 4))\n \n 
################################################################################\n\n def _showDroneLocalAxes(self,\n nth_drone\n ):\n \"\"\"Draws the local frame of the n-th drone in PyBullet's GUI.\n\n Parameters\n ----------\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n if self.GUI:\n AXIS_LENGTH = 2*self.L\n self.X_AX[nth_drone] = p.addUserDebugLine(lineFromXYZ=[0, 0, 0],\n lineToXYZ=[AXIS_LENGTH, 0, 0],\n lineColorRGB=[1, 0, 0],\n parentObjectUniqueId=self.DRONE_IDS[nth_drone],\n parentLinkIndex=-1,\n replaceItemUniqueId=int(self.X_AX[nth_drone]),\n physicsClientId=self.CLIENT\n )\n self.Y_AX[nth_drone] = p.addUserDebugLine(lineFromXYZ=[0, 0, 0],\n lineToXYZ=[0, AXIS_LENGTH, 0],\n lineColorRGB=[0, 1, 0],\n parentObjectUniqueId=self.DRONE_IDS[nth_drone],\n parentLinkIndex=-1,\n replaceItemUniqueId=int(self.Y_AX[nth_drone]),\n physicsClientId=self.CLIENT\n )\n self.Z_AX[nth_drone] = p.addUserDebugLine(lineFromXYZ=[0, 0, 0],\n lineToXYZ=[0, 0, AXIS_LENGTH],\n lineColorRGB=[0, 0, 1],\n parentObjectUniqueId=self.DRONE_IDS[nth_drone],\n parentLinkIndex=-1,\n replaceItemUniqueId=int(self.Z_AX[nth_drone]),\n physicsClientId=self.CLIENT\n )\n \n ################################################################################\n\n def _addObstacles(self):\n \"\"\"Add obstacles to the environment.\n\n These obstacles are loaded from standard URDF files included in Bullet.\n\n \"\"\"\n p.loadURDF(\"samurai.urdf\",\n physicsClientId=self.CLIENT\n )\n p.loadURDF(\"duck_vhacd.urdf\",\n [-.5, -.5, .05],\n p.getQuaternionFromEuler([0, 0, 0]),\n physicsClientId=self.CLIENT\n )\n p.loadURDF(\"cube_no_rotation.urdf\",\n [-.5, -2.5, .5],\n p.getQuaternionFromEuler([0, 0, 0]),\n physicsClientId=self.CLIENT\n )\n p.loadURDF(\"sphere2.urdf\",\n [0, 2, .5],\n p.getQuaternionFromEuler([0,0,0]),\n physicsClientId=self.CLIENT\n )\n \n ################################################################################\n \n def _parseURDFParameters(self):\n \"\"\"Loads parameters from an URDF file.\n\n This method is nothing more than a custom XML parser for the .urdf\n files in folder `assets/`.\n\n \"\"\"\n URDF_TREE = etxml.parse(os.path.dirname(os.path.abspath(__file__))+\"/../assets/\"+self.URDF).getroot()\n M = float(URDF_TREE[1][0][1].attrib['value'])\n L = float(URDF_TREE[0].attrib['arm'])\n THRUST2WEIGHT_RATIO = float(URDF_TREE[0].attrib['thrust2weight'])\n IXX = float(URDF_TREE[1][0][2].attrib['ixx'])\n IYY = float(URDF_TREE[1][0][2].attrib['iyy'])\n IZZ = float(URDF_TREE[1][0][2].attrib['izz'])\n J = np.diag([IXX, IYY, IZZ])\n J_INV = np.linalg.inv(J)\n KF = float(URDF_TREE[0].attrib['kf'])\n KM = float(URDF_TREE[0].attrib['km'])\n COLLISION_H = float(URDF_TREE[1][2][1][0].attrib['length'])\n COLLISION_R = float(URDF_TREE[1][2][1][0].attrib['radius'])\n COLLISION_SHAPE_OFFSETS = [float(s) for s in URDF_TREE[1][2][0].attrib['xyz'].split(' ')]\n COLLISION_Z_OFFSET = COLLISION_SHAPE_OFFSETS[2]\n MAX_SPEED_KMH = float(URDF_TREE[0].attrib['max_speed_kmh'])\n GND_EFF_COEFF = float(URDF_TREE[0].attrib['gnd_eff_coeff'])\n PROP_RADIUS = float(URDF_TREE[0].attrib['prop_radius'])\n DRAG_COEFF_XY = float(URDF_TREE[0].attrib['drag_coeff_xy'])\n DRAG_COEFF_Z = float(URDF_TREE[0].attrib['drag_coeff_z'])\n DRAG_COEFF = np.array([DRAG_COEFF_XY, DRAG_COEFF_XY, DRAG_COEFF_Z])\n DW_COEFF_1 = float(URDF_TREE[0].attrib['dw_coeff_1'])\n DW_COEFF_2 = float(URDF_TREE[0].attrib['dw_coeff_2'])\n DW_COEFF_3 = float(URDF_TREE[0].attrib['dw_coeff_3'])\n return M, L, 
THRUST2WEIGHT_RATIO, J, J_INV, KF, KM, COLLISION_H, COLLISION_R, COLLISION_Z_OFFSET, MAX_SPEED_KMH, \\\n GND_EFF_COEFF, PROP_RADIUS, DRAG_COEFF, DW_COEFF_1, DW_COEFF_2, DW_COEFF_3\n \n ################################################################################\n \n def _actionSpace(self):\n \"\"\"Returns the action space of the environment.\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n \n ################################################################################\n\n def _observationSpace(self):\n \"\"\"Returns the observation space of the environment.\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n \n ################################################################################\n \n def _computeObs(self):\n \"\"\"Returns the current observation of the environment.\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n \n ################################################################################\n\n def _preprocessAction(self,\n action\n ):\n \"\"\"Pre-processes the action passed to `.step()` into motors' RPMs.\n\n Must be implemented in a subclass.\n\n Parameters\n ----------\n action : ndarray | dict[..]\n The input action for one or more drones, to be translated into RPMs.\n\n \"\"\"\n raise NotImplementedError\n\n ################################################################################\n\n def _computeReward(self):\n \"\"\"Computes the current reward value(s).\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n\n ################################################################################\n\n def _computeDone(self):\n \"\"\"Computes the current done value(s).\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n\n ################################################################################\n\n def _computeInfo(self):\n \"\"\"Computes the current info dict(s).\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"numpy.resize",
"numpy.sqrt",
"numpy.max",
"numpy.exp",
"numpy.where",
"numpy.hstack",
"numpy.clip",
"numpy.reshape",
"numpy.zeros",
"numpy.min",
"numpy.linalg.inv",
"numpy.identity",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.tile",
"numpy.linalg.norm",
"numpy.ones"
]
] |
aroig/nnutil
|
[
"88df41ee89f592a28c1661ee8837dd8e8ca42cf3"
] |
[
"nnutil/visual/bars.py"
] |
[
"import numpy as np\nimport math\n\n_vbars = \" ▁▂▃▄▅▆▇█\"\n\ndef bar_graph(data):\n if len(data) > 64:\n data = np.interp(np.linspace(0, len(data), 64),\n np.arange(0, len(data)),\n np.array(data))\n\n M = max(data)\n def _bar(alpha):\n if math.isnan(alpha):\n return 'N'\n else:\n n = int((len(_vbars) - 1) * max(0.0, min(1.0, alpha)))\n return _vbars[n]\n\n if M > 0:\n return ''.join([_bar(x/M) for x in data])\n else:\n return len(data) * ' '\n"
] |
[
[
"numpy.array"
]
] |
ValterFallenius/metnet
|
[
"7cde48a7b5fc0b69a8ce9083f934949362620fd5"
] |
[
"metnet/layers/ConvLSTM.py"
] |
[
"\"\"\"Originally adapted from https://github.com/aserdega/convlstmgru, MIT License Andriy Serdega\"\"\"\nfrom typing import Any, List, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\n\nclass ConvLSTMCell(nn.Module):\n \"\"\"ConvLSTM Cell\"\"\"\n\n def __init__(\n self,\n input_dim: int,\n hidden_dim: int,\n kernel_size: int,\n bias=True,\n activation=F.tanh,\n batchnorm=False,\n ):\n \"\"\"\n ConLSTM Cell\n\n Args:\n input_dim: Number of input channels\n hidden_dim: Number of hidden channels\n kernel_size: Kernel size\n bias: Whether to add bias\n activation: Activation to use\n batchnorm: Whether to use batch norm\n \"\"\"\n super(ConvLSTMCell, self).__init__()\n\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n\n self.kernel_size = kernel_size\n self.padding = kernel_size // 2, kernel_size // 2\n self.bias = bias\n self.activation = activation\n self.batchnorm = batchnorm\n\n self.conv = nn.Conv2d(\n in_channels=self.input_dim + self.hidden_dim,\n out_channels=4 * self.hidden_dim,\n kernel_size=self.kernel_size,\n padding=self.padding,\n bias=self.bias,\n )\n\n self.reset_parameters()\n\n def forward(self, x: torch.Tensor, prev_state: list) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Compute forward pass\n\n Args:\n x: Input tensor of [Batch, Channel, Height, Width]\n prev_state: Previous hidden state\n\n Returns:\n The new hidden state and output\n \"\"\"\n h_prev, c_prev = prev_state\n\n combined = torch.cat((x, h_prev), dim=1) # concatenate along channel axis\n combined_conv = self.conv(combined)\n\n cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)\n\n i = F.sigmoid(cc_i)\n f = F.sigmoid(cc_f)\n\n g = self.activation(cc_g)\n c_cur = f * c_prev + i * g\n\n o = F.sigmoid(cc_o)\n\n h_cur = o * self.activation(c_cur)\n\n return h_cur, c_cur\n\n def init_hidden(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Initializes the hidden state\n Args:\n x: Input tensor to initialize for\n\n Returns:\n Tuple containing the hidden states\n \"\"\"\n state = (\n torch.zeros(x.size()[0], self.hidden_dim, x.size()[3], x.size()[4]),\n torch.zeros(x.size()[0], self.hidden_dim, x.size()[3], x.size()[4]),\n )\n state = (state[0].type_as(x), state[1].type_as(x))\n return state\n\n def reset_parameters(self) -> None:\n \"\"\"Resets parameters\"\"\"\n nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.calculate_gain(\"tanh\"))\n self.conv.bias.data.zero_()\n\n if self.batchnorm:\n self.bn1.reset_parameters()\n self.bn2.reset_parameters()\n\n\nclass ConvLSTM(nn.Module):\n def __init__(\n self,\n input_dim: int,\n hidden_dim: int,\n kernel_size: int,\n num_layers: int,\n bias=True,\n activation=F.tanh,\n batchnorm=False,\n ):\n \"\"\"\n ConvLSTM module\n\n Args:\n input_dim: Input dimension size\n hidden_dim: Hidden dimension size\n kernel_size: Kernel size\n num_layers: Number of layers\n bias: Whether to add bias\n activation: Activation function\n batchnorm: Whether to use batch norm\n \"\"\"\n super(ConvLSTM, self).__init__()\n\n # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers\n kernel_size = self._extend_for_multilayer(kernel_size, num_layers)\n hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)\n activation = self._extend_for_multilayer(activation, num_layers)\n\n if not len(kernel_size) == len(hidden_dim) == len(activation) == num_layers:\n raise ValueError(\"Inconsistent list length.\")\n\n self.input_dim = 
input_dim\n self.hidden_dim = hidden_dim\n self.kernel_size = kernel_size\n self.num_layers = num_layers\n self.batch_first = True\n self.bias = bias\n\n cell_list = []\n for i in range(0, self.num_layers):\n cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]\n\n cell_list.append(\n ConvLSTMCell(\n input_dim=cur_input_dim,\n hidden_dim=self.hidden_dim[i],\n kernel_size=self.kernel_size[i],\n bias=self.bias,\n activation=activation[i],\n batchnorm=batchnorm,\n )\n )\n\n self.cell_list = nn.ModuleList(cell_list)\n\n self.reset_parameters()\n\n def forward(\n self, x: torch.Tensor, hidden_state: Optional[list] = None\n ) -> tuple[Tensor, list[tuple[Any, Any]]]:\n \"\"\"\n Computes the output of the ConvLSTM\n\n Args:\n x: Input Tensor of shape [Batch, Time, Channel, Width, Height]\n hidden_state: List of hidden states to use, if none passed, it will be generated\n\n Returns:\n The layer output and list of last states\n \"\"\"\n cur_layer_input = torch.unbind(x, dim=int(self.batch_first))\n\n if not hidden_state:\n hidden_state = self.get_init_states(x)\n\n seq_len = len(cur_layer_input)\n\n last_state_list = []\n\n for layer_idx in range(self.num_layers):\n h, c = hidden_state[layer_idx]\n output_inner = []\n for t in range(seq_len):\n h, c = self.cell_list[layer_idx](x=cur_layer_input[t], prev_state=[h, c])\n output_inner.append(h)\n\n cur_layer_input = output_inner\n last_state_list.append((h, c))\n\n layer_output = torch.stack(output_inner, dim=int(self.batch_first))\n\n return layer_output, last_state_list\n\n def reset_parameters(self) -> None:\n \"\"\"\n Reset parameters\n \"\"\"\n for c in self.cell_list:\n c.reset_parameters()\n\n def get_init_states(self, x: torch.Tensor) -> List[torch.Tensor]:\n \"\"\"\n Constructs the initial hidden states\n\n Args:\n x: Tensor to use for constructing state\n\n Returns:\n The initial hidden states for all the layers in the network\n \"\"\"\n init_states = []\n for i in range(self.num_layers):\n init_states.append(self.cell_list[i].init_hidden(x))\n return init_states\n\n @staticmethod\n def _extend_for_multilayer(param, num_layers):\n \"\"\"\n Extends a parameter for multiple layers\n\n Args:\n param: Parameter to copy\n num_layers: Number of layers\n\n Returns:\n The extended parameter\n \"\"\"\n if not isinstance(param, list):\n param = [param] * num_layers\n return param\n"
] |
[
[
"torch.nn.init.calculate_gain",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.functional.sigmoid",
"torch.split"
]
] |
sundogu/ML-Bayes-Rule-Classification
|
[
"ac476e21130c86d082783ab83b8badd368c87291"
] |
[
"bayes_rule_classifier.py"
] |
[
"import numpy as np\r\nimport scipy.stats as stats\r\n\r\n\r\nclass Classifier:\r\n # Class Variables\r\n _n_class = _p_m_s = None\r\n\r\n # Constructor\r\n def __init__(self, col_1, col_2, n_class):\r\n self._init_var(col_1, col_2, n_class)\r\n\r\n # Methods\r\n def _init_var(self, col_1, col_2, n_class):\r\n self._n_class = n_class\r\n\r\n assert len(col_1) == len(col_2)\r\n hmap = self._sort_cols(col_1, col_2)\r\n\r\n assert self._n_class == len(list(hmap))\r\n self._load_prior(col_2)\r\n self._load_mean_std(hmap)\r\n\r\n def _load_prior(self, col_2):\r\n self._p_m_s = {}\r\n for i in range(self._n_class):\r\n self._p_m_s[i] = {\"prior\": col_2.count(i) / float(len(col_2))}\r\n\r\n return\r\n\r\n def _sort_cols(self, col_1, col_2):\r\n hmap = {}\r\n\r\n for i in range(len(col_1)):\r\n if col_2[i] not in hmap:\r\n hmap[col_2[i]] = []\r\n\r\n hmap[col_2[i]].append(col_1[i])\r\n\r\n return hmap\r\n\r\n def _load_mean_std(self, hmap):\r\n for k in list(hmap):\r\n self._p_m_s[k][\"mean\"] = np.mean(hmap[k])\r\n self._p_m_s[k][\"std\"] = np.std(hmap[k], ddof=1)\r\n\r\n return\r\n\r\n def classify(self, test_x):\r\n def likelihood_x_prior(x, class_n):\r\n pms = self._p_m_s[class_n]\r\n return stats.norm(pms[\"mean\"], pms[\"std\"]).pdf(x) * pms[\"prior\"]\r\n\r\n evidence = 0\r\n\r\n for k in list(self._p_m_s):\r\n evidence += likelihood_x_prior(test_x, k)\r\n\r\n hmap = {}\r\n\r\n for k in list(self._p_m_s):\r\n if evidence != 0:\r\n post = likelihood_x_prior(test_x, k) / evidence\r\n else:\r\n post = 0\r\n\r\n if post not in hmap:\r\n hmap[post] = []\r\n\r\n hmap[post].append(k)\r\n\r\n class_list = hmap[np.max(list(hmap))]\r\n return class_list[np.random.randint(0, len(class_list))]\r\n"
] |
[
[
"scipy.stats.norm",
"numpy.std",
"numpy.mean"
]
] |
wangjinjia1/dcase2019task5_YSU
|
[
"c307cd118bb27cb913850f80d14f327399145ee9"
] |
[
"train.py"
] |
[
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 3 08:08:11 2019\n\n@author: barry\n\"\"\"\nimport os\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], '../utils'))\nimport numpy as np\nimport argparse\nimport h5py\nimport math\nimport time\nimport logging\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom utilities import (create_folder, get_filename, create_logging, \n load_scalar, get_labels)\nfrom data_generator import DataGenerator\nfrom models import TFSANN\nfrom losses import binary_cross_entropy\nfrom evaluate import Evaluator, StatisticsContainer\nfrom pytorch_utils import move_data_to_gpu\nimport config\n\n\n\ndef train(args):\n '''Training. Model will be saved after several iterations. \n \n Args: \n dataset_dir: string, directory of dataset\n workspace: string, directory of workspace\n taxonomy_level: 'fine' | 'coarse'\n model_type: string, e.g. 'Cnn_9layers_MaxPooling'\n holdout_fold: '1' | 'None', where '1' indicates using validation and \n 'None' indicates using full data for training\n batch_size: int\n cuda: bool\n mini_data: bool, set True for debugging on a small part of data\n '''\n\n # Arugments & parameters\n dataset_dir = args.dataset_dir\n workspace = args.workspace\n taxonomy_level = args.taxonomy_level\n model_type = args.model_type\n holdout_fold = args.holdout_fold\n batch_size = args.batch_size\n cuda = args.cuda and torch.cuda.is_available()\n mini_data = args.mini_data\n filename = args.filename\n \n seq_len = 640\n mel_bins = config.mel_bins\n frames_per_second = config.frames_per_second\n max_iteration = 10 # Number of mini-batches to evaluate on training data\n reduce_lr = True\n \n labels = get_labels(taxonomy_level)\n classes_num = len(labels)\n \n # Paths\n if mini_data:\n prefix = 'minidata_'\n else:\n prefix = ''\n \n train_hdf5_path = os.path.join(workspace, 'features', \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'train.h5')\n \n validate_hdf5_path = os.path.join(workspace, 'features', \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'validate.h5')\n \n scalar_path = os.path.join(workspace, 'scalars', \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'train.h5')\n \n checkpoints_dir = os.path.join(workspace, 'checkpoints', filename, \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'taxonomy_level={}'.format(taxonomy_level), \n 'holdout_fold={}'.format(holdout_fold), model_type)\n create_folder(checkpoints_dir)\n \n _temp_submission_path = os.path.join(workspace, '_temp_submissions', filename, \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'taxonomy_level={}'.format(taxonomy_level), \n 'holdout_fold={}'.format(holdout_fold), model_type, '_submission.csv')\n create_folder(os.path.dirname(_temp_submission_path))\n \n validate_statistics_path = os.path.join(workspace, 'statistics', filename, \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'taxonomy_level={}'.format(taxonomy_level), \n 'holdout_fold={}'.format(holdout_fold), model_type, \n 'validate_statistics.pickle')\n create_folder(os.path.dirname(validate_statistics_path))\n \n annotation_path = os.path.join(dataset_dir, 'annotations.csv')\n \n yaml_path = os.path.join(dataset_dir, 'dcase-ust-taxonomy.yaml')\n \n logs_dir = os.path.join(workspace, 'logs', filename, args.mode, \n 
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'taxonomy_level={}'.format(taxonomy_level), \n 'holdout_fold={}'.format(holdout_fold), model_type)\n create_logging(logs_dir, 'w')\n logging.info(args)\n\n if cuda:\n logging.info('Using GPU.')\n else:\n logging.info('Using CPU. Set --cuda flag to use GPU.')\n\n # Load scalar\n scalar = load_scalar(scalar_path)\n \n # Model\n Model = eval(model_type)\n model = Model(classes_num, seq_len, mel_bins, cuda)\n \n if cuda:\n model.cuda()\n \n # Optimizer\n optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999),\n eps=1e-08, weight_decay=0., amsgrad=True)\n print('cliqueNet parameters:', sum(param.numel() for param in model.parameters()))\n # Data generator\n data_generator = DataGenerator(\n train_hdf5_path=train_hdf5_path, \n validate_hdf5_path=validate_hdf5_path, \n holdout_fold=holdout_fold, \n scalar=scalar, \n batch_size=batch_size)\n \n # Evaluator\n evaluator = Evaluator(\n model=model, \n data_generator=data_generator, \n taxonomy_level=taxonomy_level, \n cuda=cuda, \n verbose=False)\n \n # Statistics\n validate_statistics_container = StatisticsContainer(validate_statistics_path)\n \n train_bgn_time = time.time()\n iteration = 0\n \n # Train on mini batches\n for batch_data_dict in data_generator.generate_train():\n \n # Evaluate\n if iteration % 200 == 0:\n logging.info('------------------------------------')\n logging.info('Iteration: {}, {} level statistics:'.format(\n iteration, taxonomy_level))\n\n train_fin_time = time.time()\n\n # Evaluate on training data\n if mini_data:\n raise Exception('`mini_data` flag must be set to False to use '\n 'the official evaluation tool!')\n \n train_statistics = evaluator.evaluate(\n data_type='train', \n max_iteration=None)\n \n # Evaluate on validation data\n if holdout_fold != 'none':\n validate_statistics = evaluator.evaluate(\n data_type='validate', \n submission_path=_temp_submission_path, \n annotation_path=annotation_path, \n yaml_path=yaml_path, \n max_iteration=None)\n \n validate_statistics_container.append_and_dump(\n iteration, validate_statistics)\n\n train_time = train_fin_time - train_bgn_time\n validate_time = time.time() - train_fin_time\n\n logging.info(\n 'Train time: {:.3f} s, validate time: {:.3f} s'\n ''.format(train_time, validate_time))\n\n train_bgn_time = time.time()\n\n # Save model\n if iteration % 1000 == 0 and iteration > 0:\n checkpoint = {\n 'iteration': iteration, \n 'model': model.state_dict(), \n 'optimizer': optimizer.state_dict()}\n\n checkpoint_path = os.path.join(\n checkpoints_dir, '{}_iterations.pth'.format(iteration))\n \n torch.save(checkpoint, checkpoint_path)\n logging.info('Model saved to {}'.format(checkpoint_path))\n \n # Reduce learning rate\n if reduce_lr and iteration % 200 == 0 and iteration > 0:\n for param_group in optimizer.param_groups:\n param_group['lr'] *= 0.9\n \n # Move data to GPU\n for key in batch_data_dict.keys():\n if key in ['feature', 'fine_target', 'coarse_target']:\n batch_data_dict[key] = move_data_to_gpu(\n batch_data_dict[key], cuda)\n \n # Train\n model.train()\n batch_output = model(batch_data_dict['feature'])\n \n # loss\n batch_target = batch_data_dict['{}_target'.format(taxonomy_level)]\n loss = binary_cross_entropy(batch_output, batch_target)\n\n # Backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Stop learning\n if iteration == 3000:\n break\n \n iteration += 1\n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Example of 
parser. ')\n subparsers = parser.add_subparsers(dest='mode')\n\n parser_train = subparsers.add_parser('train')\n parser_train.add_argument('--dataset_dir', type=str, required=True, help='Directory of dataset.')\n parser_train.add_argument('--workspace', type=str, required=True, help='Directory of your workspace.')\n parser_train.add_argument('--taxonomy_level', type=str, choices=['fine', 'coarse'], required=True)\n parser_train.add_argument('--model_type', type=str, required=True, help='E.g., TFSANN.')\n parser_train.add_argument('--holdout_fold', type=str, choices=['1', 'none'], required=True)\n parser_train.add_argument('--batch_size', type=int, required=True)\n parser_train.add_argument('--cuda', action='store_true', default=True)\n parser_train.add_argument('--mini_data', action='store_true', default=False, help='Set True for debugging on a small part of data.')\n \n args = parser.parse_args()\n args.filename = get_filename(__file__)\n\n if args.mode == 'train':\n train(args)\n \n else:\n raise Exception('Error argument!')"
] |
[
[
"torch.cuda.is_available",
"torch.save"
]
] |
ahmednader10/Machine_Learning
|
[
"fab0c7cd773b5e001b56c5349550085e34661e4d",
"fab0c7cd773b5e001b56c5349550085e34661e4d"
] |
[
"Tensorflow/MNIST/Chapter1.py",
"MNIST_NN_VS_SVM/plots.py"
] |
[
"import tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\nX = tf.placeholder(tf.float32, [None, 28, 28, 1])\nW = tf.Variable(tf.zeros([784,10]))\nb = tf.Variable(tf.zeros([10]))\n\nX = tf.reshape(X, [-1, 784])\n#model\nY = tf.nn.softmax(tf.matmul(X, W) + b)\n\n#placeholder for correct answers\nY_ = tf.placeholder(tf.float32, [None, 10])\n\n#loss function\ncross_entropy = -tf.reduce_sum(Y_ * tf.log(Y))\n\n# % of correct answers in batch\nis_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_, 1))\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n\noptimizer = tf.train.GradientDescentOptimizer(0.003)\ntrain_step = optimizer.minimize(cross_entropy)\n\ninit = tf.initialize_all_variables()\nsess = tf.Session()\nsess.run(init)\n\nfor i in range(10000):\n #load batch images and correct images\n batch_X, batch_Y = mnist.train.next_batch(100)\n\n train_data = {X: batch_X, Y_: batch_Y}\n #train\n sess.run(train_step, feed_dict = {X: batch_X, Y_: batch_Y})\n\n #print in case of success\n a,c = sess.run([accuracy, cross_entropy], feed_dict={X: batch_X, Y_: batch_Y})\n\n #success on test data?\n test_data = {X:mnist.test.images, Y_:mnist.test.labels}\n a,c = sess.run([accuracy, cross_entropy], feed_dict = {X:mnist.test.images, Y_:mnist.test.labels})\n\nprint(\"accuracy:\" + str(a) + \" loss: \" + str(c))\n",
"import pandas as pd\r\nimport sklearn\r\nfrom sklearn.neural_network import MLPClassifier, MLPRegressor\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.svm import LinearSVC, SVC\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn import preprocessing\r\nimport numpy as np\r\nimport os, struct\r\nfrom array import array as pyarray\r\nfrom numpy import append, array, int8, uint8, zeros\r\nfrom pylab import *\r\nfrom numpy import *\r\nfrom sklearn.model_selection import validation_curve\r\nimport matplotlib.pyplot as plt\r\n\r\nparam_range = [120,100,80,60,40,35,30,20]\r\n\r\n\r\ntrain_scores_mean1 = [1,1,1,1,1] #1 => learning rates\r\ntrain_scores_mean2 = [0.9706,0.9889,0.9962,0.9974,0.9987] #2 => C values\r\ntrain_scores_mean3 = [1,1,1,1,1] #3 => Momentum values\r\ntrain_scores_mean4 = [1,1,1,1,1] #4 => Batch size values\r\ntrain_scores_mean5 = [1,1,1,1,1] #5 => hidden nodes size values\r\ntest_scores_mean1 = [0.9639,0.9667,0.9678,0.9681,0.9708]\r\ntest_scores_mean2 = [0.9302,0.9209,0.9137,0.9126,0.9111]\r\ntest_scores_mean3 = [0.965,0.9665,0.9679,0.9659,0.0964] \r\ntest_scores_mean4 = [0.9689,0.9704,0.9697,0.9656,0.9657]\r\ntest_scores_mean5 = [0.9509,0.9593,0.9618,0.9631,0.966]\r\n\r\npca_values = [0.9468, 0.9522, 0.9558, 0.9602, 0.9608, 0.962, 0.9622, 0.9616]\r\n\r\nplt.title(\"Testing Curve for SVC using PCA\")\r\nplt.xlabel(\"Number of components\")\r\nplt.ylabel(\"Score\")\r\nplt.ylim(0.94, 0.975)\r\nplt.plot(param_range, pca_values, label=\"Testing score\",\r\n color=\"navy\")\r\n#plt.plot(param_range, test_scores_mean4, label=\"Cross-validation score\",\r\n# color=\"navy\")\r\n\r\nplt.legend(loc=\"best\")\r\nplt.show()"
] |
[
[
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.initialize_all_variables",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.log",
"tensorflow.Session",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
anonymousprojs/ISSTA2022-study
|
[
"94cef7fc4c098c03bb08ff8865d0c1d9a5de86b2",
"94cef7fc4c098c03bb08ff8865d0c1d9a5de86b2",
"94cef7fc4c098c03bb08ff8865d0c1d9a5de86b2"
] |
[
"coverage/rq3/rq3_script.py",
"coverage/tools/surprise_adequacy/sa.py",
"coverage/tools/surprise_adequacy/surprise_adequacy.py"
] |
[
"import argparse\r\nimport configparser\r\nimport os\r\nimport numpy as np\r\nfrom datetime import datetime, date\r\n\r\nfrom pandas import DataFrame\r\n\r\nfrom coverage import root_dir\r\nimport coverage.tools.dataloader as dataloader\r\nfrom coverage.tools import common_utils\r\nimport coverage.tools.model_utils as model_utils\r\nfrom coverage.tools.coverage_utils import execute_sampling, SurpriseCoverage\r\n\r\n\r\ndef get_aggregated_indices(labels, select_idx):\r\n sampled_indices_list = []\r\n for class_id in select_idx:\r\n sampled_indices = np.nonzero(labels == class_id)[0]\r\n sampled_indices_list.append(sampled_indices)\r\n aggregated_indices = np.concatenate(sampled_indices_list)\r\n return aggregated_indices\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--sample_capacity\", help=\"number of images\", type=int, default=800)\r\n parser.add_argument(\"--repeat_times\", help=\"number of selected classes\", type=int, default=2)\r\n parser.add_argument(\"--dataset_network\", help=\"selected class id\", type=str, default=\"cifar100_resnet32\")\r\n parser.add_argument(\"--attack\", help=\"adversarial attack\", type=str, default=\"cw\")\r\n parser.add_argument(\"--exp_date\", help=\"data_of_exp\", type=str,)\r\n parser.add_argument(\"--split_id\", help=\"id number of select split\", type=int, default=1)\r\n console_args = parser.parse_args()\r\n print(console_args)\r\n\r\n dataset_network = console_args.dataset_network\r\n\r\n exp_cfg = configparser.ConfigParser()\r\n coverage_parameters = {\"n_bucket\": 1000}\r\n exp_cfg.read(f\"{root_dir}/config/exp.conf\")\r\n total_group_nums = exp_cfg['parameters'].getint(\"group_nums\")\r\n coverage_parameters[\"kmnc_k_section\"] = exp_cfg['parameters'].getint(\"kmnc_k_section\")\r\n coverage_parameters[\"tknc_k_value\"] = exp_cfg['parameters'].getint(\"tknc_k_value\")\r\n coverage_parameters[\"nc_threshold\"] = exp_cfg['parameters'].getfloat(\"nc_threshold\")\r\n coverage_parameters[\"idc_relevant_neurons\"] = exp_cfg['parameters'].getint(\"idc_relevant_neurons\")\r\n\r\n rq3_path = exp_cfg['parameters'].get(\"rq3_path\")\r\n sa_dir_name = exp_cfg['parameters'].get(\"sa_intermediate\")\r\n sa_intermedia_path = os.path.join(root_dir, sa_dir_name)\r\n idc_dir_name = exp_cfg['parameters'].get(\"idc_intermediate\")\r\n idc_intermedia_path = os.path.join(root_dir, idc_dir_name)\r\n coverage_parameters[\"idc_intermedia_path\"] = idc_intermedia_path\r\n console_args.exp_date = str(date.today()) if console_args.exp_date is None else console_args.exp_date\r\n dataset_network_dir = os.path.join(root_dir, rq3_path, console_args.exp_date, dataset_network)\r\n common_utils.create_path(sa_intermedia_path, idc_intermedia_path, rq3_path, dataset_network_dir)\r\n\r\n dataset_name, network_name = tuple(dataset_network.split(\"_\"))\r\n num_classes = dataloader.class_num(dataset_name)\r\n test_sizes = dataloader.test_sizes[dataset_name]\r\n\r\n s0 = datetime.now()\r\n # load model and boundary\r\n classifier = model_utils.load_model(network=network_name, dataset=dataset_name)\r\n boundary = common_utils.load_boundary(dataset_name, network_name)\r\n # direct use `size_per_class` correctly classified images\r\n x_test, y_test = dataloader.load_dataset(dataset_name)\r\n x_test = dataloader.preprocess_dataset(dataset_name, network_name, x_test)\r\n print(f\"INFO: {dataset_name, network_name} value range of clean images :[{np.min(x_test)},{np.max(x_test)}]\")\r\n\r\n # the adversarial inputs are already 
preprocessed.\r\n adv_x, adv_y = dataloader.load_adversarial_images(dataset_name, network_name, console_args.attack, mode=\"full\")\r\n print(f\"INFO: {dataset_name, network_name} value range of adv images :[{np.min(adv_x)},{np.max(adv_x)}]\")\r\n\r\n # I skip loading train set here. We don't need train-set because we have generated SA and IDC intermediate files\r\n skip_train = True\r\n if skip_train:\r\n x_train = y_train = None\r\n else:\r\n # note that the y_train is not in one-vector format. It's just an array of class ids.\r\n x_train, y_train = dataloader.load_train_set(console_args.dataset)\r\n x_train = dataloader.preprocess_dataset(console_args.dataset, console_args.network, x_train)\r\n print(f\"INFO: {console_args.dataset, console_args.network} \"\r\n f\"value range of train images :[{np.min(x_train)},{np.max(x_train)}]\")\r\n print(f\"Data & Model preparing time:{datetime.now() - s0}\")\r\n\r\n sampling_indices = common_utils.sampling_indices_dict(500, dataset_model=dataset_network,\r\n test_size=console_args.sample_capacity)\r\n correct_indices = sampling_indices['pure_correct_indices']\r\n pure_correct_labels = y_test[correct_indices].copy()\r\n\r\n # we divide the classes into ten splits\r\n section_num = 10\r\n class_ids = np.arange(num_classes)\r\n section_length = int(num_classes / section_num)\r\n\r\n adv_lsa, adv_dsa, adv_mdsa = common_utils.cached_sa(dataset_network=dataset_network,\r\n attack_type=console_args.attack,\r\n test_size=test_sizes)\r\n clean_lsa, clean_dsa, clean_mdsa = common_utils.cached_sa(dataset_network=dataset_network,\r\n attack_type=\"normal\",\r\n test_size=test_sizes)\r\n sa_dict = dict()\r\n sa_dict[\"clean_lsa\"], sa_dict[\"adv_lsa\"] = clean_lsa, adv_lsa\r\n sa_dict[\"clean_dsa\"], sa_dict[\"adv_dsa\"] = clean_dsa, adv_dsa\r\n sa_dict[\"clean_mdsa\"], sa_dict[\"adv_mdsa\"] = clean_mdsa, adv_mdsa\r\n sa_dict[\"lsa_boundary\"] = SurpriseCoverage.filter_outliers(\"LSA\",np.concatenate([clean_lsa,adv_lsa]).copy())\r\n sa_dict[\"dsa_boundary\"] = SurpriseCoverage.filter_outliers(\"DSA\",np.concatenate([clean_dsa,adv_dsa]).copy())\r\n sa_dict[\"mdsa_boundary\"] = SurpriseCoverage.filter_outliers(\"MDSA\",np.concatenate([clean_mdsa,adv_mdsa]).copy())\r\n\r\n start_class_id = int(section_length * console_args.split_id)\r\n top_idx = class_ids[start_class_id:start_class_id + section_length]\r\n print(f\"Selecting spilt:{console_args.split_id},classes:{top_idx}\")\r\n df_titles = [\"Sampling_Name\", \"correct_proportion\", \"NC\", \"NBC\", \"SNAC\", \"TKNC\", 'KMNC', \"LSC\", \"DSC\", \"MDSC\",\r\n \"IDC\", \"error_rate\"]\r\n df_path = os.path.join(dataset_network_dir,\r\n f\"{console_args.dataset_network}_{console_args.attack}_size{console_args.sample_capacity}\"\r\n f\"_class_ratio-split{console_args.split_id}.xlsx\")\r\n\r\n df = DataFrame(columns=df_titles)\r\n row_id = 0\r\n\r\n _aggregated_correct_idx = get_aggregated_indices(pure_correct_labels, top_idx)\r\n aggregated_correct_idx = correct_indices[_aggregated_correct_idx]\r\n aggregated_wrong_idx = get_aggregated_indices(adv_y, top_idx)\r\n\r\n s0 = datetime.now()\r\n for rid in range(console_args.repeat_times):\r\n if len(aggregated_correct_idx) >= console_args.sample_capacity:\r\n adv_minimum = 0\r\n else:\r\n adv_minimum = console_args.sample_capacity - len(aggregated_correct_idx)\r\n adv_maximum = int(console_args.sample_capacity * 0.7)\r\n assert adv_maximum > adv_minimum, f\"Maximum {adv_maximum} <= Minimum {adv_minimum}. 
\" \\\r\n f\"Only {len(aggregated_correct_idx)} correct inputs are found.\"\r\n wrong_num = np.random.randint(low=adv_minimum, high=adv_maximum + 1)\r\n correct_num = console_args.sample_capacity - wrong_num\r\n print(f\"Repeat times: {rid} of {console_args.repeat_times}, correct: {correct_num}, wrong: {wrong_num}\")\r\n select_correct_idx = np.random.choice(a=aggregated_correct_idx, size=correct_num, replace=False)\r\n select_wrong_idx = np.random.choice(a=aggregated_wrong_idx, size=wrong_num, replace=False)\r\n select_correct_inputs, select_correct_labels = \\\r\n x_test[select_correct_idx].copy(), y_test[select_correct_idx].copy()\r\n select_wrong_inputs, select_wrong_labels = \\\r\n adv_x[select_wrong_idx].copy(), adv_y[select_wrong_idx].copy()\r\n selected_x = np.concatenate([select_correct_inputs, select_wrong_inputs])\r\n selected_y = np.concatenate([select_correct_labels, select_wrong_labels])\r\n row = execute_sampling(dataset_network=dataset_network, classifier=classifier, x=selected_x, y=selected_y,\r\n train_inputs=x_train, train_labels=y_train, boundary=boundary, sa_dict=sa_dict,\r\n coverage_parameters=coverage_parameters, normal_indices=select_correct_idx,\r\n adv_indices=select_wrong_idx,classification=True)\r\n\r\n row_str = [round(rate, 2) for rate in row]\r\n sampling_row = [f\"sample{console_args.split_id}_repeat_{rid}\",\r\n round(correct_num / console_args.sample_capacity, 2)]\r\n sampling_row.extend(row_str)\r\n df.loc[row_id] = sampling_row\r\n row_id += 1\r\n df.to_excel(df_path)\r\n\r\n elapsed = (datetime.now() - s0)\r\n print(f\"RQ2 Time used for {dataset_network}-{console_args.attack} \", elapsed)\r\n",
"from warnings import warn\r\n\r\nimport os\r\n\r\nfrom multiprocessing import Pool\r\n\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nfrom keras.models import Model\r\nfrom scipy.stats import gaussian_kde\r\nfrom coverage.tools.surprise_adequacy.sa_utils import *\r\nfrom coverage.tools.common_utils import ScoreUtils\r\nfrom coverage.tools.deepspeech.deepspeech_utils import DSDataUtils\r\n\r\n\r\ndef _aggr_output(x):\r\n return [np.mean(x[..., j]) for j in range(x.shape[-1])]\r\n\r\n\r\ndef _get_saved_path(base_path, dataset, network, train_size: int, dtype, layer_names):\r\n \"\"\"Determine saved path of ats and pred\r\n Args:\r\n base_path (str): Base save path.\r\n dataset (str): Name of dataset.\r\n dtype (str): Name of dataset type (e.g., train, test, fgsm, ...).\r\n layer_names (list): List of layer names.\r\n Returns:\r\n ats_path: File path of ats.\r\n pred_path: File path of pred (independent of layers)\r\n \"\"\"\r\n\r\n joined_layer_names = \"_\".join(layer_names)\r\n return (\r\n os.path.join(\r\n base_path,\r\n dataset + \"_\" + network + \"_\" + dtype + \"_\" +\r\n str(train_size) + \"_\" + joined_layer_names + \"_ats\" + \".npy\",\r\n ),\r\n os.path.join(base_path, dataset + \"_\" + network + \"_\" +\r\n dtype + \"_\" + str(train_size) + \"_pred\" + \".npy\"),\r\n )\r\n\r\n\r\ndef get_ats(\r\n model,\r\n dataset,\r\n name,\r\n layer_names,\r\n save_path=None,\r\n batch_size=128,\r\n is_classification=True,\r\n num_classes=10,\r\n num_proc=10,\r\n dataset_name=None,\r\n):\r\n \"\"\"Extract activation traces of dataset from model.\r\n Args:\r\n model (keras model): Subject model.\r\n dataset (list): Set of inputs fed into the model.\r\n name (str): Name of input set.\r\n layer_names (list): List of selected layer names.\r\n save_path (tuple): Paths of being saved ats and pred.\r\n batch_size (int): Size of batch when serving.\r\n is_classification (bool): Task type, True if classification task or False.\r\n num_classes (int): The number of classes (labels) in the dataset.\r\n num_proc (int): The number of processes for multiprocessing.\r\n Returns:\r\n ats (list): List of (layers, inputs, neuron outputs).\r\n pred (list): List of predicted classes.\r\n \"\"\"\r\n\r\n temp_model = Model(\r\n inputs=model.input,\r\n outputs=[model.get_layer(\r\n layer_name).output for layer_name in layer_names],\r\n )\r\n\r\n prefix = info(\"[\" + name + \"] \")\r\n if is_classification:\r\n p = Pool(num_proc)\r\n print(prefix + \"Model serving\")\r\n # pred = model.predict_classes(dataset, batch_size=batch_size, verbose=1)\r\n predict = model.predict(dataset, batch_size=batch_size, verbose=1)\r\n if dataset_name == \"speech-commands\":\r\n pred_words = ScoreUtils.speech_commands_prediction(predict)\r\n pred = [DSDataUtils.get_words_idx(s) for s in pred_words]\r\n else:\r\n pred = np.argmax(predict, axis=1)\r\n\r\n if len(layer_names) == 1:\r\n layer_outputs = [\r\n temp_model.predict(dataset, batch_size=batch_size, verbose=1)\r\n ]\r\n else:\r\n layer_outputs = temp_model.predict(\r\n dataset, batch_size=batch_size, verbose=1\r\n )\r\n\r\n print(prefix + \"Processing ATs\")\r\n ats = None\r\n for layer_name, layer_output in zip(layer_names, layer_outputs):\r\n print(\"Layer: \" + layer_name)\r\n # (primarily for convolutional layers - note that kim et al used ndim==3)\r\n # I think here should be 2.\r\n # The output shape may be like (batch_size,channel1,channel2),\r\n # and we should change it to (batch_size,channel2)\r\n if layer_output[0].ndim >= 2:\r\n # For convolutional layers\r\n 
layer_matrix = np.array(\r\n p.map(_aggr_output, [layer_output[i]\r\n for i in range(len(dataset))])\r\n )\r\n else:\r\n layer_matrix = np.array(layer_output)\r\n\r\n if ats is None:\r\n ats = layer_matrix\r\n else:\r\n ats = np.append(ats, layer_matrix, axis=1)\r\n layer_matrix = None\r\n else:\r\n p = Pool(num_proc)\r\n pred = []\r\n print(prefix + \"Model serving\")\r\n if len(layer_names) == 1:\r\n layer_outputs = [\r\n temp_model.predict(dataset, batch_size=batch_size, verbose=1)\r\n ]\r\n else:\r\n layer_outputs = temp_model.predict(\r\n dataset, batch_size=batch_size, verbose=1\r\n )\r\n\r\n print(prefix + \"Processing ATs\")\r\n ats = None\r\n for layer_name, layer_output in zip(layer_names, layer_outputs):\r\n print(\"Layer: \" + layer_name)\r\n if layer_output[0].ndim == 3:\r\n # For convolutional layers\r\n layer_matrix = np.array(\r\n p.map(_aggr_output, [layer_output[i]\r\n for i in range(len(dataset))])\r\n )\r\n else:\r\n layer_matrix = np.array(layer_output)\r\n\r\n if ats is None:\r\n ats = layer_matrix\r\n else:\r\n ats = np.append(ats, layer_matrix, axis=1)\r\n layer_matrix = None\r\n\r\n # if save_path is not None:\r\n # np.save(save_path[0], ats)\r\n # np.save(save_path[1], pred)\r\n\r\n return ats, pred\r\n\r\n\r\ndef find_closest_at(at, train_ats):\r\n \"\"\"The closest distance between subject AT and training ATs.\r\n Args:\r\n at (list): List of activation traces of an input.\r\n train_ats (list): List of activation traces in training set (filtered)\r\n\r\n Returns:\r\n dist (int): The closest distance.\r\n at (list): Training activation trace that has the closest distance.\r\n \"\"\"\r\n\r\n dist = np.linalg.norm(at - train_ats, axis=1)\r\n return (min(dist), train_ats[np.argmin(dist)])\r\n\r\n\r\ndef _get_train_target_ats(model, x_train, x_target, target_name, layer_names, args):\r\n \"\"\"Extract ats of train and target inputs. 
If there are saved files, then skip it.\r\n Args:\r\n model (keras model): Subject model.\r\n x_train (list): Set of training inputs.\r\n x_target (list): Set of target (test or adversarial) inputs.\r\n target_name (str): Name of target set.\r\n layer_names (list): List of selected layer names.\r\n args: keyboard console_args.\r\n Returns:\r\n train_ats (list): ats of train set.\r\n train_pred (list): pred of train set.\r\n target_ats (list): ats of target set.\r\n target_pred (list): pred of target set.\r\n \"\"\"\r\n train_size = len(x_train)\r\n saved_train_path = _get_saved_path(\r\n args.save_path, args.dataset, args.network, train_size, \"train\", layer_names)\r\n if os.path.exists(saved_train_path[0]):\r\n print(infog(\"Found saved {} ATs, skip serving\".format(\"train\")))\r\n # In case train_ats is stored in a disk\r\n train_ats = np.load(saved_train_path[0])\r\n train_pred = np.load(saved_train_path[1])\r\n else:\r\n train_ats, train_pred = get_ats(\r\n model,\r\n x_train,\r\n \"train\",\r\n layer_names,\r\n num_classes=args.num_classes,\r\n is_classification=args.is_classification,\r\n save_path=saved_train_path,\r\n dataset_name=args.dataset,\r\n )\r\n print(infog(\"train ATs is saved at \" + saved_train_path[0]))\r\n if saved_train_path is not None:\r\n np.save(saved_train_path[0], train_ats)\r\n np.save(saved_train_path[1], train_pred)\r\n\r\n saved_target_path = _get_saved_path(\r\n args.save_path, args.dataset, args.network, train_size, target_name, layer_names\r\n )\r\n\r\n if True:\r\n target_ats, target_pred = get_ats(\r\n model,\r\n x_target,\r\n target_name,\r\n layer_names,\r\n num_classes=args.num_classes,\r\n is_classification=args.is_classification,\r\n save_path=saved_target_path,\r\n dataset_name=args.dataset,\r\n )\r\n print(infog(target_name + \" ATs is saved at \" + saved_target_path[0]))\r\n return train_ats, train_pred, target_ats, target_pred\r\n\r\n\r\ndef generate_at(model, x_train, args, layer_names):\r\n train_size = len(x_train)\r\n saved_train_path = _get_saved_path(\r\n args.save_path, args.dataset, args.network, train_size, \"train\", layer_names)\r\n if os.path.exists(saved_train_path[0]):\r\n print(infog(\"Found saved {} ATs, skip serving\".format(\"train\")))\r\n print(\"Skip training ats generation\")\r\n else:\r\n train_ats, train_pred = get_ats(\r\n model,\r\n x_train,\r\n \"train\",\r\n layer_names,\r\n num_classes=args.num_classes,\r\n is_classification=args.is_classification,\r\n save_path=saved_train_path,\r\n )\r\n print(infog(\"train ATs is saved at \" + saved_train_path[0]))\r\n if saved_train_path is not None:\r\n np.save(saved_train_path[0], train_ats)\r\n np.save(saved_train_path[1], train_pred)\r\n\r\n\r\ndef fetch_dsa(model, x_train, x_target, target_name, layer_names, args):\r\n # \"\"\"Distance-based SA\r\n # Args:\r\n # model (keras model): Subject model.\r\n # x_train (list): Set of training inputs.\r\n # x_target (list): Set of target (test or adversarial) inputs.\r\n # target_name (str): Name of target set.\r\n # sa_layer_names (list): List of selected layer names.\r\n # console_args: keyboard console_args.\r\n # Returns:\r\n # dsa (list): List of dsa for each target input.\r\n # \"\"\"\r\n\r\n assert args.is_classification\r\n\r\n prefix = info(\"[\" + target_name + \"] \")\r\n train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(\r\n model, x_train, x_target, target_name, layer_names, args\r\n )\r\n\r\n class_matrix = {}\r\n all_idx = []\r\n for i, label in enumerate(train_pred):\r\n if label not in 
class_matrix:\r\n class_matrix[label] = []\r\n class_matrix[label].append(i)\r\n all_idx.append(i)\r\n\r\n dsa = []\r\n\r\n print(prefix + \"Fetching DSA\")\r\n for i, at in enumerate(tqdm(target_ats)):\r\n label = target_pred[i]\r\n a_dist, a_dot = find_closest_at(at, train_ats[class_matrix[label]])\r\n b_dist, _ = find_closest_at(\r\n a_dot, train_ats[list(set(all_idx) - set(class_matrix[label]))]\r\n )\r\n dsa.append(a_dist / b_dist)\r\n\r\n return dsa\r\n\r\n\r\ndef fetch_mdsa(model, x_train, x_target, target_name, layer_names, args):\r\n \"\"\"\r\n @param model: Subject model.\r\n @param x_train: Set of training inputs.\r\n @param x_target: Set of target (test or adversarial) inputs.\r\n @param target_name: name of targeted test inputs\r\n @param layer_names: List of selected layer names.\r\n @param args: keyboard console_args.\r\n @return: List of mdsa for each target input.\r\n \"\"\"\r\n\r\n assert args.is_classification\r\n\r\n prefix = info(\"[\" + target_name + \"] \")\r\n train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(\r\n model, x_train, x_target, target_name, layer_names, args\r\n )\r\n\r\n class_matrix = {}\r\n all_idx = []\r\n for i, label in enumerate(train_pred):\r\n if label not in class_matrix:\r\n class_matrix[label] = []\r\n class_matrix[label].append(i)\r\n all_idx.append(i)\r\n mdsa = []\r\n\r\n print(prefix + \"Fetching MDSA\")\r\n train_size = len(x_train)\r\n mdsa_inter_path = os.path.join(\r\n args.save_path, f\"{args.dataset}_{args.network}_{train_size}_mdsa_inter.npz\")\r\n if os.path.exists(mdsa_inter_path):\r\n inter_dict = np.load(mdsa_inter_path, allow_pickle=True)\r\n to_keep_dict, mu_dict, Sinv_dict = inter_dict[\"to_keep\"][(\r\n )], inter_dict[\"mu\"][()], inter_dict[\"Sinv\"][()]\r\n else:\r\n # generate to_keep\r\n # here, train_ats should be like (test_size, cols_nums)\r\n to_keep_dict = dict()\r\n mu_dict = dict()\r\n Sinv_dict = dict()\r\n for label in range(args.num_classes):\r\n _to_keep = np.ones(train_ats.shape[1], dtype=np.bool_)\r\n # print(\"INFO\",train_ats[class_matrix[label]].shape)\r\n col_vectors = np.transpose(train_ats[class_matrix[label]])\r\n # print(\"INFO\",col_vectors.shape)\r\n for i in range(col_vectors.shape[0]):\r\n # print(np.var(col_vectors[i]))\r\n if np.var(col_vectors[i]) < args.var_threshold:\r\n _to_keep[i] = False\r\n refined_ats = col_vectors[_to_keep, :]\r\n to_keep_dict[label] = _to_keep\r\n _mu = np.mean(refined_ats, axis=1).transpose()\r\n mu_dict[label] = _mu.copy()\r\n _Sinv = np.linalg.inv(np.cov(refined_ats))\r\n Sinv_dict[label] = _Sinv.copy()\r\n np.savez(mdsa_inter_path, to_keep=to_keep_dict,\r\n mu=mu_dict, Sinv=Sinv_dict)\r\n\r\n for i, at in enumerate(tqdm(target_ats)):\r\n to_keep = to_keep_dict[target_pred[i]]\r\n col_vector = at.transpose()\r\n refined_col_vector = col_vector[to_keep].transpose()\r\n label = target_pred[i]\r\n mu, Sinv = mu_dict[label], Sinv_dict[label]\r\n tmp = np.dot((refined_col_vector - mu).transpose(), Sinv)\r\n mdsa.append(np.sqrt(np.dot(tmp, (refined_col_vector - mu))).item())\r\n\r\n return mdsa\r\n\r\n\r\ndef _get_kdes(train_ats, train_pred, class_matrix, args):\r\n \"\"\"Kernel density estimation\r\n Args:\r\n train_ats (list): List of activation traces in training set.\r\n train_pred (list): List of prediction of train set.\r\n class_matrix (list): List of index of classes.\r\n args: Keyboard console_args.\r\n Returns:\r\n kdes (list): List of kdes per label if classification task.\r\n removed_cols (list): List of removed columns by variance 
threshold.\r\n \"\"\"\r\n\r\n removed_cols = []\r\n if args.is_classification:\r\n for label in range(args.num_classes):\r\n col_vectors = np.transpose(train_ats[class_matrix[label]])\r\n for i in range(col_vectors.shape[0]):\r\n if (\r\n np.var(col_vectors[i]) < args.var_threshold\r\n and i not in removed_cols\r\n ):\r\n removed_cols.append(i)\r\n print(sorted(removed_cols))\r\n kdes = {}\r\n for label in tqdm(range(args.num_classes), desc=\"kde\"):\r\n refined_ats = np.transpose(train_ats[class_matrix[label]])\r\n refined_ats = np.delete(refined_ats, removed_cols, axis=0)\r\n print(refined_ats.shape)\r\n print(label)\r\n if refined_ats.shape[0] == 0:\r\n print(\r\n warn(\"ats were removed by threshold {}\".format(\r\n args.var_threshold))\r\n )\r\n break\r\n kdes[label] = gaussian_kde(refined_ats)\r\n\r\n else:\r\n if np.isnan(train_ats).any():\r\n print(\"Found nan in train ats\")\r\n col_vectors = np.transpose(train_ats)\r\n for i in range(col_vectors.shape[0]):\r\n if np.var(col_vectors[i]) < args.var_threshold:\r\n removed_cols.append(i)\r\n print(len(removed_cols))\r\n refined_ats = np.transpose(train_ats)\r\n refined_ats = np.delete(refined_ats, removed_cols, axis=0)\r\n if refined_ats.shape[0] == 0:\r\n print(warn(\"ats were removed by threshold {}\".format(args.var_threshold)))\r\n kdes = [gaussian_kde(refined_ats)]\r\n print(gaussian_kde(refined_ats))\r\n # print(type(kdes[0]))\r\n # if np.isnan(kdes[0]).any():\r\n # raise Exception(\"Found NaN in kde\")\r\n\r\n print(infog(\"The number of removed columns: {}\".format(len(removed_cols))))\r\n\r\n return kdes, removed_cols\r\n\r\n\r\ndef _get_lsa(kde, at, removed_cols):\r\n refined_at = np.delete(at, removed_cols, axis=0)\r\n # print(refined_at)\r\n # print(np.transpose(refined_at))\r\n transpose_refined_at = np.transpose(refined_at)\r\n _logpdf = -kde.logpdf(transpose_refined_at)\r\n res = np.asscalar(_logpdf)\r\n if np.isnan(res).any() or np.isinf(res).any():\r\n raise Exception()\r\n return np.asscalar(-kde.logpdf(np.transpose(refined_at)))\r\n\r\n\r\ndef fetch_lsa(model, x_train, x_target, target_name, layer_names, args):\r\n def check_nan(x):\r\n import math\r\n if isinstance(x, np.ndarray):\r\n if np.isnan(x).any() or np.isinf(x).any():\r\n raise Exception(\"nan\")\r\n if isinstance(x, list):\r\n for xi in x:\r\n if math.isnan(xi) or math.isinf(xi):\r\n raise Exception(\"nan\")\r\n print(\"No nan found\")\r\n\r\n # \"\"\"Likelihood-based SA\r\n # Args:\r\n # model (keras model): Subject model.\r\n # x_train (list): Set of training inputs.\r\n # x_target (list): Set of target (test or[] adversarial) inputs.\r\n # target_name (str): Name of target set.\r\n # sa_layer_names (list): List of selected layer names.\r\n # console_args: Keyboard console_args.\r\n # Returns:\r\n # lsa (list): List of lsa for each target input.\r\n # \"\"\"\r\n\r\n prefix = info(\"[\" + target_name + \"] \")\r\n train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(\r\n model, x_train, x_target, target_name, layer_names, args\r\n )\r\n\r\n check_nan(train_ats)\r\n check_nan(train_pred)\r\n check_nan(target_ats)\r\n check_nan(target_pred)\r\n\r\n class_matrix = {}\r\n if args.is_classification:\r\n for i, label in enumerate(train_pred):\r\n if label not in class_matrix.keys():\r\n class_matrix[label] = []\r\n class_matrix[label].append(i)\r\n\r\n kdes, removed_cols = _get_kdes(train_ats, train_pred, class_matrix, args)\r\n\r\n lsa = []\r\n print(prefix + \"Fetching LSA\")\r\n if args.is_classification:\r\n for i, at in 
enumerate(tqdm(target_ats)):\r\n label = target_pred[i]\r\n kde = kdes[label]\r\n lsa.append(_get_lsa(kde, at, removed_cols))\r\n else:\r\n kde = kdes[0]\r\n for at in tqdm(target_ats):\r\n lsa.append(_get_lsa(kde, at, removed_cols))\r\n\r\n return lsa\r\n\r\n\r\ndef get_sc(lower, upper, k, sa):\r\n \"\"\"Surprise Coverage\r\n Args:\r\n lower (int): Lower bound.\r\n upper (int): Upper bound.\r\n k (int): The number of buckets.\r\n sa (list): List of lsa or dsa.\r\n Returns:\r\n cov (int): Surprise coverage.\r\n \"\"\"\r\n\r\n buckets = np.digitize(sa, np.linspace(lower, upper, k))\r\n return len(list(set(buckets))) / float(k) * 100\r\n\r\n\r\n# sa_selected_layers = {\r\n# 'alexnet' : [\"\"],\r\n# 'lenet5': ['dense_3'],\r\n# 'vgg16': ['dense_1'],\r\n# 'resnet20': ['activation_19'],\r\n# 'resnet32': ['activation_28'],\r\n# 'vgg19': ['block5_conv4'],\r\n# 'resnet50': ['activation_49'],\r\n# 'deepspeech': ['dense_1'],\r\n# 'dave-orig': ['fc4'],\r\n# }\r\n\r\nsa_selected_layers = {\r\n 'cifar10_alexnet': [\"dense_2\"], # -3\r\n # 'cifar10_alexnet': [\"dense_1\"], # -3\r\n \"fashion-mnist_lenet5\": [\"dense_3\"], # -2\r\n 'mnist_lenet5': ['dense_3'], # -2\r\n 'cifar10_vgg16': ['dense_1'], # -3\r\n 'cifar10_resnet20': ['flatten_1'], # -1\r\n 'cifar100_resnet32': ['flatten_1'], # -1\r\n 'imagenet_vgg19': ['block5_conv4'], # -6\r\n 'imagenet_resnet50': ['activation_49'], # -3\r\n 'speech-commands_deepspeech': ['dense_1'],\r\n 'driving_dave-orig': ['fc4'],\r\n 'driving_dave-dropout': ['fc3'],\r\n}\r\n",
"import abc\r\nimport os\r\nimport pickle\r\nfrom abc import ABC\r\nfrom concurrent.futures.thread import ThreadPoolExecutor\r\nfrom typing import Tuple, List, Union, Dict\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom dataclasses import dataclass\r\nfrom scipy.stats import gaussian_kde\r\n# from tensorflow.keras.models import Model\r\nfrom keras.models import Model\r\n\r\nfrom tqdm import tqdm\r\n\r\n\r\n@dataclass\r\nclass SurpriseAdequacyConfig:\r\n \"\"\"Stores basic immutable surprise adequacy configuration.\r\n Instances of this class are reusable amongst different instances of surprise adequacy.\r\n\r\n Note: Jetbrains 'unresolved reference' is wrong: https://youtrack.jetbrains.com/issue/PY-28549\r\n\r\n Args:\r\n is_classification (bool): A boolean indicating if the NN under test solves a classification problem.\r\n num_classes (None, int): The number of classes (for classification problems)\r\n or None (for regression problems). Default: None\r\n layer_names (List(str)): List of layer names whose ATs are to be extracted. Code takes last layer.\r\n saved_path (str): Path to store and load ATs\r\n dataset_name (str): Dataset to be used. Currently supports mnist and cifar-10.\r\n num_classes (int): No. of classes in classification. Default is 10.\r\n min_var_threshold (float): Threshold value to check variance of ATs\r\n batch_size (int): Batch size to use while predicting.\r\n\r\n Raises:\r\n ValueError: If any of the config parameters takes an illegal value.\r\n \"\"\"\r\n\r\n saved_path: str\r\n is_classification: bool\r\n layer_names: List[str]\r\n ds_name: str\r\n net_name: str\r\n num_classes: Union[int, None]\r\n min_var_threshold: float = 1e-5\r\n batch_size: int = 128\r\n\r\n def __post_init__(self):\r\n if self.is_classification and not self.num_classes:\r\n raise ValueError(\"num_classes is a mandatory parameter \"\r\n \"in SurpriseAdequacyConfig for classification problems\")\r\n elif not self.is_classification and self.num_classes:\r\n raise ValueError(f\"num_classes must be None (but was {self.num_classes}) \"\r\n \"in SurpriseAdequacyConfig for regression problems\")\r\n elif self.is_classification and self.num_classes < 0:\r\n raise ValueError(f\"num_classes must be positive but was {self.num_classes}) \")\r\n elif self.min_var_threshold < 0:\r\n raise ValueError(f\"Variance threshold cannot be negative, but was {self.min_var_threshold}\")\r\n\r\n elif self.ds_name is None or self.ds_name == \"\":\r\n raise ValueError(f\"dataset name must not be None or empty\")\r\n\r\n elif len(self.layer_names) == 0:\r\n raise ValueError(f\"Layer list cannot be empty\")\r\n elif len(self.layer_names) != len(set(self.layer_names)):\r\n raise ValueError(f\"Layer list cannot contain duplicates\")\r\n\r\n\r\nclass SurpriseAdequacy(ABC):\r\n\r\n def __init__(self, model: tf.keras.Model, train_data: np.ndarray, config: SurpriseAdequacyConfig) -> None:\r\n self.model = model\r\n self.train_data = train_data\r\n self.train_ats = None\r\n self.train_pred = None\r\n self.class_matrix = {}\r\n self.config = config\r\n self.train_size = len(self.train_data)\r\n\r\n def _get_saved_path(self, ds_type: str, ) -> Tuple[str, str]:\r\n \"\"\"Determine saved path of ats and pred\r\n\r\n Args:\r\n ds_type: Type of dataset: Typically one of {Train, Test, Target}.\r\n\r\n Returns:\r\n ats_path: File path of ats.\r\n pred_path: File path of pred (independent of layers)\r\n \"\"\"\r\n\r\n joined_layer_names = \"_\".join(self.config.layer_names)\r\n\r\n return (\r\n os.path.join(\r\n 
self.config.saved_path,\r\n self.config.ds_name + \"_\" + self.config.net_name + \"_\" + ds_type + \"_\" + str(self.train_size) +\r\n \"_\" + joined_layer_names + \"_ats\" + \".npy\",\r\n ),\r\n os.path.join(self.config.saved_path, self.config.ds_name + \"_\" + self.config.net_name + \"_\" + ds_type +\r\n \"_\" + str(self.train_size) + \"_pred\" + \".npy\"),\r\n )\r\n\r\n # Returns ats and returns predictions\r\n def _load_or_calculate_ats(self, dataset: np.ndarray, ds_type: str) -> Tuple[np.ndarray, np.ndarray]:\r\n\r\n \"\"\"Determine activation traces train, target, and test datasets\r\n\r\n Args:\r\n dataset (ndarray): x_train or x_test or x_target.\r\n ds_type (str): Type of dataset: Train, Test, or Target.\r\n\r\n Returns:\r\n ats (ndarray): Activation traces (Shape of num_examples * num_nodes).\r\n pred (ndarray): 1-D Array of predictions\r\n\r\n \"\"\"\r\n print(f\"Calculating the ats for {ds_type} dataset\")\r\n\r\n saved_target_path = self._get_saved_path(ds_type)\r\n # YM: I want to use cached training activation for train set\r\n # all test case should be re-calculated because we use different groups of test cases.\r\n if ds_type.lower() == \"train\":\r\n if saved_target_path is not None and os.path.exists(saved_target_path[0]):\r\n print(f\"Found saved {ds_type} ATs, skip at collection from model\")\r\n return self._load_ats(ds_type)\r\n else:\r\n ats, pred = self._calculate_ats(dataset)\r\n if saved_target_path is not None:\r\n np.save(saved_target_path[0], ats)\r\n np.save(saved_target_path[1], pred)\r\n print(\r\n f\"[{ds_type}] Saved the ats and predictions to {saved_target_path[0]} and {saved_target_path[1]}\")\r\n else:\r\n raise ValueError(\"Try to save train at but save_path is None\")\r\n return ats, pred\r\n else:\r\n ats, pred = self._calculate_ats(dataset)\r\n return ats, pred\r\n\r\n @classmethod\r\n def _output_dim_reduction(cls, layer_output):\r\n return np.mean(layer_output, axis=tuple(range(1, layer_output.ndim - 1)))\r\n\r\n def _calculate_ats(self, dataset: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\r\n output_layers = [self.model.get_layer(layer_name).output for layer_name in self.config.layer_names]\r\n output_layers.append(self.model.output)\r\n temp_model = Model(\r\n inputs=self.model.input,\r\n outputs=output_layers\r\n )\r\n\r\n # Get the activation traces of the inner layers and the output of the final layer\r\n layer_outputs: List[np.ndarray] = temp_model.predict(dataset, batch_size=self.config.batch_size, verbose=1)\r\n # Remove the (output layer) dnn outputs from the list and store them as separate result\r\n dnn_output = layer_outputs.pop()\r\n\r\n if self.config.is_classification:\r\n pred = np.argmax(dnn_output, axis=1)\r\n\r\n ats = None\r\n for layer_name, layer_output in zip(self.config.layer_names, layer_outputs):\r\n print(\"Layer: \" + layer_name)\r\n if layer_output[0].ndim >= 3:\r\n # (primarily for convolutional layers - note that kim et al used ndim==3)\r\n layer_matrix = self._output_dim_reduction(layer_output)\r\n else:\r\n layer_matrix = np.array(layer_output)\r\n\r\n if ats is None:\r\n # Shape of ats will be num_inputs x num_nodes_in_layer\r\n ats = layer_matrix\r\n else:\r\n ats = np.append(ats, layer_matrix, axis=1)\r\n\r\n return ats, pred\r\n\r\n def _load_ats(self, ds_type: str) -> Tuple[np.ndarray, np.ndarray]:\r\n # In case train_ats is stored in a disk\r\n saved_target_path = self._get_saved_path(ds_type)\r\n ats: np.ndarray = np.load(saved_target_path[0])\r\n pred: np.ndarray = np.load(saved_target_path[1])\r\n return 
ats, pred\r\n\r\n def _load_or_calc_train_ats(self) -> None:\r\n \"\"\"Load or get actviation traces of training inputs\r\n\r\n Args:\r\n Returns:\r\n None. train_ats and train_pred are init() variables in super class NoveltyScore.\r\n\r\n \"\"\"\r\n\r\n saved_train_path = self._get_saved_path(\"train\")\r\n\r\n if os.path.exists(saved_train_path[0]):\r\n print(\"Found saved {} ATs, skip serving\".format(\"train\"))\r\n # In case train_ats is stored in a disk\r\n self.train_ats, self.train_pred = np.load(saved_train_path[0]), np.load(saved_train_path[1])\r\n\r\n else:\r\n self.train_ats, self.train_pred = self._load_or_calculate_ats(dataset=self.train_data, ds_type=\"train\")\r\n\r\n def prep(self) -> None:\r\n \"\"\"\r\n Prepare class matrix from training activation traces. Class matrix is a dictionary\r\n with keys as labels and values as lists of positions as predicted by model\r\n\r\n Args:\r\n stored on the file system for later use.\r\n\r\n Returns:\r\n None.\r\n\r\n \"\"\"\r\n self._load_or_calc_train_ats()\r\n if self.config.is_classification:\r\n # TODO Check if we can vectorize this loop\r\n for i, label in enumerate(self.train_pred):\r\n if label not in self.class_matrix:\r\n self.class_matrix[label] = []\r\n self.class_matrix[label].append(i)\r\n\r\n def clear_cache(self, saved_path: str) -> None:\r\n \"\"\"\r\n\r\n Delete files of activation traces.\r\n\r\n Args:\r\n saved_path(str): Base directory path\r\n\r\n \"\"\"\r\n to_remove = ['train', 'test', 'target']\r\n for f in to_remove:\r\n path = self._get_saved_path(f)\r\n os.remove(os.path.join(saved_path, path[0]))\r\n os.remove(os.path.join(saved_path, path[1]))\r\n\r\n # files = [f for f in os.listdir(saved_path) if f.endswith('.npy')]\r\n # for f in files:\r\n # os.remove(os.path.join(saved_path, f))\r\n\r\n @abc.abstractmethod\r\n def calc(self, target_data: np.ndarray, ds_type: str) -> Tuple[np.ndarray, np.ndarray]:\r\n \"\"\"\r\n Calculates prediction and novelty scores\r\n :param target_data: a numpy array consisting of the data to be tested\r\n :param ds_type: string, 'train' or 'test'\r\n :return: A tuple of two one-dimensional arrays: surprises and predictions\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass LSA(SurpriseAdequacy):\r\n\r\n def __init__(self, model: tf.keras.Model, train_data: np.ndarray, config: SurpriseAdequacyConfig) -> None:\r\n super().__init__(model, train_data, config)\r\n self.kdes = None\r\n self.removed_rows = None\r\n\r\n def prep(self, ) -> None:\r\n super().prep()\r\n self._load_or_create_likelyhood_estimator()\r\n\r\n def _load_or_create_likelyhood_estimator(self) -> None:\r\n \"\"\"Load or get actviation traces of training inputs\r\n\r\n Args:\r\n\r\n Returns:\r\n None. 
train_ats and train_pred are init() variables in super class NoveltyScore.\r\n\r\n \"\"\"\r\n\r\n kdes_path = os.path.join(self.config.saved_path,\r\n self.config.ds_name + \"_\" + self.config.net_name + \"_train_\" + str(\r\n self.train_size) + \"kdes.npy\")\r\n rem_row_path = os.path.join(self.config.saved_path,\r\n self.config.ds_name + \"_\" + self.config.net_name + \"_train_\" + str(\r\n self.train_size) + \"remrows.npy\")\r\n\r\n if os.path.exists(kdes_path) and os.path.exists(rem_row_path):\r\n with open(kdes_path, 'rb') as file:\r\n self.kdes = pickle.load(file)\r\n with open(rem_row_path, 'rb') as file:\r\n self.removed_rows = pickle.load(file)\r\n else:\r\n self.kdes, self.removed_rows = self._calc_kdes()\r\n with open(kdes_path, 'wb') as file:\r\n pickle.dump(self.kdes, file=file)\r\n with open(rem_row_path, 'wb') as file:\r\n pickle.dump(self.removed_rows, file=file)\r\n\r\n def calc(self, target_data: np.ndarray, ds_type: str) -> Tuple[np.ndarray, np.ndarray]:\r\n \"\"\"\r\n Return LSA values for target. Note that target_data here means both test and adversarial data. Separate calls in main.\r\n\r\n Args:\r\n target_data (ndarray): x_test or x_target.\r\n ds_type (str): Type of dataset: Train, Test, or Target.\r\n\r\n Returns:\r\n lsa (float): List of scalar LSA values\r\n\r\n \"\"\"\r\n assert self.kdes is not None and self.removed_rows is not None, \\\r\n \"LSA has not yet been prepared. Run lsa.prep()\"\r\n\r\n target_ats, target_pred = self._load_or_calculate_ats(dataset=target_data, ds_type=ds_type, )\r\n\r\n print(f\"[{ds_type}] Calculating LSA\")\r\n lsa_as_list = self._calc_lsa(target_ats, target_pred)\r\n return np.array(lsa_as_list), target_pred\r\n\r\n def _calc_kdes(self) -> Tuple[dict, List[int]]:\r\n \"\"\"\r\n Determine Gaussian KDE for each label and list of removed rows based on variance threshold, if any.\r\n\r\n Args:\r\n Returns:\r\n kdes: Dict - labels are keys, values are scipy kde objects\r\n removed_rows: Array of positions of removed rows\r\n\r\n \"\"\"\r\n\r\n if self.config.is_classification:\r\n kdes, removed_rows = self._classification_kdes()\r\n else:\r\n kdes, removed_rows = self._regression_kdes()\r\n\r\n print((f\"Ignoring the activations of {len(removed_rows)} traces \"\r\n f\"as their variance is not high enough.\"))\r\n\r\n return kdes, removed_rows\r\n\r\n def _regression_kdes(self) -> Tuple[List[gaussian_kde], List[int]]:\r\n removed_rows = []\r\n row_vectors = np.transpose(self.train_ats)\r\n for activation_node in range(row_vectors.shape[0]):\r\n if np.var(row_vectors[activation_node]) < self.config.min_var_threshold:\r\n removed_rows.append(activation_node)\r\n refined_ats = np.transpose(self.train_ats)\r\n refined_ats = np.delete(refined_ats, removed_rows, axis=0)\r\n if refined_ats.shape[0] != 0:\r\n\r\n kdes = [self._create_gaussian_kde(refined_ats)]\r\n return kdes, removed_rows\r\n\r\n else:\r\n raise ValueError(f\"All ats were removed by threshold: \", self.config.min_var_threshold)\r\n\r\n def _classification_kdes(self) -> Tuple[Dict[int, gaussian_kde], List[int]]:\r\n removed_rows = []\r\n for label in range(self.config.num_classes):\r\n # Shape of (num_activation nodes x num_examples_by_label)\r\n row_vectors: np.ndarray = np.transpose(self.train_ats[self.class_matrix[label]])\r\n positions: np.ndarray = np.where(np.var(row_vectors, axis=1) < self.config.min_var_threshold)[0]\r\n\r\n for p in positions:\r\n removed_rows.append(p)\r\n removed_rows = list(set(removed_rows))\r\n print(removed_rows)\r\n kdes = {}\r\n for label in 
tqdm(range(self.config.num_classes), desc=\"kde\"):\r\n\r\n refined_ats = np.transpose(self.train_ats[self.class_matrix[label]])\r\n refined_ats = np.delete(refined_ats, removed_rows, axis=0)\r\n\r\n if refined_ats.shape[0] == 0:\r\n print(f\"Ats for label {label} were removed by threshold {self.config.min_var_threshold}\")\r\n break\r\n\r\n kdes[label] = self._create_gaussian_kde(refined_ats)\r\n\r\n return kdes, removed_rows\r\n\r\n @staticmethod\r\n def _create_gaussian_kde(refined_ats):\r\n return gaussian_kde(refined_ats)\r\n\r\n def _calc_lsa(self,\r\n target_ats: np.ndarray,\r\n target_pred: np.ndarray) -> np.ndarray:\r\n \"\"\"\r\n Calculate scalar LSA value of target activation traces\r\n\r\n Args:\r\n target_ats (ndarray): Activation traces of target_data.\r\n target_pred(ndarray): 1-D Array of predicted labels\r\n ds_type (str): Type of dataset: Test or Target.\r\n removed_rows (list): Positions to skip\r\n kdes: Dict of scipy kde objects\r\n\r\n Returns:\r\n lsa (float): List of scalar LSA values\r\n\r\n \"\"\"\r\n\r\n if self.config.is_classification:\r\n lsa: np.ndarray = self._calc_classification_lsa(target_ats, target_pred)\r\n else:\r\n lsa: np.ndarray = self._calc_regression_lsa(target_ats)\r\n return lsa\r\n\r\n def _calc_regression_lsa(self, target_ats: np.ndarray) -> np.ndarray:\r\n kde = self.kdes[0]\r\n refined_at: np.ndarray = np.delete(target_ats, self.removed_rows, axis=1)\r\n return -kde.logpdf(np.transpose(refined_at))\r\n\r\n def _calc_classification_lsa(self,\r\n target_ats: np.ndarray,\r\n target_pred: np.ndarray) -> np.ndarray:\r\n result = np.empty(shape=target_pred.shape, dtype=float)\r\n refined_ats = np.delete(target_ats, self.removed_rows, axis=1)\r\n for label in self.class_matrix.keys():\r\n for_label_indexes = target_pred == label\r\n kde = self.kdes[label]\r\n selected_ats = refined_ats[for_label_indexes]\r\n result[for_label_indexes] = -kde.logpdf(np.transpose(selected_ats))\r\n return result\r\n\r\n\r\nclass DSA(SurpriseAdequacy):\r\n\r\n def __init__(self, model: tf.keras.Model,\r\n train_data: np.ndarray,\r\n config: SurpriseAdequacyConfig,\r\n dsa_batch_size=500,\r\n max_workers=None) -> None:\r\n super().__init__(model, train_data, config)\r\n self.dsa_batch_size = dsa_batch_size\r\n self.max_workers = max_workers\r\n\r\n def calc(self, target_data: np.ndarray, ds_type: str) -> Tuple[np.ndarray, np.ndarray]:\r\n \"\"\"\r\n Return DSA values for target. Note that target_data here means both test and adversarial data. 
Separate calls in main.\r\n\r\n Args:\r\n target_data (ndarray): x_test or x_target.\r\n ds_type (str): Type of dataset: Train, Test, or Target.\r\n\r\n Returns:\r\n dsa (float): List of scalar DSA values\r\n\r\n \"\"\"\r\n target_ats, target_pred = self._load_or_calculate_ats(dataset=target_data, ds_type=ds_type)\r\n return self._calc_dsa(target_ats, target_pred, ds_type), target_pred\r\n\r\n def _calc_dsa(self, target_ats: np.ndarray, target_pred: np.ndarray, ds_type: str) -> np.ndarray:\r\n\r\n \"\"\"\r\n Calculate scalar DSA value of target activation traces\r\n\r\n Args:\r\n target_ats (ndarray): Activation traces of target_data.\r\n ds_type (str): Type of dataset: Test or Target.\r\n target_pred (ndarray): 1-D Array of predicted labels\r\n\r\n Returns:\r\n dsa (float): List of scalar DSA values\r\n\r\n \"\"\"\r\n\r\n start = 0\r\n\r\n print(f\"[{ds_type}] Calculating DSA\")\r\n\r\n num_targets = target_pred.shape[0]\r\n futures = []\r\n dsa = np.empty(shape=target_pred.shape[0])\r\n\r\n print(f\"[{self.__class__}] Using {self.train_ats.shape[0]} train samples\")\r\n with ThreadPoolExecutor(max_workers=self.max_workers) as executor:\r\n while start < num_targets:\r\n\r\n # Select batch\r\n diff = num_targets - start\r\n if diff < self.dsa_batch_size:\r\n batch = target_pred[start:start + diff]\r\n else:\r\n batch = target_pred[start: start + self.dsa_batch_size]\r\n\r\n # Calculate DSA per label\r\n for label in range(self.config.num_classes):\r\n\r\n def task(t_batch, t_label, t_start):\r\n matches = np.where(t_batch == t_label)\r\n if len(matches) > 0:\r\n a_min_dist, b_min_dist = self._dsa_distances(t_label, matches, t_start, target_ats)\r\n t_task_dsa = a_min_dist / b_min_dist\r\n return matches[0], t_start, t_task_dsa\r\n else:\r\n return None, None, None\r\n\r\n futures.append(executor.submit(task, np.copy(batch), label, start))\r\n\r\n start += self.dsa_batch_size\r\n\r\n for future in futures:\r\n f_idxs, f_start, f_task_dsa = future.result()\r\n if f_idxs is not None:\r\n dsa[f_idxs + f_start] = f_task_dsa\r\n\r\n return dsa\r\n\r\n def _dsa_distances(self, label: int, matches: np.ndarray, start: int, target_ats: np.ndarray) -> \\\r\n Tuple[np.ndarray, np.ndarray]:\r\n\r\n target_matches = target_ats[matches[0] + start]\r\n train_matches_same_class = self.train_ats[self.class_matrix[label]]\r\n a_dist = target_matches[:, None] - train_matches_same_class\r\n a_dist_norms = np.linalg.norm(a_dist, axis=2)\r\n a_min_dist = np.min(a_dist_norms, axis=1)\r\n closest_position = np.argmin(a_dist_norms, axis=1)\r\n closest_ats = train_matches_same_class[closest_position]\r\n other_classes_indexes = np.ones(shape=self.train_ats.shape[0], dtype=bool)\r\n other_classes_indexes[self.class_matrix[label]] = 0\r\n train_matches_other_classes = self.train_ats[other_classes_indexes]\r\n b_dist = closest_ats[:, None] - train_matches_other_classes\r\n b_dist_norms = np.linalg.norm(b_dist, axis=2)\r\n b_min_dist = np.min(b_dist_norms, axis=1)\r\n\r\n return a_min_dist, b_min_dist\r\n"
] |
[
[
"numpy.nonzero",
"numpy.random.choice",
"numpy.min",
"numpy.arange",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.max",
"numpy.random.randint"
],
[
"numpy.dot",
"numpy.savez",
"numpy.linspace",
"scipy.stats.gaussian_kde",
"numpy.mean",
"numpy.argmin",
"numpy.var",
"numpy.asscalar",
"numpy.save",
"numpy.argmax",
"numpy.load",
"numpy.isnan",
"numpy.delete",
"numpy.append",
"numpy.cov",
"numpy.transpose",
"numpy.array",
"numpy.linalg.norm",
"numpy.ones",
"numpy.isinf"
],
[
"numpy.min",
"numpy.linalg.norm",
"numpy.save",
"numpy.ones",
"numpy.delete",
"scipy.stats.gaussian_kde",
"numpy.argmin",
"numpy.argmax",
"numpy.append",
"numpy.transpose",
"numpy.var",
"numpy.load",
"numpy.copy",
"numpy.array",
"numpy.where",
"numpy.empty"
]
] |
BolunDai0216/ConsensusControl
|
[
"12f36fa3a70897b9e6cbcdab19734ca8360211a5",
"12f36fa3a70897b9e6cbcdab19734ca8360211a5"
] |
[
"series3/Exercise2.py",
"series2/code/series2.py"
] |
[
"import numpy as np\nimport math\nfrom numpy.linalg import matrix_rank\n\n\ndef main():\n R = np.array([[-2, 0, 2, 0, 0, 0, 0, 0],\n [0, 0, 0, 2, 0, -2, 0, 0],\n [-2, 2, 0, 0, 2, -2, 0, 0],\n [(math.sqrt(14)-2)/2, (math.sqrt(14)+2)/2, 0, 0,\n 0, 0, (2-math.sqrt(14))/2, -(math.sqrt(14)+2)/2],\n [0, 0, 0, 0, (2+math.sqrt(14))/2, (math.sqrt(14)-2)/2, -(2+math.sqrt(14))/2, (2-math.sqrt(14))/2]])\n print(\"The rank for the rigidity matrix is {}\".format(matrix_rank(R)))\n\n\nif __name__ == \"__main__\":\n main()\n",
"import numpy as np\nfrom numpy import linalg as LA\nimport matplotlib.pyplot as plt\nfrom pdb import set_trace\nfrom scipy.linalg import eig\n\n\ndef get_laplacian(E, n_vertices, directed=False):\n L = np.zeros((n_vertices, n_vertices))\n for e in E:\n if directed:\n L[e[1]][e[1]] += 1\n L[e[1]][e[0]] = -1\n else:\n L[e[1]][e[1]] += 1\n L[e[0]][e[0]] += 1\n L[e[1]][e[0]] = -1\n L[e[0]][e[1]] = -1\n return L\n\n\ndef get_cycle_graph(N):\n n_vertices = N\n E = []\n\n for i in range(N-1):\n E.append([i, i+1])\n E.append([N-1, 0])\n\n return E, n_vertices\n\n\ndef simulate_consensus(x_0, T, L, dt=0.001):\n x_current = x_0\n t = 0\n ts = [t]\n xs = x_0\n converged = False\n t_converged = np.inf\n while t <= T:\n x_next = x_current - np.matmul(L, x_current) * dt\n xs = np.hstack((xs, x_next))\n x_current = x_next\n t += dt\n ts.append(t)\n\n if not converged and np.amax(x_current) - np.amin(x_current) <= 0.01:\n converged = True\n t_converged = t\n\n return xs, np.array(ts), t_converged\n\n\ndef get_complete_graph(N):\n n_vertices = N\n E = []\n for i in range(N):\n for j in range(N-i-1):\n E.append([i, j+i+1])\n\n return E, n_vertices\n\n\ndef main():\n # Exercise 1\n E = [[0, 1], [1, 2], [2, 0]]\n n_vertices = 3\n\n laplacian = get_laplacian(E, n_vertices, False)\n print(laplacian)\n laplacian = get_laplacian(E, n_vertices, True)\n print(laplacian)\n\n num = [5, 15, 199]\n\n for n in num:\n E, n_vertices = get_cycle_graph(n)\n laplacian = get_laplacian(E, n_vertices, False)\n eigvals = LA.eigvals(laplacian)\n eigvals = np.sort(eigvals)\n print(\"C_{}: {}, {}, {}, {}\".format(n, eigvals[0], eigvals[1], eigvals[-2], eigvals[-1]))\n\n # Exercise 2\n x_0 = np.transpose(np.array([[10, 20, 12, 5, 30, 12, 15, 16, 25]]))\n n_vertices = 9\n E = [[0, 2], [1, 2], [2, 3], [2, 4], [2, 6], [4, 5], [4, 6], [4, 7], [5, 6], [6, 7], [6, 8]]\n L = get_laplacian(E, n_vertices, False)\n set_trace()\n xs, ts, t_converge = simulate_consensus(x_0, 20, L)\n\n # 2.b\n plt.figure()\n for i in range(xs.shape[0]):\n plt.plot(ts, xs[i, :])\n plt.title(\"Original Graph, t_converge = {}\".format(t_converge))\n plt.xlabel('t / sec')\n plt.ylabel('state')\n plt.savefig('/home/bolun/Documents/swarmrobotics/series2/original_graph_sim.png')\n\n # 2.c\n E = [[0, 2], [1, 2], [2, 3], [4, 5], [4, 6], [4, 7], [5, 6], [6, 7], [6, 8]]\n L = get_laplacian(E, n_vertices, False)\n set_trace()\n xs, ts, t_converge = simulate_consensus(x_0, 20, L)\n\n plt.figure()\n for i in range(xs.shape[0]):\n plt.plot(ts, xs[i, :])\n plt.title(\"Disconnected Graph, t_converge = {}\".format(t_converge))\n plt.xlabel('t / sec')\n plt.ylabel('state')\n plt.savefig('/home/bolun/Documents/swarmrobotics/series2/disconnected_graph_sim.png')\n\n # 2.d\n E, n_vertices = get_complete_graph(9)\n L = get_laplacian(E, n_vertices, False)\n set_trace()\n xs, ts, t_converge = simulate_consensus(x_0, 20, L)\n\n plt.figure()\n for i in range(xs.shape[0]):\n plt.plot(ts, xs[i, :])\n plt.title(\"Complete Graph, t_converge = {}\".format(t_converge))\n plt.xlabel('t / sec')\n plt.ylabel('state')\n plt.savefig('/home/bolun/Documents/swarmrobotics/series2/complete_graph_sim.png')\n\n # 2.e\n E, n_vertices = get_cycle_graph(9)\n L = get_laplacian(E, n_vertices, False)\n set_trace()\n xs, ts, t_converge = simulate_consensus(x_0, 20, L)\n\n plt.figure()\n for i in range(xs.shape[0]):\n plt.plot(ts, xs[i, :])\n plt.title(\"Cycle Graph, t_converge = {}\".format(t_converge))\n plt.xlabel('t / sec')\n plt.ylabel('state')\n 
plt.savefig('/home/bolun/Documents/swarmrobotics/series2/cycle_graph_sim.png')\n\n # Exercise 3\n # 3.c\n x_0 = np.transpose(np.array([[10, 5, 1, -5, -10]]))\n n_vertices = 5\n E = [[1, 0], [1, 2], [2, 4], [3, 2], [4, 1], [4, 3]]\n L = get_laplacian(E, n_vertices, True)\n vals, vl, vr = eig(L, left=True)\n xs, ts, t_converge = simulate_consensus(x_0, 10, L)\n plt.figure()\n for i in range(xs.shape[0]):\n plt.plot(ts, xs[i, :])\n plt.title(\"Original Graph, t_converge = {}\".format(t_converge))\n plt.xlabel('t / sec')\n plt.ylabel('state')\n plt.savefig('/home/bolun/Documents/swarmrobotics/series2/unchanged_graph_sim.png')\n\n # 3.d\n x_0 = np.transpose(np.array([[10, 5, 1, -5, -10]]))\n n_vertices = 5\n E = [[0, 4], [1, 0], [2, 4], [3, 2], [4, 1], [4, 3]]\n L = get_laplacian(E, n_vertices, True)\n vals, vl, vr = eig(L, left=True)\n xs, ts, t_converge = simulate_consensus(x_0, 10, L)\n plt.figure()\n for i in range(xs.shape[0]):\n plt.plot(ts, xs[i, :])\n plt.title(\"Converge to Average Graph, t_converge = {}\".format(t_converge))\n plt.xlabel('t / sec')\n plt.ylabel('state')\n plt.savefig('/home/bolun/Documents/swarmrobotics/series2/average_graph_sim.png')\n\n # 3.e\n x_0 = np.transpose(np.array([[10, 5, 1, -5, -10]]))\n n_vertices = 5\n E = [[0, 4], [1, 0], [2, 4], [3, 2], [4, 1]]\n L = get_laplacian(E, n_vertices, True)\n vals, vl, vr = eig(L, left=True)\n xs, ts, t_converge = simulate_consensus(x_0, 30, L)\n plt.figure()\n for i in range(xs.shape[0]):\n plt.plot(ts, xs[i, :])\n plt.title(\"Leader Graph, t_converge = {}\".format(t_converge))\n plt.xlabel('t / sec')\n plt.ylabel('state')\n plt.savefig('/home/bolun/Documents/swarmrobotics/series2/leader_graph_sim.png')\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.linalg.matrix_rank"
],
[
"numpy.hstack",
"numpy.amax",
"numpy.linalg.eigvals",
"numpy.amin",
"matplotlib.pyplot.figure",
"numpy.matmul",
"matplotlib.pyplot.savefig",
"numpy.sort",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.zeros",
"scipy.linalg.eig",
"matplotlib.pyplot.ylabel"
]
] |
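A minimal sketch of the consensus dynamics that the simulation script above integrates (the graph, initial states, and step size here are chosen purely for illustration, not taken from the repository): the state follows x' = -Lx, and on a connected undirected graph every node converges to the average of the initial states.

import numpy as np

# Laplacian L = D - A of an undirected triangle graph
L = np.array([[ 2., -1., -1.],
              [-1.,  2., -1.],
              [-1., -1.,  2.]])

x = np.array([10., 0., -4.])      # initial node states
dt = 0.01
for _ in range(2000):             # explicit Euler steps of x' = -L x
    x = x - dt * (L @ x)

print(x)                          # each entry -> 2.0, the average of [10, 0, -4]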
slps20425/reinforment-learn
|
[
"fcae362d1fe8458c2b8f00a624aae93c48318141"
] |
[
"finlab-20210319T093946Z-001/finlab/crawler.py"
] |
[
"import datetime\nimport requests\nimport pandas as pd\nimport pickle\nimport time\nimport urllib\nimport os\nfrom io import StringIO\nimport numpy as np\nimport warnings\nimport os\nimport datetime\nimport time\nfrom tqdm import tnrange, tqdm_notebook\nfrom requests.exceptions import ConnectionError\nfrom requests.exceptions import ReadTimeout\nimport ipywidgets as widgets\n\nimport pip\n\ndef import_or_install(package):\n try:\n __import__(package)\n except ImportError:\n print('Please install lxml(pip install lxml)')\n\nimport_or_install(\"lxml\")\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\ndate_range_record_file = os.path.join('history', 'date_range.pickle')\n\n\ndef requests_get(*args1, **args2):\n i = 3\n while i >= 0:\n try:\n return requests.get(*args1, **args2)\n except (ConnectionError, ReadTimeout) as error:\n print(error)\n print('retry one more time after 60s', i, 'times left')\n time.sleep(60)\n i -= 1\n return pd.DataFrame()\n\n### ----------\n### Helper\n### ----------\n\ndef otc_date_str(date):\n \"\"\"將datetime.date轉換成民國曆\n\n Args:\n date (datetime.date): 西元歷的日期\n\n Returns:\n str: 民國歷日期 ex: 109/01/01\n \"\"\"\n return str(date.year - 1911) + date.strftime('%Y/%m/%d')[4:]\n\n\ndef combine_index(df, n1, n2):\n\n \"\"\"將dataframe df中的股票代號與股票名稱合併\n\n Keyword arguments:\n\n Args:\n df (pandas.DataFrame): 此dataframe含有column n1, n2\n n1 (str): 股票代號\n n2 (str): 股票名稱\n\n Returns:\n df (pandas.DataFrame): 此dataframe的index為「股票代號+股票名稱」\n \"\"\"\n\n return df.set_index(df[n1].astype(str).str.replace(' ', '') + \\\n ' ' + df[n2].astype(str).str.replace(' ', '')).drop([n1, n2], axis=1)\n\ndef crawl_benchmark(date):\n\n date_str = date.strftime('%Y%m%d')\n res = requests_get(\"https://www.twse.com.tw/exchangeReport/MI_5MINS_INDEX?response=csv&date=\" +\n date_str + \"&_=1544020420045\")\n\n # 利用 pandas 將資料整理成表格\n\n if len(res.text) < 10:\n return pd.DataFrame()\n\n df = pd.read_csv(StringIO(res.text.replace(\"=\",\"\")), header=1, index_col='時間')\n\n # 資料處理\n\n df = df.dropna(how='all', axis=0).dropna(how='all', axis=1)\n df.index = pd.to_datetime(date.strftime('%Y %m %d ') + pd.Series(df.index))\n df = df.apply(lambda s: s.astype(str).str.replace(\",\", \"\").astype(float))\n df = df.reset_index().rename(columns={'時間':'date'})\n df['stock_id'] = '台股指數'\n return df.set_index(['stock_id', 'date'])\n\ndef crawl_capital():\n res = requests_get('https://dts.twse.com.tw/opendata/t187ap03_L.csv', headers=headers)\n res.encoding = 'utf-8'\n df = pd.read_csv(StringIO(res.text))\n time.sleep(5)\n res = requests_get('https://dts.twse.com.tw/opendata/t187ap03_O.csv', headers=headers)\n res.encoding = 'utf-8'\n df = df.append(pd.read_csv(StringIO(res.text)))\n\n df['date'] = pd.to_datetime(str(datetime.datetime.now().year) + df['出表日期'].str[3:])\n df.set_index([df['公司代號'].astype(str) + ' ' + df['公司簡稱'].astype(str), 'date'], inplace=True)\n df.index.levels[0].name = '股票名稱'\n return df\n\n\ndef interest():\n res = requests_get('https://www.twse.com.tw/exchangeReport/TWT48U_ALL?response=open_data', headers=headers)\n res.encoding = 'utf-8'\n df = pd.read_csv(StringIO(res.text))\n\n time.sleep(5)\n\n res = requests_get('https://www.tpex.org.tw/web/stock/exright/preAnnounce/prepost_result.php?l=zh-tw&o=data', headers=headers)\n res.encoding = 'utf-8'\n df = df.append(pd.read_csv(StringIO(res.text)))\n\n df['date'] = df['除權息日期'].str.replace('年', '/').str.replace('月', '/').str.replace('日', '')\n 
df['date'] = pd.to_datetime(str(datetime.datetime.now().year) + df['date'].str[3:])\n df = df.set_index([df['股票代號'].astype(str) + ' ' + df['名稱'].astype(str), 'date'])\n return df\n\n\ndef preprocess(df, date):\n df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')\n df.columns = df.columns.str.replace(' ', '')\n df.index.name = 'stock_id'\n df.columns.name = ''\n df['date'] = pd.to_datetime(date)\n df = df.reset_index().set_index(['stock_id', 'date'])\n df = df.apply(lambda s: s.astype(str).str.replace(',',''))\n\n return df\n\n\n\ndef bargin_twe(date):\n datestr = date.strftime('%Y%m%d')\n \n res = requests_get('https://www.twse.com.tw/fund/T86?response=csv&date='\\\n +datestr+'&selectType=ALLBUT0999')\n try:\n df = pd.read_csv(StringIO(res.text.replace('=','')), header=1)\n except:\n print('holiday')\n return pd.DataFrame()\n \n df = combine_index(df, '證券代號', '證券名稱')\n df = preprocess(df, date)\n return df\n\ndef bargin_otc(date):\n datestr = otc_date_str(date)\n \n url = 'https://www.tpex.org.tw/web/stock/3insti/daily_trade/3itrade_hedge_result.php?l=zh-tw&o=csv&se=EW&t=D&d='+datestr+'&s=0,asc'\n res = requests_get(url, headers=headers)\n try:\n df = pd.read_csv(StringIO(res.text), header=1)\n except:\n print('holiday')\n return pd.DataFrame()\n\n df = combine_index(df, '代號', '名稱')\n df = preprocess(df, date)\n return df\n\ndef price_twe(date):\n date_str = date.strftime('%Y%m%d')\n res = requests_get('https://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date='+date_str+'&type=ALLBUT0999', headers=headers, )\n\n if res.text == '':\n print('holiday')\n return pd.DataFrame()\n\n header = np.where(list(map(lambda l: '證券代號' in l, res.text.split('\\n')[:200])))[0][0]\n\n df = pd.read_csv(StringIO(res.text.replace('=','')), header=header-1)\n df = combine_index(df, '證券代號', '證券名稱')\n df = preprocess(df, date)\n return df\n\ndef price_otc(date):\n datestr = otc_date_str(date)\n link = 'https://www.tpex.org.tw/web/stock/aftertrading/daily_close_quotes/stk_quote_download.php?l=zh-tw&d='+datestr+'&s=0,asc,0'\n res = requests_get(link, headers=headers)\n df = pd.read_csv(StringIO(res.text), header=2)\n\n if len(df) < 30:\n print('holiday')\n return pd.DataFrame()\n\n df = combine_index(df, '代號', '名稱')\n df = preprocess(df, date)\n df = df[df['成交筆數'].str.replace(' ', '') != '成交筆數']\n return df\n\ndef pe_twe(date):\n datestr = date.strftime('%Y%m%d')\n res = requests_get('https://www.twse.com.tw/exchangeReport/BWIBBU_d?response=csv&date='+datestr+'&selectType=ALL', headers=headers)\n try:\n df = pd.read_csv(StringIO(res.text), header=1)\n except:\n print('holiday')\n return pd.DataFrame()\n\n df = combine_index(df, '證券代號', '證券名稱')\n df = preprocess(df, date)\n return df\n\ndef pe_otc(date):\n datestr = otc_date_str(date)\n res = requests_get('https://www.tpex.org.tw/web/stock/aftertrading/peratio_analysis/pera_result.php?l=zh-tw&o=csv&charset=UTF-8&d='+datestr+'&c=&s=0,asc', headers=headers)\n try:\n df = pd.read_csv(StringIO(res.text), header=3)\n df = combine_index(df, '股票代號', '名稱')\n df = preprocess(df, date)\n except:\n print('holiday')\n return pd.DataFrame()\n\n return df\n\ndef month_revenue(name, date):\n\n year = date.year - 1911\n month = (date.month+10)%12+1\n if month == 12:\n year -= 1\n url = 'https://mops.twse.com.tw/nas/t21/%s/t21sc03_%d_%d.html' % (name, year, month)\n print(url)\n res = requests_get(url, headers=headers)\n res.encoding = 'big5'\n\n try:\n dfs = pd.read_html(StringIO(res.text), encoding='big-5')\n except:\n print('MONTH ' + name + ': cannot parse ' + 
str(date))\n return pd.DataFrame()\n\n df = pd.concat([df for df in dfs if df.shape[1] <= 11 and df.shape[1] > 5])\n\n if 'levels' in dir(df.columns):\n df.columns = df.columns.get_level_values(1)\n else:\n df = df[list(range(0,10))]\n column_index = df.index[(df[0] == '公司代號')][0]\n df.columns = df.iloc[column_index]\n\n df = df.loc[:,~df.columns.isnull()]\n df = df.loc[~pd.to_numeric(df['當月營收'], errors='coerce').isnull()]\n df = df[df['公司代號'] != '合計']\n df = combine_index(df, '公司代號', '公司名稱')\n df = preprocess(df, datetime.date(date.year, date.month, 10))\n return df.drop_duplicates()\n\ndef crawl_split_twe():\n\n res = requests_get('https://www.twse.com.tw/exchangeReport/TWTAVU?response=csv&_=1537824706232', headers=headers)\n\n df = pd.read_csv(StringIO(res.text),header=1)\n df = df.dropna(how='all', axis=1).dropna(thresh=3, axis=0)\n\n def process_date(s):\n return pd.to_datetime(str(datetime.datetime.now().year) + s.str[3:])\n\n df['停止買賣日期'] = process_date(df['停止買賣日期'])\n df['恢復買賣日期'] = process_date(df['恢復買賣日期'])\n df['股票代號'] = df['股票代號'].astype(int).astype(str)\n df['stock_id'] = df['股票代號'] + ' ' + df['名稱']\n df['date'] = df['恢復買賣日期']\n df = df.set_index(['stock_id', 'date'])\n\n return df\n\n\ndef crawl_split_otc():\n res = requests_get(\"https://www.tpex.org.tw/web/stock/exright/decap/decap_download.php?l=zh-tw&d=107/09/21&s=0,asc,0\", headers=headers)\n df = pd.read_csv(StringIO(res.text), header=1)\n df = df.dropna(thresh=5, axis=0)\n df['stock_id'] = df['代號'] + ' ' + df['名稱']\n def process_date(s):\n ss = s.astype(int).astype(str)\n return pd.to_datetime(str(datetime.datetime.now().year) + '/' + ss.str[3:5] + '/' + ss.str[5:])\n\n df['停止買賣日期'] = process_date(df['停止買賣日期'])\n df['恢復買賣日期'] = process_date(df['恢復買賣日期'])\n df['date'] = df['恢復買賣日期']\n df = df.rename(columns={'代號':'股票代號'})\n df = df.set_index(['stock_id', 'date'])\n return df\n\nimport io\nimport json\nimport requests\nimport datetime\nimport pandas as pd\n\ndef crawl_twse_divide_ratio():\n\n datestr = datetime.datetime.now().strftime('%Y%m%d')\n res = requests_get(\"https://www.twse.com.tw/exchangeReport/TWT49U?response=csv&strDate=20040101&endDate=\"+datestr+\"&_=1551532565786\")\n\n df = pd.read_csv(io.StringIO(res.text.replace(\"=\", \"\")), header=1)\n\n df = df.dropna(thresh=5).dropna(how='all', axis=1)\n\n df = df[~df['資料日期'].isnull()]\n\n # set stock id\n df['stock_id'] = df['股票代號'] + ' ' + df['股票名稱']\n\n # set dates\n df = df[~df['資料日期'].isnull()]\n years = df['資料日期'].str.split('年').str[0].astype(int) + 1911\n years.loc[df['資料日期'].str[3] != '年'] = np.nan\n years.loc[years > datetime.datetime.now().year] = np.nan\n years.ffill(inplace=True)\n dates = years.astype(int).astype(str) +'/'+ df['資料日期'].str.split('年').str[1].str.replace('月', '/').str.replace('日', '')\n df['date'] = pd.to_datetime(dates, errors='coerce')\n\n # convert to float\n float_name_list = ['除權息前收盤價', '除權息參考價', '權值+息值', '漲停價格',\n '跌停價格', '開盤競價基準', '減除股利參考價' , '最近一次申報每股 (單位)淨值',\n '最近一次申報每股 (單位)盈餘']\n\n df[float_name_list] = df[float_name_list].astype(str).apply(lambda s:s.str.replace(',', '')).astype(float)\n\n\n df['twse_divide_ratio'] = df['除權息前收盤價'] / df['開盤競價基準']\n return df.set_index(['stock_id', 'date'])\n\ndef crawl_otc_divide_ratio():\n\n y = datetime.datetime.now().year\n m = datetime.datetime.now().month\n d = datetime.datetime.now().day\n\n y = str(y-1911)\n m = str(m) if m > 9 else '0' + str(m)\n d = str(d) if d > 9 else '0' + str(d)\n\n datestr = '%s/%s/%s' % (y,m,d)\n res_otc = 
requests_get('https://www.tpex.org.tw/web/stock/exright/dailyquo/exDailyQ_result.php?l=zh-tw&d=097/01/02&ed=' + datestr + '&_=1551594269115')\n\n df = pd.DataFrame(json.loads(res_otc.text)['aaData'])\n df.columns = ['除權息日期', '代號', '名稱', '除權息前收盤價', '除權息參考價',\n '權值', '息值',\"權+息值\",\"權/息\",\"漲停價格\",\"跌停價格\",\"開盤競價基準\",\n \"減除股利參考價\",\"現金股利\", \"每千股無償配股\", \"-\", \"現金增資股數\", \"現金增資認購價\",\n \"公開承銷股數\", \"員工認購股數\",\"原股東認購數\", \"按持股比例千股認購\"]\n\n\n float_name_list = [ '除權息前收盤價', '除權息參考價',\n '權值', '息值',\"權+息值\",\"漲停價格\",\"跌停價格\",\"開盤競價基準\",\n \"減除股利參考價\",\"現金股利\", \"每千股無償配股\", \"現金增資股數\", \"現金增資認購價\",\n \"公開承銷股數\", \"員工認購股數\",\"原股東認購數\", \"按持股比例千股認購\"\n ]\n df[float_name_list] = df[float_name_list].astype(str).apply(lambda s:s.str.replace(',', '')).astype(float)\n\n # set stock id\n df['stock_id'] = df['代號'] + ' ' + df['名稱']\n\n # set dates\n dates = df['除權息日期'].str.split('/')\n dates = (dates.str[0].astype(int) + 1911).astype(str) + '/' + dates.str[1] + '/' + dates.str[2]\n df['date'] = pd.to_datetime(dates)\n\n df['otc_divide_ratio'] = df['除權息前收盤價'] / df['開盤競價基準']\n return df.set_index(['stock_id', 'date'])\n\n\ndef crawl_twse_cap_reduction():\n\n datestr = datetime.datetime.now().strftime('%Y%m%d')\n res3 = requests_get(\"https://www.twse.com.tw/exchangeReport/TWTAUU?response=csv&strDate=20110101&endDate=\" + datestr + \"&_=1551597854043\")\n df = pd.read_csv(io.StringIO(res3.text), header=1)\n df = df.dropna(thresh=5).dropna(how='all',axis=1)\n dates = (df['恢復買賣日期'].str.split('/').str[0].astype(int) + 1911).astype(str) + df['恢復買賣日期'].str[3:]\n df['date'] = pd.to_datetime(dates, errors='coerce')\n df['stock_id'] = df['股票代號'].astype(int).astype(str) + ' ' + df['名稱']\n df.head()\n\n df['twse_cap_divide_ratio'] = df['停止買賣前收盤價格']/df['開盤競價基準']\n\n return df.set_index(['stock_id', 'date'])\n\ndef crawl_otc_cap_reduction():\n\n y = datetime.datetime.now().year\n m = datetime.datetime.now().month\n d = datetime.datetime.now().day\n\n y = str(y-1911)\n m = str(m) if m > 9 else '0' + str(m)\n d = str(d) if d > 9 else '0' + str(d)\n\n datestr = '%s/%s/%s' % (y,m,d)\n res4 = requests_get(\"https://www.tpex.org.tw/web/stock/exright/revivt/revivt_result.php?l=zh-tw&d=102/01/01&ed=\"+datestr+\"&_=1551611342446\")\n\n df = pd.DataFrame(json.loads(res4.text)['aaData'])\n\n name = ['恢復買賣日期', '股票代號', '股票名稱', '最後交易之收盤價格',\n '減資恢復買賣開始日參考價格', '漲停價格', '跌停價格', '開始交易基準價', '除權參考價', '減資源因', '詳細資料']\n\n float_name_list = ['最後交易之收盤價格', '減資恢復買賣開始日參考價格', '漲停價格', '跌停價格', '開始交易基準價', '除權參考價']\n df.columns = name\n df[float_name_list] = df[float_name_list].astype(str).apply(lambda s:s.str.replace(',', '')).astype(float)\n df['stock_id'] = df['股票代號'] + ' ' + df['股票名稱']\n dates = (df['恢復買賣日期'].astype(str).str[:-4].astype(int) + 1911).astype(str) + df['恢復買賣日期'].astype(str).str[-4:]\n df['date'] = pd.to_datetime(dates)\n df['date'] = pd.to_datetime(dates, errors='coerce')\n\n df['otc_cap_divide_ratio'] = df['最後交易之收盤價格'] / df['開始交易基準價']\n\n return df.set_index(['stock_id', 'date'])\n\n\n\n\no2tp = {'成交股數':'成交股數',\n '成交筆數':'成交筆數',\n '成交金額(元)':'成交金額',\n '收盤':'收盤價',\n '開盤':'開盤價',\n '最低':'最低價',\n '最高':'最高價',\n '最後買價':'最後揭示買價',\n '最後賣價':'最後揭示賣價',\n }\n\no2tpe = {\n '殖利率(%)':'殖利率(%)',\n '本益比':'本益比',\n '每股股利':'股利年度',\n '股價淨值比':'股價淨值比',\n}\n\no2tb = {\n '外資及陸資(不含外資自營商)-買進股數':'外陸資買進股數(不含外資自營商)',\n '外資及陸資買股數': '外陸資買進股數(不含外資自營商)',\n \n '外資及陸資(不含外資自營商)-賣出股數':'外陸資賣出股數(不含外資自營商)',\n '外資及陸資賣股數': '外陸資賣出股數(不含外資自營商)',\n \n '外資及陸資(不含外資自營商)-買賣超股數':'外陸資買賣超股數(不含外資自營商)',\n '外資及陸資淨買股數': '外陸資買賣超股數(不含外資自營商)',\n \n '外資自營商-買進股數':'外資自營商買進股數',\n 
'外資自營商-賣出股數':'外資自營商賣出股數',\n '外資自營商-買賣超股數':'外資自營商買賣超股數',\n '投信-買進股數':'投信買進股數',\n '投信買進股數': '投信買進股數',\n '投信-賣出股數': '投信賣出股數',\n '投信賣股數': '投信賣出股數',\n \n '投信-買賣超股數':'投信買賣超股數',\n '投信淨買股數': '投信買賣超股數',\n \n '自營商(自行買賣)-買進股數':'自營商買進股數(自行買賣)',\n '自營商(自行買賣)買股數':'自營商買進股數(自行買賣)',\n \n '自營商(自行買賣)-賣出股數':'自營商賣出股數(自行買賣)',\n '自營商(自行買賣)賣股數':'自營商賣出股數(自行買賣)',\n \n '自營商(自行買賣)-買賣超股數': '自營商買賣超股數(自行買賣)',\n '自營商(自行買賣)淨買股數': '自營商買賣超股數(自行買賣)',\n \n '自營商(避險)-買進股數':'自營商買進股數(避險)',\n '自營商(避險)買股數': '自營商買進股數(避險)',\n '自營商(避險)-賣出股數':'自營商賣出股數(避險)',\n '自營商(避險)賣股數': '自營商賣出股數(避險)',\n '自營商(避險)-買賣超股數': '自營商買賣超股數(避險)',\n '自營商(避險)淨買股數': '自營商買賣超股數(避險)',\n \n}\n\no2tm = {n:n for n in ['當月營收', '上月營收', '去年當月營收', '上月比較增減(%)', '去年同月增減(%)', '當月累計營收', '去年累計營收',\n '前期比較增減(%)']}\n\ndef merge(twe, otc, t2o):\n t2o2 = {k:v for k,v in t2o.items() if k in otc.columns}\n otc = otc[list(t2o2.keys())]\n otc = otc.rename(columns=t2o2)\n twe = twe[otc.columns & twe.columns]\n\n return twe.append(otc)\n\n\ndef crawl_price(date):\n dftwe = price_twe(date)\n time.sleep(5)\n dfotc = price_otc(date)\n if len(dftwe) != 0 and len(dfotc) != 0:\n df = merge(dftwe, dfotc, o2tp)\n return df\n else:\n return pd.DataFrame()\n\n\ndef crawl_bargin(date):\n dftwe = bargin_twe(date)\n dfotc = bargin_otc(date)\n if len(dftwe) != 0 and len(dfotc) != 0:\n return merge(dftwe, dfotc, o2tb)\n else:\n return pd.DataFrame()\n\n\ndef crawl_monthly_report(date):\n dftwe = month_revenue('sii', date)\n time.sleep(5)\n dfotc = month_revenue('otc', date)\n if len(dftwe) != 0 and len(dfotc) != 0:\n return merge(dftwe, dfotc, o2tm)\n else:\n return pd.DataFrame()\n\ndef crawl_pe(date):\n\n dftwe = pe_twe(date)\n dfotc = pe_otc(date)\n if len(dftwe) != 0 and len(dfotc) != 0:\n return merge(dftwe, dfotc, o2tpe)\n else:\n return pd.DataFrame()\n\nout = widgets.Output(layout={'border': '1px solid black'})\n\[email protected]()\ndef update_table(table_name, crawl_function, dates):\n\n if dates:\n if len(dates) == 0:\n print(\"該時間段沒有可以爬取之資料\")\n return\n print('start crawl ' + table_name + ' from ', dates[0] , 'to', dates[-1])\n else:\n print('起始、結束日期有點怪怪的,請重新選擇一下喔')\n return\n \n\n df = pd.DataFrame()\n dfs = {}\n\n progress = tqdm_notebook(dates, )\n\n for d in progress:\n\n print('crawling', d)\n progress.set_description('crawl' + table_name + str(d))\n\n data = crawl_function(d)\n\n if data is None or len(data) == 0:\n print('fail, check if ' + str(d) + ' is a holiday')\n\n # update multiple dataframes\n elif isinstance(data, dict):\n if len(dfs) == 0:\n dfs = {i:pd.DataFrame() for i in data.keys()}\n\n for i, d in data.items():\n dfs[i] = dfs[i].append(d)\n\n # update single dataframe\n else:\n df = df.append(data)\n print('success')\n\n time.sleep(5)\n\n\n\n if df is not None and len(df) != 0:\n to_pickle(df, table_name)\n\n if len(dfs) != 0:\n for i, d in dfs.items():\n print('saveing df', d.head(), len(d))\n if len(d) != 0:\n print('save df', d.head())\n to_pickle(df, table_name)\n \nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\ndef check_monthly_revenue():\n \n df = pd.read_pickle(\"history/tables/monthly_report.pkl\")\n \n if df.loc['1101 台泥', '2017-10-10']['當月營收'] == '8387381':\n print(\"fix monthly report errors\")\n df = df.reset_index()\n df['date'] = [d + relativedelta(months=1) for d in df['date']]\n df.set_index(['stock_id', 'date'], inplace=True)\n df.to_pickle(\"history/tables/monthly_report.pkl\")\n print(\"done\")\n commit(\"monthlu_report\")\n\nimport pickle\ndef to_pickle(df, name):\n \n if not os.path.isdir('history'):\n os.mkdir('history')\n\n 
if not os.path.isdir(os.path.join('history', 'tables')):\n os.mkdir(os.path.join('history', 'tables'))\n\n\n fname = os.path.join('history', 'tables', name + '.pkl')\n newfname = os.path.join('history', 'tables', 'new' + name + '.pkl')\n \n # refine patch for monthly revenue\n \n if name == 'monthly_report' :\n check_monthly_revenue()\n\n if os.path.isfile(fname):\n old_df = pd.read_pickle(fname)\n old_df = old_df.append(df, sort=False)\n\n old_df = old_df[~old_df.index.duplicated(keep='last')]\n old_df = old_df.sort_index()\n old_df.to_pickle(newfname)\n os.remove(fname)\n os.rename(newfname, fname)\n else:\n df = df[~df.index.duplicated(keep='last')]\n df.to_pickle(fname)\n old_df = df\n \n if not os.path.isfile(date_range_record_file):\n pickle.dump({}, open(date_range_record_file, 'wb'))\n\n dates = pickle.load(open(date_range_record_file, 'rb'))\n dates[name] = (old_df.index.levels[1][0], old_df.index.levels[1][-1])\n pickle.dump(dates, open(date_range_record_file, 'wb'))\n \n commit(name)\n\n\nfrom datetime import date\nfrom dateutil.rrule import rrule, DAILY, MONTHLY\n\ndef date_range(start_date, end_date):\n return [dt.date() for dt in rrule(DAILY, dtstart=start_date, until=end_date)]\n\ndef month_range(start_date, end_date):\n return [dt.date() for dt in rrule(MONTHLY, dtstart=start_date, until=end_date)]\n\ndef season_range(start_date, end_date):\n\n if isinstance(start_date, datetime.datetime):\n start_date = start_date.date()\n\n if isinstance(end_date, datetime.datetime):\n end_date = end_date.date()\n\n ret = []\n for year in range(start_date.year-1, end_date.year+1):\n ret += [ datetime.date(year, 5, 15),\n datetime.date(year, 8, 14),\n datetime.date(year, 11, 14),\n datetime.date(year+1, 3, 31)]\n ret = [r for r in ret if start_date < r < end_date]\n\n return ret\n\nimport ipywidgets as widgets\nfrom IPython.display import display\n\ndef table_date_range(table_name):\n if os.path.isfile(date_range_record_file):\n with open(date_range_record_file, 'rb') as f:\n dates = pickle.load(f)\n if table_name in dates:\n return dates[table_name]\n else:\n return [None, None]\n else:\n return [None, None]\n\nfrom inspect import signature\n\n\ndef widget(table_name, crawl_func, range_date=None):\n\n\n sig = signature(crawl_func)\n\n if len(sig.parameters) == 0:\n @out.capture()\n def onupdate(x):\n print('updating ', table_name)\n df = crawl_func()\n to_pickle(df, table_name)\n print('done')\n\n btn = widgets.Button(description='update ')\n btn.on_click(onupdate)\n\n first_date, last_date = table_date_range(table_name)\n label = widgets.Label(table_name + ' | ' + str(first_date) + ' ~ ' + str(last_date))\n items = [btn]\n display(widgets.VBox([label, widgets.HBox(items)]))\n\n else:\n\n date_picker_from = widgets.DatePicker(\n description='from',\n disabled=False,\n )\n\n first_date, last_date = table_date_range(table_name)\n\n if last_date:\n date_picker_from.value = last_date\n\n date_picker_to = widgets.DatePicker(\n description='to',\n disabled=False,\n )\n\n date_picker_to.value = datetime.datetime.now().date()\n\n btn = widgets.Button(description='update ')\n\n def onupdate(x):\n dates = range_date(date_picker_from.value, date_picker_to.value)\n\n if len(dates) == 0:\n print('no data to parse')\n\n update_table(table_name, crawl_func, dates)\n\n btn.on_click(onupdate)\n\n\n label = widgets.Label(table_name + ' | ' + str(first_date) + ' ~ ' + str(last_date))\n\n items = [date_picker_from, date_picker_to, btn]\n display(widgets.VBox([label, widgets.HBox(items)]))\n\nimport 
requests\nfrom io import StringIO\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm_notebook as tqdm\nimport os\nimport pickle\nimport datetime\nimport random\n\ndef afterIFRS(year, season):\n season2date = [ datetime.datetime(year, 5, 15),\n datetime.datetime(year, 8, 14),\n datetime.datetime(year, 11, 14),\n datetime.datetime(year+1, 3, 31)]\n\n return pd.to_datetime(season2date[season-1].date())\n\ndef clean(year, season, balance_sheet):\n\n if len(balance_sheet) == 0:\n print('**WARRN: no data to parse')\n return balance_sheet\n balance_sheet = balance_sheet.transpose().reset_index().rename(columns={'index':'stock_id'})\n\n\n if '會計項目' in balance_sheet:\n s = balance_sheet['會計項目']\n balance_sheet = balance_sheet.drop('會計項目', axis=1).apply(pd.to_numeric)\n balance_sheet['會計項目'] = s.astype(str)\n\n balance_sheet['date'] = afterIFRS(year, season)\n\n balance_sheet['stock_id'] = balance_sheet['stock_id'].astype(str)\n balance = balance_sheet.set_index(['stock_id', 'date'])\n return balance\n\ndef download_html(year, season, stock_ids, report_type='C'):\n\n directory = os.path.join('history', 'financial_statement', str(year) + str(season))\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n files = [os.path.join(directory, str(i) + '.html') for i in stock_ids]\n pbar = tqdm([sid for file, sid in zip(files, stock_ids) if not os.path.exists(file) or os.stat(file).st_size < 10000])\n\n for sid in pbar:\n\n pbar.set_description('downloading stock %s in report type %s' % (sid, report_type))\n\n file = os.path.join(directory, str(sid) + '.html')\n\n # start parsing\n if int(year) >= 2013:\n url = ('https://mops.twse.com.tw/server-java/t164sb01?step=1&CO_ID=' + str(sid) + '&SYEAR=' + str(year) + '&SSEASON='+str(season)+'&REPORT_ID=' + str(report_type))\n else:\n url = ('https://mops.twse.com.tw/server-java/t147sb02?t203sb01Form=t203sb01Form&step=0&comp_id='+str(sid)+'&YEAR1='+str(year)+'&SEASON1='+str(season)+'&R_TYPE1=B')\n\n try:\n r = requests_get(url, headers=headers)\n except:\n print('**WARRN: requests cannot get stock:')\n print(url)\n continue\n\n r.encoding = 'big5'\n\n # write files\n f = open(file, 'w', encoding='utf-8')\n\n f.write('<meta charset=\"UTF-8\">\\n')\n f.write(r.text)\n f.close()\n\n # finish\n # print(percentage, i, 'end')\n\n # sleep a while\n time.sleep(random.uniform(0, 3))\n\nimport requests\nimport os\nimport time\nimport requests\nimport datetime\nimport random\nimport requests\nimport io\nimport shutil\nimport zipfile\nimport sys\nimport urllib.request\ndef crawl_finance_statement2019(year, season):\n\n def ifrs_url(year, season):\n url = \"https://mops.twse.com.tw/server-java/FileDownLoad?step=9&fileName=tifrs-\"+str(year)+\"Q\"+str(season)\\\n +\".zip&filePath=/home/html/nas/ifrs/\"+str(year)+\"/\"\n print(url)\n return url\n\n\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n from tqdm import tqdm\n class DownloadProgressBar(tqdm):\n def update_to(self, b=1, bsize=1, tsize=None):\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\n\n def download_url(url, output_path):\n with DownloadProgressBar(unit='B', unit_scale=True,\n miniters=1, desc=url.split('/')[-1]) as t:\n urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)\n\n def 
download_file(url, filename):\n \"\"\"\n Helper method handling downloading large files from `url` to `filename`. Returns a pointer to `filename`.\n \"\"\"\n chunkSize = 1024\n r = requests.get(url, stream=True, verify=True)\n with open(filename, 'wb') as f:\n # pbar = tqdm( unit=\"B\", total=int( r.headers['content-length'] ) )\n for chunk in r.iter_content(chunk_size=chunkSize): \n if chunk: # filter out keep-alive new chunks\n # pbar.update (len(chunk))\n f.write(chunk)\n return r\n\n def ifrs_url(year, season):\n url = \"https://mops.twse.com.tw/server-java/FileDownLoad?step=9&fileName=tifrs-\"+str(year)+\"Q\"+str(season)\\\n +\".zip&filePath=/home/html/nas/ifrs/\"+str(year)+\"/\"\n print(url)\n return url\n\n url = ifrs_url(year,season)\n print('start download')\n download_file(url, 'temp.zip')\n print('finished!')\n \n\n path = os.path.join('history', 'financial_statement', str(year) + str(season))\n\n if os.path.isdir(path):\n shutil.rmtree(path)\n\n print('create new dir')\n\n zipfiles = zipfile.ZipFile(open('temp.zip', 'rb'))\n zipfiles.extractall(path=path)\n\n print('extract all files')\n\n fnames = [f for f in os.listdir(path) if f[-5:] == '.html']\n fnames = sorted(fnames)\n\n newfnames = [f.split(\"-\")[5] + '.html' for f in fnames]\n\n for fold, fnew in zip(fnames, newfnames):\n if len(fnew) != 9:\n print('remove strange code id', fnew)\n os.remove(os.path.join(path, fold))\n continue\n \n if not os.path.exists(os.path.join(path, fnew)):\n os.rename(os.path.join(path, fold), os.path.join(path, fnew))\n else:\n os.remove(os.path.join(path, fold))\n\n\ndef crawl_finance_statement(year, season, stock_ids):\n\n directory = os.path.join('history', 'financial_statement', str(year) + str(season))\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if year >= 2013:\n download_html(year, season, stock_ids, 'C')\n download_html(year, season, stock_ids, 'B')\n download_html(year, season, stock_ids, 'A')\n\ndef remove_english(s):\n result = re.sub(r'[a-zA-Z()]', \"\", s)\n return result\n\ndef patch2019(df):\n df = df.copy()\n dfname = df.columns.levels[0][0]\n\n df = df.iloc[:,1:].rename(columns={'會計項目Accounting Title':'會計項目'})\n\n\n refined_name = df[(dfname,'會計項目')].str.split(\" \").str[0].str.replace(\" \", \"\").apply(remove_english)\n\n subdf = df[dfname].copy()\n subdf['會計項目'] = refined_name\n df[dfname] = subdf\n\n df.columns = pd.MultiIndex(levels=[df.columns.levels[1], df.columns.levels[0]],codes=[df.columns.codes[1], df.columns.codes[0]])\n\n def neg(s):\n\n if isinstance(s, float):\n return s\n\n if str(s) == 'nan':\n return np.nan\n\n s = s.replace(\",\", \"\")\n if s[0] == '(':\n return -float(s[1:-1])\n else:\n return float(s)\n\n df.iloc[:,1:] = df.iloc[:,1:].applymap(neg)\n return df\n\ndef read_html2019(file):\n dfs = pd.read_html(file)\n return [pd.DataFrame(), patch2019(dfs[0]), patch2019(dfs[1]), patch2019(dfs[2])]\n\n\nimport re\ndef pack_htmls(year, season, directory):\n balance_sheet = {}\n income_sheet = {}\n cash_flows = {}\n income_sheet_cumulate = {}\n pbar = tqdm(os.listdir(directory))\n\n for i in pbar:\n\n # 將檔案路徑建立好\n file = os.path.join(directory, i)\n\n # 假如檔案不是html結尾,或是太小,代表不是正常的檔案,略過\n if file[-4:] != 'html' or os.stat(file).st_size < 10000:\n continue\n\n # 顯示目前運行的狀況\n stock_id = i.split('.')[0]\n pbar.set_description('parse htmls %d season %d stock %s' % (year, season, stock_id))\n\n # 讀取html\n if year < 2019:\n dfs = pd.read_html(file)\n else:\n try:\n dfs = read_html2019(file)\n except:\n print(\"**ERROR: fail to parse \", file)\n 
continue\n\n # 處理pandas0.24.1以上,會把columns parse好的問題\n for df in dfs:\n if 'levels' in dir(df.columns):\n df.columns = list(range(df.values.shape[1]))\n\n # 假如html不完整,則略過\n if len(dfs) < 4:\n print('**WARRN html file broken', year, season, i)\n continue\n \n if year <= 2012:\n df = dfs[1]\n category = (df[0] == '會計科目').cumsum()\n df[category == 1]\n dfs = {\n 1: df[category == 0],\n 2: df[category == 1],\n 3: df[category == 2],\n }\n\n # 取得 balance sheet\n df = dfs[1].copy().drop_duplicates(subset=0, keep='last')\n df = df.set_index(0)\n balance_sheet[stock_id] = df[1].dropna()\n #balance_sheet = combine(balance_sheet, df[1].dropna(), stock_id)\n\n # 取得 income statement\n df = dfs[2].copy().drop_duplicates(subset=0, keep='last')\n df = df.set_index(0)\n\n # 假如有4個columns,則第1與第3條column是單季跟累計的income statement\n if len(df.columns) == 4:\n income_sheet[stock_id] = df[1].dropna()\n income_sheet_cumulate[stock_id] = df[3].dropna()\n # 假如有2個columns,則代表第3條column為累計的income statement,單季的從缺\n elif len(df.columns) == 2:\n income_sheet_cumulate[stock_id] = df[1].dropna()\n\n # 假如是第一季財報 累計 跟單季 的數值是一樣的\n if season == 1:\n income_sheet[stock_id] = df[1].dropna()\n\n # 取得 cash_flows\n df = dfs[3].copy().drop_duplicates(subset=0, keep='last')\n df = df.set_index(0)\n cash_flows[stock_id] = df[1].dropna()\n\n # 將dictionary整理成dataframe\n balance_sheet = pd.DataFrame(balance_sheet)\n income_sheet = pd.DataFrame(income_sheet)\n income_sheet_cumulate = pd.DataFrame(income_sheet_cumulate)\n cash_flows = pd.DataFrame(cash_flows)\n\n print('balance_sheet', balance_sheet.shape)\n print('income_sheet', income_sheet.shape)\n print('cumulate_income_sheet', income_sheet_cumulate.shape)\n print('cash_flows', cash_flows.shape)\n\n # 做清理\n ret = {'balance_sheet':clean(year, season, balance_sheet), 'income_sheet':clean(year, season, income_sheet),\n 'income_sheet_cumulate':clean(year, season, income_sheet_cumulate), 'cash_flows':clean(year, season, cash_flows)}\n\n # 假如是第一季的話,則 單季 跟 累計 是一樣的\n if season == 1:\n ret['income_sheet'] = ret['income_sheet_cumulate'].copy()\n\n ret['income_sheet_cumulate'].columns = '累計' + ret['income_sheet_cumulate'].columns\n\n pickle.dump(ret, open(os.path.join('history', 'financial_statement', 'pack' + str(year) + str(season) + '.pickle'), 'wb'))\n\n return ret\n\ndef get_all_pickles(directory):\n ret = {}\n for i in os.listdir(directory):\n if i[:4] != 'pack':\n continue\n ret[i[4:9]] = pd.read_pickle(os.path.join(directory, i))\n return ret\n\ndef combine(d):\n\n tnames = ['balance_sheet',\n 'cash_flows',\n 'income_sheet',\n 'income_sheet_cumulate']\n\n tbs = {t:pd.DataFrame() for t in tnames}\n\n for i, dfs in d.items():\n for tname in tnames:\n tbs[tname] = tbs[tname].append(dfs[tname])\n return tbs\n\n\ndef fill_season4(tbs):\n # copy income sheet (will modify it later)\n income_sheet = tbs['income_sheet'].copy()\n\n # calculate the overlap columns\n c1 = set(tbs['income_sheet'].columns)\n c2 = set(tbs['income_sheet_cumulate'].columns)\n\n overlap_columns = []\n for i in c1:\n if '累計' + i in c2:\n overlap_columns.append('累計' + i)\n\n # get all years\n years = set(tbs['income_sheet_cumulate'].index.levels[1].year)\n\n for y in years:\n\n # get rows of the dataframe that is season 4\n ys = tbs['income_sheet_cumulate'].reset_index('stock_id').index.year == y\n ds4 = tbs['income_sheet_cumulate'].reset_index('stock_id').index.month == 3\n df4 = tbs['income_sheet_cumulate'][ds4 & ys].apply(lambda s: pd.to_numeric(s, errors='coerce')).reset_index('date')\n\n # get rows of the dataframe that is 
season 3\n yps = tbs['income_sheet_cumulate'].reset_index('stock_id').index.year == y - 1\n ds3 = tbs['income_sheet_cumulate'].reset_index('stock_id').index.month == 11\n df3 = tbs['income_sheet_cumulate'][ds3 & yps].apply(lambda s: pd.to_numeric(s, errors='coerce')).reset_index('date')\n \n if len(df3) == 0:\n print('skip ', y)\n continue\n # calculate the differences of income_sheet_cumulate to get income_sheet single season\n diff = df4 - df3\n diff = diff.drop(['date'], axis=1)[overlap_columns]\n\n # remove 累計\n diff.columns = diff.columns.str[2:]\n\n # 加上第四季的日期\n diff['date'] = pd.to_datetime(str(y) + '-03-31')\n diff = diff[list(c1) + ['date']].reset_index().set_index(['stock_id','date'])\n\n # 新增資料於income_sheet尾部\n income_sheet = income_sheet.append(diff)\n\n # 排序好並更新tbs\n income_sheet = income_sheet.reset_index().sort_values(['stock_id', 'date']).set_index(['stock_id', 'date'])\n tbs['income_sheet'] = income_sheet\n\ndef to_db(tbs):\n\n for i, df in tbs.items():\n df = df.reset_index().sort_values(['stock_id', 'date']).drop_duplicates(['stock_id', 'date']).set_index(['stock_id', 'date'])\n df.to_pickle(os.path.join('history', 'tables', i + '.pkl'))\n\n if not os.path.isfile(date_range_record_file):\n pickle.dump({}, open(date_range_record_file, 'wb'))\n\n dates = pickle.load(open(date_range_record_file, 'rb'))\n dates['financial_statement'] = (df.index.levels[1][0], df.index.levels[1][-1])\n pickle.dump(dates, open(date_range_record_file, 'wb'))\n\n\ndef html2db(year, season):\n\n pack_htmls(year, season, os.path.join('history', 'financial_statement', str(year) + str(season)))\n d = get_all_pickles(os.path.join('history', 'financial_statement'))\n tbs = combine(d)\n fill_season4(tbs)\n to_db(tbs)\n return {}\n\ndef crawl_finance_statement_by_date(date):\n year = date.year\n if date.month == 3:\n season = 4\n year = year - 1\n month = 11\n elif date.month == 5:\n season = 1\n month = 2\n elif date.month == 8:\n season = 2\n month = 5\n elif date.month == 11:\n season = 3\n month = 8\n else:\n return None\n\n if year < 2019:\n df = crawl_monthly_report(datetime.datetime(year, month, 1))\n crawl_finance_statement(year, season, df.index.levels[0].str.split(' ').str[0])\n else:\n crawl_finance_statement2019(year, season)\n\n html2db(year, season)\n commit()\n return {}\n\n\nimport os\nimport gc\nimport shutil\nimport pandas as pd\nimport numpy as np\n\ndef commit(*commit_tables):\n \n ftables = os.path.join('history', 'tables')\n fitems = os.path.join('history', 'items')\n\n fnames = [os.path.join(ftables, f) for f in os.listdir(ftables)]\n tnames = [f[:-4] for f in os.listdir(ftables)]\n \n if len(commit_tables) == 0:\n commit_tables = tnames\n\n for fname, tname in zip(fnames, tnames):\n \n if tname not in commit_tables:\n continue\n \n if fname[-4:] != '.pkl':\n continue\n\n fdir = os.path.join(fitems, tname)\n \n if os.path.isdir(fdir) and os.path.getmtime(fname) < os.path.getmtime(fdir):\n print(\"已經成功commit過\", tname, \"了,跳過!\")\n continue\n \n if os.path.isdir(fdir):\n shutil.rmtree(fdir)\n os.mkdir(fdir)\n else:\n os.mkdir(fdir)\n \n try:\n df = pd.read_pickle(fname)\n except:\n print(\"**檔案過大,無法成功commit\", fname)\n continue\n\n # remove stock name\n df.reset_index(inplace=True)\n if sum(df['stock_id'].str.find(' ') >= 0) > 0:\n cond = df.stock_id.str[4] == ' '\n df = df[cond]\n gc.collect()\n new_sid = df['stock_id'].str[:4]\n df['stock_id'] = new_sid\n \n df.set_index(['stock_id', 'date'], inplace=True)\n\n # select 4 digit stock ids\n if tname == 'price':\n sids = 
df.index.get_level_values(0)\n df = df[sids.str.len()==4]\n gc.collect()\n\n if tname == 'monthly_report':\n check_monthly_revenue()\n\n df = df.apply(lambda s: pd.to_numeric(s, errors='coerce'))\n gc.collect()\n\n df[df == 0] = np.nan\n\n\n df = df[~df.index.duplicated(keep='first')]\n gc.collect()\n \n items = list(df.columns)\n df.reset_index(inplace=True)\n \n df = df.pivot(\"date\", \"stock_id\")\n gc.collect()\n\n for name, (_, series) in zip(items, df.items()):\n\n print(tname, '--', name)\n fitem = os.path.join(fdir, name.replace('+', '_').replace('/', '_'))\n #series.reset_index()\\\n # .pivot(\"date\", \"stock_id\")[name].to_pickle(fitem + '.pkl')\n df[name].to_pickle(fitem + '.pkl')"
] |
[
[
"pandas.concat",
"pandas.to_datetime",
"pandas.Series",
"pandas.MultiIndex",
"pandas.DataFrame",
"pandas.read_html",
"pandas.read_pickle",
"pandas.to_numeric"
]
] |
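The crawler above repeatedly converts Gregorian dates into the ROC (Minguo) calendar strings expected by the TPEx endpoints. A self-contained sketch of that helper, copied in spirit from otc_date_str() in the file above:

import datetime

def otc_date_str(date):
    # e.g. datetime.date(2020, 1, 1) -> '109/01/01'
    return str(date.year - 1911) + date.strftime('%Y/%m/%d')[4:]

print(otc_date_str(datetime.date(2020, 1, 1)))   # 109/01/01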
abhishekkumkar/dockrized-neural-photo-editor-using-GAN
|
[
"d234cf1f80cf8c8f621f871dc704dc43e212201f"
] |
[
"ML/discgen_utils.py"
] |
[
"# Plot Image Grid function imported from Discriminative Regularization for Generative Models by Lamb et al:\n# https://github.com/vdumoulin/discgen\nimport six\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import cm, pyplot\nfrom mpl_toolkits.axes_grid1 import ImageGrid\n\n\n\ndef plot_image_grid(images, num_rows, num_cols, save_path=None):\n \"\"\"Plots images in a grid.\n\n Parameters\n ----------\n images : numpy.ndarray\n Images to display, with shape\n ``(num_rows * num_cols, num_channels, height, width)``.\n num_rows : int\n Number of rows for the image grid.\n num_cols : int\n Number of columns for the image grid.\n save_path : str, optional\n Where to save the image grid. Defaults to ``None``,\n which causes the grid to be displayed on screen.\n\n \"\"\"\n figure = pyplot.figure()\n grid = ImageGrid(figure, 111, (num_rows, num_cols), axes_pad=0.1)\n\n for image, axis in zip(images, grid):\n axis.imshow(image.transpose(1, 2, 0), interpolation='nearest')\n axis.set_yticklabels(['' for _ in range(image.shape[1])])\n axis.set_xticklabels(['' for _ in range(image.shape[2])])\n axis.axis('off')\n\n if save_path is None:\n pyplot.show()\n else:\n pyplot.savefig(save_path, transparent=True, bbox_inches='tight',dpi=212)\n pyplot.close()"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
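A hypothetical call into the plot_image_grid() helper above, with a random batch standing in for generated samples (the import path and the saved filename are assumptions, not verified against the repository):

import numpy as np
# from ML.discgen_utils import plot_image_grid   # assumed module path

images = np.random.rand(2 * 3, 3, 32, 32)        # (rows*cols, channels, H, W)
# plot_image_grid(images, num_rows=2, num_cols=3, save_path='grid.png')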
bainro/loss_landscape
|
[
"30bdd84d6946facee973151128bf0ea108c12ca1"
] |
[
"plot_surface.py"
] |
[
"\"\"\"\n Calculate and visualize the loss surface.\n Usage example:\n >> python plot_surface.py --x=-1:1:101 --y=-1:1:101 --model resnet56 --cuda\n\"\"\"\nimport argparse\nimport copy\nimport h5py\nimport torch\nimport time\nimport socket\nimport os\nimport sys\nimport numpy as np\nimport torchvision\nimport torch.nn as nn\nimport dataloader\nimport evaluation\nimport projection as proj\nimport net_plotter\nimport plot_2D\nimport plot_1D\nimport model_loader\nimport scheduler\nimport mpi4pytorch as mpi\n\ndef name_surface_file(args, dir_file):\n # skip if surf_file is specified in args\n if args.surf_file:\n return args.surf_file\n\n # use args.dir_file as the perfix\n surf_file = dir_file\n\n # resolution\n surf_file += '_x[%s,%s,%d]' % (str(args.xmin), str(args.xmax), int(args.xnum))\n if args.y:\n surf_file += 'y[%s,%s,%d]' % (str(args.ymin), str(args.ymax), int(args.ynum))\n if args.z:\n surf_file += 'z[%s,%s,%d]' % (str(args.zmin), str(args.zmax), int(args.znum))\n if args.t:\n surf_file += 't[%s,%s,%d]' % (str(args.tmin), str(args.tmax), int(args.tnum))\n\n # dataloder parameters\n if args.raw_data: # without data normalization\n surf_file += '_rawdata'\n if args.data_split > 1:\n surf_file += '_datasplit=' + str(args.data_split) + '_splitidx=' + str(args.split_idx)\n\n return surf_file + \".h5\"\n\n\ndef setup_surface_file(args, surf_file, dir_file):\n # skip if the direction file already exists\n if os.path.exists(surf_file):\n f = h5py.File(surf_file, 'r')\n if (args.y and 'ycoordinates' in f.keys()) or 'xcoordinates' in f.keys():\n f.close()\n print (\"%s is already set up\" % surf_file)\n return\n\n f = h5py.File(surf_file, 'a')\n f['dir_file'] = dir_file\n\n # Create the coordinates(resolutions) at which the function is evaluated\n xcoordinates = np.linspace(int(args.xmin), int(args.xmax), num=int(args.xnum))\n f['xcoordinates'] = xcoordinates\n\n if args.y:\n ycoordinates = np.linspace(int(args.ymin), int(args.ymax), num=int(args.ynum))\n f['ycoordinates'] = ycoordinates\n\n if args.z:\n zcoordinates = np.linspace(int(args.zmin), int(args.zmax), num=int(args.znum))\n f['zcoordinates'] = zcoordinates\n\n if args.t:\n tcoordinates = np.linspace(int(args.tmin), int(args.tmax), num=int(args.tnum))\n f['tcoordinates'] = tcoordinates\n\n f.close()\n\n return surf_file\n\n\ndef crunch(surf_file, net, w, s, d, dataloader, loss_key, acc_key, comm, rank, args):\n \"\"\"\n Calculate the loss values and accuracies of modified models in parallel\n using MPI reduce.\n \"\"\"\n\n #print(surf_file,234)\n f = h5py.File(surf_file, 'r+' if rank == 0 else 'r')\n losses, accuracies = [], []\n xcoordinates = f['xcoordinates'][:]\n ycoordinates = f['ycoordinates'][:] if 'ycoordinates' in f.keys() else None\n zcoordinates = f['zcoordinates'][:] if 'zcoordinates' in f.keys() else None\n tcoordinates = f['tcoordinates'][:] if 'tcoordinates' in f.keys() else None\n\n if loss_key not in f.keys():\n shape = xcoordinates.shape if ycoordinates is None else (len(xcoordinates),len(ycoordinates))\n if ycoordinates is not None:\n if zcoordinates is not None:\n if tcoordinates is not None:\n shape = (len(xcoordinates),len(ycoordinates),len(zcoordinates),len(tcoordinates))\n else: \n shape = (len(xcoordinates),len(ycoordinates),len(zcoordinates))\n else:\n shape = (len(xcoordinates),len(ycoordinates))\n else:\n shape = xcoordinates.shape\n losses = -np.ones(shape=shape)\n accuracies = -np.ones(shape=shape)\n if rank == 0:\n f[loss_key] = losses\n f[acc_key] = accuracies\n else:\n losses = f[loss_key][:]\n 
accuracies = f[acc_key][:]\n\n # Generate a list of indices of 'losses' that need to be filled in.\n # The coordinates of each unfilled index (with respect to the direction vectors\n # stored in 'd') are stored in 'coords'.\n inds, coords, inds_nums = scheduler.get_job_indices(losses, xcoordinates, ycoordinates, zcoordinates, tcoordinates, comm)\n\n print('Computing %d values for rank %d'% (len(inds), rank))\n start_time = time.time()\n total_sync = 0.0\n\n criterion = nn.CrossEntropyLoss()\n if args.loss_name == 'mse':\n criterion = nn.MSELoss()\n\n # Loop over all uncalculated loss values\n for count, ind in enumerate(inds):\n # Get the coordinates of the loss value being calculated\n coord = coords[count]\n\n # Load the weights corresponding to those coordinates into the net\n if args.dir_type == 'weights':\n net_plotter.set_weights(net.module if args.ngpu > 1 else net, w, d, coord)\n elif args.dir_type == 'states':\n net_plotter.set_states(net.module if args.ngpu > 1 else net, s, d, coord)\n\n # Record the time to compute the loss value\n loss_start = time.time()\n loss, acc = evaluation.eval_loss(net, criterion, dataloader, args.cuda)\n loss_compute_time = time.time() - loss_start\n\n # Record the result in the local array\n losses.ravel()[ind] = loss\n accuracies.ravel()[ind] = acc\n\n # Send updated plot data to the master node\n syc_start = time.time()\n losses = mpi.reduce_max(comm, losses)\n accuracies = mpi.reduce_max(comm, accuracies)\n syc_time = time.time() - syc_start\n total_sync += syc_time\n\n # Only the master node writes to the file - this avoids write conflicts\n if rank == 0:\n f[loss_key][:] = losses\n f[acc_key][:] = accuracies\n f.flush()\n\n print('Evaluating rank %d %d/%d (%.1f%%) coord=%s \\t%s= %.3f \\t%s=%.2f \\ttime=%.2f \\tsync=%.2f' % (\n rank, count, len(inds), 100.0 * count/len(inds), str(coord), loss_key, loss,\n acc_key, acc, loss_compute_time, syc_time))\n\n # This is only needed to make MPI run smoothly. If this process has less work than\n # the rank0 process, then we need to keep calling reduce so the rank0 process doesn't block\n for i in range(max(inds_nums) - len(inds)):\n losses = mpi.reduce_max(comm, losses)\n accuracies = mpi.reduce_max(comm, accuracies)\n\n total_time = time.time() - start_time\n print('Rank %d done! 
Total time: %.2f Sync: %.2f' % (rank, total_time, total_sync))\n\n f.close()\n\n###############################################################\n# MAIN\n###############################################################\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='plotting loss surface')\n parser.add_argument('--mpi', '-m', action='store_true', help='use mpi')\n parser.add_argument('--cuda', '-c', action='store_true', help='use cuda')\n parser.add_argument('--threads', default=2, type=int, help='number of threads')\n parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use for each rank, useful for data parallel evaluation')\n parser.add_argument('--batch_size', default=128, type=int, help='minibatch size')\n\n # data parameters\n parser.add_argument('--dataset', default='cifar10', help='cifar10 | imagenet')\n parser.add_argument('--datapath', default='cifar10/data', metavar='DIR', help='path to the dataset')\n parser.add_argument('--raw_data', action='store_true', default=False, help='no data preprocessing')\n parser.add_argument('--data_split', default=1, type=int, help='the number of splits for the dataloader')\n parser.add_argument('--split_idx', default=0, type=int, help='the index of data splits for the dataloader')\n parser.add_argument('--trainloader', default='', help='path to the dataloader with random labels')\n parser.add_argument('--test_loader', default='', help='path to the test_loader with random labels')\n parser.add_argument('--eval_count', default=None, type=int, help='the number of test examples to evaluate the avg loss.')\n\n # model parameters\n parser.add_argument('--model', default='resnet56', help='model name')\n parser.add_argument('--model_folder', default='', help='the common folder that contains model_file and model_file2')\n parser.add_argument('--model_file', default='', help='path to the trained model file')\n parser.add_argument('--model_file2', default='', help='use (model_file2 - model_file) as the xdirection')\n parser.add_argument('--model_file3', default='', help='use (model_file3 - model_file) as the ydirection')\n parser.add_argument('--loss_name', '-l', default='crossentropy', help='loss functions: crossentropy | mse')\n\n # direction parameters\n parser.add_argument('--dir_file', default='', help='specify the name of direction file, or the path to an eisting direction file')\n parser.add_argument('--dir_type', default='weights', help='direction type: weights | states (including BN\\'s running_mean/var)')\n parser.add_argument('--x', default='-1:1:51', help='A string with format xmin:x_max:xnum')\n parser.add_argument('--y', default=None, help='A string with format ymin:ymax:ynum')\n parser.add_argument('--z', default=None, help='A string with format zmin:zmax:znum')\n parser.add_argument('--t', default=None, help='A string with format tmin:tmax:tnum')\n parser.add_argument('--xnorm', default='', help='direction normalization: filter | layer | weight')\n parser.add_argument('--ynorm', default='', help='direction normalization: filter | layer | weight')\n parser.add_argument('--znorm', default='', help='direction normalization: filter | layer | weight')\n parser.add_argument('--tnorm', default='', help='direction normalization: filter | layer | weight')\n parser.add_argument('--xignore', default='', help='ignore bias and BN parameters: biasbn')\n parser.add_argument('--yignore', default='', help='ignore bias and BN parameters: biasbn')\n parser.add_argument('--zignore', default='', help='ignore bias 
and BN parameters: biasbn')\n parser.add_argument('--tignore', default='', help='ignore bias and BN parameters: biasbn')\n parser.add_argument('--same_dir', action='store_true', default=False, help='use the same random direction for both x-axis and y-axis')\n parser.add_argument('--idx', default=0, type=int, help='the index for the repeatness experiment')\n parser.add_argument('--surf_file', default='', help='customize the name of surface file, could be an existing file.')\n\n # plot parameters\n parser.add_argument('--proj_file', default='', help='the .h5 file contains projected optimization trajectory.')\n parser.add_argument('--loss_max', default=5, type=float, help='Maximum value to show in 1D plot')\n parser.add_argument('--vmax', default=10, type=float, help='Maximum value to map')\n parser.add_argument('--vmin', default=0.1, type=float, help='Miminum value to map')\n parser.add_argument('--vlevel', default=0.5, type=float, help='plot contours every vlevel')\n parser.add_argument('--show', action='store_true', default=False, help='show plotted figures')\n parser.add_argument('--log', action='store_true', default=False, help='use log scale for loss values')\n parser.add_argument('--plot', action='store_true', default=False, help='plot figures after computation')\n parser.add_argument('--seed', default=123, type=int, help='sets torch random seed, not numpys')\n\n args = parser.parse_args()\n\n torch.manual_seed(args.seed)\n\n #--------------------------------------------------------------------------\n # Environment setup\n #--------------------------------------------------------------------------\n if args.mpi:\n comm = mpi.setup_MPI()\n rank, nproc = comm.Get_rank(), comm.Get_size()\n else:\n comm, rank, nproc = None, 0, 1\n\n # in case of multiple GPUs per node, set the GPU to use for each rank\n if args.cuda:\n if not torch.cuda.is_available():\n raise Exception('User selected cuda option, but cuda is not available on this machine')\n gpu_count = torch.cuda.device_count()\n # torch.cuda.set_device(rank % gpu_count)\n # print('Rank %d use GPU %d of %d GPUs on %s' %\n # (rank, torch.cuda.current_device(), gpu_count, socket.gethostname()))\n\n #--------------------------------------------------------------------------\n # Check plotting resolution\n #--------------------------------------------------------------------------\n try:\n args.xmin, args.xmax, args.xnum = [float(a) for a in args.x.split(':')]\n args.ymin, args.ymax, args.ynum = (None, None, None)\n args.zmin, args.zmax, args.znum = (None, None, None)\n args.tmin, args.tmax, args.tnum = (None, None, None)\n if args.y:\n args.ymin, args.ymax, args.ynum = [float(a) for a in args.y.split(':')]\n assert args.ymin and args.ymax and args.ynum, \\\n 'You specified some arguments for the y axis, but not all'\n if args.z:\n args.zmin, args.zmax, args.znum = [float(a) for a in args.z.split(':')]\n if args.t:\n args.tmin, args.tmax, args.tnum = [float(a) for a in args.t.split(':')]\n except:\n raise Exception('Improper format for x- or y-coordinates. 
Try something like -1:1:51')\n\n #--------------------------------------------------------------------------\n # Load models and extract parameters\n #--------------------------------------------------------------------------\n net = model_loader.load(args.dataset, args.model, args.model_file)\n w = net_plotter.get_weights(net) # initial parameters\n s = copy.deepcopy(net.state_dict()) # deepcopy since state_dict are references\n if args.ngpu > 1:\n # data parallel with multiple GPUs on a single node\n net = nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n\n #--------------------------------------------------------------------------\n # Setup the direction file and the surface file\n #--------------------------------------------------------------------------\n dir_file = net_plotter.name_direction_file(args) # name the direction file\n #print(dir_file,123)\n if rank == 0:\n #print(\"LOLOL\")\n net_plotter.setup_direction(args, dir_file, net)\n\n surf_file = name_surface_file(args, dir_file)\n if rank == 0:\n setup_surface_file(args, surf_file, dir_file)\n\n #print(dir_file, surf_file)\n\n # wait until master has setup the direction file and surface file\n mpi.barrier(comm)\n\n # load directions\n #print(dir_file)\n d = net_plotter.load_directions(dir_file)\n #print(d);exit()\n # calculate the consine similarity of the two directions\n if len(d) == 2 and rank == 0:\n similarity = proj.cal_angle(proj.nplist_to_tensor(d[0]), proj.nplist_to_tensor(d[1]))\n print('cosine similarity between x-axis and y-axis: %f' % similarity)\n\n #--------------------------------------------------------------------------\n # Setup dataloader\n #--------------------------------------------------------------------------\n # download CIFAR10 if it does not exit\n if rank == 0 and args.dataset == 'cifar10':\n torchvision.datasets.CIFAR10(root=args.dataset + '/data', train=True, download=True)\n\n mpi.barrier(comm)\n\n trainloader, test_loader = dataloader.load_dataset(args.dataset, args.datapath,\n args.batch_size, args.threads, args.raw_data,\n args.data_split, args.split_idx,\n args.trainloader, args.test_loader, eval_count=args.eval_count) \n #print(\"# of train ex's:\", len(trainloader), len(test_loader))\n\n #--------------------------------------------------------------------------\n # Start the computation\n #--------------------------------------------------------------------------\n #crunch(surf_file, net, w, s, d, trainloader, 'train_loss', 'train_acc', comm, rank, args)\n crunch(surf_file, net, w, s, d, test_loader, 'test_loss', 'test_acc', comm, rank, args)\n \n #--------------------------------------------------------------------------\n # Plot figures\n #--------------------------------------------------------------------------\n if args.plot and rank == 0:\n if args.y and args.proj_file:\n plot_2D.plot_contour_trajectory(surf_file, dir_file, args.proj_file, 'train_loss', args.show)\n elif args.y:\n if args.z:\n if args.t:\n #plot_2D.plot_4d_path(surf_file, 'test_loss', args.show)\n print(\"congrats! Now you have to implement the 4d plot...\")\n else:\n plot_2D.plot_3d_scatter(surf_file, 'test_loss', args.show)\n else:\n #plot_2D.plot_2d_contour(surf_file + '_train_loss', 'train_loss', args.vmin, args.vmax, args.vlevel, args.show)\n plot_2D.plot_2d_contour(surf_file, 'test_loss', args.vmin, args.vmax, args.vlevel, args.show)\n else:\n plot_1D.plot_1d_loss_err(surf_file, args.xmin, args.xmax, args.loss_max, args.log, args.show)\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.manual_seed",
"numpy.ones",
"torch.cuda.is_available",
"torch.cuda.device_count",
"torch.nn.MSELoss"
]
] |
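The plot_surface.py script above parses resolution strings such as '-1:1:51' and writes the resulting coordinate grid into the surface .h5 file. A minimal sketch of that grid construction (the resolution string is chosen for illustration; it matches the script's default for --x):

import numpy as np

xmin, xmax, xnum = (float(v) for v in '-1:1:51'.split(':'))
xcoordinates = np.linspace(int(xmin), int(xmax), num=int(xnum))
print(xcoordinates[:5])   # [-1.   -0.96 -0.92 -0.88 -0.84]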
WangYuxuan93/IJCAI2019-dp-sa
|
[
"02ca4234160a102e5481761522a149257bedcc6a",
"02ca4234160a102e5481761522a149257bedcc6a"
] |
[
"biaffine-parser-sa-bert/data/Dataloader.py",
"biaffine-parser-sa/data/Dataloader.py"
] |
[
"from data.Vocab import *\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\ndef read_corpus(file_path, vocab=None):\n data = []\n with open(file_path, 'r') as infile:\n for sentence in readDepTree(infile, vocab):\n data.append(sentence)\n return data\n\ndef sentences_numberize(sentences, vocab):\n for sentence in sentences:\n yield sentence2id(sentence, vocab)\n\ndef sentence2id(sentence, vocab):\n result = []\n for dep in sentence:\n wordid = vocab.word2id(dep.form)\n extwordid = vocab.extword2id(dep.form)\n tagid = vocab.tag2id(dep.tag)\n head = dep.head\n relid = vocab.rel2id(dep.rel)\n word = dep.form\n charid = dep.charid\n senid = dep.senid\n id=dep.id\n result.append([wordid, extwordid, tagid, head, relid, word, charid, id, senid])\n\n return result\n\n\n\ndef batch_slice(data, batch_size):\n batch_num = int(np.ceil(len(data) / float(batch_size)))\n for i in range(batch_num):\n cur_batch_size = batch_size if i < batch_num - 1 else len(data) - batch_size * i\n sentences = [data[i * batch_size + b] for b in range(cur_batch_size)]\n\n yield sentences\n\n\ndef data_iter(data, batch_size, shuffle=True):\n \"\"\"\n randomly permute data, then sort by source length, and partition into batches\n ensure that the length of sentences in each batch\n \"\"\"\n\n batched_data = []\n if shuffle: np.random.shuffle(data)\n batched_data.extend(list(batch_slice(data, batch_size)))\n\n if shuffle: np.random.shuffle(batched_data)\n for batch in batched_data:\n yield batch\n\n\ndef batch_data_variable(batch, vocab):\n length = len(batch[0])\n batch_size = len(batch)\n for b in range(1, batch_size):\n if len(batch[b]) > length: length = len(batch[b])\n\n words = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n extwords = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n tags = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n masks = Variable(torch.Tensor(batch_size, length).zero_(), requires_grad=False)\n positions = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n heads = []\n rels = []\n lengths = []\n sentences = []\n elmosens=[]\n berts=[]\n\n b = 0\n for sentence in sentences_numberize(batch, vocab):\n index = 0\n sen=[]\n elmosen=[]\n length = len(sentence)\n lengths.append(length)\n elmosen.append(length)\n head = np.zeros((length), dtype=np.int32)\n rel = np.zeros((length), dtype=np.int32)\n for dep in sentence:\n words[b, index] = dep[0]\n extwords[b, index] = dep[1]\n tags[b, index] = dep[2]\n head[index] = dep[3]\n rel[index] = dep[4]\n sen.append(dep[5])\n masks[b, index] = 1\n positions[b,index] = index\n index += 1\n if dep[7] == 1:\n startcharid = dep[6]\n berts.append(dep[8])\n '''\n if startcharid == 0:\n print(\"the char id is 0:\",dep[5])\n print(\"the sen is is 0:\",dep[8])\n if startcharid == 55:\n print(\"the char id is 8\",dep[5])\n print(\"the sen is is 2:\",dep[8])\n if startcharid == 37:\n print(\"the char id is 37\",dep[5])\n print(\"the sen is is 1:\",dep[8])\n if startcharid == 83:\n print(\"the char id is 83\",dep[5])\n print(\"the sen is is 2:\",dep[8])\n '''\n \n elmosen.append(startcharid)\n \n b += 1\n heads.append(head)\n rels.append(rel)\n sentences.append(sen)\n elmosens.append(elmosen)\n \n #use_cuda=True\n #if use_cuda:\n # positions=positions.cuda()\n\n return words, extwords, tags, heads, rels, lengths, masks, positions, sentences,elmosens,berts\n\ndef batch_variable_depTree(trees, heads, rels, lengths, vocab):\n for tree, head, 
rel, length in zip(trees, heads, rels, lengths):\n sentence = []\n for idx in range(length):\n sentence.append(Dependency(idx, tree[idx].org_form, tree[idx].tag, head[idx], vocab.id2rel(rel[idx]),tree[idx].charid,tree[idx].senid))\n yield sentence\n\n\n\n",
"from data.Vocab import *\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\ndef read_corpus(file_path, vocab=None):\n data = []\n with open(file_path, 'r') as infile:\n for sentence in readDepTree(infile, vocab):\n data.append(sentence)\n return data\n\ndef sentences_numberize(sentences, vocab):\n for sentence in sentences:\n yield sentence2id(sentence, vocab)\n\ndef sentence2id(sentence, vocab):\n result = []\n for dep in sentence:\n wordid = vocab.word2id(dep.form)\n extwordid = vocab.extword2id(dep.form)\n tagid = vocab.tag2id(dep.tag)\n head = dep.head\n relid = vocab.rel2id(dep.rel)\n result.append([wordid, extwordid, tagid, head, relid])\n\n return result\n\n\n\ndef batch_slice(data, batch_size):\n batch_num = int(np.ceil(len(data) / float(batch_size)))\n for i in range(batch_num):\n cur_batch_size = batch_size if i < batch_num - 1 else len(data) - batch_size * i\n sentences = [data[i * batch_size + b] for b in range(cur_batch_size)]\n\n yield sentences\n\n\ndef data_iter(data, batch_size, shuffle=True):\n \"\"\"\n randomly permute data, then sort by source length, and partition into batches\n ensure that the length of sentences in each batch\n \"\"\"\n\n batched_data = []\n if shuffle: np.random.shuffle(data)\n batched_data.extend(list(batch_slice(data, batch_size)))\n\n if shuffle: np.random.shuffle(batched_data)\n for batch in batched_data:\n yield batch\n\n\ndef batch_data_variable(batch, vocab):\n length = len(batch[0])\n batch_size = len(batch)\n for b in range(1, batch_size):\n if len(batch[b]) > length: length = len(batch[b])\n\n words = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n extwords = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n tags = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n masks = Variable(torch.Tensor(batch_size, length).zero_(), requires_grad=False)\n positions = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n heads = []\n rels = []\n lengths = []\n\n b = 0\n for sentence in sentences_numberize(batch, vocab):\n index = 0\n length = len(sentence)\n lengths.append(length)\n head = np.zeros((length), dtype=np.int32)\n rel = np.zeros((length), dtype=np.int32)\n for dep in sentence:\n words[b, index] = dep[0]\n extwords[b, index] = dep[1]\n tags[b, index] = dep[2]\n head[index] = dep[3]\n rel[index] = dep[4]\n masks[b, index] = 1\n positions[b,index] = index\n index += 1\n b += 1\n heads.append(head)\n rels.append(rel)\n #use_cuda=True\n #if use_cuda:\n # positions=positions.cuda()\n\n return words, extwords, tags, heads, rels, lengths, masks, positions\n\ndef batch_variable_depTree(trees, heads, rels, lengths, vocab):\n for tree, head, rel, length in zip(trees, heads, rels, lengths):\n sentence = []\n for idx in range(length):\n sentence.append(Dependency(idx, tree[idx].org_form, tree[idx].tag, head[idx], vocab.id2rel(rel[idx])))\n yield sentence\n\n\n\n"
] |
[
[
"torch.LongTensor",
"numpy.zeros",
"torch.Tensor",
"numpy.random.shuffle"
],
[
"torch.LongTensor",
"numpy.zeros",
"torch.Tensor",
"numpy.random.shuffle"
]
] |
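Editor's note on the row above (dependency-parser data utilities): the loader functions are meant to be composed into a training loop, but no usage appears in the row itself. A minimal sketch, assuming a prebuilt `vocab` object and a hypothetical CoNLL-style file path (both illustrative, not from the source):

    train_data = read_corpus("train.conll", vocab)            # hypothetical path
    for one_batch in data_iter(train_data, batch_size=32, shuffle=True):
        words, extwords, tags, heads, rels, lengths, masks, positions = \
            batch_data_variable(one_batch, vocab)             # second file's variant; the first also returns sentence/ELMo/BERT extras
        # feed the padded LongTensors (words, extwords, tags) and masks to the parser model here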
VIGNESHinZONE/dgl-lifesci
|
[
"9a892fd0935a7d8ab125530f54ce1e2a38b2377a"
] |
[
"python/dgllife/model/pretrain/__init__.py"
] |
[
"# -*- coding: utf-8 -*-\n#\n# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# pylint: disable= no-member, arguments-differ, invalid-name\n#\n# Utilities for using pre-trained models.\n\nimport torch\n\nfrom dgl.data.utils import _get_dgl_url, download\n\nfrom .moleculenet import *\nfrom .generative_models import *\nfrom .property_prediction import *\nfrom .reaction import *\n\n__all__ = ['load_pretrained']\n\nurl = {**moleculenet_url, **generative_url, **property_url, **reaction_url}\n\ndef download_and_load_checkpoint(model_name, model, model_postfix,\n local_pretrained_path='pre_trained.pth', log=True):\n \"\"\"Download pretrained model checkpoint\n\n The model will be loaded to CPU.\n\n Parameters\n ----------\n model_name : str\n Name of the model\n model : nn.Module\n Instantiated model instance\n model_postfix : str\n Postfix for pretrained model checkpoint\n local_pretrained_path : str\n Local name for the downloaded model checkpoint\n log : bool\n Whether to print progress for model loading\n\n Returns\n -------\n model : nn.Module\n Pretrained model\n \"\"\"\n url_to_pretrained = _get_dgl_url(model_postfix)\n local_pretrained_path = '_'.join([model_name, local_pretrained_path])\n download(url_to_pretrained, path=local_pretrained_path, log=log)\n checkpoint = torch.load(local_pretrained_path, map_location='cpu')\n model.load_state_dict(checkpoint['model_state_dict'])\n\n if log:\n print('Pretrained model loaded')\n\n return model\n\n# pylint: disable=I1101\ndef load_pretrained(model_name, log=True):\n \"\"\"Load a pretrained model\n\n Parameters\n ----------\n model_name : str\n Currently supported options include\n\n * ``'GCN_Tox21'``: A GCN-based model for molecular property prediction on Tox21\n * ``'GAT_Tox21'``: A GAT-based model for molecular property prediction on Tox21\n * ``'Weave_Tox21'``: A Weave model for molecular property prediction on Tox21\n * ``'AttentiveFP_Aromaticity'``: An AttentiveFP model for predicting number of\n aromatic atoms on a subset of Pubmed\n * ``'DGMG_ChEMBL_canonical'``: A DGMG model trained on ChEMBL with a canonical\n atom order\n * ``'DGMG_ChEMBL_random'``: A DGMG model trained on ChEMBL for molecule generation\n with a random atom order\n * ``'DGMG_ZINC_canonical'``: A DGMG model trained on ZINC for molecule generation\n with a canonical atom order\n * ``'DGMG_ZINC_random'``: A DGMG model pre-trained on ZINC for molecule generation\n with a random atom order\n * ``'JTNN_ZINC'``: A JTNN model pre-trained on ZINC for molecule generation\n * ``'wln_center_uspto'``: A WLN model pre-trained on USPTO for reaction prediction\n * ``'wln_rank_uspto'``: A WLN model pre-trained on USPTO for candidate product ranking\n * ``'gin_supervised_contextpred'``: A GIN model pre-trained with supervised learning\n and context prediction\n * ``'gin_supervised_infomax'``: A GIN model pre-trained with supervised learning\n and deep graph infomax\n * ``'gin_supervised_edgepred'``: A GIN model pre-trained with supervised learning\n and edge prediction\n * ``'gin_supervised_masking'``: A GIN model pre-trained with supervised learning\n and attribute masking\n * ``'GCN_canonical_BACE'``: A GCN model trained on BACE with canonical\n featurization for atoms\n * ``'GCN_attentivefp_BACE'``: A GCN model trained on BACE with attentivefp\n featurization for atoms\n * ``'GAT_canonical_BACE'``: A GAT model trained on BACE with canonical\n featurization for atoms\n * ``'GAT_attentivefp_BACE'``: A GAT model trained on 
BACE with attentivefp\n featurization for atoms\n * ``'Weave_canonical_BACE'``: A Weave model trained on BACE with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_BACE'``: A Weave model trained on BACE with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_BACE'``: An MPNN model trained on BACE with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_BACE'``: An MPNN model trained on BACE with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_BACE'``: An AttentiveFP model trained on BACE with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_BACE'``: An AttentiveFP model trained on BACE with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_BACE'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on BACE\n * ``'gin_supervised_infomax_BACE'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on BACE\n * ``'gin_supervised_edgepred_BACE'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on BACE\n * ``'gin_supervised_masking_BACE'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on BACE\n * ``'NF_canonical_BACE'``: An NF model trained on BACE with canonical\n featurization for atoms\n * ``'GCN_canonical_BBBP'``: A GCN model trained on BBBP with canonical\n featurization for atoms\n * ``'GCN_attentivefp_BBBP'``: A GCN model trained on BBBP with attentivefp\n featurization for atoms\n * ``'GAT_canonical_BBBP'``: A GAT model trained on BBBP with canonical\n featurization for atoms\n * ``'GAT_attentivefp_BBBP'``: A GAT model trained on BBBP with attentivefp\n featurization for atoms\n * ``'Weave_canonical_BBBP'``: A Weave model trained on BBBP with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_BBBP'``: A Weave model trained on BBBP with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_BBBP'``: An MPNN model trained on BBBP with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_BBBP'``: An MPNN model trained on BBBP with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_BBBP'``: An AttentiveFP model trained on BBBP with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_BBBP'``: An AttentiveFP model trained on BBBP with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_BBBP'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on BBBP\n * ``'gin_supervised_infomax_BBBP'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on BBBP\n * ``'gin_supervised_edgepred_BBBP'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on BBBP\n * ``'gin_supervised_masking_BBBP'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on BBBP\n * ``'NF_canonical_BBBP'``: An NF model pre-trained on BBBP with canonical\n featurization for atoms\n * ``'GCN_canonical_ClinTox'``: A GCN model trained on ClinTox with canonical\n featurization for atoms\n * ``'GCN_attentivefp_ClinTox'``: A GCN model trained on ClinTox with attentivefp\n featurization for atoms\n * ``'GAT_canonical_ClinTox'``: A GAT model trained on ClinTox with canonical\n featurization for atoms\n * ``'GAT_attentivefp_ClinTox'``: A GAT model trained on ClinTox with attentivefp\n featurization 
for atoms\n * ``'Weave_canonical_ClinTox'``: A Weave model trained on ClinTox with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_ClinTox'``: A Weave model trained on ClinTox with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_ClinTox'``: An MPNN model trained on ClinTox with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_ClinTox'``: An MPNN model trained on ClinTox with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_ClinTox'``: An AttentiveFP model trained on ClinTox with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_BACE'``: An AttentiveFP model trained on ClinTox with\n attentivefp featurization for atoms and bonds\n * ``'GCN_canonical_ESOL'``: A GCN model trained on ESOL with canonical\n featurization for atoms\n * ``'GCN_attentivefp_ESOL'``: A GCN model trained on ESOL with attentivefp\n featurization for atoms\n * ``'GAT_canonical_ESOL'``: A GAT model trained on ESOL with canonical\n featurization for atoms\n * ``'GAT_attentivefp_ESOL'``: A GAT model trained on ESOL with attentivefp\n featurization for atoms\n * ``'Weave_canonical_ESOL'``: A Weave model trained on ESOL with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_ESOL'``: A Weave model trained on ESOL with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_ESOL'``: An MPNN model trained on ESOL with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_ESOL'``: An MPNN model trained on ESOL with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_ESOL'``: An AttentiveFP model trained on ESOL with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_ESOL'``: An AttentiveFP model trained on ESOL with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_ESOL'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on ESOL\n * ``'gin_supervised_infomax_ESOL'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on ESOL\n * ``'gin_supervised_edgepred_ESOL'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on ESOL\n * ``'gin_supervised_masking_ESOL'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on ESOL\n * ``'GCN_canonical_FreeSolv'``: A GCN model trained on FreeSolv with canonical\n featurization for atoms\n * ``'GCN_attentivefp_FreeSolv'``: A GCN model trained on FreeSolv with attentivefp\n featurization for atoms\n * ``'GAT_canonical_FreeSolv'``: A GAT model trained on FreeSolv with canonical\n featurization for atoms\n * ``'GAT_attentivefp_FreeSolv'``: A GAT model trained on FreeSolv with attentivefp\n featurization for atoms\n * ``'Weave_canonical_FreeSolv'``: A Weave model trained on FreeSolv with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_FreeSolv'``: A Weave model trained on FreeSolv with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_FreeSolv'``: An MPNN model trained on FreeSolv with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_FreeSolv'``: An MPNN model trained on FreeSolv with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_FreeSolv'``: An AttentiveFP model trained on FreeSolv with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_FreeSolv'``: An AttentiveFP model trained on FreeSolv with\n 
attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_FreeSolv'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on FreeSolv\n * ``'gin_supervised_infomax_FreeSolv'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on FreeSolv\n * ``'gin_supervised_edgepred_FreeSolv'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on FreeSolv\n * ``'gin_supervised_masking_FreeSolv'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on FreeSolv\n * ``'GCN_canonical_HIV'``: A GCN model trained on HIV with canonical\n featurization for atoms\n * ``'GCN_attentivefp_HIV'``: A GCN model trained on HIV with attentivefp\n featurization for atoms\n * ``'GAT_canonical_HIV'``: A GAT model trained on BACE with canonical\n featurization for atoms\n * ``'GAT_attentivefp_HIV'``: A GAT model trained on BACE with attentivefp\n featurization for atoms\n * ``'Weave_canonical_HIV'``: A Weave model trained on HIV with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_HIV'``: A Weave model trained on HIV with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_HIV'``: An MPNN model trained on HIV with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_HIV'``: An MPNN model trained on HIV with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_HIV'``: An AttentiveFP model trained on HIV with canonical\n featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_HIV'``: An AttentiveFP model trained on HIV with attentivefp\n featurization for atoms and bonds\n * ``'gin_supervised_contextpred_HIV'``: A GIN model pre-trained with supervised learning\n and context prediction, and fine-tuned on HIV\n * ``'gin_supervised_infomax_HIV'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on HIV\n * ``'gin_supervised_edgepred_HIV'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on HIV\n * ``'gin_supervised_masking_HIV'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on HIV\n * ``'NF_canonical_HIV'``: An NF model trained on HIV with canonical\n featurization for atoms\n * ``'GCN_canonical_Lipophilicity'``: A GCN model trained on Lipophilicity with canonical\n featurization for atoms\n * ``'GCN_attentivefp_Lipophilicity'``: A GCN model trained on Lipophilicity with\n attentivefp featurization for atoms\n * ``'GAT_canonical_Lipophilicity'``: A GAT model trained on Lipophilicity with canonical\n featurization for atoms\n * ``'GAT_attentivefp_Lipophilicity'``: A GAT model trained on Lipophilicity with\n attentivefp featurization for atoms\n * ``'Weave_canonical_Lipophilicity'``: A Weave model trained on Lipophilicity with\n canonical featurization for atoms and bonds\n * ``'Weave_attentivefp_Lipophilicity'``: A Weave model trained on Lipophilicity with\n attentivefp featurization for atoms and bonds\n * ``'MPNN_canonical_Lipophilicity'``: An MPNN model trained on Lipophilicity with\n canonical featurization for atoms and bonds\n * ``'MPNN_attentivefp_Lipophilicity'``: An MPNN model trained on Lipophilicity with\n attentivefp featurization for atoms and bonds\n * ``'AttentiveFP_canonical_Lipophilicity'``: An AttentiveFP model trained on\n Lipophilicity with canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_Lipophilicity'``: An AttentiveFP model trained on\n 
Lipophilicity with attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_Lipophilicity'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on Lipophilicity\n * ``'gin_supervised_infomax_Lipophilicity'``: A GIN model pre-trained with supervised\n learning and infomax, and fine-tuned on Lipophilicity\n * ``'gin_supervised_edgepred_Lipophilicity'``: A GIN model pre-trained with supervised\n learning and edge prediction, and fine-tuned on Lipophilicity\n * ``'gin_supervised_masking_Lipophilicity'``: A GIN model pre-trained with supervised\n learning and masking, and fine-tuned on Lipophilicity\n * ``'GCN_canonical_MUV'``: A GCN model trained on MUV with canonical\n featurization for atoms\n * ``'GCN_attentivefp_MUV'``: A GCN model trained on MUV with attentivefp\n featurization for atoms\n * ``'GAT_canonical_MUV'``: A GAT model trained on MUV with canonical\n featurization for atoms\n * ``'GAT_attentivefp_MUV'``: A GAT model trained on MUV with attentivefp\n featurization for atoms\n * ``'Weave_canonical_MUV'``: A Weave model trained on MUV with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_MUV'``: A Weave model trained on MUV with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_MUV'``: An MPNN model trained on MUV with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_MUV'``: An MPNN model trained on MUV with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_MUV'``: An AttentiveFP model trained on MUV with canonical\n featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_MUV'``: An AttentiveFP model trained on MUV with attentivefp\n featurization for atoms and bonds\n * ``'gin_supervised_contextpred_MUV'``: A GIN model pre-trained with supervised learning\n and context prediction, and fine-tuned on MUV\n * ``'gin_supervised_infomax_MUV'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on MUV\n * ``'gin_supervised_edgepred_MUV'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on MUV\n * ``'gin_supervised_masking_MUV'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on MUV\n * ``'GCN_canonical_PCBA'``: A GCN model trained on PCBA with canonical\n featurization for atoms\n * ``'GCN_attentivefp_PCBA'``: A GCN model trained on PCBA with attentivefp\n featurization for atoms\n * ``'GAT_canonical_PCBA'``: A GAT model trained on PCBA with canonical\n featurization for atoms\n * ``'GAT_attentivefp_PCBA'``: A GAT model trained on PCBA with attentivefp\n featurization for atoms\n * ``'Weave_canonical_PCBA'``: A Weave model trained on PCBA with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_PCBA'``: A Weave model trained on PCBA with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_PCBA'``: An MPNN model trained on PCBA with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_PCBA'``: An MPNN model trained on PCBA with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_PCBA'``: An AttentiveFP model trained on PCBA with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_PCBA'``: An AttentiveFP model trained on PCBA with\n attentivefp featurization for atoms and bonds\n * ``'GCN_canonical_SIDER'``: A GCN model trained on SIDER with canonical\n featurization for atoms\n * ``'GCN_attentivefp_SIDER'``: A GCN model trained 
on SIDER with attentivefp\n featurization for atoms\n * ``'GAT_canonical_SIDER'``: A GAT model trained on SIDER with canonical\n featurization for atoms\n * ``'GAT_attentivefp_SIDER'``: A GAT model trained on SIDER with attentivefp\n featurization for atoms\n * ``'Weave_canonical_SIDER'``: A Weave model trained on SIDER with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_SIDER'``: A Weave model trained on SIDER with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_SIDER'``: An MPNN model trained on SIDER with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_SIDER'``: An MPNN model trained on SIDER with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_SIDER'``: An AttentiveFP model trained on SIDER with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_SIDER'``: An AttentiveFP model trained on SIDER with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_SIDER'``: A GIN model pre-trained with supervised learning\n and context prediction, and fine-tuned on SIDER\n * ``'gin_supervised_infomax_SIDER'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on SIDER\n * ``'gin_supervised_edgepred_SIDER'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on SIDER\n * ``'gin_supervised_masking_SIDER'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on SIDER\n * ``'NF_canonical_SIDER'``: An NF model trained on SIDER with canonical\n featurization for atoms\n * ``'GCN_canonical_Tox21'``: A GCN model trained on Tox21 with canonical\n featurization for atoms\n * ``'GCN_attentivefp_Tox21'``: A GCN model trained on Tox21 with attentivefp\n featurization for atoms\n * ``'GAT_canonical_Tox21'``: A GAT model trained on Tox21 with canonical\n featurization for atoms\n * ``'GAT_attentivefp_Tox21'``: A GAT model trained on Tox21 with attentivefp\n featurization for atoms\n * ``'Weave_canonical_Tox21'``: A Weave model trained on Tox21 with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_Tox21'``: A Weave model trained on Tox21 with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_Tox21'``: An MPNN model trained on Tox21 with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_Tox21'``: An MPNN model trained on Tox21 with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_Tox21'``: An AttentiveFP model trained on Tox21 with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_Tox21'``: An AttentiveFP model trained on Tox21 with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_Tox21'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on Tox21\n * ``'gin_supervised_infomax_Tox21'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on Tox21\n * ``'gin_supervised_edgepred_Tox21'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on Tox21\n * ``'gin_supervised_masking_Tox21'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on Tox21\n * ``'NF_canonical_Tox21'``: An NF model trained on Tox21 with canonical\n featurization for atoms\n * ``'GCN_canonical_ToxCast'``: A GCN model trained on ToxCast with canonical\n featurization for atoms\n * ``'GCN_attentivefp_ToxCast'``: A GCN model trained 
on ToxCast with attentivefp\n featurization for atoms\n * ``'GAT_canonical_ToxCast'``: A GAT model trained on ToxCast with canonical\n featurization for atoms\n * ``'GAT_attentivefp_ToxCast'``: A GAT model trained on ToxCast with attentivefp\n featurization for atoms\n * ``'Weave_canonical_ToxCast'``: A Weave model trained on ToxCast with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_ToxCast'``: A Weave model trained on ToxCast with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_ToxCast'``: An MPNN model trained on ToxCast with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_ToxCast'``: An MPNN model trained on ToxCast with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_ToxCast'``: An AttentiveFP model trained on ToxCast with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_ToxCast'``: An AttentiveFP model trained on ToxCast with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_ToxCast'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on ToxCast\n * ``'gin_supervised_infomax_ToxCast'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on ToxCast\n * ``'gin_supervised_edgepred_ToxCast'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on ToxCast\n * ``'gin_supervised_masking_ToxCast'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on ToxCast\n * ``'NF_canonical_ToxCast'``: An NF model trained on ToxCast with canonical\n featurization for atoms and bonds\n\n log : bool\n Whether to print progress for model loading\n\n Returns\n -------\n model\n \"\"\"\n if model_name not in url:\n raise RuntimeError(\"Cannot find a pretrained model with name {}\".format(model_name))\n\n for func in [create_moleculenet_model, create_generative_model,\n create_property_model, create_reaction_model]:\n model = func(model_name)\n if model is not None:\n break\n\n return download_and_load_checkpoint(model_name, model, url[model_name], log=log)\n"
] |
[
[
"torch.load"
]
] |
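Editor's note on the row above: the docstring enumerates every name accepted by `load_pretrained`. A minimal usage sketch, assuming the package layout implied by the file path (`python/dgllife/model/pretrain/__init__.py`) and using `'GCN_Tox21'` from the documented list:

    from dgllife.model.pretrain import load_pretrained   # module shown in the row above
    model = load_pretrained('GCN_Tox21')                  # downloads the checkpoint and loads it on CPU
    model.eval()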
siddheshshaji/FLAML
|
[
"ffee24e8afd9009ccb5d269e72f5d50c894da531"
] |
[
"test/reg.py"
] |
[
"from flaml import AutoML\nfrom sklearn.datasets import fetch_california_housing\n\n# Initialize an AutoML instance\nautoml = AutoML()\n# Specify automl goal and constraint\nautoml_settings = {\n \"time_budget\": 1, # in seconds\n \"metric\": \"r2\",\n \"task\": \"regression\",\n \"log_file_name\": \"test/california.log\",\n}\nX_train, y_train = fetch_california_housing(return_X_y=True)\n# Train with labeled input data\nautoml.fit(X_train=X_train, y_train=y_train, **automl_settings)\nprint(automl.model)\nprint(automl.model.estimator)\n\nprint(automl.best_estimator)\nprint(automl.best_config)\nprint(automl.best_config_per_estimator)\n\nprint(automl.best_config_train_time)\nprint(automl.best_iteration)\nprint(automl.best_loss)\nprint(automl.time_to_find_best_model)\nprint(automl.config_history)\n"
] |
[
[
"sklearn.datasets.fetch_california_housing"
]
] |
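Editor's note on the row above: the FLAML script fits a regressor and prints diagnostics but never calls the fitted model. A one-line inference sketch, assuming `predict` is available on the fitted `AutoML` instance:

    y_pred = automl.predict(X_train)   # predictions from the best model found within the 1 s budget
    print(y_pred[:5])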
tanishqjha2298/Toxic-message-filtering-app
|
[
"bc182b5e2503d5b332e8928aa0e42cc9b58dae2d"
] |
[
"flask_api_output.py"
] |
[
"# Load libraries\nimport flask\nimport pandas as pd\nimport tensorflow as tf\nimport keras\nfrom keras.models import load_model\n\n# instantiate flask \napp = flask.Flask(__name__)\n\n# load the model, and pass in the custom metric function\nglobal graph\ngraph = tf.get_default_graph()\nmodel = load_model('Model_final.h5')\n\[email protected]('/apitest/<arg>')\ndef apitest(arg):\n return 'API working'+arg\n\napp.run(host='0.0.0.0', debug=False, port=5005)\n"
] |
[
[
"tensorflow.get_default_graph"
]
] |
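Editor's note on the row above: the Flask service exposes `/apitest/<arg>` on port 5005 and returns `'API working' + arg`. A hedged client-side check, assuming the service runs on the same machine (the localhost URL is illustrative):

    import requests
    resp = requests.get("http://localhost:5005/apitest/hello")
    print(resp.status_code, resp.text)   # expected: 200 and the string "API workinghello"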
mlopezarango/Python
|
[
"2d3d660155241113b23e4ed810e05479b2fc4bba",
"4866b1330bc7c77c0ed0e050e6b99efdeb026448"
] |
[
"machine_learning/random_forest_regressor.py",
"maths/gaussian.py"
] |
[
"# Random Forest Regressor Example\n\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\n\n\ndef main():\n\n \"\"\"\n Random Forest Regressor Example using sklearn function.\n Boston house price dataset is used to demonstrate the algorithm.\n \"\"\"\n\n # Load Boston house price dataset\n boston = load_boston()\n print(boston.keys())\n\n # Split dataset into train and test data\n X = boston[\"data\"] # features\n Y = boston[\"target\"]\n x_train, x_test, y_train, y_test = train_test_split(\n X, Y, test_size=0.3, random_state=1\n )\n\n # Random Forest Regressor\n rand_for = RandomForestRegressor(random_state=42, n_estimators=300)\n rand_for.fit(x_train, y_train)\n\n # Predict target for test data\n predictions = rand_for.predict(x_test)\n predictions = predictions.reshape(len(predictions), 1)\n\n # Error printing\n print(f\"Mean Absolute Error:\\t {mean_absolute_error(y_test, predictions)}\")\n print(f\"Mean Square Error :\\t {mean_squared_error(y_test, predictions)}\")\n\n\nif __name__ == \"__main__\":\n main()\n",
"\"\"\"\nReference: https://en.wikipedia.org/wiki/Gaussian_function\n\npython/black : True\npython : 3.7.3\n\n\"\"\"\nfrom numpy import pi, sqrt, exp\n\n\ndef gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int:\n \"\"\"\n >>> gaussian(1)\n 0.24197072451914337\n \n >>> gaussian(24)\n 3.342714441794458e-126\n\n Supports NumPy Arrays\n Use numpy.meshgrid with this to generate gaussian blur on images.\n >>> import numpy as np\n >>> x = np.arange(15)\n >>> gaussian(x)\n array([3.98942280e-01, 2.41970725e-01, 5.39909665e-02, 4.43184841e-03,\n 1.33830226e-04, 1.48671951e-06, 6.07588285e-09, 9.13472041e-12,\n 5.05227108e-15, 1.02797736e-18, 7.69459863e-23, 2.11881925e-27,\n 2.14638374e-32, 7.99882776e-38, 1.09660656e-43])\n \n >>> gaussian(15)\n 5.530709549844416e-50\n\n >>> gaussian([1,2, 'string'])\n Traceback (most recent call last):\n ...\n TypeError: unsupported operand type(s) for -: 'list' and 'float'\n\n >>> gaussian('hello world')\n Traceback (most recent call last):\n ...\n TypeError: unsupported operand type(s) for -: 'str' and 'float'\n\n >>> gaussian(10**234) # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n OverflowError: (34, 'Result too large')\n\n >>> gaussian(10**-326)\n 0.3989422804014327\n\n >>> gaussian(2523, mu=234234, sigma=3425)\n 0.0\n \"\"\"\n return 1 / sqrt(2 * pi * sigma ** 2) * exp(-((x - mu) ** 2) / 2 * sigma ** 2)\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n"
] |
[
[
"sklearn.ensemble.RandomForestRegressor",
"sklearn.metrics.mean_absolute_error",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error",
"sklearn.datasets.load_boston"
],
[
"numpy.exp",
"numpy.sqrt"
]
] |
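Editor's note on the row above: in `maths/gaussian.py` the exponent is written as `-((x - mu) ** 2) / 2 * sigma ** 2`, which by Python operator precedence divides by 2 and then multiplies by `sigma ** 2`; it matches the Gaussian density only when `sigma == 1`, which is why the doctests still pass. The annotation `-> int` is also misleading, since the function returns a float (or an array). A corrected sketch of the standard density:

    from numpy import pi, sqrt, exp

    def gaussian_pdf(x, mu: float = 0.0, sigma: float = 1.0) -> float:
        # denominator 2 * sigma ** 2 is grouped explicitly
        return exp(-((x - mu) ** 2) / (2 * sigma ** 2)) / sqrt(2 * pi * sigma ** 2)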
mengzaiqiao/TVBR
|
[
"cdac86a753c41f8f3c55a025be8d88dd305325f5"
] |
[
"beta_rec/models/ngcf.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.sparse as sparse\n\nfrom beta_rec.models.torch_engine import ModelEngine\n\n\nclass NGCF(torch.nn.Module):\n \"\"\"Model initialisation, embedding generation and prediction of NGCF.\"\"\"\n\n def __init__(self, config, norm_adj):\n \"\"\"Initialize NGCF Class.\"\"\"\n super(NGCF, self).__init__()\n self.config = config\n self.n_users = config[\"n_users\"]\n self.n_items = config[\"n_items\"]\n self.emb_dim = config[\"emb_dim\"]\n self.layer_size = config[\"layer_size\"]\n self.norm_adj = norm_adj\n self.n_layers = len(self.layer_size)\n self.dropout = nn.ModuleList()\n self.GC_weights = nn.ModuleList()\n self.Bi_weights = nn.ModuleList()\n self.dropout_list = list(config[\"mess_dropout\"])\n self.layer_size = [self.emb_dim] + self.layer_size\n # Create GNN layers\n\n for i in range(self.n_layers):\n self.GC_weights.append(\n nn.Linear(self.layer_size[i], self.layer_size[i + 1])\n )\n self.Bi_weights.append(\n nn.Linear(self.layer_size[i], self.layer_size[i + 1])\n )\n self.dropout.append(nn.Dropout(self.dropout_list[i]))\n\n self.user_embedding = nn.Embedding(self.n_users, self.emb_dim)\n self.item_embedding = nn.Embedding(self.n_items, self.emb_dim)\n self.init_emb()\n\n def init_emb(self):\n \"\"\"Initialize users and itmes' embeddings.\"\"\"\n # Initialize users and items' embeddings\n nn.init.xavier_uniform_(self.user_embedding.weight)\n nn.init.xavier_uniform_(self.item_embedding.weight)\n\n def forward(self, norm_adj):\n \"\"\"Perform GNN function on users and item embeddings.\n\n Args:\n norm_adj (torch sparse tensor): the norm adjacent matrix of the user-item interaction matrix.\n Returns:\n u_g_embeddings (tensor): processed user embeddings.\n i_g_embeddings (tensor): processed item embeddings.\n \"\"\"\n ego_embeddings = torch.cat(\n (self.user_embedding.weight, self.item_embedding.weight), dim=0\n )\n all_embeddings = [ego_embeddings]\n\n norm_adj = norm_adj.to(self.device)\n for i in range(self.n_layers):\n side_embeddings = sparse.mm(norm_adj, ego_embeddings)\n sum_embeddings = F.leaky_relu(self.GC_weights[i](side_embeddings))\n bi_embeddings = torch.mul(ego_embeddings, side_embeddings)\n bi_embeddings = F.leaky_relu(self.Bi_weights[i](bi_embeddings))\n ego_embeddings = sum_embeddings + bi_embeddings\n ego_embeddings = self.dropout[i](ego_embeddings)\n\n norm_embeddings = F.normalize(ego_embeddings, p=2, dim=1)\n all_embeddings += [norm_embeddings]\n\n all_embeddings = torch.cat(all_embeddings, dim=1)\n u_g_embeddings, i_g_embeddings = torch.split(\n all_embeddings, [self.n_users, self.n_items], dim=0\n )\n\n return u_g_embeddings, i_g_embeddings\n\n def predict(self, users, items):\n \"\"\"Predict result with the model.\n\n Args:\n users (int, or list of int): user id.\n items (int, or list of int): item id.\n Return:\n scores (int): dot product.\n \"\"\"\n users_t = torch.tensor(users, dtype=torch.int64, device=self.device)\n items_t = torch.tensor(items, dtype=torch.int64, device=self.device)\n\n with torch.no_grad():\n ua_embeddings, ia_embeddings = self.forward(self.norm_adj)\n u_g_embeddings = ua_embeddings[users_t]\n i_g_embeddings = ia_embeddings[items_t]\n scores = torch.mul(u_g_embeddings, i_g_embeddings).sum(dim=1)\n return scores\n\n\nclass NGCFEngine(ModelEngine):\n \"\"\"NGCFEngine Class.\"\"\"\n\n # A class includes train an epoch and train a batch of NGCF\n\n def __init__(self, config):\n \"\"\"Initialize NGCFEngine Class.\"\"\"\n self.config = config\n self.regs = 
config[\"model\"][\"regs\"] # reg is the regularisation\n self.decay = self.regs[0]\n self.batch_size = config[\"model\"][\"batch_size\"]\n self.norm_adj = config[\"model\"][\"norm_adj\"]\n self.model = NGCF(config[\"model\"], self.norm_adj)\n super(NGCFEngine, self).__init__(config)\n self.model.to(self.device)\n\n def train_single_batch(self, batch_data):\n \"\"\"Train the model in a single batch.\n\n Args:\n batch_data (list): batch users, positive items and negative items.\n Return:\n loss (float): batch loss.\n \"\"\"\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.optimizer.zero_grad()\n norm_adj = self.norm_adj\n ua_embeddings, ia_embeddings = self.model.forward(norm_adj)\n\n batch_users, pos_items, neg_items = batch_data\n\n u_g_embeddings = ua_embeddings[batch_users]\n pos_i_g_embeddings = ia_embeddings[pos_items]\n neg_i_g_embeddings = ia_embeddings[neg_items]\n\n batch_mf_loss, batch_emb_loss, batch_reg_loss = self.bpr_loss(\n u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings\n )\n\n batch_loss = batch_mf_loss + batch_emb_loss + batch_reg_loss\n\n batch_loss.backward()\n self.optimizer.step()\n loss = batch_loss.item()\n return loss, batch_reg_loss\n\n def train_an_epoch(self, train_loader, epoch_id):\n \"\"\"Train the model in one epoch.\n\n Args:\n epoch_id (int): the number of epoch.\n train_loader (function): user, pos_items and neg_items generator.\n \"\"\"\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0.0\n regularizer = 0.0\n for batch_data in train_loader:\n loss, reg = self.train_single_batch(batch_data)\n total_loss += loss\n regularizer += reg\n print(f\"[Training Epoch {epoch_id}], Loss {loss}, Regularizer {regularizer}\")\n self.writer.add_scalar(\"model/loss\", total_loss, epoch_id)\n self.writer.add_scalar(\"model/regularizer\", regularizer, epoch_id)\n\n def bpr_loss(self, users, pos_items, neg_items):\n \"\"\"Bayesian Personalised Ranking (BPR) pairwise loss function.\n\n Note that the sizes of pos_scores and neg_scores should be equal.\n\n Args:\n pos_scores (tensor): Tensor containing predictions for known positive items.\n neg_scores (tensor): Tensor containing predictions for sampled negative items.\n\n Returns:\n loss.\n \"\"\"\n # Calculate BPR loss\n pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)\n neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)\n\n regularizer = (\n 1.0 / 2 * (users ** 2).sum()\n + 1.0 / 2 * (pos_items ** 2).sum()\n + 1.0 / 2 * (neg_items ** 2).sum()\n )\n regularizer = regularizer / self.batch_size\n\n maxi = F.logsigmoid(pos_scores - neg_scores)\n mf_loss = -torch.mean(maxi)\n\n emb_loss = self.decay * regularizer\n reg_loss = 0.0\n return mf_loss, emb_loss, reg_loss\n"
] |
[
[
"torch.nn.functional.normalize",
"torch.mean",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.functional.logsigmoid",
"torch.sparse.mm",
"torch.nn.Embedding",
"torch.tensor",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_",
"torch.mul",
"torch.no_grad",
"torch.split"
]
] |
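Editor's note on the row above: `NGCFEngine.bpr_loss` computes the Bayesian Personalised Ranking objective from user and item embeddings. The pairwise term it uses, isolated as a minimal sketch (the score tensors are assumed to have equal length, as the docstring requires):

    import torch
    import torch.nn.functional as F

    def bpr_pairwise_loss(pos_scores: torch.Tensor, neg_scores: torch.Tensor) -> torch.Tensor:
        # -log sigmoid(pos - neg), averaged over the batch, matching mf_loss in the row above
        return -F.logsigmoid(pos_scores - neg_scores).mean()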
Tim232/Python-Things
|
[
"05f0f373a4cf298e70d9668c88a6e3a9d1cd8146",
"05f0f373a4cf298e70d9668c88a6e3a9d1cd8146",
"05f0f373a4cf298e70d9668c88a6e3a9d1cd8146",
"05f0f373a4cf298e70d9668c88a6e3a9d1cd8146",
"05f0f373a4cf298e70d9668c88a6e3a9d1cd8146"
] |
[
"Lectures/DeepLearningClass/chapter5/train_neuralnet_mnist_3_layer_momentum.py",
"Books/DeepLearningfromScratch/P01_HelloPython/numpy_pyplot.py",
"Books/LearningTensorFlow/Chapter8_Queue_Thread_DataLoading/subchapter_02_tfrecords_read_write.py",
"Books/DeepLearningfromScratch/P07_CNN/p01_convolutional_pooling_layer.py",
"Books/FirstContactWithTensorFlow/P04_Simple_Neural_Network/simple_neural_network.py"
] |
[
"# epoch - 0 , train_acc - 0.0754 , test_acc - 0.0728\n# epoch - 1 , train_acc - 0.86505 , test_acc - 0.865\n# epoch - 2 , train_acc - 0.9139 , test_acc - 0.9139\n# epoch - 3 , train_acc - 0.938466666667 , test_acc - 0.9385\n# epoch - 4 , train_acc - 0.95845 , test_acc - 0.9538\n# epoch - 5 , train_acc - 0.967166666667 , test_acc - 0.9631\n# epoch - 6 , train_acc - 0.971666666667 , test_acc - 0.9654\n# epoch - 7 , train_acc - 0.97515 , test_acc - 0.9669\n# epoch - 8 , train_acc - 0.978633333333 , test_acc - 0.9683\n# epoch - 9 , train_acc - 0.982266666667 , test_acc - 0.9711\n# epoch - 10 , train_acc - 0.984766666667 , test_acc - 0.9729\n# epoch - 11 , train_acc - 0.985766666667 , test_acc - 0.9733\n# epoch - 12 , train_acc - 0.986483333333 , test_acc - 0.9726\n# epoch - 13 , train_acc - 0.989583333333 , test_acc - 0.9761\n# epoch - 14 , train_acc - 0.991133333333 , test_acc - 0.9736\n# epoch - 15 , train_acc - 0.990016666667 , test_acc - 0.9744\n# epoch - 16 , train_acc - 0.993816666667 , test_acc - 0.9761\nimport sys, os\n\nsys.path.append(os.pardir)\n\nimport numpy as np\nfrom DeepLearningClass.dataset.mnist import load_mnist\nfrom DeepLearningClass.chapter5.two_layer_net_3_layer import TwoLayerNet\nfrom DeepLearningClass.common.optimizer import Momentum\n\n# 데이터 읽기\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)\n\nnetwork = TwoLayerNet(input_size=784, hidden_size1=200, hidden_size2=200, output_size=10)\n\niters_num = 10000\ntrain_size = x_train.shape[0]\nbatch_size = 100\nlearning_rate = 0.1\n\ntrain_loss_list = []\ntrain_acc_list = []\ntest_acc_list = []\n\niter_per_epoch = max(train_size / batch_size, 1)\n\nmomentum = Momentum()\n\nfor i in range(iters_num):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n\n # 기울기 계산\n # grad = network.numerical_gradient(x_batch, t_batch) # 수치 미분 방식\n grad = network.gradient(x_batch, t_batch) # 오차역전파법 방식(훨씬 빠르다)\n\n # 갱신\n momentum.update(network.params, grad)\n\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n\n if i % iter_per_epoch == 0:\n train_acc = network.accuracy(x_train, t_train)\n test_acc = network.accuracy(x_test, t_test)\n train_acc_list.append(train_acc)\n test_acc_list.append(test_acc)\n print('epoch -', int(i / iter_per_epoch), ', train_acc -', train_acc, ', test_acc -', test_acc)",
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\n\n# 데이터 준비\nx = np.arange(0, 6, 0.1) # 0에서 6까지 0.1 간격으로 생성\ny1 = np.sin(x)\ny2 = np.cos(x)\n\n# 그래프 그리기\nplt.plot(x, y1, label='sin')\nplt.plot(x, y2, linestyle='--', label='cos') # cos 함수는 점선으로 그리기\nplt.xlabel('x') # x축 이름\nplt.ylabel('y') # y축 이름\nplt.title('sin & cos') # 제목\nplt.legend()\n\n# 이미지 표시\nimg = imread('background.jpg')\nplt.imshow(img)\nplt.show()",
"import os\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn.datasets import mnist\nimport numpy as np\n\nsave_dir = './Mnist_data'\n\n# save_dir 에 데이터 내려받기\ndata_sets = mnist.read_data_sets(save_dir,\n dtype=tf.uint8,\n reshape=False,\n validation_size=1000)\n\ndata_splits = ['train', 'test', 'validation']\n\n#todo mnist dataset -> tfrecord 변환\nfor d in range(len(data_splits)):\n print('saving:' + data_splits[d])\n data_set = data_sets[d]\n print('data_set.images shape:', data_set.images.shape, ', data_set.labels shape:', data_set.labels.shape)\n\n filename = os.path.join(save_dir, 'tfrecord', data_splits[d] + '.tfrecords')\n writer = tf.python_io.TFRecordWriter(filename)\n\n for index in range(data_set.images.shape[0]):\n image = data_set.images[index].tostring()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[data_set.images.shape[1]])),\n 'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[data_set.images.shape[2]])),\n 'depth': tf.train.Feature(int64_list=tf.train.Int64List(value=[data_set.images.shape[3]])),\n 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(data_set.labels[index])])),\n 'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image]))\n }))\n\n writer.write(example.SerializeToString())\n\n writer.close()\n\n#todo tfrecord data read\nfilename = os.path.join(save_dir, 'tfrecord', 'train.tfrecords')\nrecord_iterator = tf.python_io.tf_record_iterator(filename)\nserialized_img_example = next(record_iterator)\n\nexample = tf.train.Example()\nexample.ParseFromString(serialized_img_example)\nimage = example.features.feature['image_raw'].bytes_list.value\nlabel = example.features.feature['label'].int64_list.value[0]\nwidth = example.features.feature['width'].int64_list.value[0]\nheight = example.features.feature['height'].int64_list.value[0]\n\nimg_flat = np.fromstring(image[0], dtype=np.uint8)\nimg_reshaped = img_flat.reshape((height, width, -1))\n\nprint(img_reshaped)",
"import numpy as np\n\nx = np.random.rand(10, 1, 28, 28) # 높이 28, 너비 28, 채널 1, 데이터 10\nprint(x.shape)\nprint(x[0].shape) # 첫 번째 데이터\nprint(x[1].shape) # 두 번째 데이터\nprint(x[0, 0].shape) # 첫 번째 데이터의 첫 채널의 공간 데이터\n\nimport sys, os\nsys.path.append(os.pardir)\nfrom DeepLearningfromScratch.common.util import im2col\n\nx1 = np.random.rand(1, 3, 7, 7) # (데이터 수, 채널 수, 높이, 너비)\ncol1 = im2col(x1, 5, 5, stride=1, pad=0)\nprint(col1.shape)\n\nx2 = np.random.rand(10, 3, 7, 7)\ncol2 = im2col(x2, 5, 5, stride=1, pad=0)\nprint(col2.shape)\n\nclass Convolution:\n '''합성곱 계층\n ▣ __init__() method\n parameters\n ----------\n W : 필터\n b : 편향\n stride : 스트라이드\n pad : 패딩\n\n ▣ forward() method\n parameters\n ----------\n x : 입력 값\n '''\n def __init__(self, W, b, stride=1, pad=0):\n self.W = W\n self.b = b\n self.stride = stride\n self.pad = pad\n\n def forward(self, x):\n FN, C, FH, FW = self.W.shape\n N, C, H, W = x.shape\n out_h = int(1 + (H + 2*self.pad - FH) / self.stride)\n out_w = int(1 + (W + 2*self.pad - FW) / self.stride)\n\n col = im2col(x, FH, FW, self.stride, self.pad) # 입력 값 변경\n col_W = self.W.reshape(FN, -1).T # 필터 변경\n out = np.dot(col, col_W) + self.b # 입력 값과 필터간의 내적 수행\n\n out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)\n\n return out\n\nclass Pooling:\n def __init__(self, pool_h, pool_w, stride=1, pad=0):\n self.pool_h = pool_h\n self.pool_w = pool_w\n self.stride = stride\n self.pad = pad\n\n def forward(self, x):\n N, C, H, W = x.shape\n out_h = int(1 + (H - self.pool_h) / self.stride)\n out_w = int(1 + (H - self.pool_w) / self.stride)\n\n # 전개 (1)\n col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)\n col = col.reshape(-1, self.pool_w*self.pool_h)\n\n # 최대값 (2)\n out = np.max(col, axis=1)\n\n # 성형 (3)\n out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)\n\n return out",
"from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\n# read_data_sets() : mnist.train : 훈련 데이터, mnist.test : 테스트 데이터가 들어 있는 데이터 셋을 가져온다.\n# 데이터의 각 원소는 이미지(xs)와 레이블(ys)로 구성되어 있다.\n# 훈련 이미지는 mnist.train.images 로 참조가 가능하고, 훈련 레이블은 mnist.train.labels 로 참조가 가능하다.\n# 각각의 이미지의 픽셀들은 0~1 사이의 값을 가진다. (0으로 갈수록 흰색, 1로 갈수록 검은색)\n# mnist.train.images -> (55000, 784)\n# mnist.train.labels -> (55000, 10)\nmnist = input_data.read_data_sets('MNIST_data\\\\', one_hot=True)\n\n# 1. 입력층\n# - 입력값에 가중치 W 를 곱하고 편향 b 를 더해 그 다음 계층으로 넘겨주는 계층.\n# - tf.zeros(shape) : shape 차원의 상수 텐서를 생성하면서 초기 값은 0 으로 지정.\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\nx = tf.placeholder('float', [None, 784]) # shape 크기가 None 은 어떤 크기나 가능하다는 뜻.\n\n# 2. 출력층\n# - affine 계층으로 연산된 값이 들어오고, 해당 값을 가지고 활성화 함수를 적용해 출력하는 계층.\n# - tf.matmul(x, W) : 두 텐서를 행렬곱셈하여 결과 텐서를 리턴. (affine 계층)\n# - tf.nn.softmax() : 활성화 함수인 softmax 함수를 구현하는 함수.\ny = tf.nn.softmax(tf.matmul(x, W) + b)\ny_ = tf.placeholder('float', [None, 10])\n\n# 3. 학습을 위한 손실함수 구현\n# - 평균 제곱 오차 : loss = tf.reduce_mean(tf.square(y-y_data))\n# - 교차 엔트로피 : cross_entropy = -tf.reduce_sum(tf.multiply(y_, tf.log(y)))\n# - tf.multiply(a, b) : 두 개의 텐서간의 행렬곱을 구해주는 함수.\n# - tf.reduce_sum(tensor) : axis 를 설정하지 않으면 전체 sum 결과를 출력하는 함수.\ncross_entropy = -tf.reduce_sum(tf.multiply(y_, tf.log(y)))\n\n# 4. 경사 감소법\n# - 여기서는 가장 일반적인 경사감소법인 SGD(확률적 경사 하강법)를 사용했다.\n# - tf.train.GradientDescentOptimizer(HyperParameter) : HyperParameter 값에 대해 경사 감소를 수행.\n# - optimizer.minimize(error_rate) : 최소가 되는 error_rate 를 찾아주는 함수.\ntrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n# 5. 학습 수행\n# - mnist.train.next_batch(batch_cnt) : MNIST 데이터 셋에서 batch_cnt 만큼 데이터를 추출.\nfor i in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n# 6. 정확도 검증\n# - tf.equal(tensor1, tensor2) : tensor1 과 tensor2 의 요소들을 비교해서 같으면 True, 다르면 False 를 리턴하는 함수.\n# - tf.cast(tensor, type) : tensor 를 특정 type 으로 변형하는 함수\n# - tf.reduce_mean(tensor) : axis 축 값이 없으면 전체 데이터에 대한 평균을 구한다.\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float')) # 마지막 테스트 데이터에 대한 정확도를 검증한다.\n\nprint(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))"
] |
[
[
"numpy.random.choice"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.plot",
"matplotlib.image.imread",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"tensorflow.train.Example",
"tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets",
"tensorflow.python_io.TFRecordWriter",
"numpy.fromstring",
"tensorflow.train.BytesList",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.train.Int64List"
],
[
"numpy.max",
"numpy.dot",
"numpy.random.rand"
],
[
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.log",
"tensorflow.Session",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets"
]
] |
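Editor's note on the row above: the first file updates the network with a `Momentum` optimizer imported from the book's `common.optimizer`, whose implementation is not included in this row. A hedged sketch of the classical momentum update such an optimizer conventionally performs (the 0.01 learning rate and 0.9 coefficient are illustrative defaults, not taken from the source):

    import numpy as np

    class Momentum:
        """Classical momentum SGD: v <- m * v - lr * grad; param <- param + v."""
        def __init__(self, lr=0.01, momentum=0.9):
            self.lr, self.momentum, self.v = lr, momentum, None

        def update(self, params, grads):
            if self.v is None:
                self.v = {key: np.zeros_like(val) for key, val in params.items()}
            for key in params:
                self.v[key] = self.momentum * self.v[key] - self.lr * grads[key]
                params[key] += self.v[key]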
astrojhgu/ares
|
[
"42008c8e4bf79f0b000cc833e02a86510bce7611",
"42008c8e4bf79f0b000cc833e02a86510bce7611",
"42008c8e4bf79f0b000cc833e02a86510bce7611",
"42008c8e4bf79f0b000cc833e02a86510bce7611"
] |
[
"ares/static/Grid.py",
"ares/static/VolumeGlobal.py",
"ares/simulations/MetaGalacticBackground.py",
"ares/simulations/MultiPhaseMedium.py"
] |
[
"\"\"\"\n\nGrid.py\n\nAuthor: Jordan Mirocha\nAffiliation: University of Colorado at Boulder\nCreated on: Thu Sep 20 14:18:27 2012\n\nDescription: \n\n\"\"\"\n\nimport copy, types\nimport numpy as np\nfrom ..util.Stats import rebin\nfrom collections import Iterable\nfrom ..physics.Hydrogen import Hydrogen\nfrom ..physics.Cosmology import Cosmology\nfrom ..util.ParameterFile import ParameterFile\nfrom ..physics.CrossSections import PhotoIonizationCrossSection\nfrom ..physics.Constants import k_B, cm_per_kpc, s_per_myr, m_H, mH_amu, \\\n mHe_amu\n\nclass fake_chianti:\n def __init__(self):\n pass\n\n def z2element(self, i):\n if i == 1:\n return 'h'\n elif i == 2:\n return 'he'\n\n def element2z(self, name):\n if name == 'h':\n return 1\n elif name == 'he':\n return 2 \n\n def zion2name(self, Z, i):\n if Z == 1:\n if i == 1:\n return 'h_1'\n elif i == 2:\n return 'h_2'\n elif Z == 2:\n if i == 1:\n return 'he_1'\n elif i == 2:\n return 'he_2'\n elif i == 3:\n return 'he_3' \n\n def convertName(self, species):\n element, i = species.split('_')\n \n Z = self.element2z(element)\n \n tmp = {}\n tmp['Element'] = element\n tmp['Ion'] = self.zion2name(Z, int(i))\n tmp['Z'] = self.element2z(element)\n\n return tmp\n \nutil = fake_chianti()\n\ntiny_number = 1e-8 # A relatively small species fraction\n\nclass Grid(object):\n def __init__(self, **kwargs):\n \"\"\"\n Initialize grid object.\n \n Parameters\n ----------\n dims : int\n Number of resolution elements in grid.\n length_units : float\n Size of domain in centimeters.\n start_radius : float\n Radius (in code units) within which to ignore.\n \n \"\"\"\n \n self.pf = ParameterFile(**kwargs)\n \n self.dims = int(self.pf['grid_cells'])\n self.length_units = self.pf['length_units']\n self.start_radius = self.pf['start_radius']\n self.approx_Salpha = self.pf['approx_Salpha']\n self.log_grid = self.pf['logarithmic_grid']\n\n # Compute cell centers and edges\n if self.pf['logarithmic_grid']:\n self.r_edg = self.r = \\\n np.logspace(np.log10(self.R0), np.log10(self.length_units), \n self.dims + 1) \n else:\n self.r_edg = self.r = \\\n np.linspace(self.R0, self.length_units, self.dims + 1)\n \n # Compute interior cell walls, spacing, and mid-points \n self.r_int = self.r_edg[0:-1]\n self.dr = np.diff(self.r_edg)\n self.r_mid = rebin(self.r_edg)\n \n self.zi = 0\n \n # Override, to set ICs by cosmology\n self.cosmological_ics = self.pf['cosmological_ics']\n \n @property\n def zeros_absorbers(self):\n return np.zeros(self.N_absorbers)\n \n @property\n def zeros_absorbers2(self):\n return np.zeros([self.N_absorbers] * 2) \n \n @property\n def zeros_grid_x_absorbers(self):\n return np.zeros([self.dims, self.N_absorbers])\n \n @property\n def zeros_grid_x_absorbers2(self):\n return np.zeros([self.dims, self.N_absorbers, self.N_absorbers]) \n \n @property\n def R0(self):\n \"\"\" Start radius in length_units. \"\"\"\n return self.start_radius * self.length_units\n \n @property\n def Vsh(self):\n \"\"\" Shell volume in length_units**3. \"\"\"\n if not hasattr(self, '_Vsh_all'):\n self._Vsh_all = self.ShellVolume(self.r_edg[0:-1], self.dr)\n \n return self._Vsh_all\n \n @property \n def neutrals(self):\n \"\"\" Return list of all neutral species. \"\"\" \n if not hasattr(self, '_neutral_species'):\n self._neutral_species = []\n for element in self.elements:\n self._neutral_species.append('%s_1' % element)\n\n return self._neutral_species\n \n @property \n def ions(self):\n \"\"\" Return list of all ionized species. 
\"\"\" \n if not hasattr(self, '_ionized_species'):\n neutrals = self.neutrals\n self._ionized_species = []\n for ion in self.all_ions:\n if ion in neutrals:\n continue\n \n self._ionized_species.append(ion)\n \n return self._ionized_species\n \n @property\n def absorbers(self): \n \"\"\" Return list of absorbers (don't include electrons). \"\"\"\n if not hasattr(self, '_absorbing_species'):\n self._absorbing_species = copy.copy(self.neutrals)\n for parent in self.ions_by_parent:\n self._absorbing_species.extend(self.ions_by_parent[parent][1:-1])\n\n return self._absorbing_species\n \n @property\n def N_absorbers(self):\n \"\"\" Return number of absorbing species. \"\"\"\n if not hasattr(self, 'self._num_of_absorbers'):\n absorbers = self.absorbers\n self._num_of_absorbers = int(len(absorbers))\n \n return self._num_of_absorbers\n \n @property\n def species_abundances(self):\n \"\"\"\n Return dictionary containing abundances of parent\n elements of all ions.\n \"\"\"\n if not hasattr(self, '_species_abundances'):\n self._species_abundances = {}\n for ion in self.ions_by_parent:\n for state in self.ions_by_parent[ion]:\n self._species_abundances[state] = \\\n self.element_abundances[self.elements.index(ion)]\n \n return self._species_abundances\n \n @property\n def species(self):\n if not hasattr(self, '_species'):\n self._species = []\n for parent in self.ions_by_parent:\n for ion in self.ions_by_parent[parent]:\n self._species.append(ion)\n \n return self._species\n \n @property\n def types(self):\n \"\"\"\n Return list (matching evolving_fields) with integers describing\n species type:\n 0 = neutral\n +1 = ion\n -1 = other\n \"\"\"\n \n if not hasattr(self, '_species_types'):\n self._species_types = []\n for species in self.evolving_fields:\n if species in self.neutrals:\n self._species_types.append(0)\n elif species in self.ions:\n self._species_types.append(1)\n else:\n self._species_types.append(-1) \n \n self._species_types = np.array(self._species_types) \n \n return self._species_types \n \n @property\n def ioniz_thresholds(self):\n \"\"\"\n Return dictionary containing ionization threshold energies (in eV)\n for all absorbers.\n \"\"\" \n \n if not hasattr(self, '_ioniz_thresholds'):\n self._ioniz_thresholds = {}\n #for absorber in self.absorbers:\n #if absorber == 'h_1':\n self._ioniz_thresholds['h_1'] = 13.6\n #elif absorber == 'he_1':\n self._ioniz_thresholds['he_1'] = 24.4\n #elif absorber == 'he_2':\n self._ioniz_thresholds['he_2'] = 54.4\n \n return self._ioniz_thresholds\n \n @property\n def bf_cross_sections(self):\n \"\"\"\n Return dictionary containing functions that compute the bound-free \n absorption cross-sections for all absorbers.\n \"\"\" \n \n if not hasattr(self, 'all_xsections'):\n self._bf_xsections = {}\n #for absorber in self.absorbers:\n #ion = cc.continuum(absorber)\n #ion.vernerCross(energy = np.logspace(1, 5, 1000))\n #if absorber == 'h_1':\n self._bf_xsections['h_1'] = lambda E: \\\n PhotoIonizationCrossSection(E, species=0)\n #elif absorber == 'he_1':\n self._bf_xsections['he_1'] = lambda E: \\\n PhotoIonizationCrossSection(E, species=1)\n #elif absorber == 'he_2':\n self._bf_xsections['he_2'] = lambda E: \\\n PhotoIonizationCrossSection(E, species=2) \n \n return self._bf_xsections\n \n @property\n def x_to_n(self):\n \"\"\"\n Return dictionary containing conversion factor between species\n fraction and number density for all species.\n \"\"\"\n if not hasattr(self, '_x_to_n_converter'):\n self._x_to_n_converter = {}\n for ion in self.all_ions:\n 
self._x_to_n_converter[ion] = self.n_ref \\\n * self.species_abundances[ion] \n \n return self._x_to_n_converter\n \n @property\n def expansion(self):\n if not hasattr(self, '_expansion'):\n self.set_physics()\n return self._expansion\n \n @property\n def isothermal(self):\n if not hasattr(self, '_isothermal'):\n self.set_physics()\n return self._isothermal\n \n @property\n def secondary_ionization(self):\n if not hasattr(self, '_secondary_ionization'):\n self.set_physics()\n return self._secondary_ionization\n \n @property\n def compton_scattering(self):\n if not hasattr(self, '_compton_scattering'):\n self.set_physics()\n return self._compton_scattering\n \n @property\n def recombination(self):\n if not hasattr(self, '_recombination'):\n self.set_physics()\n return self._recombination\n \n @property\n def collisional_ionization(self):\n if not hasattr(self, '_collisional_ionization'):\n self.set_physics()\n return self._collisional_ionization \n \n @property\n def clumping_factor(self):\n if not hasattr(self, '_clumping_factor'):\n self.set_physics()\n return self._clumping_factor\n \n @property\n def hydr(self):\n if not hasattr(self, '_hydr'):\n self._hydr = Hydrogen(self.cosm, **self.pf)\n return self._hydr \n \n @property\n def cosm(self):\n if not hasattr(self, '_cosm'):\n self._cosm = Cosmology()\n return self._cosm \n \n def set_properties(self, **kwargs):\n \"\"\"\n Initialize grid properties all in one go.\n \"\"\" \n\n self.set_physics(\n isothermal=kwargs['isothermal'], \n compton_scattering=kwargs['compton_scattering'],\n secondary_ionization=kwargs['secondary_ionization'], \n expansion=kwargs['expansion'],\n recombination=kwargs['recombination'],\n clumping_factor=kwargs['clumping_factor'],\n collisional_ionization=kwargs['collisional_ionization']\n )\n\n self.set_cosmology(\n initial_redshift=kwargs['initial_redshift'], \n omega_m_0=kwargs[\"omega_m_0\"], \n omega_l_0=kwargs[\"omega_l_0\"], \n omega_b_0=kwargs[\"omega_b_0\"], \n hubble_0=kwargs[\"hubble_0\"], \n helium_by_number=kwargs['helium_by_number'], \n cmb_temp_0=kwargs[\"cmb_temp_0\"],\n approx_highz=kwargs[\"approx_highz\"])\n\n self.set_chemistry(kwargs['include_He'])\n self.set_density(kwargs['density_units'])\n self.set_ionization(kwargs['initial_ionization'])\n self.set_temperature(kwargs['initial_temperature'])\n\n def set_physics(self, isothermal=False, compton_scattering=False,\n secondary_ionization=0, expansion=False, recombination='B',\n clumping_factor=1.0, collisional_ionization=True):\n self._isothermal = isothermal\n self._compton_scattering = compton_scattering\n self._secondary_ionization = secondary_ionization\n self._expansion = expansion\n self._recombination = recombination\n self._collisional_ionization = collisional_ionization\n\n if type(clumping_factor) is not types.FunctionType:\n self._clumping_factor = lambda z: clumping_factor\n else:\n self._clumping_factor = clumping_factor\n \n if self._expansion:\n self.set_cosmology()\n \n @property\n def is_cgm_patch(self): \n if not hasattr(self, '_is_cgm_patch'):\n self.set_recombination_rate()\n \n return self._is_cgm_patch\n \n def set_recombination_rate(self, is_cgm_patch=False):\n self._is_cgm_patch = is_cgm_patch \n \n def set_cosmology(self, initial_redshift=1e3, omega_m_0=0.272, \n omega_l_0=0.728, omega_b_0=0.044, hubble_0=0.702, \n helium_by_number=None, helium_by_mass=0.2454, cmb_temp_0=2.725, \n approx_highz=False):\n \n self.zi = initial_redshift\n self._cosm = Cosmology(omega_m_0=omega_m_0, \n omega_l_0=omega_l_0, 
omega_b_0=omega_b_0,\n hubble_0=hubble_0, \n helium_by_mass=helium_by_mass,\n cmb_temp_0=cmb_temp_0, \n approx_highz=approx_highz) \n \n def set_chemistry(self, include_He=False):\n \"\"\"\n Initialize chemistry.\n \n This routine sets the chemical composition of the medium being \n simulated.\n \n Parameters\n ----------\n include_He : bool\n Solve for helium?\n\n Example\n -------\n grid = Grid(dims=32)\n grid.set_chemistry() # H-only\n \n \"\"\" \n \n self.Z = [1] \n self.abundances = [1.]\n if include_He:\n self.Z.append(2)\n self.abundances.append(self.cosm.helium_by_number)\n \n self.Z = np.array(self.Z)\n self.ions_by_parent = {} # Ions sorted by parent element in dictionary\n self.parents_by_ion = {} # From ion name, determine parent element\n self.elements = [] # Just a list of element names\n self.all_ions = [] # All ion species \n self.evolving_fields = []# Anything with an ODE we'll later solve\n \n for i, element in enumerate(self.Z):\n element_name = util.z2element(element)\n \n self.ions_by_parent[element_name] = []\n self.elements.append(element_name)\n for ion in xrange(element + 1):\n name = util.zion2name(element, ion + 1)\n self.all_ions.append(name)\n self.ions_by_parent[element_name].append(name)\n self.parents_by_ion[name] = element_name\n self.evolving_fields.append(name)\n\n self.solve_ge = False \n self.evolving_fields.append('e')\n if not self.isothermal:\n self.evolving_fields.append('Tk')\n\n # Create blank data fields \n if not hasattr(self, 'data'): \n self.data = {}\n for field in self.evolving_fields:\n self.data[field] = np.zeros(self.dims)\n \n self.abundances_by_number = self.abundances\n self.element_abundances = [1.0]\n if include_He:\n self.element_abundances.append(self.cosm.helium_by_number)\n \n # Initialize mapping between q-vector and physical quantities (dengo) \n self._set_qmap()\n\n def set_density(self, nH=None):\n \"\"\"\n Initialize hydrogen number density.\n \n Setting the gas density is necessary for computing the hydrogen \n number density, which normalizes fractional abundances of elements\n to proper number densities of all species.\n\n Parameters\n ----------\n rho0 : float, array\n Density of medium in g / cm**3. Can be a float (uniform medium),\n or an array of values the same size as the grid itself.\n \n \"\"\"\n \n if self.cosmological_ics:\n self.n_H = self.cosm.nH(self.zi) \n elif isinstance(nH, Iterable): \n self.n_H = nH\n else:\n self.n_H = nH * np.ones(self.dims) \n \n if 2 in self.Z:\n self.n_He = self.n_H * self.abundances[1]\n else:\n self.n_He = 0.0 \n \n self.n_ref = self.n_H \n \n self.data['rho'] = m_H * (self.n_H * mH_amu + self.n_He * mHe_amu)\n \n def set_temperature(self, T0):\n \"\"\"\n Set initial temperature in grid. \n \n Parameters\n ----------\n T0 : float, array\n Initial temperature in grid. Can be constant value (corresponding\n to uniform medium), or an array of values like the grid.\n \"\"\"\n \n if self.cosmological_ics:\n Tgas = self.cosm.Tgas(self.zi)\n if isinstance(T0, Iterable):\n self.data['Tk'] = np.array(Tgas)\n else:\n self.data['Tk'] = Tgas * np.ones(self.dims)\n elif isinstance(T0, Iterable):\n self.data['Tk'] = np.array(T0)\n else:\n self.data['Tk'] = T0 * np.ones(self.dims)\n \n def set_ionization(self, x=None):\n \"\"\"\n Set initial ionization state. \n \n Parameters\n ----------\n x : float, list\n Initial ionization state for all species. 
Must be a 1:1 mapping\n between values in this list and values in self.species.\n \n \"\"\" \n \n if x is not None:\n\n for j, species in enumerate(self.species):\n element, state = species.split('_')\n Z = util.element2z(element)\n i = int(state)\n \n name = util.zion2name(Z, i)\n self.data[name].fill(x[j])\n \n # Otherwise assume neutral\n else:\n for sp in self.ions:\n self.data[sp].fill(1e-8)\n for sp in self.neutrals:\n self.data[sp].fill(1.0 - 1e-8)\n \n # Set electron density\n self._set_electron_fraction()\n \n if self.solve_ge:\n self.set_gas_energy()\n \n def set_ics(self, data):\n \"\"\"\n Simple way of setting all initial conditions at once with a data \n dictionary.\n \"\"\"\n \n self.data = {}\n for key in data.keys():\n if type(data[key]) is float:\n self.data[key] = data[key]\n continue\n \n self.data[key] = data[key].copy()\n \n def create_slab(self, **kwargs):\n \"\"\" Create a slab. \"\"\"\n \n if not kwargs['slab']:\n return \n \n # Figure out where the clump is\n gridarr = np.linspace(0, 1, self.dims)\n isslab = (gridarr >= (kwargs['slab_position'] - kwargs['slab_radius'])) \\\n & (gridarr <= (kwargs['slab_position'] + kwargs['slab_radius']))\n \n # First, modify density and temperature\n if kwargs['slab_profile'] == 0:\n self.data['rho'][isslab] *= kwargs['slab_overdensity']\n self.n_H[isslab] *= kwargs['slab_overdensity']\n self.data['Tk'][isslab] = kwargs['slab_temperature']\n else:\n raise NotImplemented('only know uniform slabs')\n \n # Ionization state - could generalize this more\n for j, species in enumerate(self.species):\n element, state = species.split('_')\n Z = util.element2z(element)\n i = int(state)\n \n name = util.zion2name(Z, i)\n self.data[name][isslab] = np.ones(isslab.sum()) \\\n * kwargs['slab_ionization'][j]\n \n # Reset electron density, particle density, and gas energy\n self._set_electron_fraction()\n \n if hasattr(self, '_x_to_n_converter'): \n del self._x_to_n_converter\n \n def _set_electron_fraction(self):\n \"\"\"\n Set electron density - must have run set_density beforehand.\n \"\"\"\n \n self.data['e'] = np.zeros(self.dims)\n for i, Z in enumerate(self.Z):\n for j in np.arange(1, 1 + Z): # j = number of electrons donated by ion j + 1\n x_i_jp1 = self.data[util.zion2name(Z, j + 1)]\n self.data['e'] += j * x_i_jp1 * self.n_ref \\\n * self.element_abundances[i] \n \n self.data['e'] /= self.n_H \n \n def particle_density(self, data, z=0):\n \"\"\"\n Compute total particle number density.\n \"\"\" \n \n n = data['e'].copy()\n #for ion in self.all_ions:\n # n += data[ion] * self.x_to_n[ion] * (1. + z)**3 \\\n # / (1. + self.zi)**3\n \n if self.expansion:\n n *= self.cosm.nH(z)\n n += self.cosm.nH(z)\n \n if 2 in self.Z:\n n += self.cosm.nHe(z)\n \n else:\n n *= self.n_H\n \n n += self.n_H\n \n if 2 in self.Z:\n n += self.n_H * self.cosm.helium_by_number\n \n return n \n \n def electron_fraction(self, data, z):\n de = np.zeros(self.dims)\n for i, Z in enumerate(self.Z):\n for j in np.arange(1, 1 + Z): # j = number of electrons donated by ion j + 1\n x_i_jp1 = data[util.zion2name(Z, j + 1)]\n de += j * x_i_jp1 * self.n_ref * (1. + z)**3 / (1. + self.zi)**3 \\\n * self.element_abundances[i]\n\n return de / self.n_H\n\n def ColumnDensity(self, data):\n \"\"\" Compute column densities for all absorbing species. 
\"\"\" \n \n N = {}\n Nc = {}\n logN = {}\n for absorber in self.absorbers:\n Nc[absorber] = self.dr * data[absorber] * self.x_to_n[absorber] \n N[absorber] = np.cumsum(Nc[absorber])\n logN[absorber] = np.log10(N[absorber])\n \n return N, logN, Nc\n\n def _set_qmap(self):\n \"\"\"\n The vector 'q' is an array containing the values of all ion fractions and the\n gas energy. This routine sets up the mapping between elements in q and the\n corrresponding physical quantities.\n \n Will be in order of increasing Z, then de, then ge.\n \"\"\"\n \n self.qmap = []\n for species in self.evolving_fields:\n self.qmap.append(species)\n \n def ShellVolume(self, r, dr):\n \"\"\"\n Return volume of shell at distance r, thickness dr.\n \"\"\"\n \n return 4. * np.pi * ((r + dr)**3 - r**3) / 3. \n\n \n\n ",
"\"\"\"\n\nIntergalacticMedium.py\n\nAuthor: Jordan Mirocha\nAffiliation: University of Colorado at Boulder\nCreated on: Fri May 24 11:31:06 2013\n\nDescription: \n\n\"\"\"\n\nimport numpy as np\nfrom ..util.Warnings import *\nfrom ..util import ProgressBar\nfrom ..physics.Constants import *\nimport types, os, re, sys, pickle\nfrom ..util.Misc import num_freq_bins\nfrom ..physics import SecondaryElectrons\nfrom scipy.integrate import dblquad, romb, simps, quad, trapz\nfrom ..util.Warnings import tau_tab_z_mismatch, tau_tab_E_mismatch\n\ntry:\n import h5py\n have_h5py = True \nexcept ImportError:\n have_h5py = False\n\ntry:\n from mpi4py import MPI\n rank = MPI.COMM_WORLD.rank\n size = MPI.COMM_WORLD.size\nexcept ImportError:\n rank = 0\n size = 1\n\nlog10 = np.log(10.)\nE_th = np.array([13.6, 24.4, 54.4])\n\ndefkwargs = \\\n{\n 'zf':None, \n 'xray_flux':None, \n 'epsilon_X': None,\n 'Gamma': None,\n 'gamma': None,\n 'return_rc': False, \n 'energy_units':False, \n 'Emax': None,\n #'zxavg':0.0,\n #'igm':True,\n 'xavg': 0.0,\n 'igm_h_1': 1.0,\n 'igm_h_2': 0.0,\n 'igm_he_2': 0.0,\n 'igm_he_3': 0.0,\n 'cgm_h_1': 1.0,\n 'cgm_h_2': 0.0,\n 'cgm_he_2': 0.0,\n 'cgm_he_3': 0.0,\n 'igm_e': 0.0,\n}\n\nspecies_i_to_str = {0:'h_1', 1:'he_1', 2:'he_2'}\n\nclass GlobalVolume(object):\n def __init__(self, background):\n \"\"\"\n Initialize a GlobalVolume.\n \n Parameters\n ----------\n background : ares.solvers.UniformBackground instance.\n \n \"\"\"\n\n self.background = background\n self.pf = background.pf\n self.grid = background.grid\n self.cosm = background.cosm\n self.hydr = background.hydr\n self.pops = background.pops\n self.Npops = len(self.pops)\n \n # Include helium opacities approximately?\n self.approx_He = self.pf['include_He'] and self.pf['approx_He']\n \n # Include helium opacities self-consistently?\n self.self_consistent_He = self.pf['include_He'] \\\n and (not self.pf['approx_He'])\n\n self.esec = \\\n SecondaryElectrons(method=self.pf[\"secondary_ionization\"]) \n\n # Choose function for computing bound-free absorption cross-sections \n if self.pf['approx_sigma']:\n from ..physics.CrossSections import \\\n ApproximatePhotoIonizationCrossSection as sigma\n else:\n from ..physics.CrossSections import \\\n PhotoIonizationCrossSection as sigma\n\n self.sigma = sigma\n self.sigma0 = sigma(E_th[0]) # Hydrogen ionization threshold\n\n self._set_integrator()\n\n @property\n def rates_no_RT(self):\n if not hasattr(self, '_rates_no_RT'):\n self._rates_no_RT = \\\n {'k_ion': np.zeros((self.grid.dims,\n self.grid.N_absorbers)),\n 'k_heat': np.zeros((self.grid.dims,\n self.grid.N_absorbers)),\n 'k_ion2': np.zeros((self.grid.dims,\n self.grid.N_absorbers, self.grid.N_absorbers)),\n }\n\n return self._rates_no_RT\n\n #def _fetch_tau(self, pop, zpf, Epf):\n # \"\"\"\n # Look for optical depth tables. Supply corrected energy and redshift\n # arrays if there is a mistmatch between those generated from information\n # in the parameter file and those found in the optical depth table.\n # \n # .. note:: This will only be called from UniformBackground, and on\n # populations which are using the generator framework.\n # \n # Parameters\n # ----------\n # popid : int\n # ID # for population of interest.\n # zpf : np.ndarray\n # What the redshifts should be according to the parameter file. 
\n # Epf : np.ndarray\n # What the energies should be according to the parameter file.\n # \n # Returns\n # -------\n # Energies and redshifts, potentially revised from Epf and zpf.\n # \n # \"\"\"\n # \n # for i in range(self.Npops):\n # if pop == self.pops[i]:\n # band = self.background.bands_by_pop[i]\n # break\n # \n # # First, look in CWD or $ARES (if it exists)\n # self.tabname = self._load_tau(pop, pop.pf['tau_prefix'])\n # \n # if not self.tabname:\n # return zpf, Epf, None\n # \n # # If we made it this far, we found a table that may be suitable\n # ztab, Etab, tau = self._read_tau(self.tabname)\n # \n # # Return right away if there's no potential for conflict\n # if (zpf is None) and (Epf is None):\n # return ztab, Etab, tau\n # \n # # Figure out if the tables need fixing \n # zmax_ok = \\\n # (ztab.max() >= zpf.max()) or \\\n # np.allclose(ztab.max(), zpf.max())\n # zmin_ok = \\\n # (ztab.min() <= zpf.min()) or \\\n # np.allclose(ztab.min(), zpf.min())\n # \n # Emin_ok = \\\n # (Etab.min() <= Epf.min()) or \\\n # np.allclose(Etab.min(), Epf.min())\n # \n # # Results insensitive to Emax (so long as its relatively large)\n # # so be lenient with this condition (100 eV or 1% difference\n # # between parameter file and lookup table)\n # Emax_ok = np.allclose(Etab.max(), Epf.max(), atol=100., rtol=1e-2)\n # \n # # Check redshift bounds\n # if not (zmax_ok and zmin_ok):\n # if not zmax_ok:\n # tau_tab_z_mismatch(self, zmin_ok, zmax_ok, ztab)\n # sys.exit(1)\n # else:\n # if self.pf['verbose']:\n # tau_tab_z_mismatch(self, zmin_ok, zmax_ok, ztab)\n # \n # if not (Emax_ok and Emin_ok):\n # if self.pf['verbose']:\n # tau_tab_E_mismatch(pop, self.tabname, Emin_ok, Emax_ok, Etab)\n # \n # if Etab.max() < Epf.max():\n # sys.exit(1)\n # \n # # Correct for inconsistencies between parameter file and table\n # # By effectively masking out those elements with tau -> inf\n # if Epf.min() > Etab.min():\n # Ediff = Etab - Epf.min()\n # i_E0 = np.argmin(np.abs(Ediff))\n # if Ediff[i_E0] < 0:\n # i_E0 += 1\n # \n # #tau[:,0:i_E0+1] = np.inf\n # else:\n # i_E0 = 0\n # \n # if Epf.max() < Etab.max():\n # Ediff = Etab - Epf.max()\n # i_E1 = np.argmin(np.abs(Ediff))\n # if Ediff[i_E1] < 0:\n # i_E1 += 1\n # \n # #tau[:,i_E1+1:] = np.inf\n # else:\n # i_E1 = None\n # \n # # We're done!\n # return ztab, Etab[i_E0:i_E1], tau[:,i_E0:i_E1]\n\n @property\n def E(self):\n if not hasattr(self, '_E'):\n self._tabulate_atomic_data()\n \n return self._E\n\n @property\n def sigma_E(self):\n if not hasattr(self, '_sigma_E'):\n self._tabulate_atomic_data()\n \n return self._sigma_E\n \n def _tabulate_atomic_data(self):\n \"\"\"\n Pre-compute cross sections and such for each source population.\n \n Returns\n -------\n Nothing. 
Sets the following attributes:\n \n sigma_E\n log_sigma_E\n fheat, flya, fion\n \n \"\"\"\n\n # Remember: these will all be [Npops, Nbands/pop, Nenergies/band]\n self._E = self.background.energies\n self.logE = [[] for k in range(self.Npops)]\n self.dlogE = [[] for k in range(self.Npops)]\n self.fheat = [[] for k in range(self.Npops)]\n self.flya = [[] for k in range(self.Npops)]\n \n # These are species dependent\n self._sigma_E = {}\n self.fion = {}\n for species in ['h_1', 'he_1', 'he_2']:\n self._sigma_E[species] = [[] for k in range(self.Npops)]\n self.fion[species] = [[] for k in range(self.Npops)]\n \n ##\n # Note: If secondary_ionization > 1, there will be an ionized fraction\n # dimension in fion and fheat.\n ## \n \n # Loop over populations\n for i, pop in enumerate(self.pops):\n \n # This means the population is completely approximate\n if not np.any(self.background.solve_rte[i]):\n self.logE[i] = [None]\n self.dlogE[i] = [None]\n self.fheat[i] = [None]\n self.flya[i] = [None]\n \n for species in ['h_1', 'he_1', 'he_2']:\n self.fion[species][i] = [None]\n self._sigma_E[species][i] = [None]\n \n continue\n \n ##\n # If we make it here, the population has at least one band that\n # requires a detailed solution to the RTE \n ##\n \n Nbands = len(self.background.energies[i])\n \n self.logE[i] = [None for k in range(Nbands)]\n self.dlogE[i] = [None for k in range(Nbands)]\n self.fheat[i] = [None for k in range(Nbands)]\n self.flya[i] = [None for k in range(Nbands)]\n for species in ['h_1', 'he_1', 'he_2']:\n self.fion[species][i] = [None for k in range(Nbands)]\n self._sigma_E[species][i] = [None for k in range(Nbands)]\n\n # Loop over each band for this population\n for j, band in enumerate(self.background.bands_by_pop[i]):\n\n if band is None:\n continue\n \n need_tab = self.pops[i].is_xray_src \\\n and np.any(np.array(band) > E_LL)\n \n if (not self.background.solve_rte[i][j]) or \\\n (not need_tab):\n continue\n else: \n self.fheat[i][j] = \\\n [np.ones([self.background.energies[i][j].size, \n len(self.esec.x)]) \\\n for j in range(Nbands)]\n self.flya[i] = \\\n [np.ones([self.background.energies[i][j].size, \n len(self.esec.x)]) \\\n for j in range(Nbands)]\n \n for species in ['h_1', 'he_1', 'he_2']:\n if self.esec.method > 1:\n self._sigma_E[species][i] = \\\n [np.ones([self.background.energies[i][j].size, \n len(self.esec.x)]) \\\n for j in range(Nbands)]\n self.fion[species][i] = \\\n [np.ones([self.background.energies[i][j].size, \n len(self.esec.x)]) \\\n for j in range(Nbands)]\n\n else:\n self._sigma_E[species][i] = [None for k in range(Nbands)]\n self.fion[species][i] = [None for k in range(Nbands)]\n self.fheat[i] = [None for k in range(Nbands)]\n self.flya[i] = [None for k in range(Nbands)] \n \n # More convenient variables\n E = self._E[i][j]\n N = E.size\n\n # Compute some things we need, like bound-free cross-section\n self.logE[i][j] = np.log10(E)\n self.dlogE[i][j] = np.diff(self.logE[i][j])\n \n # \n for k, species in enumerate(['h_1', 'he_1', 'he_2']):\n self._sigma_E[species][i][j] = \\\n np.array(map(lambda E: self.sigma(E, k), E))\n\n # Pre-compute secondary ionization and heating factors\n if self.esec.method > 1:\n \n # Don't worry: we'll fill these in in a sec!\n self.fheat[i][j] = np.ones([N, len(self.esec.x)])\n self.flya[i][j] = np.ones([N, len(self.esec.x)])\n \n # Must evaluate at ELECTRON energy, not photon energy\n for k, nrg in enumerate(E - E_th[0]):\n self.fheat[i][j][k] = \\\n self.esec.DepositionFraction(self.esec.x, E=nrg, \n 
channel='heat')\n self.fion['h_1'][i][j][k] = \\\n self.esec.DepositionFraction(self.esec.x, E=nrg, \n channel='h_1')\n \n if self.pf['secondary_lya']:\n self.flya[i][j][k] = \\\n self.esec.DepositionFraction(self.esec.x, E=nrg, \n channel='lya') \n \n # Helium\n if self.pf['include_He'] and not self.pf['approx_He']:\n \n # Don't worry: we'll fill these in in a sec!\n self.fion['he_1'][i][j] = np.ones([N, len(self.esec.x)])\n self.fion['he_2'][i][j] = np.ones([N, len(self.esec.x)])\n \n for k, nrg in enumerate(E - E_th[1]):\n self.fion['he_1'][i][j][k] = \\\n self.esec.DepositionFraction(self.esec.x, \n E=nrg, channel='he_1')\n \n for k, nrg in enumerate(E - E_th[2]):\n self.fion['he_2'][i][j][k] = \\\n self.esec.DepositionFraction(self.esec.x, \n E=nrg, channel='he_2') \n \n else:\n self.fion['he_1'][i][j] = np.zeros([N, len(self.esec.x)])\n self.fion['he_2'][i][j] = np.zeros([N, len(self.esec.x)])\n \n \n \n \n \n return \n \n def _set_integrator(self):\n self.integrator = self.pf[\"unsampled_integrator\"]\n self.sampled_integrator = self.pf[\"sampled_integrator\"]\n self.rtol = self.pf[\"integrator_rtol\"]\n self.atol = self.pf[\"integrator_atol\"]\n self.divmax = int(self.pf[\"integrator_divmax\"])\n \n #def _read_tau(self, fn):\n # \"\"\" Read optical depth table. \"\"\"\n # \n # if type(fn) is dict:\n # \n # E0 = fn['E'].min()\n # E1 = fn['E'].max()\n # E = fn['E']\n # z = fn['z']\n # x = z + 1\n # N = E.size\n # \n # R = x[1] / self.x[0]\n # \n # tau = fn['tau']\n #\n # elif re.search('hdf5', fn):\n #\n # f = h5py.File(self.tabname, 'r')\n #\n # E0 = min(f['photon_energy'].value)\n # E1 = max(f['photon_energy'].value)\n # E = f['photon_energy'].value\n # z = f['redshift'].value\n # x = z + 1\n # N = E.size\n # \n # R = x[1] / x[0]\n # \n # tau = f['tau'].value\n # f.close()\n #\n # elif re.search('npz', fn) or re.search('pkl', fn): \n #\n # if re.search('pkl', fn):\n # f = open(fn, 'rb')\n # data = pickle.load(f)\n # else:\n # f = open(fn, 'r')\n # data = dict(np.load(f))\n # \n # E0 = data['E'].min()\n # E1 = data['E'].max() \n # E = data['E']\n # z = data['z']\n # x = z + 1\n # N = E.size\n # \n # R = x[1] / x[0]\n # \n # tau = tau = data['tau']\n # f.close()\n # else:\n # raise NotImplemented('Don\\'t know how to read %s.' 
% fn)\n #\n # return z, E, tau\n \n #def _tau_name(self, pop, suffix='hdf5'):\n # \"\"\"\n # Return name of table based on its properties.\n # \"\"\"\n #\n # if not have_h5py:\n # suffix == 'pkl'\n #\n # HorHe = 'He' if self.pf['include_He'] else 'H'\n #\n # zf = self.pf['final_redshift']\n # zi = self.pf['initial_redshift']\n #\n # L, N = self._tau_shape(pop)\n #\n # E0 = pop.pf['pop_Emin']\n # E1 = pop.pf['pop_Emax']\n #\n # fn = lambda z1, z2, E1, E2: \\\n # 'optical_depth_%s_%ix%i_z_%i-%i_logE_%.2g-%.2g.%s' \\\n # % (HorHe, L, N, z1, z2, E1, E2, suffix)\n #\n # return fn(zf, zi, np.log10(E0), np.log10(E1)), fn\n \n #def _load_tau(self, pop, prefix=None):\n # \"\"\"\n # Find an optical depth table.\n # \"\"\"\n # \n # fn, fn_func = self._tau_name(pop)\n #\n # if prefix is None:\n # ares_dir = os.environ.get('ARES')\n # if not ares_dir:\n # print \"No ARES environment variable.\"\n # return None\n # \n # input_dirs = [os.path.join(ares_dir,'input','optical_depth')]\n #\n # else:\n # if type(prefix) is str:\n # input_dirs = [prefix]\n # else:\n # input_dirs = prefix\n #\n # guess = os.path.join(input_dirs[0], fn)\n # if os.path.exists(guess):\n # return guess\n #\n # ## Find exactly what table should be\n # zmin, zmax, Nz, lEmin, lEmax, chem, pre, post = self._parse_tab(fn)\n #\n # ok_matches = []\n # perfect_matches = []\n # \n # # Loop through input directories\n # for input_dir in input_dirs:\n # \n # # Loop over files in input_dir, look for best match\n # for fn1 in os.listdir(input_dir):\n # \n # if re.search('hdf5', fn1) and (not have_h5py):\n # continue\n #\n # tab_name = os.path.join(input_dir, fn1)\n # \n # try:\n # zmin_f, zmax_f, Nz_f, lEmin_f, lEmax_f, chem_f, p1, p2 = \\\n # self._parse_tab(fn1)\n # except:\n # continue\n #\n # # Dealbreakers\n # if Nz_f != Nz:\n # continue\n # if zmax_f < zmax:\n # continue\n # if chem_f != chem:\n # continue\n #\n # # Continue with possible matches\n # for fmt in ['pkl', 'npz', 'hdf5']:\n #\n # if fn1 == fn and fmt == self.pf['preferred_format']:\n # perfect_matches.append(tab_name)\n # continue\n #\n # if c and fmt == self.pf['preferred_format']:\n # perfect_matches.append(tab_name)\n # continue\n #\n # # If number of redshift bins and energy range right...\n # if re.search(pre, fn1) and re.search(post, fn1):\n # if re.search(fmt, fn1) and fmt == self.pf['preferred_format']:\n # perfect_matches.append(tab_name)\n # else:\n # ok_matches.append(tab_name)\n # \n # # If number of redshift bins is right...\n # elif re.search(pre, fn1):\n # \n # if re.search(fmt, fn1) and fmt == self.pf['preferred_format']:\n # perfect_matches.append(tab_name)\n # else:\n # ok_matches.append(tab_name)\n # \n # if perfect_matches:\n # return perfect_matches[0]\n # elif ok_matches:\n # return ok_matches[0]\n # else:\n # return None\n \n #def _parse_tab(self, fn):\n # \n # tmp1, tmp2 = fn.split('_z_')\n # pre = tmp1[0:tmp1.rfind('x')]\n # red, tmp3 = fn.split('_logE_')\n # post = '_logE_' + tmp3.replace('.hdf5', '')\n # \n # # Find exactly what table should be\n # zmin, zmax = map(float, red[red.rfind('z')+2:].partition('-')[0::2])\n # logEmin, logEmax = map(float, tmp3[tmp3.rfind('E')+1:tmp3.rfind('.')].partition('-')[0::2])\n # \n # Nz = pre[pre.rfind('_')+1:]\n # \n # # Hack off Nz string and optical_depth_\n # chem = pre.strip(Nz)[14:-1]#.strip('optical_depth_')\n # \n # return zmin, zmax, int(Nz), logEmin, logEmax, chem, pre, post\n # \n #def _tau_shape(self, pop):\n # \"\"\"\n # Determine dimensions of optical depth table.\n # \n # Unfortunately, this is a 
bit redundant with the procedure in\n # self._init_xrb, but that's the way it goes.\n # \"\"\"\n # \n # # Set up log-grid in parameter x = 1 + z\n # x = np.logspace(np.log10(1+self.pf['final_redshift']),\n # np.log10(1+self.pf['initial_redshift']),\n # int(pop.pf['pop_tau_Nz']))\n # z = x - 1.\n # logx = np.log10(x)\n # logz = np.log10(z)\n #\n # # Constant ratio between elements in x-grid\n # R = x[1] / x[0]\n # logR = np.log10(R)\n # \n # E0 = pop.pf['pop_Emin']\n # \n # # Create mapping to frequency space\n # E = 1. * E0\n # n = 1\n # while E < pop.pf['pop_Emax']:\n # E = E0 * R**(n - 1)\n # n += 1 \n # \n # # Set attributes for dimensions of optical depth grid\n # L = len(x)\n # \n # # Frequency grid must be index 1-based.\n # N = num_freq_bins(L, zi=self.pf['initial_redshift'], \n # zf=self.pf['final_redshift'], Emin=E0, \n # Emax=pop.pf['pop_Emax'])\n # N -= 1\n # \n # return L, N\n \n def RestFrameEnergy(self, z, E, zp):\n \"\"\"\n Return energy of a photon observed at (z, E) and emitted at zp.\n \"\"\"\n \n return E * (1. + zp) / (1. + z)\n \n def ObserverFrameEnergy(self, z, Ep, zp):\n \"\"\"\n What is the energy of a photon observed at redshift z and emitted \n at redshift zp and energy Ep?\n \"\"\"\n\n return Ep * (1. + z) / (1. + zp)\n\n def Jc(self, z, E):\n \"\"\"\n Flux corresponding to one photon per hydrogen atom at redshift z.\n \"\"\"\n\n return c * self.cosm.nH0 * (1. + z)**3 / 4. / np.pi \\\n / (E * erg_per_ev / h)\n\n def rate_to_coefficient(self, z, species=0, zone='igm', **kw):\n \"\"\"\n Convert an ionization/heating rate to a rate coefficient.\n \n Provides units of per atom.\n \"\"\"\n\n if self.pf['photon_counting']:\n prefix = zone\n else:\n prefix = 'igm'\n \n if species == 0: \n weight = 1. / self.cosm.nH(z) / kw['%s_h_1' % prefix]\n elif species == 1:\n weight = 1. / self.cosm.nHe(z) / kw['%s_he_1' % prefix]\n elif species == 2:\n weight = 1. / self.cosm.nHe(z) / kw['%s_he_2' % prefix]\n\n return weight\n\n def coefficient_to_rate(self, z, species=0, **kw):\n return 1. / self.rate_to_coefficient(z, species, **kw)\n\n def _fix_kwargs(self, functionify=False, popid=0, band=0, **kwargs):\n\n kw = defkwargs.copy()\n kw.update(kwargs)\n\n pop = self.pops[popid]\n\n if functionify and type(kw['xavg']) is not types.FunctionType:\n tmp = kw['xavg']\n kw['xavg'] = lambda z: tmp\n\n if kw['zf'] is None and pop is not None:\n kw['zf'] = pop.zform\n \n if not self.background.solve_rte[popid][band]:\n pass\n elif (kw['Emax'] is None) and self.background.solve_rte[popid][band] and \\\n np.any(self.background.bands_by_pop[popid] > pop.pf['pop_EminX']):\n \n kw['Emax'] = self.background.energies[popid][band][-1]\n \n return kw\n \n def HeatingRate(self, z, species=0, popid=0, band=0, **kwargs):\n \"\"\"\n Compute heating rate density due to emission from this population. \n \n Parameters\n ----------\n z : int, float\n Redshift of interest.\n species : int\n Atom whose liberated electrons cause heating.\n Can be 0, 1, or 2 (HI, HeI, and HeII, respectively)\n \n ===============\n relevant kwargs\n ===============\n xray_flux : np.ndarray\n Array of fluxes corresponding to photon energies in self.igm.E.\n return_rc : bool\n Return actual heating rate, or rate coefficient for heating?\n Former has units of erg s**-1 cm**-3, latter has units of \n erg s**-1 cm**-3 atom**-1. 
\n \n Returns\n -------\n Proper heating rate density in units of in erg s**-1 cm**-3 at redshift z,\n due to electrons previously bound to input species.\n\n \"\"\"\n \n pop = self.pops[popid]\n \n if not pop.pf['pop_heat_src_igm'] or (z >= pop.zform):\n return 0.0 \n \n if pop.pf['pop_heat_rate'] is not None:\n return pop.HeatingRate(z)\n \n # Grab defaults, do some patches if need be \n kw = self._fix_kwargs(**kwargs)\n \n species_str = species_i_to_str[species]\n\n if pop.pf['pop_k_heat_igm'] is not None:\n return pop.pf['pop_k_heat_igm'](z)\n \n if band is not None:\n solve_rte = self.background.solve_rte[popid][band]\n else:\n solve_rte = False \n \n # Compute fraction of photo-electron energy deposited as heat\n if pop.pf['pop_fXh'] is None:\n \n # Interpolate in energy and ionized fraction\n if (self.esec.method > 1) and solve_rte:\n if kw['igm_e'] <= self.esec.x[0]:\n fheat = self.fheat[popid][band][:,0]\n else:\n i_x = np.argmin(np.abs(kw['igm_e'] - self.esec.x))\n if self.esec.x[i_x] > kw['igm_e']:\n i_x -= 1\n \n j = i_x + 1 \n \n fheat = self.fheat[popid][band][:,i_x] \\\n + (self.fheat[popid][band][:,j] - self.fheat[popid][band][:,i_x]) \\\n * (kw['igm_e'] - self.esec.x[i_x]) \\\n / (self.esec.x[j] - self.esec.x[i_x]) \n elif self.esec.method > 1:\n raise ValueError('Only know how to do advanced secondary ionization with solve_rte=True')\n else:\n fheat = self.esec.DepositionFraction(kw['igm_e'])[0]\n\n else:\n fheat = pop.pf['pop_fXh']\n \n # Assume heating rate density at redshift z is only due to emission\n # from sources at redshift z\n if not solve_rte:\n weight = self.rate_to_coefficient(z, species, **kw)\n \n Lx = pop.LuminosityDensity(z, Emin=pop.pf['pop_Emin_xray'], \n Emax=pop.pf['pop_Emax'])\n \n return weight * fheat * Lx * (1. 
+ z)**3\n \n ##\n # Otherwise, do the full calculation\n ##\n \n # Re-normalize to help integrator\n norm = J21_num * self.sigma0\n \n # Computes excess photo-electron energy due to ionizations by\n # photons with energy E (normalized by sigma0 * Jhat)\n if kw['fluxes'][popid] is None:\n\n # If we're approximating helium, must add contributions now\n # since we'll never explicitly call this method w/ species=1.\n if self.approx_He:\n integrand = lambda E, zz: \\\n self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg']) \\\n * (self.sigma(E) * (E - E_th[0]) \\\n + self.cosm.y * self.sigma(E, species=1) * (E - E_th[1])) \\\n * fheat / norm / ev_per_hz\n \n # Otherwise, just heating via hydrogen photo-electrons\n else:\n integrand = lambda E, zz: \\\n self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'], \n zxavg=kw['zxavg']) * self.sigma(E, species=1) \\\n * (E - E_th[species]) * fheat / norm / ev_per_hz\n \n # This means the fluxes have been computed already - integrate\n # over discrete set of points\n else:\n \n integrand = self.sigma_E[species_str][popid][band] \\\n * (self._E[popid][band] - E_th[species])\n\n if self.approx_He:\n integrand += self.cosm.y * self.sigma_E['he_1'][popid][band] \\\n * (self._E[popid][band] - E_th[1])\n \n integrand *= kw['fluxes'][popid][band] * fheat / norm / ev_per_hz\n \n # Compute integral over energy\n if type(integrand) == types.FunctionType:\n heat, err = dblquad(integrand, z, kw['zf'], lambda a: self.E0, \n lambda b: kw['Emax'], epsrel=self.rtol, epsabs=self.atol)\n else:\n if kw['Emax'] is not None:\n imax = np.argmin(np.abs(self._E[popid][band] - kw['Emax']))\n if imax == 0:\n return 0.0\n elif imax == (len(self._E[popid][band]) - 1): \n imax = None \n \n if self.sampled_integrator == 'romb':\n raise ValueError(\"Romberg's method cannot be used for integrating subintervals.\")\n heat = romb(integrand[0:imax] * self.E[0:imax], \n dx=self.dlogE[0:imax])[0] * log10\n else:\n heat = simps(integrand[0:imax] * self._E[popid][band][0:imax], \n x=self.logE[popid][band][0:imax]) * log10\n \n else:\n imin = np.argmin(np.abs(self._E[popid][band] - pop.pf['pop_Emin']))\n \n if self.sampled_integrator == 'romb':\n heat = romb(integrand[imin:] * self._E[popid][band][imin:], \n dx=self.dlogE[popid][band][imin:])[0] * log10\n elif self.sampled_integrator == 'trapz':\n heat = np.trapz(integrand[imin:] * self._E[popid][band][imin:], \n x=self.logE[popid][band][imin:]) * log10\n else:\n heat = simps(integrand[imin:] * self._E[popid][band][imin:], \n x=self.logE[popid][band][imin:]) * log10\n \n # Re-normalize, get rid of per steradian units\n heat *= 4. * np.pi * norm * erg_per_ev\n\n # Currently a rate coefficient, returned value depends on return_rc \n if kw['return_rc']:\n pass\n else:\n heat *= self.coefficient_to_rate(z, species, **kw)\n\n return heat \n \n def IonizationRateCGM(self, z, species=0, popid=0, band=0, **kwargs):\n \"\"\"\n Compute growth rate of HII regions.\n\n Parameters\n ----------\n z : float\n current redshift\n species : int\n Ionization rate for what atom?\n Can be 0, 1, or 2 (HI, HeI, and HeII, respectively)\n \n ===============\n relevant kwargs\n ===============\n fluxes : np.ndarray\n Array of fluxes corresponding to photon energies in self.igm.E.\n return_rc : bool\n Return actual heating rate, or rate coefficient for heating?\n Former has units of erg s**-1 cm**-3, latter has units of \n erg s**-1 cm**-3 atom**-1. \n\n Returns\n -------\n Ionization rate. 
Units determined by value of return_rc keyword\n argument, which is False by default.\n\n \"\"\"\n \n pop = self.pops[popid]\n \n if band is not None:\n b = self.background.bands_by_pop[popid][band]\n if not np.any(np.array(b) > E_LL):\n return 0.0\n if not np.allclose(b[0], E_LL, atol=0.1, rtol=0):\n return 0.0\n else:\n b = [13.6, 24.6]\n \n if (not pop.pf['pop_ion_src_cgm']) or (z > pop.zform):\n return 0.0\n \n # Need some guidance from 1-D calculations to do this\n if species > 0:\n return 0.0\n\n if pop.pf['pop_ion_rate'] is not None:\n return pop.IonizationRateCGM(z) \n\n kw = defkwargs.copy()\n kw.update(kwargs)\n\n if pop.pf['pop_k_ion_cgm'] is not None:\n return self.pf['pop_k_ion_cgm'](z)\n\n if kw['return_rc']:\n weight = self.rate_to_coefficient(z, species, **kw)\n else:\n weight = 1.0\n \n Qdot = pop.PhotonLuminosityDensity(z, Emin=13.6, Emax=24.6)\n \n return weight * Qdot * (1. + z)**3\n \n def IonizationRateIGM(self, z, species=0, popid=0, band=0, **kwargs):\n \"\"\"\n Compute volume averaged hydrogen ionization rate.\n \n Parameters\n ----------\n z : float\n redshift\n species : int\n HI, HeI, or HeII (species=0, 1, 2, respectively)\n \n Returns\n -------\n Volume averaged ionization rate in units of ionizations per \n second. If return_rc=True, will be in units of ionizations per\n second per atom.\n \n \"\"\"\n\n pop = self.pops[popid]\n\n # z between zform, zdead? must be careful for BHs\n if (not pop.pf['pop_ion_src_igm']) or (z > pop.zform):\n return 0.0\n\n # Grab defaults, do some patches if need be\n kw = self._fix_kwargs(**kwargs)\n \n species_str = species_i_to_str[species]\n\n if pop.pf['pop_k_ion_igm'] is not None:\n return pop.pf['pop_k_ion_igm'](z)\n\n if band is not None:\n solve_rte = self.background.solve_rte[popid][band]\n else:\n solve_rte = False\n\n if (not solve_rte) or \\\n (not np.any(self.background.bands_by_pop[popid] > pop.pf['pop_EminX'])):\n \n Lx = pop.LuminosityDensity(z, Emin=pop.pf['pop_Emin_xray'], \n Emax=pop.pf['pop_Emax'])\n \n weight = self.rate_to_coefficient(z, species, **kw)\n primary = weight * Lx \\\n * (1. + z)**3 / pop.pf['pop_Ex'] / erg_per_ev\n fion = self.esec.DepositionFraction(kw['igm_e'], channel='h_1')[0]\n\n return primary * (1. + fion) * (pop.pf['pop_Ex'] - E_th[0]) \\\n / E_th[0]\n\n # Full calculation - much like computing integrated flux\n norm = J21_num * self.sigma0\n \n # Integrate over function\n if kw['fluxes'][popid] is None:\n integrand = lambda E, zz: \\\n self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'], \n zxavg=kw['zxavg']) * self.sigma(E, species=species) \\\n / norm / ev_per_hz\n \n ion, err = dblquad(integrand, z, kw['zf'], lambda a: self.E0, \n lambda b: kw['Emax'], epsrel=self.rtol, epsabs=self.atol) \n \n # Integrate over set of discrete points\n else: \n integrand = self.sigma_E[species_str][popid][band] \\\n * kw['fluxes'][popid][band] / norm / ev_per_hz\n \n if self.sampled_integrator == 'romb':\n ion = romb(integrand * self.E[popid][band], \n dx=self.dlogE[popid][band])[0] * log10\n else:\n ion = simps(integrand * self.E[popid][band], \n x=self.logE[popid][band]) * log10\n \n # Re-normalize\n ion *= 4. 
* np.pi * norm\n \n # Currently a rate coefficient, returned value depends on return_rc\n if kw['return_rc']:\n pass\n else:\n ion *= self.coefficient_to_rate(z, species, **kw) \n \n return ion\n \n def SecondaryIonizationRateIGM(self, z, species=0, donor=0, popid=0, \n band=0, **kwargs):\n \"\"\"\n Compute volume averaged secondary ionization rate.\n\n Parameters\n ----------\n z : float\n redshift\n species : int\n Ionization rate of what atom?\n Can be 0, 1, or 2 (HI, HeI, and HeII, respectively)\n donor : int\n Which atom gave the electron?\n Can be 0, 1, or 2 (HI, HeI, and HeII, respectively) \n\n ===============\n relevant kwargs\n ===============\n fluxes : np.ndarray\n Array of fluxes corresponding to photon energies in self.igm.E.\n return_rc : bool\n Return actual heating rate, or rate coefficient for heating?\n Former has units of erg s**-1 cm**-3, latter has units of \n erg s**-1 cm**-3 atom**-1. \n\n Returns\n -------\n Volume averaged ionization rate due to secondary electrons, \n in units of ionizations per second.\n\n \"\"\" \n \n pop = self.pops[popid]\n \n if self.pf['secondary_ionization'] == 0:\n return 0.0\n\n if not pop.pf['pop_ion_src_igm']:\n return 0.0 \n\n if band is not None:\n solve_rte = self.background.solve_rte[popid][band]\n else:\n solve_rte = False\n\n # Computed in IonizationRateIGM in this case\n if not solve_rte:\n return 0.0\n\n if not np.any(self.background.bands_by_pop[popid] > pop.pf['pop_EminX']):\n return 0.0\n \n if ((donor or species) in [1,2]) and (not self.pf['include_He']):\n return 0.0\n\n # Grab defaults, do some patches if need be\n kw = self._fix_kwargs(**kwargs)\n\n #if self.pf['gamma_igm'] is not None:\n # return self.pf['gamma_igm'](z)\n\n species_str = species_i_to_str[species]\n donor_str = species_i_to_str[donor]\n\n if self.esec.method > 1 and solve_rte:\n\n fion_const = 1.\n if kw['igm_e'] == 0:\n fion = self.fion[species_str][popid][band][:,0]\n else:\n i_x = np.argmin(np.abs(kw['igm_e'] - self.esec.x))\n if self.esec.x[i_x] > kw['igm_e']:\n i_x -= 1\n\n j = i_x + 1 \n\n fion = self.fion[species_str][popid][band][:,i_x] \\\n + (self.fion[species_str][popid][band][:,j] - self.fion[species_str][popid][:,i_x]) \\\n * (kw['igm_e'] - self.esec.x[i_x]) \\\n / (self.esec.x[j] - self.esec.x[i_x])\n elif self.esec.method > 1:\n raise ValueError('Only know how to do advanced secondary ionization with solve_rte=True')\n else:\n fion = 1.0\n fion_const = self.esec.DepositionFraction(kw['igm_e'], \n channel=species_str)[0]\n\n norm = J21_num * self.sigma0\n \n if kw['fluxes'][popid] is None: \n if self.pf['approx_He']: # assumes lower integration limit > 4 Ryd\n integrand = lambda E, zz: \\\n self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'], \n zxavg=kw['zxavg']) * (self.sigma(E) * (E - E_th[0]) \\\n + self.cosm.y * self.sigma(E, 1) * (E - E_th[1])) \\\n / E_th[0] / norm / ev_per_hz\n else:\n integrand = lambda E, zz: \\\n self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'], \n zxavg=kw['zxavg']) * self.sigma(E) * (E - E_th[0]) \\\n / E_th[0] / norm / ev_per_hz\n else:\n integrand = fion * self.sigma_E[donor_str][popid][band] \\\n * (self.E[popid][band] - E_th[donor])\n \n if self.pf['approx_He']:\n integrand += self.cosm.y * self.sigma_E['he_1'][popid][band] \\\n * (self.E[popid][band] - E_th[1])\n \n integrand = integrand\n integrand *= kw['fluxes'][popid][band] / E_th[species] / norm \\\n / ev_per_hz\n \n if type(integrand) == types.FunctionType:\n ion, err = dblquad(integrand, z, kw['zf'], lambda a: self.E0, \n lambda b: 
kw['Emax'], epsrel=self.rtol, epsabs=self.atol)\n else:\n if self.sampled_integrator == 'romb':\n ion = romb(integrand * self.E[popid][band], \n dx=self.dlogE[popid][band])[0] * log10\n else:\n ion = simps(integrand * self.E[popid][band], \n x=self.logE[popid][band]) * log10 \n \n # Re-normalize\n ion *= 4. * np.pi * norm * fion_const\n \n # Currently a rate coefficient, returned value depends on return_rc\n if kw['return_rc']:\n pass\n else:\n ion *= self.coefficient_to_rate(z, species, **kw) \n \n return ion\n \n def DiffuseLymanAlphaFlux(self, z, **kwargs):\n \"\"\"\n Flux of Lyman-alpha photons induced by photo-electron collisions.\n \n \"\"\"\n \n raise NotImplemented('hey fix me')\n \n if not self.pf['secondary_lya']:\n return 0.0\n \n #return 1e-25\n \n # Grab defaults, do some patches if need be \n kw = self._fix_kwargs(**kwargs)\n \n # Compute fraction of photo-electron energy deposited as Lya excitation\n if self.esec.method > 1 and (kw['fluxes'][popid] is not None):\n if kw['igm_e'] == 0:\n flya = self.flya[:,0]\n else:\n i_x = np.argmin(np.abs(kw['igm_e'] - self.esec.x))\n if self.esec.x[i_x] > kw['igm_e']:\n i_x -= 1\n \n j = i_x + 1 \n \n flya = self.flya[:,i_x] \\\n + (self.flya[:,j] - self.flya[:,i_x]) \\\n * (kw['igm_e'] - self.esec.x[i_x]) \\\n / (self.esec.x[j] - self.esec.x[i_x]) \n else:\n return 0.0\n \n # Re-normalize to help integrator\n norm = J21_num * self.sigma0\n \n # Compute integrand\n integrand = self.sigma_E[species_str] * (self.E - E_th[species])\n \n integrand *= kw['fluxes'] * flya / norm / ev_per_hz\n \n if kw['Emax'] is not None:\n imax = np.argmin(np.abs(self.E - kw['Emax']))\n if imax == 0:\n return 0.0\n \n if self.sampled_integrator == 'romb':\n raise ValueError(\"Romberg's method cannot be used for integrating subintervals.\")\n heat = romb(integrand[0:imax] * self.E[0:imax], dx=self.dlogE[0:imax])[0] * log10\n else:\n heat = simps(integrand[0:imax] * self.E[0:imax], x=self.logE[0:imax]) * log10\n \n else:\n imin = np.argmin(np.abs(self.E - self.pop.pf['source_Emin']))\n \n if self.sampled_integrator == 'romb':\n heat = romb(integrand[imin:] * self.E[imin:], \n dx=self.dlogE[imin:])[0] * log10\n elif self.sampled_integrator == 'trapz':\n heat = np.trapz(integrand[imin:] * self.E[imin:], \n x=self.logE[imin:]) * log10\n else:\n heat = simps(integrand[imin:] * self.E[imin:], \n x=self.logE[imin:]) * log10\n \n # Re-normalize, get rid of per steradian units\n heat *= 4. * np.pi * norm * erg_per_ev\n\n # Currently a rate coefficient, returned value depends on return_rc \n if kw['return_rc']:\n pass\n else:\n heat *= self.coefficient_to_rate(z, species, **kw)\n\n return heat\n \n",
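The sampled-integrator branches in `HeatingRate`, `IonizationRateIGM`, and `SecondaryIonizationRateIGM` above evaluate integrals over photon energy on a log10(E) grid, using the substitution dE = E ln(10) dlog10(E): the integrand is multiplied by E, integrated against log10(E), and scaled by the module-level `log10` constant (which is ln 10). A minimal sketch of that substitution with a toy power-law integrand standing in for sigma(E) times the flux; `simps` is the routine imported in the module above (newer SciPy versions rename it `simpson`).

import numpy as np
try:
    from scipy.integrate import simpson as simps   # SciPy >= 1.6
except ImportError:
    from scipy.integrate import simps              # older SciPy, as imported above

ln10 = np.log(10.)
E = np.logspace(2, 4, 257)          # photon energies [eV], log-spaced
f = E**-1.5                         # toy integrand, e.g. sigma(E) * flux(E)

# Direct integral over E ...
direct = simps(f, x=E)

# ... equals the log-space form used in the rate integrals:
log_space = simps(f * E, x=np.log10(E)) * ln10

print(direct, log_space)            # agree to integrator accuracy (~0.18 for this toy case)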
"\"\"\"\n\nMetaGalacticBackground.py\n\nAuthor: Jordan Mirocha\nAffiliation: University of Colorado at Boulder\nCreated on: Mon Feb 16 12:43:06 MST 2015\n\nDescription: \n\n\"\"\"\n\nimport numpy as np\nfrom ..util import ParameterFile\nfrom scipy.interpolate import interp1d\nfrom ..solvers import UniformBackground\nfrom ..util.ReadData import _sort_history, flatten_energies, flatten_flux\n\nclass MetaGalacticBackground(UniformBackground):\n def __init__(self, grid=None, **kwargs):\n \"\"\"\n Initialize a MetaGalacticBackground object. \n \"\"\"\n\n self._is_thru_run = False\n \n UniformBackground.__init__(self, grid=grid, **kwargs)\n \n def run(self):\n \"\"\"\n Evolve radiation background in time.\n\n .. note:: Assumes we're using the generator, otherwise the time \n evolution must be controlled manually.\n\n Returns\n -------\n Nothing: sets `history` attribute containing the entire evolution\n of the background for each population.\n\n \"\"\"\n\n self._is_thru_run = True\n\n all_z = [] # sometimes not deterministic\n all_fluxes = []\n for (z, fluxes) in self.step():\n all_z.append(z)\n all_fluxes.append(fluxes)\n\n # At this stage, redshift is in descending order\n self.all_z = all_z\n self.all_fluxes = all_fluxes\n\n self._history = _sort_history(all_fluxes)\n\n def _init_stepping(self):\n \"\"\"\n Initialize lists which bracket radiation background fluxes.\n \n The structure of these lists is as follows:\n (1) Each list contains one element per source population.\n (2) If that population will approximate the RTE, this entry will be \n None.\n (3) The redshift lists, _zlo and _zhi, will just be a sequences of \n floats. \n (4) The flux entires, if not None, will be lists, since in general an\n emission band can be broken up into several pieces. 
In this case,\n the number of entries (for each source population) will be equal\n to the number of bands, which you can find in self.bands_by_pop.\n \n Sets\n ----\n Several attributes:\n (1) _zhi, _zlo\n (2) _fhi, _flo\n \n \"\"\"\n \n # For \"smart\" time-stepping\n self._zhi = []; self._zlo = []\n self._fhi = []; self._flo = []\n \n # Looping over populations.\n z_by_pop = []\n for i, generator in enumerate(self.generators):\n\n # Recall that each generator may actually be a list of generators,\n # one for each (sub-)band.\n \n if (generator == [None]) or (generator is None):\n self._zhi.append(None)\n self._zlo.append(None)\n self._fhi.append(None)\n self._flo.append(None)\n continue\n\n # Only make it here when real RT is happenin'\n\n # Setup arrays (or lists) for flux solutions\n _fhi = []\n _flo = []\n for j, gen in enumerate(generator):\n if gen.__name__ == '_flux_generator_generic':\n _fhi.append(np.zeros_like(self.energies[i][j]))\n _flo.append(np.zeros_like(self.energies[i][j]))\n continue\n\n # Otherwise, there are sub-bands (i.e., sawtooth)\n _fhi.append(np.zeros_like(np.concatenate(self.energies[i][j])))\n _flo.append(np.zeros_like(np.concatenate(self.energies[i][j])))\n\n # Loop over sub-bands and retrieve fluxes\n for j, gen in enumerate(generator):\n\n # Tap generator, grab fluxes\n zhi, flux = gen.next()\n\n # Increment the flux\n _fhi[j] += flux.copy()\n \n # Tap generator, grab fluxes (again)\n zlo, flux = gen.next()\n \n # Increment the flux (again)\n _flo[j] += flux.copy()\n\n # Save fluxes for this population\n self._zhi.append([zhi for k in range(len(generator))])\n self._zlo.append([zlo for k in range(len(generator))])\n \n self._fhi.append(_fhi)\n self._flo.append(_flo)\n \n z_by_pop.append(zlo)\n \n # Set the redshift based on whichever population took the smallest\n # step. Other populations will interpolate to find flux.\n self.update_redshift(max(z_by_pop))\n \n def step(self):\n \"\"\"\n Initialize generator for the meta-galactic radiation background.\n \n ..note:: This can run asynchronously with a MultiPhaseMedium object.\n\n Returns\n -------\n Generator for the background radiation field. Yields the flux for \n each population.\n\n \"\"\"\n\n t = 0.0\n z = self.pf['initial_redshift']\n zf = self.pf['final_redshift']\n \n # Start the generator\n while z > zf: \n z, fluxes = self.update_fluxes() \n \n yield z, fluxes\n\n def update_redshift(self, z):\n self.z = z\n\n @property\n def history(self):\n if hasattr(self, '_history'):\n pass\n elif hasattr(self, 'all_fluxes'):\n self._history = _sort_history(self.all_fluxes)\n else:\n raise NotImplemented('help!')\n \n return self._history\n \n def update_fluxes(self):\n \"\"\"\n Loop over flux generators and retrieve the next values.\n \n ..note:: Populations need not have identical redshift sampling.\n \n Returns\n -------\n Current redshift and dictionary of fluxes. 
Each element of the flux\n dictionary corresponds to a single population, and within that, there\n are separate lists for each sub-band over which we solve the RTE.\n \n \"\"\"\n \n if (not self._is_thru_run) and (not self.approx_all_pops) and \\\n not hasattr(self, '_fhi'):\n \n self._init_stepping()\n \n # Save fluxes by pop as simulations run\n self.all_z = []\n self.all_fluxes = []\n \n z_by_pop = [None for i in range(self.Npops)]\n \n fluxes = {}\n for i, pop_generator in enumerate(self.generators):\n \n # Skip approximate (or non-contributing) backgrounds\n if pop_generator is None:\n fluxes[i] = None\n continue\n \n fluxes_by_band = []\n\n # For each population, the band is broken up into pieces\n for j, generator in enumerate(pop_generator):\n \n # Those pieces might have a sawtooth component!\n \n # If not being run as part of another simulation, there are \n # no external time-stepping constraints, so just poke the \n # generator and move on\n if self._is_thru_run:\n z, f = generator.next()\n z_by_pop[i] = z\n fluxes_by_band.append(f)\n continue\n \n # Otherwise, we potentially need to sub-cycle the background.\n # This may happen if (1) the time-step is being regulated\n # from the simulation in which this background is embedded \n # (i.e., epsilon_dt requires smaller timestep than redshift\n # step allowed by this population) or (2) if other populations\n # have a different requirement for the redshift sampling, \n # such that this population must interpolate between its\n # (larger) redshift steps while other populations churn away.\n\n # For redshifts before this background turns on...\n # (this should only happen once)\n if self.z > self._zhi[i][j]:\n if generator.__name__ == '_flux_generator_generic':\n z, f = self.z, np.zeros_like(self.energies[i][j])\n else:\n z = self.z \n f = np.zeros_like(flatten_energies(self.energies[i][j]))\n\n fluxes_by_band.append(f)\n continue\n\n # If we've surpassed the lower redshift bound, poke the \n # generator\n elif self.z <= self._zlo[i][j]:\n\n self._zhi[i][j] = self._zlo[i][j]\n self._fhi[i][j] = self._flo[i][j]\n z, f = generator.next()\n \n # Sometimes the generator's redshift sampling will be finer\n # than needed by e.g., a MultiPhaseMedium, so we cycle\n # multiple times before exiting.\n while z > self.z:\n self._zhi[i][j] = self._zlo[i][j]\n self._fhi[i][j] = self._flo[i][j]\n \n z, f = generator.next()\n \n self._zlo[i][j] = z\n self._flo[i][j] = f\n else:\n z = self.z\n\n # If zlo < z <= self.zhi, we'll interpolate\n\n # If we're between redshift steps, interpolate to find the \n # background flux\n if self.z == self._zhi[i][j]:\n f = self._fhi[i][j]\n elif self.z > self._zlo[i][j]:\n \n z = self.z\n\n interp = interp1d([self._zlo[i][j], self._zhi[i][j]], \n [self._flo[i][j], self._fhi[i][j]], \n axis=0, assume_sorted=True, kind='linear') \n \n f = interp(z)\n\n elif self.z == self._zlo[i][j]:\n f = self._flo[i][j]\n\n fluxes_by_band.append(f)\n \n if not self._is_thru_run: \n z_by_pop[i] = max(self._zlo[i]) \n \n fluxes[i] = fluxes_by_band\n\n # Set the redshift based on whichever population took the smallest\n # step. 
Other populations will interpolate to find flux.\n znext = max(z_by_pop)\n \n if (not self._is_thru_run):\n self.all_z.append(z_by_pop)\n self.all_fluxes.append(fluxes)\n \n # If being externally controlled, we can't tamper with the redshift!\n if self._is_thru_run:\n self.update_redshift(znext)\n\n return znext, fluxes\n\n def update_rate_coefficients(self, z, **kwargs):\n \"\"\"\n Compute ionization and heating rate coefficients.\n\n Parameters\n ----------\n z : float\n Current redshift.\n\n Returns\n -------\n Dictionary of rate coefficients.\n\n \"\"\"\n \n # Must compute rate coefficients from fluxes \n if self.approx_all_pops:\n kwargs['fluxes'] = [None] * self.Npops\n else: \n z, fluxes = self.update_fluxes()\n kwargs['fluxes'] = fluxes\n \n # Run update_rate_coefficients within MultiPhaseMedium\n return super(MetaGalacticBackground, self).update_rate_coefficients(z, \n **kwargs)\n \n def get_integrated_flux(self, band, popid=0):\n \"\"\"\n Return integrated flux in supplied (Emin, Emax) band at all redshifts.\n \"\"\"\n \n zarr, Earr, flux = self.get_history(popid, True, True)\n \n i1 = np.argmin(np.abs(Earr - band[0]))\n i2 = np.argmin(np.abs(Earr - band[1]))\n \n return zarr, np.trapz(flux[:,i1:i2], x=Earr[i1:i2], axis=1)\n\n def get_history(self, popid=0, flatten=False, uniquify=True):\n \"\"\"\n Grab data associated with a single population.\n\n Parameters\n ----------\n popid : int\n ID number for population of interest.\n flatten : bool\n For sawtooth calculations, the energies are broken apart into \n different bands which have different sizes. Set this to true if\n you just want a single array, rather than having the energies\n and fluxes broken apart by their band.\n\n Returns\n -------\n Tuple containing the redshifts, energies, and fluxes for the given\n population, in that order.\n \n if flatten == True:\n The energy array is 1-D.\n The flux array will have shape (z, E)\n else:\n The energies are stored as a list. The number of elements will\n be determined by how many sub-bands there are. Each element will\n be a list or an array, depending on whether or not there is a \n sawtooth component to that particular background.\n \n \"\"\"\n \n hist = self.history\n \n # First, get redshifts. If not run \"thru run\", then they will\n # be in descending order so flip 'em.\n if self._is_thru_run:\n z = self.redshifts[popid]\n else:\n # This may change on the fly due to sub-cycling and such\n z = np.array(self.all_z).T[popid][-1::-1]\n\n if flatten:\n E = flatten_energies(self.energies[popid])\n\n f = np.zeros([len(z), E.size])\n for i, flux in enumerate(hist[popid]):\n fzflat = []\n for j in range(len(self.energies[popid])):\n fzflat.extend(flux[j])\n\n f[i] = np.array(fzflat)\n \n # \"tr\" = \"to return\"\n z_tr = z\n E_tr = E\n f_tr = np.array(f)[-1::-1,:]\n else:\n z_tr = z\n E_tr = self.energies[popid]\n f_tr = hist[popid][-1::-1,:]\n \n # We've flipped the fluxes too since they are inherently in \n # order of descending redshift. \n \n if uniquify:\n z_uni, indi = np.unique(z_tr, return_index=True)\n return z_uni, E_tr, f_tr[indi,:]\n else: \n return z_tr, E_tr, f_tr\n\n ",
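When `update_fluxes` is driven by an external simulation whose current redshift falls between two generator outputs, it brackets the flux with the `_zlo`/`_zhi` and `_flo`/`_fhi` pairs and interpolates linearly along the redshift axis. A minimal sketch of that bracketing step; the bracket redshifts and flux arrays are made-up stand-ins for real per-band generator output.

import numpy as np
from scipy.interpolate import interp1d

# Two consecutive generator outputs bracketing the current redshift
# (toy numbers; real fluxes come from the per-band flux generators).
zlo, zhi = 9.8, 10.0
flo = np.array([1.0, 2.0, 4.0])      # flux at zlo, one value per photon energy
fhi = np.array([0.5, 1.0, 2.0])      # flux at zhi

z = 9.9                               # redshift requested by the parent simulation

if zlo < z <= zhi:
    # Same call pattern as MetaGalacticBackground.update_fluxes:
    # interpolate along axis=0 (redshift); energies vary along axis=1.
    interp = interp1d([zlo, zhi], [flo, fhi], axis=0,
                      assume_sorted=True, kind='linear')
    f = interp(z)
else:
    # z <= zlo: the real code pokes the generator again; z > zhi: background not on yet.
    f = flo if z <= zlo else np.zeros_like(flo)

print(f)   # halfway between flo and fhi for z = 9.9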
"\"\"\"\n\nMultiPhaseMedium.py\n\nAuthor: Jordan Mirocha\nAffiliation: University of Colorado at Boulder\nCreated on: Mon Feb 16 12:46:28 MST 2015\n\nDescription: \n\n\"\"\"\n\nimport numpy as np\nfrom .GasParcel import GasParcel\nfrom ..util import ParameterFile, ProgressBar\nfrom ..util.ReadData import _sort_history, _load_inits\nfrom .MetaGalacticBackground import MetaGalacticBackground\nfrom ..util.SetDefaultParameterValues import MultiPhaseParameters\n\n_mpm_defs = MultiPhaseParameters()\n\nclass MultiPhaseMedium(object):\n def __init__(self, **kwargs):\n \"\"\"\n Initialize a MultiPhaseMedium object.\n \n By default, this is a two-zone model, consisting of a \"bulk IGM\"\n grid patch and an \"HII regions\" grid patch, dubbed \"igm\" and \"cgm\", \n respectively. To perform a single-zone calculation, simply set \n ``include_cgm=False`` or ``include_igm=False``.\n \n \"\"\"\n\n if 'load_ics' not in kwargs:\n kwargs['load_ics'] = True \n self.kwargs = kwargs\n \n @property\n def pf(self):\n if not hasattr(self, '_pf'):\n inits = self.inits\n self._pf = ParameterFile(**self.kwargs)\n return self._pf\n \n @property\n def inits(self):\n if not hasattr(self, '_inits'): \n self._inits = inits = _load_inits()\n zi = self.pf['initial_redshift']\n if not np.all(np.diff(inits['z']) > 0):\n raise ValueError('Redshifts in ICs must be in ascending order!')\n \n Ti = np.interp(zi, inits['z'], inits['Tk'])\n xi = np.interp(zi, inits['z'], inits['xe'])\n \n #if self.pf['include_He']:\n new = {'igm_initial_temperature': Ti, \n 'initial_ionization': [1. - xi, xi, 1.-xi-1e-10, xi, 1e-10]}\n self.kwargs.update(new) \n \n #else:\n # new_pars = {'cosmological_ics': False,\n # 'igm_initial_temperature': Ti,\n # 'igm_initial_ionization': [1. - xi, xi]}\n #\n #self.kwargs.update(new_pars) \n \n return self._inits \n \n @property\n def field(self):\n if not hasattr(self, '_field'):\n if self.pf['include_igm']:\n self._field = MetaGalacticBackground(grid=self.parcel_igm.grid, \n **self.kwargs)\n else:\n self._field = MetaGalacticBackground(grid=self.parcel_cgm.grid, \n **self.kwargs)\n \n return self._field\n \n @property\n def pops(self):\n return self.field.pops\n \n @property\n def grid(self):\n return self.field.grid \n \n @property\n def parcels(self):\n if not hasattr(self, '_parcels'):\n self._initialize_zones()\n return self._parcels \n\n @property\n def parcel_igm(self):\n if not hasattr(self, '_parcel_igm'):\n self._parcel_igm = self.parcels[0]\n return self._parcel_igm\n \n @property\n def parcel_cgm(self):\n if not hasattr(self, '_parcel_cgm'):\n if self.pf['include_igm']:\n self._parcel_cgm = self.parcels[1]\n else:\n self._parcel_cgm = self.parcels[0]\n \n return self._parcel_cgm\n \n def rates_no_RT(self, grid):\n _rates_no_RT = \\\n {'k_ion': np.zeros((grid.dims, grid.N_absorbers)),\n 'k_heat': np.zeros((grid.dims, grid.N_absorbers)),\n 'k_ion2': np.zeros((grid.dims, grid.N_absorbers, grid.N_absorbers)),\n }\n \n return _rates_no_RT \n \n @property\n def tf(self):\n if not hasattr(self, '_tf'):\n z = self.pf['initial_redshift']\n zf = self.pf['final_redshift']\n self._tf = self.default_parcel.grid.cosm.LookbackTime(zf, z)\n self.pf['stop_time'] = self._tf / self.pf['time_units']\n return self._tf\n\n def _initialize_zones(self):\n \"\"\"\n Initialize (up to two) GasParcels.\n \"\"\"\n \n # Reset stop time based on final redshift.\n z = self.pf['initial_redshift']\n zf = self.pf['final_redshift']\n \n self._parcels = []\n for zone in ['igm', 'cgm']:\n if not self.pf['include_%s' % zone]:\n 
continue\n \n kw = self.pf.copy()\n \n # Loop over defaults, pull out the ones for this zone \n for key in _mpm_defs:\n if key[0:4] != '%s_' % zone:\n continue\n\n # Have to rename variables so Grid class will know them\n grid_key = key.replace('%s_' % zone, '')\n\n if key in self.kwargs:\n kw[grid_key] = self.kwargs[key]\n else:\n kw[grid_key] = _mpm_defs[key]\n \n if zone == 'igm':\n self.kw_igm = kw.copy()\n parcel_igm = GasParcel(**self.kw_igm)\n \n self.gen_igm = parcel_igm.step()\n\n # Set initial values for rate coefficients\n parcel_igm.update_rate_coefficients(parcel_igm.grid.data, \n **self.rates_no_RT(parcel_igm.grid))\n \n self._parcels.append(parcel_igm)\n \n else:\n self.kw_cgm = kw.copy()\n parcel_cgm = GasParcel(**self.kw_cgm)\n parcel_cgm.grid.set_recombination_rate(True)\n parcel_cgm._set_chemistry()\n self.gen_cgm = parcel_cgm.step()\n \n parcel_cgm.chem.chemnet.monotonic_EoR = \\\n self.pf['monotonic_EoR']\n \n parcel_cgm.update_rate_coefficients(parcel_cgm.grid.data, \n **self.rates_no_RT(parcel_cgm.grid))\n \n self._parcels.append(parcel_cgm)\n \n self._parcels[-1].pf['stop_time'] = self.tf / self.pf['time_units']\n \n @property\n def zones(self):\n if not hasattr(self, '_zones'):\n self._zones = int(self.pf['include_igm']) \\\n + int(self.pf['include_cgm'])\n \n return self._zones\n\n @property\n def default_parcel(self):\n if not hasattr(self, '_default_parcel'):\n self._default_parcel = self.parcel_igm if self.pf['include_igm'] \\\n else self.parcel_cgm\n \n return self._default_parcel\n\n @property\n def dynamic_tau(self):\n return self.pf['tau_dynamic']\n\n def update_optical_depth(self):\n \"\"\"\n Dynamically update optical depth as simulation runs.\n \"\"\"\n \n # Recall that self.field.tau is a list with as many elements as there\n # are distinct populations\n \n \n tau = []\n for i in range(self.field.Npops):\n pass\n \n \n self.field.tau = tau\n \n\n def subcycle(self):\n \"\"\"\n See if we need to re-do the previous timestep.\n \n This mean:\n (1) Re-compute the IGM optical depth.\n (2)\n \"\"\"\n\n return False\n\n # Check IGM ionization state between last two steps. 
\n # Converged to desired tolerance?\n \n #self.\n \n \n def _stop_criteria_met(self):\n pass \n \n def run(self):\n \"\"\"\n Run simulation from start to finish.\n\n Returns\n -------\n Nothing: sets `history` attribute.\n\n \"\"\"\n \n self._insert_inits()\n\n pb = ProgressBar(self.tf, use=self.pf['progress_bar'])\n pb.start()\n \n # Evolve in time\n for t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.step():\n \n pb.update(t)\n \n # Save data\n self.all_z.append(z)\n self.all_t.append(t)\n \n if self.pf['include_cgm']: \n self.all_data_cgm.append(data_cgm.copy())\n \n if self.pf['include_igm']:\n self.all_data_igm.append(data_igm.copy()) \n \n if self.pf['save_rate_coefficients']:\n if self.pf['include_cgm']: \n self.all_RCs_cgm.append(RC_cgm.copy())\n if self.pf['include_igm']:\n self.all_RCs_igm.append(RC_igm.copy())\n\n pb.finish() \n\n # Sort everything by time\n if self.pf['include_igm']:\n self.history_igm = \\\n _sort_history(self.all_data_igm, prefix='igm_', squeeze=True)\n self.history = self.history_igm.copy()\n else:\n self.history = {}\n \n if self.pf['include_cgm']: \n self.history_cgm = \\\n _sort_history(self.all_data_cgm, prefix='cgm_', squeeze=True) \n self.history.update(self.history_cgm)\n\n # Save rate coefficients [optional]\n if self.pf['save_rate_coefficients']:\n if self.pf['include_igm']:\n self.rates_igm = \\\n _sort_history(self.all_RCs_igm, prefix='igm_', squeeze=True)\n self.history.update(self.rates_igm)\n \n if self.pf['include_cgm']: \n self.rates_cgm = \\\n _sort_history(self.all_RCs_cgm, prefix='cgm_', squeeze=True)\n self.history.update(self.rates_cgm)\n\n self.history['t'] = np.array(self.all_t)\n self.history['z'] = np.array(self.all_z)\n \n def step(self):\n \"\"\"\n Generator for a two-phase intergalactic medium.\n \n Returns\n -------\n Tuple containing the current time, redshift, and dictionaries for the\n IGM and CGM data at a single snapshot.\n \n \"\"\"\n\n t = 0.0\n z = self.pf['initial_redshift']\n dt = self.pf['time_units'] * self.pf['initial_timestep']\n zf = self.pf['final_redshift']\n \n # Read initial conditions\n if self.pf['include_igm']: \n data_igm = self.parcel_igm.grid.data.copy()\n \n if self.pf['include_cgm']:\n data_cgm = self.parcel_cgm.grid.data.copy()\n\n # Evolve in time!\n while z > zf:\n\n # Increment time / redshift\n dtdz = self.default_parcel.grid.cosm.dtdz(z)\n t += dt\n z -= dt / dtdz\n \n # The (potential) generators need this\n self.field.update_redshift(z)\n \n # IGM rate coefficients\n if self.pf['include_igm']:\n done = False\n if self.pf['stop_igm_h_2'] is not None:\n if data_igm['h_2'] > self.pf['stop_igm_h_2']:\n data_igm = data_igm_pre.copy() \n dt1 = 1e50\n done = True\n \n if not done:\n RC_igm = self.field.update_rate_coefficients(z, \n zone='igm', return_rc=True, igm_h_1=data_igm['h_1'])\n \n # Now, update IGM parcel\n t1, dt1, data_igm = self.gen_igm.next()\n \n # Pass rate coefficients off to the IGM parcel\n self.parcel_igm.update_rate_coefficients(data_igm, **RC_igm)\n else:\n dt1 = 1e50\n RC_igm = data_igm = None\n data_igm = {'h_1': 1.0}\n \n if self.pf['include_cgm']:\n \n done = False\n if self.pf['stop_cgm_h_2'] is not None:\n if data_cgm['h_2'] > self.pf['stop_cgm_h_2']:\n data_cgm = data_cgm_pre.copy()\n dt2 = 1e50\n done = True\n \n if not done:\n \n # CGM rate coefficients\n RC_cgm = self.field.update_rate_coefficients(z,\n zone='cgm', return_rc=True, cgm_h_1=data_cgm['h_1'])\n \n # Pass rate coefficients off to the CGM parcel\n self.parcel_cgm.update_rate_coefficients(data_cgm, **RC_cgm)\n 
\n # Now, update CGM parcel\n t2, dt2, data_cgm = self.gen_cgm.next()\n else:\n dt2 = 1e50\n RC_cgm = data_cgm = None\n\n # Must update timesteps in unison\n dt_pre = dt * 1.\n dt = min(dt1, dt2)\n dt = min(dt, self.pf['max_timestep'] * self.pf['time_units'])\n\n # Might need these...\n if self.pf['include_igm']:\n data_igm_pre = data_igm.copy()\n if self.pf['include_cgm']: \n data_cgm_pre = data_cgm.copy()\n\n # If we're computing the IGM optical depth dynamically, we may\n # need to \"re-do\" this step to ensure convergence.\n\n redo = self.subcycle()\n \n if not redo: \n \n # Changing attribute! A little scary, but we must make sure\n # these parcels are evolved in unison\n if self.pf['include_igm']:\n self.parcel_igm.dt = dt\n if self.pf['include_cgm']:\n self.parcel_cgm.dt = dt\n\n yield t, z, data_igm, data_cgm, RC_igm, RC_cgm\n \n continue\n\n # If we've made it here, we need to trick our generators a bit\n \n # \"undo\" this time-step\n t -= dt_pre\n z += dt_pre / dtdz\n\n self.update_optical_depth()\n\n def _insert_inits(self):\n \"\"\"\n Prepend provided initial conditions to the data storage lists.\n \"\"\"\n \n if not self.pf['load_ics']:\n self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm = \\\n [], [], [], []\n if self.pf['save_rate_coefficients']: \n self.all_RCs_igm, self.all_RCs_cgm = [], []\n \n if not self.pf['include_cgm']:\n del self.all_RCs_cgm, self.all_data_cgm \n return\n \n # Flip to descending order (in redshift)\n z_inits = self.inits['z'][-1::-1]\n Tk_inits = self.inits['Tk'][-1::-1]\n xe_inits = self.inits['xe'][-1::-1]\n inits_all = {'z': z_inits, 'Tk': Tk_inits, 'xe': xe_inits}\n\n # Stop pre-pending once we hit the first light redshift\n i_trunc = np.argmin(np.abs(z_inits - self.pf['initial_redshift'])) \n if z_inits[i_trunc] <= self.pf['initial_redshift']:\n i_trunc += 1\n\n self.all_t = []\n self.all_data_igm = []\n self.all_z = list(z_inits[0:i_trunc])\n self.all_RCs_igm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z)\n self.all_RCs_cgm = [self.rates_no_RT(self.parcel_igm.grid)] * len(self.all_z)\n\n # Don't mess with the CGM (much)\n if self.pf['include_cgm']:\n tmp = self.parcel_cgm.grid.data\n self.all_data_cgm = [tmp.copy() for i in range(len(self.all_z))]\n for i, cgm_data in enumerate(self.all_data_cgm):\n self.all_data_cgm[i]['rho'] = \\\n self.parcel_cgm.grid.cosm.MeanBaryonDensity(self.all_z[i])\n \n self.all_data_cgm[i]['n'] = \\\n self.parcel_cgm.grid.particle_density(cgm_data, self.all_z[i])\n \n if not self.pf['include_igm']:\n return\n \n # Loop over redshift and derive things for the IGM\n for i, red in enumerate(self.all_z): \n\n snapshot = {}\n for key in self.parcel_igm.grid.data.keys():\n if key in self.inits.keys():\n snapshot[key] = inits_all[key][i]\n continue\n \n # Electron fraction\n snapshot['e'] = inits_all['xe'][i]\n \n # Hydrogen neutral fraction\n xe = inits_all['xe'][i]\n \n if 2 not in self.parcel_igm.grid.Z:\n xe = min(xe, 1.0)\n \n xi = xe / (1. + self.parcel_igm.grid.cosm.y)\n\n snapshot['h_1'] = 1. - xi\n snapshot['h_2'] = xi\n \n # Add helium, assuming xHeII = xHII, and xHeIII << 1\n if self.parcel_igm.pf['include_He']:\n snapshot['he_1'] = 1. - xi\n snapshot['he_2'] = xi\n snapshot['he_3'] = 1e-10\n \n snapshot['rho'] = self.parcel_igm.grid.cosm.MeanBaryonDensity(red)\n snapshot['n'] = \\\n self.parcel_igm.grid.particle_density(snapshot.copy(), red)\n\n self.all_t.append(0.0)\n self.all_data_igm.append(snapshot.copy())\n\n\n "
] |
[
[
"numpy.linspace",
"numpy.arange",
"numpy.cumsum",
"numpy.ones",
"numpy.log10",
"numpy.diff",
"numpy.array",
"numpy.zeros"
],
[
"numpy.log",
"numpy.allclose",
"numpy.abs",
"scipy.integrate.dblquad",
"numpy.log10",
"numpy.diff",
"numpy.any",
"scipy.integrate.romb",
"scipy.integrate.simps",
"numpy.array",
"numpy.zeros",
"numpy.trapz"
],
[
"numpy.abs",
"numpy.unique",
"numpy.concatenate",
"scipy.interpolate.interp1d",
"numpy.zeros_like",
"numpy.array",
"numpy.trapz"
],
[
"numpy.abs",
"numpy.diff",
"numpy.interp",
"numpy.array",
"numpy.zeros"
]
] |
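The `MultiPhaseMedium.inits` property in the row above interpolates tabulated initial conditions to the starting redshift with `np.interp`, which is why it insists on an ascending redshift grid. A minimal sketch of that pattern; the redshifts, temperatures, and electron fractions below are made-up stand-ins for the real initial-condition table:

```python
# Sketch of the initial-condition lookup in MultiPhaseMedium.inits (values are illustrative).
import numpy as np

z_grid = np.array([10.0, 20.0, 50.0, 100.0, 300.0])   # must be ascending for np.interp
Tk_grid = np.array([2.0, 9.0, 55.0, 180.0, 900.0])    # assumed gas temperatures [K]
xe_grid = np.array([2e-4, 2.2e-4, 3e-4, 1e-3, 0.1])   # assumed electron fractions

assert np.all(np.diff(z_grid) > 0), "Redshifts in ICs must be in ascending order!"

zi = 60.0                               # assumed initial redshift of the run
Ti = np.interp(zi, z_grid, Tk_grid)     # temperature at zi
xi = np.interp(zi, z_grid, xe_grid)     # electron fraction at zi
print(Ti, xi)
```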
yfukai/exputils
|
[
"aab7bb69d12887f069e6768144dc767ea82e6306"
] |
[
"lib/exputils/plotutils/__init__.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib import ticker\nfrom . import cm\n\n#https://stackoverflow.com/questions/31940285/plot-a-polar-color-wheel-based-on-a-colormap-using-python-matplotlib\ndef color_wheel(cmap,fig=plt.figure(),figsize=(4,4)):\n #Generate a figure with a polar projection\n fg = plt.figure(figsize=figsize)\n ax = fg.add_axes([0.1,0.1,0.8,0.8], projection='polar')\n\n #define colormap normalization for 0 to 2*pi\n norm = mpl.colors.Normalize(0, 2*np.pi) \n\n #Plot a color mesh on the polar plot\n #with the color set by the angle\n\n n = 200 #the number of secants for the mesh\n t = np.linspace(0,2*np.pi,n) #theta values\n r = np.linspace(0,1,2) #raidus values change 0.6 to 0 for full circle\n rg, tg = np.meshgrid(r,t) #create a r,theta meshgrid\n c = tg #define color values as theta value\n im = ax.pcolormesh(t, r, c.T,norm=norm,cmap=cmap) #plot the colormesh on axis with colormap\n ax.set_yticklabels([]) #turn of radial tick labels (yticks)\n ax.tick_params(pad=15,labelsize=24) #cosmetic changes to tick labels\n ax.spines['polar'].set_visible(False) #turn off the axis spine.\n \ndef legend_reverse(ax=None,**kwargs):\n if ax is None: ax=plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles[::-1], labels[::-1],**kwargs)\ndef errorbar_arg_to_plot_arg(args):\n args_plot=args.copy()\n fmt=args_plot.pop(\"fmt\",\".\")\n args_plot.pop(\"capsize\",None)\n args_plot.pop(\"ecolor\",None)\n args_plot.pop(\"capthick\",None)\n return fmt, args_plot\ndef errorbar_limited(err_indices,x,y,yerr=None,xerr=None,ax=None,last_params={},**args):\n indices=np.argsort(x)\n x=x[indices]\n y=y[indices]\n if ax is None: ax=plt.gca()\n wo_err_indices=np.setdiff1d(np.arange(len(x)),err_indices)\n fmt,args_plot=errorbar_arg_to_plot_arg(args)\n args_plot.pop(\"label\",None)\n ax.plot(x[wo_err_indices],y[wo_err_indices],fmt,**args_plot)\n yerr2=None if yerr is None else yerr[err_indices]\n xerr2=None if xerr is None else xerr[err_indices]\n args.update({\"zorder\":args_plot.get(\"zorder\",3)+2})\n args.update(last_params)\n ax.errorbar(x[err_indices],y[err_indices],yerr2,xerr2,**args)\n\ndef get_all_data(ax=None):\n if not ax:\n ax=plt.gca()\n if len(ax.lines)>0:\n xss,yss=zip(*[l.get_data() for l in ax.lines])\n return xss,yss\n else:\n return None\n\ndef get_data_lim(ax=None,xlims=(-np.inf,np.inf),ylims=(-np.inf,np.inf)):\n if ax is None: ax=plt.gca()\n data=[np.concatenate(datum) for datum in get_all_data(ax)] #all xs, ys\n data=[datum[np.logical_and(vmin<datum,datum<vmax)] \n for datum,vmin,vmax in zip(data,*zip(xlims,ylims))]\n return [(np.min(datum),np.max(datum)) for datum in data]\n\ndef calc_lim(vmin,vmax,margin,islog=False):\n if islog:\n vr=vmax/vmin\n if vr>0:\n vm=np.exp(np.log(vr)*margin)\n vmin=vmin/vm ; vmax=vmax*vm\n else:\n vr=vmax-vmin\n vm=vr*margin\n vmin=vmin-vm ; vmax=vmax+vm\n return vmin,vmax\n\ndef fit_data_lim(ax=None,which=\"both\",\n margin=0,xlog=True,ylog=True,\n xlims=[-np.inf,np.inf],ylims=[-np.inf,np.inf]):\n if ax is None: ax=plt.gca()\n if xlog and xlims[0]<0: xlims[0]=0\n if ylog and ylims[0]<0: ylims[0]=0\n limss=get_data_lim(ax,xlims,ylims)\n xlim,ylim=[calc_lim(*lims,margin,islog) \n for lims,islog in zip(limss,(xlog,ylog))]\n if which==\"both\" or which==\"x\":\n ax.set_xlim(xlim)\n if which==\"both\" or which==\"y\":\n ax.set_ylim(ylim)\n\ndef set_log_minor(ax=None,which=\"both\",subs=(2,5)):\n if ax is None: ax=plt.gca()\n if which in (\"both\",\"x\"):\n 
ax.xaxis.set_minor_locator(ticker.LogLocator(subs=subs))\n ax.xaxis.set_minor_formatter(ticker.LogFormatter(labelOnlyBase=False))\n if which in (\"both\",\"y\"):\n ax.yaxis.set_minor_locator(ticker.LogLocator(subs=subs))\n ax.yaxis.set_minor_formatter(ticker.LogFormatter(labelOnlyBase=False))\n# else:\n# raise ValueError(\"which parameter must be both, x, or y\")\n\ndef plot_guideline(b,e,slope,label=\"\",style=\"-b\",left=False,ha=\"left\",va=\"bottom\",fontsize=10,plotargs={},textargs={},ax=None):\n if ax is None: ax=plt.gca()\n if len(b) == 2 and len(e) == 1:\n bx = b[0]\n by = b[1]\n ex = e[0]\n ey = by+((ex-bx)*slope)\n elif len(b) == 1 and len(e) == 2:\n bx = b[0]\n ex = e[0]\n ey = e[1]\n by = ey-((ex-bx)*slope)\n ax.plot([bx,ex],[by,ey],style,**plotargs)\n x = bx if left else ex\n y = by if left else ey\n ax.text(x,y,label,ha=ha,va=va,fontsize=fontsize,**textargs)\n\ndef plot_guideline_log(b,e,exponent,label=\"\",style=\"-b\",left=False,ha=\"left\",va=\"bottom\",\n fontsize=10,plotargs={},textargs={},ax=None,xoffset=0,yoffset=0):\n if ax is None: ax=plt.gca()\n if len(b) == 2 and len(e) == 1:\n bx = b[0]\n by = b[1]\n ex = e[0]\n ey = by*((ex/bx)**exponent)\n elif len(b) == 1 and len(e) == 2:\n bx = b[0]\n ex = e[0]\n ey = e[1]\n by = ey/((ex/bx)**exponent)\n ax.loglog([bx,ex],[by,ey],style,**plotargs)\n x = (bx if left else ex)+xoffset\n y = (by if left else ey)+yoffset\n ax.text(x,y,label,ha=ha,va=va,fontsize=fontsize,**textargs)\n\ndef plot_horizontal_line(y,label=\"\",linestyle=\"--\",color=\"k\",left=False,ha=\"left\",va=\"center\",fontsize=10,xoffset=0,yoffset=0,plotargs={},textargs={},ax=None):\n if ax is None: ax=plt.gca()\n ax.axhline(y,linestyle=linestyle,color=color,**plotargs)\n xlims=ax.get_xlim()\n x=xlims[0] if left else xlims[1]\n ax.text(x+xoffset,y+yoffset,label,horizontalalignment=ha,va=va,fontsize=fontsize,**textargs)\ndef imshow_color(img1,img2,img3,ax=None,*args,**kargs):\n if ax is None: ax=plt.gca()\n im = np.transpose([img1,img2,img3],(1,2,0))\n kargs.update({\"interpolation\":\"none\"})\n ax.imshow(im,*args,**kargs)\n\ndef set_str_formatters(fmt,ax=None,which=\"both\"):\n if ax is None: ax=plt.gca()\n if which==\"both\" or which==\"x\":\n ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(fmt))\n ax.xaxis.set_minor_formatter(ticker.FormatStrFormatter(fmt))\n if which==\"both\" or which==\"y\":\n ax.yaxis.set_major_formatter(ticker.FormatStrFormatter(fmt))\n ax.yaxis.set_minor_formatter(ticker.FormatStrFormatter(fmt))\n\ndef hide_tick_label(ax=None,which=\"both\"):\n if ax is None: ax=plt.gca()\n if which==\"both\" or which==\"x\":\n plt.setp(ax.get_xmajorticklabels(), visible=False)\n plt.setp(ax.get_xminorticklabels(), visible=False)\n if which==\"both\" or which==\"y\":\n plt.setp(ax.get_ymajorticklabels(), visible=False)\n plt.setp(ax.get_yminorticklabels(), visible=False)\n"
] |
[
[
"matplotlib.pyplot.gca",
"numpy.log",
"matplotlib.ticker.FormatStrFormatter",
"numpy.linspace",
"numpy.min",
"numpy.logical_and",
"matplotlib.colors.Normalize",
"numpy.concatenate",
"numpy.max",
"matplotlib.ticker.LogLocator",
"numpy.transpose",
"matplotlib.ticker.LogFormatter",
"numpy.argsort",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
]
] |
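The `set_log_minor` helper in the row above drives matplotlib's minor log ticks through `ticker.LogLocator` and `ticker.LogFormatter`. A minimal standalone sketch of the same pattern; the data and the `(2, 5)` sub-decade ticks are illustrative:

```python
# Sketch of the minor-tick labelling done by set_log_minor (data is synthetic).
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker

x = np.logspace(0, 3, 50)
fig, ax = plt.subplots()
ax.loglog(x, x**1.5, ".")

# Put labelled minor ticks at 2x10^n and 5x10^n on both axes.
for axis in (ax.xaxis, ax.yaxis):
    axis.set_minor_locator(ticker.LogLocator(subs=(2, 5)))
    axis.set_minor_formatter(ticker.LogFormatter(labelOnlyBase=False))

fig.savefig("log_minor_ticks.png")
```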
gujralsanyam22/pyrobot
|
[
"a0448714857b684d8b280f710e9304988524d2e0"
] |
[
"src/pyrobot/vrep_locobot/camera.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nimport pyrobot.utils.util as prutil\nfrom pyrobot.core import Camera\n\nfrom pyrobot.utils.util import try_cv2_import\n\ncv2 = try_cv2_import()\n\nfrom cv_bridge import CvBridge, CvBridgeError\n\n\nfrom pyrep.objects.vision_sensor import VisionSensor\nfrom pyrep.const import ObjectType, PerspectiveMode, RenderMode\nfrom pyrep.objects.joint import Joint\n\n\nclass LoCoBotCamera(Camera):\n \"\"\"docstring for SimpleCamera\"\"\"\n\n def __init__(self, configs, simulator):\n\n self.sim = simulator.sim\n self.rgb_cam = VisionSensor(\"kinect_rgb\")\n self.depth_cam = VisionSensor(\"kinect_depth\")\n self.rgb_cam.set_render_mode(RenderMode.OPENGL3)\n self.depth_cam.set_render_mode(RenderMode.OPENGL3)\n\n # Pan and tilt related variables.\n self.pan_joint = Joint(\"LoCoBot_head_pan_joint\")\n self.tilt_joint = Joint(\"LoCoBot_head_tilt_joint\")\n\n def get_rgb(self):\n\n return self.rgb_cam.capture_rgb()\n\n def get_depth(self):\n\n return self.depth_cam.capture_depth()\n\n def get_rgb_depth(self):\n\n return self.get_rgb(), self.get_depth()\n\n def get_intrinsics(self):\n\n # Todo: Remove this after we fix intrinsics\n raise NotImplementedError\n \"\"\"\n\t\tReturns the instrinsic matrix of the camera\n\n\t\t:return: the intrinsic matrix (shape: :math:`[3, 3]`)\n\t\t:rtype: np.ndarray\n\t\t\"\"\"\n # fx = self.configs['Camera.fx']\n # fy = self.configs['Camera.fy']\n # cx = self.configs['Camera.cx']\n # cy = self.configs['Camera.cy']\n Itc = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])\n return Itc\n\n def pix_to_3dpt(self, rs, cs, in_cam=False):\n \"\"\"\n\t\tGet the 3D points of the pixels in RGB images.\n\n\t\t:param rs: rows of interest in the RGB image.\n\t\t It can be a list or 1D numpy array\n\t\t which contains the row indices.\n\t\t The default value is None,\n\t\t which means all rows.\n\t\t:param cs: columns of interest in the RGB image.\n\t\t It can be a list or 1D numpy array\n\t\t which contains the column indices.\n\t\t The default value is None,\n\t\t which means all columns.\n\t\t:param in_cam: return points in camera frame,\n\t\t otherwise, return points in base frame\n\n\t\t:type rs: list or np.ndarray\n\t\t:type cs: list or np.ndarray\n\t\t:type in_cam: bool\n\n\t\t:returns: tuple (pts, colors)\n\n\t\t pts: point coordinates in world frame\n\t\t (shape: :math:`[N, 3]`)\n\n\t\t colors: rgb values for pts_in_cam\n\t\t (shape: :math:`[N, 3]`)\n\n\t\t:rtype: tuple(np.ndarray, np.ndarray)\n\t\t\"\"\"\n\n raise NotImplementedError\n\n def get_current_pcd(self, in_cam=True):\n \"\"\"\n\t\tReturn the point cloud at current time step (one frame only)\n\n\t\t:param in_cam: return points in camera frame,\n\t\t otherwise, return points in base frame\n\n\t\t:type in_cam: bool\n\t\t:returns: tuple (pts, colors)\n\n\t\t pts: point coordinates in world frame (shape: :math:`[N, 3]`)\n\n\t\t colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)\n\t\t:rtype: tuple(np.ndarray, np.ndarray)\n\t\t\"\"\"\n\n raise NotImplementedError\n\n @property\n def state(self):\n \"\"\"\n\t\tReturn the current pan and tilt joint angles of the robot camera.\n\n\t\t:return:\n\t\t pan_tilt: A list the form [pan angle, tilt angle]\n\t\t:rtype: list\n\t\t\"\"\"\n return self.get_state()\n\n def get_state(self):\n \"\"\"\n\t\tReturn the current pan and tilt joint angles of the robot camera.\n\n\t\t:return:\n\t\t 
pan_tilt: A list the form [pan angle, tilt angle]\n\t\t:rtype: list\n\t\t\"\"\"\n return [self.get_pan(), self.get_tilt()]\n\n def get_pan(self):\n \"\"\"\n\t\tReturn the current pan joint angle of the robot camera.\n\n\t\t:return:\n\t\t pan: Pan joint angle\n\t\t:rtype: float\n\t\t\"\"\"\n return self.pan_joint.get_joint_position()\n\n def get_tilt(self):\n \"\"\"\n\t\tReturn the current tilt joint angle of the robot camera.\n\n\t\t:return:\n\t\t tilt: Tilt joint angle\n\t\t:rtype: float\n\t\t\"\"\"\n return self.tilt_joint.get_joint_position()\n\n def set_pan(self, pan, wait=True):\n \"\"\"\n\t\tSets the pan joint angle to the specified value.\n\n\t\t:param pan: value to be set for pan joint\n\t\t:param wait: wait until the pan angle is set to\n\t\t the target angle.\n\n\t\t:type pan: float\n\t\t:type wait: bool\n\t\t\"\"\"\n\n self.pan_joint.set_joint_position(pan)\n # [self.sim.step() for _ in range(50)]\n\n def set_tilt(self, tilt, wait=True):\n \"\"\"\n\t\tSets the tilt joint angle to the specified value.\n\n\t\t:param tilt: value to be set for the tilt joint\n\t\t:param wait: wait until the tilt angle is set to\n\t\t the target angle.\n\n\t\t:type tilt: float\n\t\t:type wait: bool\n\t\t\"\"\"\n\n self.tilt_joint.set_joint_position(tilt)\n\n def set_pan_tilt(self, pan, tilt, wait=True):\n \"\"\"\n\t\tSets both the pan and tilt joint angles to the specified values.\n\n\t\t:param pan: value to be set for pan joint\n\t\t:param tilt: value to be set for the tilt joint\n\t\t:param wait: wait until the pan and tilt angles are set to\n\t\t the target angles.\n\n\t\t:type pan: float\n\t\t:type tilt: float\n\t\t:type wait: bool\n\t\t\"\"\"\n\n self.set_pan(pan)\n self.set_tilt(tilt)\n\n def reset(self):\n \"\"\"\n\t\tThis function resets the pan and tilt joints by actuating\n\t\tthem to their home configuration.\n\t\t\"\"\"\n self.set_pan_tilt(self.configs.CAMERA.RESET_PAN, self.configs.CAMERA.RESET_TILT)\n"
] |
[
[
"numpy.array"
]
] |
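`LoCoBotCamera.get_intrinsics` in the row above sketches (but currently disables) building a pinhole intrinsic matrix with `np.array`. A minimal sketch of that construction; the focal lengths, principal point, and test point are assumed values, not LoCoBot calibration:

```python
# Sketch of the intrinsic-matrix construction hinted at in get_intrinsics (numbers are assumed).
import numpy as np

fx, fy = 525.0, 525.0      # assumed focal lengths in pixels
cx, cy = 319.5, 239.5      # assumed principal point

K = np.array([[fx, 0.0, cx],
              [0.0, fy, cy],
              [0.0, 0.0, 1.0]])

# Project an assumed camera-frame point (X, Y, Z) to pixel coordinates.
X, Y, Z = 0.2, -0.1, 1.5
u, v, w = K @ np.array([X, Y, Z])
print(u / w, v / w)
```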
DPBayes/data-sharing-examples
|
[
"f9fffc5b8f45d8dd7b93cb7e812439decfa51193",
"f9fffc5b8f45d8dd7b93cb7e812439decfa51193",
"f9fffc5b8f45d8dd7b93cb7e812439decfa51193"
] |
[
"adult/dp_logistic_regression_onehot/classify_anticipated.py",
"carat/mixture_model/carat_dp_main.py",
"ard/non_dp_runs/female/female_main.py"
] |
[
"import pickle, torch\nimport numpy as np\nimport pandas as pd\n\ntarget_epsilons = [1.1, 2.0, 4.0, 8.0, 14.0]\nanticipated_Ts = [2, 5, 10, 20]\nmodels_dict = {}\nfor eps in target_epsilons:\n\tmodels_dict[eps] = pickle.load(open('./res/models_2019-11-05_{}.p'.format(eps), 'rb'))\n\n\nX_test = pd.read_csv('./onehotted_data/encoded_X_test.csv', sep=';')\ny_test = pd.read_csv('./onehotted_data/encoded_y_test.csv', sep=';', header=None).values.squeeze()\n\nfeature_names = list(X_test.columns)\nX_test['Intercept'] = np.ones(len(X_test))\nX_test = X_test[['Intercept'] + feature_names]\n\naccs_dict={}\nfor eps in target_epsilons:\n\tmodels = models_dict[eps]\n\taccs = np.zeros(40)\n\tfor i, model in enumerate(models):\n\t\tw_map = model.reparam.bias.data.numpy()\n\t\tS_N = model.reparam.weight.exp().data.numpy()**2\n\t\tmu_a = X_test.dot(w_map) \n\t\tsigma_a2 = (X_test**2).dot(S_N)\n\t\tkappa = (1+np.pi*sigma_a2/8)**-0.5\n\t\tsigmoid = lambda x : (1+np.exp(-x))**-1\n\t\ty_pred = 1*(sigmoid(kappa*mu_a)>0.5)\n\t\taccs[i] = np.mean(y_pred==y_test)\n\taccs = np.array(np.split(accs, 10))\n\t## accs \\in R^{10 x 4}, column corresponds to a anticipated runs\n\taccs_dict[eps]=accs\n\nmean_accs_dict = {eps : accs_dict[eps].mean(0) for eps in target_epsilons}\nstd_accs_dict = {eps : accs_dict[eps].std(0) for eps in target_epsilons}\n\npickle.dump({'means': mean_accs_dict, 'stds': std_accs_dict},\\\n\t\topen('../plot_scripts/plot_pickles/anticipated_res_onehot.p', 'wb'))\n",
"import torch, sys, math, pickle, datetime, time\nimport numpy as np\nimport numpy.random as npr\nfrom collections import OrderedDict\n\nuse_cuda = torch.cuda.is_available()\nnpr.seed(1234)\nif use_cuda : \n\ttorch.set_default_tensor_type('torch.cuda.DoubleTensor')\n\ttorch.cuda.manual_seed(1234)\nelse : \n\ttorch.set_default_tensor_type('torch.DoubleTensor')\n\ttorch.manual_seed(1234)\n\n\n\"\"\"\nDPVI for Carat app data. Model countries with Categorical dist and apps as a binary vector.\n\"\"\"\nfrom dpvi import DPVI\n\ndef infer(T, C, sigma, batch_size, Optimizer, lr, X_apps, k):\n\tfrom linear import ReparamXpand\n\tN = len(X_apps)\n\t## Initialize and expand model\n\t### Define model for reparametrization\n\tparam_dims = {'theta_unconstrained' : [k, X_apps.shape[-1]], 'pi_unconstrained' : [k-1]}\n\tparam_dims = OrderedDict(param_dims)\n\n\t### Compute the total number of parameters in model\n\tinput_dim = int(np.sum([np.prod(value) for value in param_dims.values()]))\n\tflat_param_dims = np.array([np.prod(value) for value in param_dims.values()])\n\n\tif sigma>0 : \n\t\tmodel = ReparamXpand(batch_size, input_dim, param_dims, flat_param_dims)\n\t\toptimizer = Optimizer(model.parameters(), lr=lr)\n\telse : \n\t\tmodel = ReparamXpand(1, input_dim, param_dims, flat_param_dims)\n\t\toptimizer = Optimizer(model.parameters(), lr=lr)\n\tif use_cuda:\n\t\tX_apps = X_apps.cuda()\n\t\tmodel.cuda()\n\tmodel.reparam.weight.data[:,-(k-1):].mul_(0)\n\tmodel.reparam.bias.data[:,-(k-1):].mul_(0)\n\t## Training model\n\tmodel = DPVI(model, T, N, batch_size, X_apps, sigma, C, optimizer)\n\t## Create a generative model based on model parameters and return it\n\tgenerative_model = ReparamXpand(1, input_dim, param_dims, flat_param_dims)\n\tgenerative_model.reparam.bias.detach_()\n\tgenerative_model.reparam.weight.detach_()\n\tgenerative_model.reparam.bias.data = torch.tensor(model.reparam.bias.data.cpu()[0].data.numpy(), device='cpu') \n\tgenerative_model.reparam.weight.data = torch.tensor(model.reparam.weight.data.cpu()[0].data.numpy(), device='cpu')\n\treturn generative_model\n\n##################################################\ndef main():\n\t### Set number of mixture components (k)\n\tk = 20\n\t## Training parameters\n\tT = 30000\n\tC = 1.0\n\tq = .001\n\tlr = .001\n\t\n\t### Pick dimension from argv\n\td = int(sys.argv[1])\n\t### Compute privacy budget\n\tfrom privacy.analysis.compute_dp_sgd_privacy import compute_rdp, get_privacy_spent\n\tdelta = 1e-5\n\trdp_orders = range(2, 500)\n\tsigma = 2.0\n\tif sigma>0:\n\t\tfrom privacy.analysis.compute_dp_sgd_privacy import get_privacy_spent, compute_rdp\n\t\trdp_alpha = range(2,500)\n\t\tdelta = 1e-5\n\t\tprint(sigma)\n\t\trdp_eps = compute_rdp(q, sigma, T, rdp_alpha)\n\t\tepsilon = 2*get_privacy_spent(rdp_alpha, rdp_eps, target_delta = delta/2)[0]\n\t### Check that epsilon < 1.0\n\tassert(epsilon<1.0)\n\n\t### Save log\n\tdate = datetime.date.today().isoformat()\n\twall_start = time.time()\n\tcpu_start = time.clock()\n\tout_file = open(\"out_file_{}_{}.txt\".format(date, d), \"a\")\n\tsys.stdout = out_file\n\t### Load carat-data\n\timport pandas as pd\n\tapp_data = pd.read_csv('../data/subsets/carat_apps_sub{}.dat'.format(d), sep=' ', header=None)\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t.astype('float').values\n\tN = len(app_data)\n\tbatch_size = int(N*q)\n\tX_apps = torch.tensor(app_data).view([N, 1, d])\n\tmodels = [] ## container to save gen_models\n\tfor run in range(10):\n\t\tfrom torch.optim import Adam\n\t\tgen_model = infer(T, C, float(sigma), batch_size, 
Adam, lr, X_apps, k)\n\t\tmodels.append(gen_model)\n\n\twall_end = time.time()\n\tcpu_end = time.clock()\n\tpickle.dump(models, open('models_{}_{}.p'.format(date, d), 'wb'))\n\tprint('Wall time {}'.format(wall_end-wall_start))\n\tprint('CPU time {}'.format(cpu_end-cpu_start))\n\tout_file.close()\n\tparams = {'T':T, 'C':C, 'q':q, 'lr':lr, 'sigma':sigma, 'epsilon' : epsilon, 'd':d}\n\tpickle.dump(params, open('params_{}_{}.p'.format(date, d), 'wb'))\n\nif __name__ == \"__main__\":\n\tmain()\n",
"import torch, sys, math, pickle, datetime, time\nimport numpy as np\nimport pandas as pd\nimport numpy.random as npr\nfrom itertools import count\nfrom collections import OrderedDict\n\n\nuse_cuda = torch.cuda.is_available()\n\nfrom linear import ReparamXpand\n\n##################################################\n### Inference ###\n\"\"\"\n Runs DPVI for given parameters and returns a generative model\n\"\"\"\nfrom vi import VI\ndef infer(T, batch_size, Optimizer, learning_rate, train_data, variable_types, k):\n ## Initialize and expand model\n param_dims = OrderedDict()\n for key, value in variable_types.items():\n if key == 'pi_unconstrained':\n param_dims[key] = [k-1]\n else:\n if value == 'Bernoulli':\n param_dims[key] = [k]\n elif (key=='lex.dur' and variable_types[key]==None):\n param_dims[key] = [2, k]\n elif (key=='ep' and variable_types[key]==None):\n param_dims[key] = [k]\n elif (key=='dead' and variable_types[key]==None):\n param_dims[key] = [k]\n elif value == 'Beta':\n param_dims[key] = [2, k]\n elif value == 'Categorical':\n param_dims[key] = [k, len(np.unique(train_data[key]))]\n \n input_dim = int(np.sum([np.prod(value) for value in param_dims.values()]))\n flat_param_dims = np.array([np.prod(value) for value in param_dims.values()])\n model = ReparamXpand(1, input_dim, param_dims, flat_param_dims)\n model.reparam.bias.data = model.reparam.bias.data.flatten()\n model.reparam.weight.data = model.reparam.weight.data.flatten()\n\n ### Init model close to feature means\n def logit(y):\n return torch.log(y)-torch.log(1.-y)\n def inverse_softmax(y):\n last = 1e-23*torch.ones(1) # just something small\n sum_term = -50.-torch.log(last)\n x = torch.log(y)-sum_term\n return x\n ### Init model close to feature means\n ## Laplace mech with small epsilon to guarantee DP of the initialization\n for key in train_data.columns: \n if variable_types[key]=='Bernoulli' or key in ['dead']:\n param_mean = torch.as_tensor(train_data[key].mean(0))\n param_location = list(model.param_dims.keys()).index(key)\n init_param = logit(torch.rand(k)*(param_mean*2.-param_mean*0.5)+param_mean*0.5)\n\n start_index = np.sum(model.flat_param_dims[:param_location])\n model.reparam.bias.data[start_index:(start_index+np.sum(model.param_dims[key]))] =\\\n init_param\n elif variable_types[key]=='Categorical':\n freqs = np.unique(train_data[key], return_counts=1)[1]\n num_cats = len(freqs)\n param_mean = torch.as_tensor(freqs/np.sum(freqs))\n init_param = inverse_softmax(param_mean)\n init_param = 0.5*torch.randn(k, num_cats)+init_param\n init_param = init_param.flatten()\n param_location = list(model.param_dims.keys()).index(key)\n start_index = np.sum(model.flat_param_dims[:param_location])\n model.reparam.bias.data[start_index:(start_index+np.prod(model.param_dims[key]))] =\\\n init_param\n\n \n\n if use_cuda:\n model.cuda()\n optimizer = Optimizer(model.parameters(), lr=learning_rate)\n N = len(train_data)\n model = VI(model, T, N, batch_size, train_data, optimizer, variable_types)\n\n ## Create a generative model based on model parameters and return it\n generative_model = ReparamXpand(1, input_dim, param_dims, flat_param_dims)\n generative_model.reparam.bias.detach_()\n generative_model.reparam.weight.detach_()\n generative_model.reparam.bias.data = torch.tensor(model.reparam.bias.data.cpu().data.numpy(), device='cpu') \n generative_model.reparam.weight.data = torch.tensor(model.reparam.weight.data.cpu().data.numpy(), device='cpu')\n #return generative_model, z_maps\n return 
generative_model\n\n\n##################################################\n### Load diabetes data ###\n## Encode data\nfrom load_diabetes import fetch_data\nfemale_df, male_df, data_dtypes = fetch_data()\ndata_dtypes['G03.DDD'] = 'int64'\nfemale_N = len(female_df)\nmale_N = len(male_df)\n\n##################################################\n### Define model ###\n## For female\n\n# Load variable type dictionaries for both independent and dependent types\nfrom variable_types import independent_model as female_variable_types_\n\ndead_female_variable_types = female_variable_types_.copy()\ndead_female_variable_types.pop('dead')\nalive_female_variable_types = dead_female_variable_types.copy()\nalive_female_variable_types.pop('ep')\nalive_female_variable_types.pop('lex.dur')\n\n# Pick features for training\nfemale_features = list(female_variable_types_.keys())\nfemale_features.remove('pi_unconstrained')\n\n# Cast features to appropriate dtypes\nfemale_dtypes = {key:value if value!='O' else 'int64' for key, value in \\\n data_dtypes[female_features].items()} \n\nalive_features = list(alive_female_variable_types.keys())\nalive_features.remove('pi_unconstrained')\ndead_features = list(dead_female_variable_types.keys())\ndead_features.remove('pi_unconstrained')\n\n# Separate training datas to alives and deads\nalive_female_df = female_df[female_df.dead == 0][alive_features]\ndead_female_df = female_df[female_df.dead == 1][dead_features]\n\ndef main():\n # Set DPVI params\n #T = 10000\n T = 40000\n C = 1.0\n #lr = 1e-2\n lr = 1e-3\n # set number of mixture components\n female_k = 40\n q = 0.005\n n_runs = int(sys.argv[1])\n seed = int(sys.argv[2])\n # Set optimizer\n optimizer = torch.optim.Adam\n ## Set random seed\n npr.seed(seed)\n if use_cuda:\n torch.set_default_tensor_type('torch.cuda.DoubleTensor')\n torch.cuda.manual_seed(seed)\n else:\n torch.set_default_tensor_type('torch.DoubleTensor')\n torch.manual_seed(seed)\n\n ## Compute privacy budget\n print(\"NON DP RUN!! 
k = {}\".format(female_k))\n\n ## Save parameters\n res_dir = './res/'\n params = {'T':T, 'C':C, 'lr':lr, 'female_k':female_k,\\\n 'q':q, 'n_runs':n_runs, 'seed':seed}\n ## Determine filename\n fname_i = 0\n date = datetime.date.today().isoformat()\n fname = 'k={}_{}_{}'.format(female_k, date, seed)\n while True:\n try : \n param_file = open(res_dir+'params_{}_NONDP.p'.format(fname), 'r')\n param_file.close()\n if fname_i == 0: fname += '_({})'.format(fname_i)\n else: fname = fname[:-4]+'_({})'.format(fname_i)\n fname_i += 1\n except :\n break\n \n pickle.dump(params, open(res_dir+'params_{}_NONDP.p'.format(fname), 'wb'))\n learn_counter = count()\n alive_female_models = []\n dead_female_models = []\n out_file = open(res_dir+'out_{}_NONDP.txt'.format(fname), 'w')\n for i in range(n_runs):\n start_time = time.time()\n print(learn_counter.__next__())\n # train female and models\n # alives\n alive_female_model = infer(T, int(q*len(alive_female_df)),\\\n optimizer, lr, alive_female_df, alive_female_variable_types, female_k)\n alive_female_models.append(alive_female_model)\n pickle.dump(alive_female_models, open('./female_models/'+'alive_female_models_{}_NONDP.p'\\\n .format(fname), 'wb'))\n # deads\n dead_female_model = infer(T, int(q*len(dead_female_df)),\\\n optimizer, lr, dead_female_df, dead_female_variable_types, female_k)\n dead_female_models.append(dead_female_model)\n pickle.dump(dead_female_models, open('./female_models/'+'dead_female_models_{}_NONDP.p'\\\n .format(fname), 'wb'))\n stop_time = time.time()\n time_delta = stop_time-start_time\n out_file.writelines(\"Took {} seconds to learn alive and dead\\n\".format(time_delta))\n print(\"Took {} seconds to learn alive and dead\\n\".format(time_delta))\n out_file.close()\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.split",
"pandas.read_csv",
"numpy.mean",
"numpy.exp",
"numpy.zeros"
],
[
"torch.set_default_tensor_type",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.tensor",
"torch.cuda.is_available",
"numpy.prod"
],
[
"torch.set_default_tensor_type",
"torch.ones",
"numpy.random.seed",
"torch.cuda.manual_seed",
"numpy.unique",
"torch.manual_seed",
"torch.randn",
"torch.log",
"torch.rand",
"torch.cuda.is_available",
"numpy.prod",
"numpy.sum"
]
] |
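`classify_anticipated.py` in the row above scores a variational logistic-regression posterior with the moderated sigmoid `sigmoid(kappa * mu_a)`, where `kappa = (1 + pi * sigma_a^2 / 8)**-0.5`. A minimal sketch of that prediction rule; the posterior mean, variances, and test point are made up:

```python
# Sketch of the moderated-sigmoid prediction used in classify_anticipated.py (inputs are assumed).
import numpy as np

def predict_proba(x, w_map, S_N):
    """Approximate p(y=1 | x) for a Gaussian posterior N(w_map, diag(S_N))."""
    mu_a = x @ w_map                    # predictive mean of the activation
    sigma_a2 = (x**2) @ S_N             # predictive variance (diagonal covariance)
    kappa = (1.0 + np.pi * sigma_a2 / 8.0) ** -0.5
    return 1.0 / (1.0 + np.exp(-kappa * mu_a))

w_map = np.array([0.5, -1.2, 2.0])      # assumed posterior mean (intercept + 2 features)
S_N = np.array([0.1, 0.05, 0.2])        # assumed posterior variances
x = np.array([1.0, 0.3, -0.7])          # assumed test point
y_pred = int(predict_proba(x, w_map, S_N) > 0.5)
print(y_pred)
```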
aldajo92/UDACITY-SDC_BehavioralCloning
|
[
"c2119a1bd244d7a4a1da37209e8c6174c9273628",
"c2119a1bd244d7a4a1da37209e8c6174c9273628"
] |
[
"read_and_train_6.py",
"read_and_train_2.py"
] |
[
"import csv\nimport cv2\nimport numpy as np\n\n# dataPath: folder path where all IMG's and driving_log's are stored\ndataPath = 'data'\ndriving_log_list = {'driving_log.csv':'IMG', 'driving_log2.csv':'IMG2'}\n\ncorrection = 0.5 # this is a parameter to tune\n\ndef get_image_from_sourcepath(source_path, folder):\n filename = source_path.split('/')[-1]\n current_path = './{}/{}/{}'.format(dataPath,folder,filename)\n image = cv2.imread(current_path)\n return image\n\n# filename: String path asociated with the specific csv file that contains the relation between images an values (driving_log).\n# local_lines : list of all rows in the csv file. Each row have information about the image paths and values as an inner list.\ndef read_lines_from_filename(filename):\n local_lines = []\n with open('./{}/{}'.format(dataPath, filename)) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n local_lines.append(line)\n return local_lines\n\n# images: global list that contains all the images used to train the model as the input\n# measurements: global list that contains all measurements used to train the model as the output\nimages = []\nmeasurements = []\n\n# lines: list that contains each row of the csv file\n# line: row that contains the image path for images, and also the steering and throttle values associated, as a list.\n# images: global array that contains all the images used to train the model as the input\n# measurements: global array that contains all measurements used to train the model as the output\n# correction: a parameter that needs to be tuned. It provides a correction in the scenario when the car sees the lane lines.\nprint('Reading from: ./{}/'.format(dataPath))\nfor (d_log, folder) in driving_log_list.items():\n print('Reading file: {}'.format(d_log))\n lines = read_lines_from_filename(d_log)\n\n for line in lines:\n steering_center = float(line[3])\n steering_left = steering_center + correction\n steering_right = steering_center - correction\n\n image_center = get_image_from_sourcepath(line[0], folder)\n image_left = get_image_from_sourcepath(line[1], folder)\n image_right = get_image_from_sourcepath(line[2], folder)\n \n images.extend([image_center, image_left, image_right])\n measurements.extend([steering_center, steering_left, steering_right])\n\naugmented_images, augmented_measurements = [], []\nfor image, measurement in zip(images, measurements):\n augmented_images.append(image)\n augmented_measurements.append(measurement)\n augmented_images.append(cv2.flip(image,1))\n augmented_measurements.append(measurement*-1.0)\n\nX_train = np.array(augmented_images)\nY_train = np.array(augmented_measurements)\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPool2D, Cropping2D\nfrom keras.layers.convolutional import Convolution2D\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25),(0,0))))\nmodel.add(Conv2D(filters=6, kernel_size=(5, 5), activation='relu'))\nmodel.add(MaxPool2D())\nmodel.add(Conv2D(filters=6, kernel_size=(5, 5), activation='relu'))\nmodel.add(MaxPool2D())\nmodel.add(Flatten())\nmodel.add(Dense(120))\nmodel.add(Dense(84))\nmodel.add(Dense(1))\n\nmodel.compile(loss = 'mse', optimizer = 'adam')\nmodel.fit(X_train, Y_train, validation_split = 0.2, shuffle = True, nb_epoch=4)\n\nmodel.save('model.h5')",
"import csv\nimport cv2\nimport numpy as np\n\n# dataPath: folder path where all IMG's and driving_log's are stored\ndataPath = 'train/'\n\nlines = []\nprint('Reading from: ./{}'.format(dataPath))\nwith open('./{}driving_log.csv'.format(dataPath)) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n\n# images: global list that contains all the images used to train the model as the input\n# measurements: global list that contains all measurements used to train the model as the output\nimages = []\nmeasurements = []\n\n# lines: list that contains each row of the csv file\n# line: row that contains the image path for images, and also the steering and throttle values associated, as a list.\nfor line in lines:\n source_path = line[0]\n filename = source_path.split('/')[-1]\n current_path = './{}IMG/{}'.format(dataPath,filename)\n image = cv2.imread(current_path)\n images.append(image)\n measurement = float(line[3])\n measurements.append(measurement)\n\nX_train = np.array(images)\nY_train = np.array(measurements)\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\nmodel.compile(loss = 'mse', optimizer = 'adam')\nmodel.fit(X_train, Y_train, validation_split = 0.2, shuffle = True, nb_epoch=2)\n\nmodel.save('model.h5')"
] |
[
[
"numpy.array"
],
[
"numpy.array"
]
] |
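`read_and_train_6.py` in the row above doubles its training set by mirroring each frame with `cv2.flip` and negating the steering angle. A minimal sketch of that augmentation step; the synthetic image stands in for a simulator frame:

```python
# Sketch of the flip augmentation from read_and_train_6.py (frame and angle are synthetic).
import cv2
import numpy as np

image = np.random.randint(0, 256, size=(160, 320, 3), dtype=np.uint8)
steering = 0.25

augmented_images = [image, cv2.flip(image, 1)]        # flipCode=1 -> horizontal mirror
augmented_measurements = [steering, steering * -1.0]  # mirrored steering angle
print(len(augmented_images), augmented_measurements)
```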
qcc4cp/qcc
|
[
"63227bbe36251b6f0bb3f78f2233337edcef547e",
"63227bbe36251b6f0bb3f78f2233337edcef547e"
] |
[
"src/subset_sum.py",
"src/lib/state.py"
] |
[
"# python3\n\"\"\"Example: Number set partitioning such set sum(A) == sum(B).\"\"\"\n\n\n# Based on this paper:\n# https://cds.cern.ch/record/467590/files/0010018.pdf\n#\n# For a set A of integers, can A be partitioned into\n# two sets A1 and A2, such that:\n# sum(A1) == sum(A2)\n#\n# For this to work, sum(A) must not be odd.\n# We should reach 100% consistent results.\n\nimport random\nfrom typing import List\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\n\nfrom src.lib import helper\n\nflags.DEFINE_integer('nmax', 15, 'Maximum number')\nflags.DEFINE_integer('nnum', 6,\n 'Maximum number of set elements [1-nmax]')\nflags.DEFINE_integer('iterations', 20, 'Number of experiments')\n\n\ndef select_numbers(nmax: int, nnum: int) -> List[int]:\n \"\"\"Select nnum random, unique numbers in range 1 to nmax.\"\"\"\n\n while True:\n sample = random.sample(range(1, nmax), nnum)\n if sum(sample) % 2 == 0:\n return sample\n\n\ndef tensor_diag(n: int, num: int):\n \"\"\"Construct tensor product from diagonal matrices.\"\"\"\n\n def tensor_product(w1: float, w2: float, diag):\n return [j for i in zip([x * w1 for x in diag],\n [x * w2 for x in diag]) for j in i]\n\n diag = [1, -1] if num == 0 else [1, 1]\n for i in range(1, n):\n if i == num:\n diag = tensor_product(i, -i, diag)\n else:\n diag = tensor_product(1, 1, diag)\n return diag\n\n\ndef set_to_diagonal_h(num_list: List[int],\n nmax: int) -> np.ndarray:\n \"\"\"Construct diag(H).\"\"\"\n\n h = [0.0] * 2**nmax\n for num in num_list:\n diag = tensor_diag(nmax, num)\n for idx, val in enumerate(diag):\n h[idx] += val\n return h\n\n\ndef compute_partition(num_list: List[int]):\n \"\"\"Compute paritions that add up.\"\"\"\n\n solutions = []\n for bits in helper.bitprod(len(num_list)):\n iset = []\n oset = []\n for idx, val in enumerate(bits):\n (iset.append(num_list[idx]) if val == 0 else\n oset.append(num_list[idx]))\n if sum(iset) == sum(oset):\n solutions.append(bits)\n return solutions\n\n\ndef dump_solution(bits: List[int], num_list: List[int]):\n iset = []\n oset = []\n for idx, val in enumerate(bits):\n (iset.append(f'{num_list[idx]:d}') if val == 0 else\n oset.append(f'{num_list[idx]:d}'))\n return '+'.join(iset) + ' == ' + '+'.join(oset)\n\n\ndef run_experiment() -> None:\n \"\"\"Run an experiment, compute H, match against 0.\"\"\"\n\n nmax = flags.FLAGS.nmax\n num_list = select_numbers(nmax, flags.FLAGS.nnum)\n solutions = compute_partition(num_list)\n\n diag = set_to_diagonal_h(num_list, nmax)\n\n non_zero = np.count_nonzero(diag)\n if non_zero != 2**nmax:\n print('Solution should exist...', end='')\n if solutions:\n print(' Found Solution:',\n dump_solution(solutions[0], num_list))\n return True\n raise AssertionError('False positive found.')\n if solutions:\n raise AssertionError('False negative found.')\n return False\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n for i in range(flags.FLAGS.iterations):\n ret = run_experiment()\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# python3\n\"\"\"class State wraps a tensor as underlying representation.\"\"\"\n\nimport cmath\nimport math\nimport random\nfrom typing import List, Optional\n\nimport numpy as np\n\nfrom src.lib import helper\nfrom src.lib import tensor\n\n\nclass State(tensor.Tensor):\n \"\"\"class State represents single and multi-qubit states.\"\"\"\n\n def __repr__(self) -> str:\n s = 'State('\n s += super().__str__().replace('\\n', '\\n' + ' ' * len(s))\n s += ')'\n return s\n\n def __str__(self) -> str:\n s = f'{self.nbits}-qubit state.'\n s += ' Tensor:\\n'\n s += super().__str__()\n return s\n\n def dump(self, desc: Optional[str] = None, prob_only: bool = True) -> None:\n dump_state(self, desc, prob_only)\n\n def density(self) -> tensor.Tensor:\n return tensor.Tensor(np.outer(self, self.conj()))\n\n def adjoint(self) -> tensor.Tensor:\n return self.conj().transpose()\n\n def normalize(self) -> None:\n \"\"\"Renormalize the state. Sum of squared amplitudes==1.0.\"\"\"\n\n dprod = np.conj(self) @ self\n if dprod.is_close(0.0):\n raise AssertionError('Normalizing to zero-probability state.')\n self /= np.sqrt(np.real(dprod))\n\n def ampl(self, *bits) -> np.complexfloating:\n \"\"\"Return amplitude for state indexed by 'bits'.\"\"\"\n\n idx = helper.bits2val(bits)\n return self[idx]\n\n def prob(self, *bits) -> float:\n \"\"\"Return probability for state indexed by 'bits'.\"\"\"\n\n amplitude = self.ampl(*bits)\n return np.real(amplitude.conj() * amplitude)\n\n def phase(self, *bits) -> float:\n \"\"\"Return phase of a state from the complex amplitude.\"\"\"\n\n amplitude = self.ampl(*bits)\n return math.degrees(cmath.phase(amplitude))\n\n def maxprob(self) -> (List[float], float):\n \"\"\"Find state with highest probability.\"\"\"\n\n maxbits, maxprob = [], 0.0\n for bits in helper.bitprod(self.nbits):\n cur_prob = self.prob(*bits)\n if cur_prob > maxprob:\n maxbits, maxprob = bits, cur_prob\n return maxbits, maxprob\n\n # The Schmidt number is an entanglement measure for a state.\n #\n # - A separable state has a schmidt number of 1.\n # - An entangled state has a schmidt number > 1.\n #\n # This implementation is borrowed from qcircuits (which has a different\n # internal representation).\n #\n # TODO(rhundt): Change implementation to use full matrices.\n # Use partial trace to trace out 'excluded_indices'\n # and perform the SVD decomp, similar to below.\n #\n def schmidt_number(self, indices) -> float:\n \"\"\"Compute schmidt number of a sub-state for entanglement.\"\"\"\n\n if len(indices) in [0, self.nbits]:\n raise ValueError('At least one qubit index should be included '\n 'and at least one should be excluded')\n if min(indices) < 0 or max(indices) >= self.nbits:\n raise ValueError('Indices must be between 0 and d-1 for a d-qubit state.')\n if not all([isinstance(idx, int) for idx in indices]):\n raise ValueError('Indices should be integers.')\n\n included_indices = set(indices)\n excluded_indices = set(range(self.nbits)) - included_indices\n permutation = list(included_indices) + list(excluded_indices)\n twos = self.reshape([2] * self.nbits)\n m = twos.transpose(permutation).reshape(\n (2**len(included_indices), 2**len(excluded_indices))\n )\n\n _, d, _ = np.linalg.svd(m)\n qc = np.sum(d > 1e-10)\n return qc\n\n def apply1(self, gate, index) -> None:\n \"\"\"Apply single-qubit gate to this state.\"\"\"\n\n # To maintain qubit ordering in this infrastructure,\n # index needs to be reversed.\n #\n index = self.nbits - index - 1\n two_q = 1 << index\n g00 = gate[0, 0]\n g01 = gate[0, 1]\n g10 
= gate[1, 0]\n g11 = gate[1, 1]\n for g in range(0, 1 << self.nbits, 1 << (index+1)):\n for i in range(g, g + two_q):\n t1 = g00 * self[i] + g01 * self[i + two_q]\n t2 = g10 * self[i] + g11 * self[i + two_q]\n self[i] = t1\n self[i + two_q] = t2\n\n def applyc(self, gate, control, target) -> None:\n \"\"\"Apply a controlled 2-qubit gate via explicit indexing.\"\"\"\n\n # To maintain qubit ordering in this infrastructure,\n # index needs to be reversed.\n qbit = self.nbits - target - 1\n two_q = 2**qbit\n control = self.nbits - control - 1\n g00 = gate[0, 0]\n g01 = gate[0, 1]\n g10 = gate[1, 0]\n g11 = gate[1, 1]\n for g in range(0, 1 << self.nbits, 1 << (qbit+1)):\n idx_base = g * (1 << self.nbits)\n for i in range(g, g + two_q):\n idx = idx_base + i\n if idx & (1 << control):\n t1 = g00 * self[i] + g01 * self[i + two_q]\n t2 = g10 * self[i] + g11 * self[i + two_q]\n self[i] = t1\n self[i + two_q] = t2\n\n\n# Produce a given state for a single qubit.\n# We allow specification of a global phase, even though states cannot\n# be distinguished when multiplied with an arbitrary complex number, aka,\n# global phase.\n#\ndef qubit(alpha: Optional[np.complexfloating] = None,\n beta: Optional[np.complexfloating] = None) -> State:\n \"\"\"Produce a given state for a single qubit.\"\"\"\n\n if alpha is None and beta is None:\n raise ValueError('Both alpha and beta need to be specified')\n\n if beta is None:\n beta = math.sqrt(1.0 - np.conj(alpha) * alpha)\n if alpha is None:\n alpha = math.sqrt(1.0 - np.conj(beta) * beta)\n\n if not math.isclose(np.conj(alpha) * alpha +\n np.conj(beta) * beta, 1.0):\n raise ValueError('Qubit probabilities do not sum to 1.')\n\n qb = np.zeros(2, dtype=tensor.tensor_type())\n qb[0] = alpha\n qb[1] = beta\n return State(qb)\n\n\n# The functions zeros() and ones() produce the all-zero or all-one\n# computational basis vector for `d` qubits, ie,\n# |000...0> or\n# |111...1>\n#\n# The result of this tensor product is\n# always [1, 0, 0, ..., 0]^T or [0, 0, 0, ..., 1]^T\n#\ndef zeros_or_ones(d: int = 1, idx: int = 0) -> State:\n \"\"\"Produce the all-0/1 basis vector for `d` qubits.\"\"\"\n\n if d < 1:\n raise ValueError('Rank must be at least 1.')\n shape = 2**d\n t = np.zeros(shape, dtype=tensor.tensor_type())\n t[idx] = 1\n return State(t)\n\n\ndef zeros(d: int = 1) -> State:\n \"\"\"Produce state with 'd' |0>, eg., |0000>.\"\"\"\n return zeros_or_ones(d, 0)\n\n\ndef ones(d: int = 1) -> State:\n \"\"\"Produce state with 'd' |1>, eg., |1111>.\"\"\"\n return zeros_or_ones(d, 2**d - 1)\n\n\ndef bitstring(*bits) -> State:\n \"\"\"Produce a state from a given bit sequence, eg., |0101>.\"\"\"\n\n d = len(bits)\n if d == 0:\n raise ValueError('Rank must be at least 1.')\n for _, val in enumerate(bits):\n if val != 0 and val != 1:\n raise ValueError(f'Bits must be 0 or 1, got: {val}')\n t = np.zeros(1 << d, dtype=tensor.tensor_type())\n t[helper.bits2val(bits)] = 1\n return State(t)\n\n\ndef rand(n: int) -> State:\n \"\"\"Produce random combination of |0> and |1>.\"\"\"\n\n bits = [random.randint(0, 1) for _ in range(n)]\n return bitstring(*bits)\n\n\n# These two are used so commonly, make them constants.\nzero = zeros(1)\none = ones(1)\n\n\nclass Reg():\n \"\"\"Simple register class.\"\"\"\n\n def __init__(self, size: int, it=0, global_reg: int = None):\n self.size = size\n self.global_idx = list(range(global_reg,\n global_reg + size))\n self.val = [0] * size\n global_reg += size\n\n if it:\n if isinstance(it, int):\n it = format(it, '0{}b'.format(size))\n if isinstance(it, 
(str, tuple, list)):\n for idx, val in enumerate(it):\n if val == '1' or val == 1:\n self.val[idx] = 1\n\n def __str__(self) -> str:\n s = '|'\n for _, val in enumerate(self.val):\n s += f'{val}'\n return s + '>'\n\n def __getitem__(self, idx: int) -> int:\n return self.global_idx[idx]\n\n def __setitem__(self, idx: int, val: int) -> None:\n self.val[idx] = val\n\n def psi(self) -> State:\n return bitstring(*self.val)\n\n @property\n def nbits(self) -> int:\n return self.size\n\n\ndef fromregs(*argv):\n \"\"\"Make a state from multiple registers.\"\"\"\n\n psi = 1.0\n for arg in argv:\n psi = psi * arg.psi()\n return psi\n\n\n# =====================================================\n# Various Helper Functions pertaining to State.\n# =====================================================\n\n\ndef state_to_string(bits) -> str:\n \"\"\"Convert state to string like |010>.\"\"\"\n\n s = ''.join(str(i) for i in bits)\n return '|{:s}> (|{:d}>)'.format(s, int(s, 2))\n\n\ndef dump_state(psi, desc: Optional[str] = None,\n prob_only: bool = True) -> None:\n \"\"\"Dump probabilities for a state, as well as local qubit state.\"\"\"\n\n if desc:\n print('|', end='')\n for i in range(psi.nbits):\n print(i % 10, end='')\n print(f'> \\'{desc}\\'')\n\n state_list: List[str] = []\n for bits in helper.bitprod(psi.nbits):\n if prob_only and (psi.prob(*bits) < 10e-6):\n continue\n\n state_list.append(\n '{:s}: ampl: {:+.2f} prob: {:.2f} Phase: {:5.1f}'\n .format(state_to_string(bits),\n psi.ampl(*bits),\n psi.prob(*bits),\n psi.phase(*bits)))\n state_list.sort()\n print(*state_list, sep='\\n')\n"
] |
[
[
"numpy.count_nonzero"
],
[
"numpy.conj",
"numpy.linalg.svd",
"numpy.real",
"numpy.sum"
]
] |
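`tensor_diag` in `subset_sum.py` above builds the diagonal of a tensor product directly from the factor diagonals instead of forming full matrices. A minimal sketch of that trick, cross-checked against an explicit Kronecker product; the weights are illustrative:

```python
# Sketch of the diagonal-only tensor product used by tensor_diag (weights are illustrative).
import numpy as np

def tensor_product(w1, w2, diag):
    """Return kron(diag, [w1, w2]) as a flat list (the source's interleaving)."""
    return [j for i in zip([x * w1 for x in diag],
                           [x * w2 for x in diag]) for j in i]

diag = [1, -1]                       # Z-like factor on the first qubit
diag = tensor_product(1, 1, diag)    # identity-like factor on the second qubit
diag = tensor_product(3, -3, diag)   # weighted Z-like factor on the third qubit

# Cross-check against the diagonal of the explicit Kronecker product.
full = np.kron(np.kron(np.diag([1, -1]), np.eye(2)), np.diag([3, -3]))
assert np.allclose(np.diag(full), diag)
print(diag)
```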
wookayin/acme
|
[
"71b2ab8577a118c103718f034fa62c5ad2c0fd97",
"71b2ab8577a118c103718f034fa62c5ad2c0fd97",
"71b2ab8577a118c103718f034fa62c5ad2c0fd97"
] |
[
"acme/agents/jax/ppo/networks.py",
"acme/wrappers/frame_stacking_test.py",
"acme/agents/tf/ddpg/learning.py"
] |
[
"# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"PPO network definitions.\"\"\"\n\nimport dataclasses\nfrom typing import Any, Callable, Optional, Sequence\n\nfrom acme import specs\nfrom acme.agents.jax import actor_core as actor_core_lib\nfrom acme.jax import networks as networks_lib\nfrom acme.jax import utils\n\nimport haiku as hk\nimport jax.numpy as jnp\nimport numpy as np\n\nEntropyFn = Callable[[Any], jnp.ndarray]\n\n\[email protected]\nclass PPONetworks:\n \"\"\"Network and pure functions for the PPO agent.\n\n If 'network' returns tfd.Distribution, you can use make_ppo_networks() to\n create this object properly.\n If one is building this object manually, one has a freedom to make 'network'\n object return anything that is later being passed as input to\n log_prob/entropy/sample functions to perform the corresponding computations.\n \"\"\"\n network: networks_lib.FeedForwardNetwork\n log_prob: networks_lib.LogProbFn\n entropy: EntropyFn\n sample: networks_lib.SampleFn\n sample_eval: Optional[networks_lib.SampleFn] = None\n\n\ndef make_inference_fn(\n ppo_networks: PPONetworks,\n evaluation: bool = False) -> actor_core_lib.FeedForwardPolicyWithExtra:\n \"\"\"Returns a function to be used for inference by a PPO actor.\"\"\"\n\n def inference(params: networks_lib.Params, key: networks_lib.PRNGKey,\n observations: networks_lib.Observation):\n distribution, _ = ppo_networks.network.apply(params, observations)\n if evaluation and ppo_networks.sample_eval:\n actions = ppo_networks.sample_eval(distribution, key)\n else:\n actions = ppo_networks.sample(distribution, key)\n if evaluation:\n return actions, {}\n log_prob = ppo_networks.log_prob(distribution, actions)\n return actions, {'log_prob': log_prob}\n\n return inference\n\n\ndef make_networks(\n spec: specs.EnvironmentSpec, hidden_layer_sizes: Sequence[int] = (256, 256)\n) -> PPONetworks:\n if isinstance(spec.actions, specs.DiscreteArray):\n return make_discrete_networks(spec, hidden_layer_sizes)\n else:\n return make_continuous_networks(\n spec,\n policy_layer_sizes=hidden_layer_sizes,\n value_layer_sizes=hidden_layer_sizes)\n\n\ndef make_ppo_networks(network: networks_lib.FeedForwardNetwork) -> PPONetworks:\n \"\"\"Constructs a PPONetworks instance from the given FeedForwardNetwork.\n\n Args:\n network: a transformed Haiku network that takes in observations and returns\n the action distribution and value.\n\n Returns:\n A PPONetworks instance with pure functions wrapping the input network.\n \"\"\"\n return PPONetworks(\n network=network,\n log_prob=lambda distribution, action: distribution.log_prob(action),\n entropy=lambda distribution: distribution.entropy(),\n sample=lambda distribution, key: distribution.sample(seed=key),\n sample_eval=lambda distribution, key: distribution.mode())\n\n\ndef make_discrete_networks(\n environment_spec: specs.EnvironmentSpec,\n hidden_layer_sizes: Sequence[int] = (512,),\n use_conv: bool = True,\n) -> PPONetworks:\n 
\"\"\"Creates networks used by the agent for discrete action environments.\n\n Args:\n environment_spec: Environment spec used to define number of actions.\n hidden_layer_sizes: Network definition.\n use_conv: Whether to use a conv or MLP feature extractor.\n Returns:\n PPONetworks\n \"\"\"\n\n num_actions = environment_spec.actions.num_values\n\n def forward_fn(inputs):\n layers = []\n if use_conv:\n layers.extend([networks_lib.AtariTorso()])\n layers.extend([\n hk.nets.MLP(hidden_layer_sizes, activate_final=True),\n networks_lib.CategoricalValueHead(num_values=num_actions)\n ])\n policy_value_network = hk.Sequential(layers)\n return policy_value_network(inputs)\n\n forward_fn = hk.without_apply_rng(hk.transform(forward_fn))\n dummy_obs = utils.zeros_like(environment_spec.observations)\n dummy_obs = utils.add_batch_dim(dummy_obs) # Dummy 'sequence' dim.\n network = networks_lib.FeedForwardNetwork(\n lambda rng: forward_fn.init(rng, dummy_obs), forward_fn.apply)\n # Create PPONetworks to add functionality required by the agent.\n return make_ppo_networks(network)\n\n\ndef make_continuous_networks(\n environment_spec: specs.EnvironmentSpec,\n policy_layer_sizes: Sequence[int] = (64, 64),\n value_layer_sizes: Sequence[int] = (64, 64),\n) -> PPONetworks:\n \"\"\"Creates PPONetworks to be used for continuous action environments.\"\"\"\n\n # Get total number of action dimensions from action spec.\n num_dimensions = np.prod(environment_spec.actions.shape, dtype=int)\n\n def forward_fn(inputs):\n policy_network = hk.Sequential([\n utils.batch_concat,\n hk.nets.MLP(policy_layer_sizes, activate_final=True),\n # Note: we don't respect bounded action specs here and instead\n # rely on CanonicalSpecWrapper to clip actions accordingly.\n networks_lib.MultivariateNormalDiagHead(num_dimensions)\n ])\n value_network = hk.Sequential([\n utils.batch_concat,\n hk.nets.MLP(value_layer_sizes, activate_final=True),\n hk.Linear(1),\n lambda x: jnp.squeeze(x, axis=-1)\n ])\n\n action_distribution = policy_network(inputs)\n value = value_network(inputs)\n return (action_distribution, value)\n\n # Transform into pure functions.\n forward_fn = hk.without_apply_rng(hk.transform(forward_fn))\n\n dummy_obs = utils.zeros_like(environment_spec.observations)\n dummy_obs = utils.add_batch_dim(dummy_obs) # Dummy 'sequence' dim.\n network = networks_lib.FeedForwardNetwork(\n lambda rng: forward_fn.init(rng, dummy_obs), forward_fn.apply)\n # Create PPONetworks to add functionality required by the agent.\n return make_ppo_networks(network)\n",
"# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the single precision wrapper.\"\"\"\n\nfrom acme import wrappers\nfrom acme.testing import fakes\nimport numpy as np\nimport tree\n\nfrom absl.testing import absltest\n\n\nclass FakeNonZeroObservationEnvironment(fakes.ContinuousEnvironment):\n \"\"\"Fake environment with non-zero observations.\"\"\"\n\n def _generate_fake_observation(self):\n original_observation = super()._generate_fake_observation()\n return tree.map_structure(np.ones_like, original_observation)\n\n\nclass FrameStackingTest(absltest.TestCase):\n\n def test_specs(self):\n original_env = FakeNonZeroObservationEnvironment()\n env = wrappers.FrameStackingWrapper(original_env, 2)\n\n original_observation_spec = original_env.observation_spec()\n expected_shape = original_observation_spec.shape + (2,)\n observation_spec = env.observation_spec()\n self.assertEqual(expected_shape, observation_spec.shape)\n\n expected_action_spec = original_env.action_spec()\n action_spec = env.action_spec()\n self.assertEqual(expected_action_spec, action_spec)\n\n expected_reward_spec = original_env.reward_spec()\n reward_spec = env.reward_spec()\n self.assertEqual(expected_reward_spec, reward_spec)\n\n expected_discount_spec = original_env.discount_spec()\n discount_spec = env.discount_spec()\n self.assertEqual(expected_discount_spec, discount_spec)\n\n def test_step(self):\n original_env = FakeNonZeroObservationEnvironment()\n env = wrappers.FrameStackingWrapper(original_env, 2)\n observation_spec = env.observation_spec()\n action_spec = env.action_spec()\n\n timestep = env.reset()\n self.assertEqual(observation_spec.shape, timestep.observation.shape)\n self.assertTrue(np.all(timestep.observation[..., 0] == 0))\n\n timestep = env.step(action_spec.generate_value())\n self.assertEqual(observation_spec.shape, timestep.observation.shape)\n\n def test_second_reset(self):\n original_env = FakeNonZeroObservationEnvironment()\n env = wrappers.FrameStackingWrapper(original_env, 2)\n action_spec = env.action_spec()\n\n env.reset()\n env.step(action_spec.generate_value())\n timestep = env.reset()\n self.assertTrue(np.all(timestep.observation[..., 0] == 0))\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"DDPG learner implementation.\"\"\"\n\nimport time\nfrom typing import List, Optional\n\nimport acme\nfrom acme import types\nfrom acme.tf import losses\nfrom acme.tf import savers as tf2_savers\nfrom acme.tf import utils as tf2_utils\nfrom acme.utils import counting\nfrom acme.utils import loggers\nimport numpy as np\nimport sonnet as snt\nimport tensorflow as tf\nimport tree\nimport trfl\n\n\nclass DDPGLearner(acme.Learner):\n \"\"\"DDPG learner.\n\n This is the learning component of a DDPG agent. IE it takes a dataset as input\n and implements update functionality to learn from this dataset.\n \"\"\"\n\n def __init__(\n self,\n policy_network: snt.Module,\n critic_network: snt.Module,\n target_policy_network: snt.Module,\n target_critic_network: snt.Module,\n discount: float,\n target_update_period: int,\n dataset: tf.data.Dataset,\n observation_network: types.TensorTransformation = lambda x: x,\n target_observation_network: types.TensorTransformation = lambda x: x,\n policy_optimizer: Optional[snt.Optimizer] = None,\n critic_optimizer: Optional[snt.Optimizer] = None,\n clipping: bool = True,\n counter: Optional[counting.Counter] = None,\n logger: Optional[loggers.Logger] = None,\n checkpoint: bool = True,\n ):\n \"\"\"Initializes the learner.\n\n Args:\n policy_network: the online (optimized) policy.\n critic_network: the online critic.\n target_policy_network: the target policy (which lags behind the online\n policy).\n target_critic_network: the target critic.\n discount: discount to use for TD updates.\n target_update_period: number of learner steps to perform before updating\n the target networks.\n dataset: dataset to learn from, whether fixed or from a replay buffer\n (see `acme.datasets.reverb.make_reverb_dataset` documentation).\n observation_network: an optional online network to process observations\n before the policy and the critic.\n target_observation_network: the target observation network.\n policy_optimizer: the optimizer to be applied to the DPG (policy) loss.\n critic_optimizer: the optimizer to be applied to the critic loss.\n clipping: whether to clip gradients by global norm.\n counter: counter object used to keep track of steps.\n logger: logger object to be used by learner.\n checkpoint: boolean indicating whether to checkpoint the learner.\n \"\"\"\n\n # Store online and target networks.\n self._policy_network = policy_network\n self._critic_network = critic_network\n self._target_policy_network = target_policy_network\n self._target_critic_network = target_critic_network\n\n # Make sure observation networks are snt.Module's so they have variables.\n self._observation_network = tf2_utils.to_sonnet_module(observation_network)\n self._target_observation_network = tf2_utils.to_sonnet_module(\n target_observation_network)\n\n # General learner book-keeping and loggers.\n self._counter = counter or counting.Counter()\n self._logger = logger 
or loggers.make_default_logger('learner')\n\n # Other learner parameters.\n self._discount = discount\n self._clipping = clipping\n\n # Necessary to track when to update target networks.\n self._num_steps = tf.Variable(0, dtype=tf.int32)\n self._target_update_period = target_update_period\n\n # Create an iterator to go through the dataset.\n # TODO(b/155086959): Fix type stubs and remove.\n self._iterator = iter(dataset) # pytype: disable=wrong-arg-types\n\n # Create optimizers if they aren't given.\n self._critic_optimizer = critic_optimizer or snt.optimizers.Adam(1e-4)\n self._policy_optimizer = policy_optimizer or snt.optimizers.Adam(1e-4)\n\n # Expose the variables.\n policy_network_to_expose = snt.Sequential(\n [self._target_observation_network, self._target_policy_network])\n self._variables = {\n 'critic': target_critic_network.variables,\n 'policy': policy_network_to_expose.variables,\n }\n\n self._checkpointer = tf2_savers.Checkpointer(\n time_delta_minutes=5,\n objects_to_save={\n 'counter': self._counter,\n 'policy': self._policy_network,\n 'critic': self._critic_network,\n 'target_policy': self._target_policy_network,\n 'target_critic': self._target_critic_network,\n 'policy_optimizer': self._policy_optimizer,\n 'critic_optimizer': self._critic_optimizer,\n 'num_steps': self._num_steps,\n },\n enable_checkpointing=checkpoint,\n )\n\n # Do not record timestamps until after the first learning step is done.\n # This is to avoid including the time it takes for actors to come online and\n # fill the replay buffer.\n self._timestamp = None\n\n @tf.function\n def _step(self):\n # Update target network.\n online_variables = (\n *self._observation_network.variables,\n *self._critic_network.variables,\n *self._policy_network.variables,\n )\n target_variables = (\n *self._target_observation_network.variables,\n *self._target_critic_network.variables,\n *self._target_policy_network.variables,\n )\n\n # Make online -> target network update ops.\n if tf.math.mod(self._num_steps, self._target_update_period) == 0:\n for src, dest in zip(online_variables, target_variables):\n dest.assign(src)\n self._num_steps.assign_add(1)\n\n # Get data from replay (dropping extras if any). Note there is no\n # extra data here because we do not insert any into Reverb.\n inputs = next(self._iterator)\n transitions: types.Transition = inputs.data\n\n # Cast the additional discount to match the environment discount dtype.\n discount = tf.cast(self._discount, dtype=transitions.discount.dtype)\n\n with tf.GradientTape(persistent=True) as tape:\n # Maybe transform the observation before feeding into policy and critic.\n # Transforming the observations this way at the start of the learning\n # step effectively means that the policy and critic share observation\n # network weights.\n o_tm1 = self._observation_network(transitions.observation)\n o_t = self._target_observation_network(transitions.next_observation)\n # This stop_gradient prevents gradients to propagate into the target\n # observation network. 
In addition, since the online policy network is\n # evaluated at o_t, this also means the policy loss does not influence\n # the observation network training.\n o_t = tree.map_structure(tf.stop_gradient, o_t)\n\n # Critic learning.\n q_tm1 = self._critic_network(o_tm1, transitions.action)\n q_t = self._target_critic_network(o_t, self._target_policy_network(o_t))\n\n # Squeeze into the shape expected by the td_learning implementation.\n q_tm1 = tf.squeeze(q_tm1, axis=-1) # [B]\n q_t = tf.squeeze(q_t, axis=-1) # [B]\n\n # Critic loss.\n critic_loss = trfl.td_learning(q_tm1, transitions.reward,\n discount * transitions.discount, q_t).loss\n critic_loss = tf.reduce_mean(critic_loss, axis=0)\n\n # Actor learning.\n dpg_a_t = self._policy_network(o_t)\n dpg_q_t = self._critic_network(o_t, dpg_a_t)\n\n # Actor loss. If clipping is true use dqda clipping and clip the norm.\n dqda_clipping = 1.0 if self._clipping else None\n policy_loss = losses.dpg(\n dpg_q_t,\n dpg_a_t,\n tape=tape,\n dqda_clipping=dqda_clipping,\n clip_norm=self._clipping)\n policy_loss = tf.reduce_mean(policy_loss, axis=0)\n\n # Get trainable variables.\n policy_variables = self._policy_network.trainable_variables\n critic_variables = (\n # In this agent, the critic loss trains the observation network.\n self._observation_network.trainable_variables +\n self._critic_network.trainable_variables)\n\n # Compute gradients.\n policy_gradients = tape.gradient(policy_loss, policy_variables)\n critic_gradients = tape.gradient(critic_loss, critic_variables)\n\n # Delete the tape manually because of the persistent=True flag.\n del tape\n\n # Maybe clip gradients.\n if self._clipping:\n policy_gradients = tf.clip_by_global_norm(policy_gradients, 40.)[0]\n critic_gradients = tf.clip_by_global_norm(critic_gradients, 40.)[0]\n\n # Apply gradients.\n self._policy_optimizer.apply(policy_gradients, policy_variables)\n self._critic_optimizer.apply(critic_gradients, critic_variables)\n\n # Losses to track.\n return {\n 'critic_loss': critic_loss,\n 'policy_loss': policy_loss,\n }\n\n def step(self):\n # Run the learning step.\n fetches = self._step()\n\n # Compute elapsed time.\n timestamp = time.time()\n elapsed_time = timestamp - self._timestamp if self._timestamp else 0\n self._timestamp = timestamp\n\n # Update our counts and record it.\n counts = self._counter.increment(steps=1, walltime=elapsed_time)\n fetches.update(counts)\n\n # Checkpoint and attempt to write the logs.\n self._checkpointer.save()\n self._logger.write(fetches)\n\n def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:\n return [tf2_utils.to_numpy(self._variables[name]) for name in names]\n"
] |
[
[
"numpy.prod"
],
[
"numpy.all"
],
[
"tensorflow.Variable",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.clip_by_global_norm",
"tensorflow.math.mod",
"tensorflow.GradientTape"
]
] |
Leajian/lpp-py
|
[
"299860a5d5f52189bb62e50cd4b3eda8aab01553"
] |
[
"lpIO.py"
] |
[
"import re\nimport json\nfrom numpy import array, squeeze\n\n\ndef sanityCheck(problem):\n hasNaturalConstraints = False\n keywordPattern = re.compile('max|min|s\\.?t\\.?|subject\\s*to|with|end', re.IGNORECASE)\n keywords = re.findall(keywordPattern, problem)\n\n if re.match('max|min', keywords[0], re.IGNORECASE):\n if len(keywords) >= 2 and re.match('s\\.?t\\.?|subject\\s*to', keywords[1], re.IGNORECASE):\n\n if len(keywords) == 4 and re.match('with', keywords[2], re.IGNORECASE):\n hasNaturalConstraints = True\n \n if not re.match('end', keywords[3], re.IGNORECASE):\n raise Exception('Expression \"end\" not found, include it after you end the problem\\'s description.')\n\n if len(keywords) == 3:\n print('WARNING! Expression \"with\" not found. Assuming all constraints are non-negative.')\n\n if not re.match('end', keywords[2], re.IGNORECASE):\n raise Exception('Expression \"end\" not found, include it after you end the problem\\'s description.')\n \n else:\n raise Exception('Expression \"s.t.\" or \"st\" or \"subject to\" not found, include it after you state the objective function.')\n else:\n raise Exception('Expression \"min\" or \"max\" not found, include it at the beginning of the problem\\'s description.')\n\n return hasNaturalConstraints\n\ndef openLP(fileName, hasNaturalConstraints=False):\n \"\"\"\n Description\n Opens the file which contains the linear problem description and splits\n it into segments so it can be parsed easier.\n Input\n string fileName The file's relative name.\n Output\n A list of containing segments of the problem, splitted on keywords.\n \"\"\"\n with open(fileName, 'r') as file:\n # Just note that read() function seeks until EOF,\n # so if it's called again, it has nothing.\n problem = file.read()\n\n # Simple sanity checks to avoid future problems and\n # also check if natural constraints are given.\n hasNaturalConstraints = sanityCheck(problem)\n\n # Cut the file into segments, from one keyword to another\n # (#1 max/min, #2 st, #optional with, #3 end).\n pattern = re.compile('s\\.?\\s*t\\.?|subject\\s*to|with|end', re.IGNORECASE)\n segmentedList = pattern.split(problem)\n\n # Unless 'with' natural constraints are given indeed,\n # we must return 3 parts.\n if hasNaturalConstraints:\n return segmentedList[:3], hasNaturalConstraints\n\n # Otherwise, we return only the first 2 parts,\n # but from 2 and beyond include the part with \"end\" delimiter,\n # which might contain nothing, a new line character or more than that\n # We don't care about content past the \"end\" delimiter.\n # Any other whitespace character is managed when necessary.\n # If there is any gibberish, the corresponding extractor function\n # is responsible to figure it out.\n return segmentedList[:2], hasNaturalConstraints\n\ndef writeLP2(MinMax, c, A, Eqin, b, naturalConstraints, inputFile, outputName=''):\n \"\"\"\n Description\n Writes the linear problem to a file in a presentable form.\n Input\n MinMax problem type\n c objective function's coefficients numpy.array\n A constraints' coefficients numpy.array\n Eqin constraints' types numpy.array\n b constraints' constants numpy.array\n naturalConstraints (optional) natural constraints' types numpy.array\n inputFile input file name\n outputName (optional) output file name\n Output\n A file which describes the problem in a presentable form.\n \"\"\"\n if outputName == '':\n outputName = '(LP-2) ' + inputFile\n with open(outputName, 'w+') as output:\n \"\"\"\n if MinMax == 1:\n output.write('max\\n')\n elif MinMax == -1:\n 
output.write('min\\n')\n \"\"\"\n output.write('MinMax = ' + str(MinMax) + '\\n\\n')\n output.write('c =\\n' + str(array(c)) + '\\n\\n') # 1 x n\n output.write('A =\\n' + str(array(A)) + '\\n\\n') # m x n\n output.write('Eqin =\\n' + str(array(Eqin).reshape(len(Eqin), 1)) + '\\n\\n') # m x 1\n output.write('b =\\n' + str(array(b).reshape(len(b), 1)) + '\\n\\n') # m x 1\n output.write('naturalConstraints =\\n' + str(squeeze(array(naturalConstraints).reshape(1, len(naturalConstraints))).tolist()) + '\\n\\n') # 1 x n\n\ndef writeLP2HumanReadable(MinMax, c, A, Eqin, b, naturalConstraints, inputFile, outputName=''):\n \"\"\"\n Description\n Writes the linear problem to a file in a human readable form.\n Input\n MinMax problem type\n c objective function's coefficients numpy.array\n A constraints' coefficients numpy.array\n Eqin constraints' types numpy.array\n b constraints' constants numpy.array\n naturalConstraints (optional) natural constraints' types numpy.array\n inputFile input file name\n outputName (optional) output file name\n Output\n A file which describes the problem in a human readable form.\n \"\"\"\n if outputName == '':\n outputName = '(LP-2) ' + inputFile\n with open(outputName, 'w+') as output:\n\n if MinMax == 1:\n output.write('max\\t')\n elif MinMax == -1:\n output.write('min\\t')\n\n # Enumarate each coefficient so we can name them\n for i, coeff in enumerate(c, start=1):\n # Ignore those with 0 coefficient\n if coeff == 0:\n output.write('\\t')\n continue\n\n # Put back the plus sign, unless it's the first term\n if str(coeff)[0] != '-' and i != 1:\n coeff = '+' + str(coeff)\n\n output.write(str(coeff) +'x' + str(i) + '\\t')\n output.write('\\n')\n\n output.write('s.t.')\n\n # For each row\n for i in zip(A, Eqin, b):\n output.write('\\t')\n\n # Enumarate each coefficient so we can name them\n for j, coeff in enumerate(i[0], start=1):\n # Ignore those with 0 coefficient\n if coeff == 0.0:\n output.write('\\t')\n continue\n\n # Put back the plus sign, unless it's the first term\n if str(coeff)[0] != '-' and j != 1:\n coeff = '+' + str(coeff)\n \n # Writting each term\n output.write(str(coeff) + 'x' + str(j) + '\\t')\n \n # Mapping the signs\n signs = {'0': '= ', '1':'>=', '-1':'<='}\n \n output.write(signs[str(squeeze(i[1]))] + ' ' + str(squeeze(i[2])) + '\\n')\n\n # Mapping the signs\n signs = {'0': 'free', '1':'>= 0', '-1':'<= 0'}\n for i, constr in enumerate(naturalConstraints, start=1):\n # Writting each constraint\n \n output.write('x' + str(i) + ' ' + signs[str(squeeze(constr))])\n if i != len(naturalConstraints):\n output.write(', ')\n output.write('\\n')\n\n\"\"\"\nJSON-related\n\"\"\"\n\ndef writeLP2json(MinMax, c, A, Eqin, b, naturalConstraints, inputFile, outputName=''):\n \"\"\"\n Description\n Writes the linear problem to a file in a serializable form.\n Input\n MinMax problem type\n c objective function's coefficients numpy.array\n A constraints' coefficients numpy.array\n Eqin constraints' types numpy.array\n b constraints' constants numpy.array\n naturalConstraints (optional) natural constraints' types numpy.array\n inputFile input file name\n outputName (optional) output file name\n Output\n A file which describes the problem in a serializable form.\n \"\"\"\n if outputName == '':\n outputName = '(LP-2) ' + inputFile + '.json'\n problem = {\n 'MinMax': MinMax,\n 'c': c.tolist(),\n 'A': A.tolist(),\n 'Eqin': Eqin.tolist(),\n 'b': b.tolist(),\n 'naturalConstraints': naturalConstraints\n }\n\n with open(outputName, 'w+') as output:\n json.dump(problem, output, 
indent=1)\n\ndef loadLP2json(inputFile):\n \"\"\"\n Description\n Returns a list of all information required for the for the linear problem.\n Input\n An LP-2 file name, which contains a problem parsed and saved by this parser in JSON format.\n Output\n In a list\n As floats\n int MinMax containing constraints' coefficients\n list c containing linear problem's coefficients array and its dimensions\n list A containing constraints' coefficients array and its dimensions\n list Eqin containing constraints' inequalities array and its dimensions\n list b containing constraints' constant parts array and its dimensions\n list naturalConstraints containing natural constraints\n \"\"\"\n with open(inputFile, 'r') as f:\n problem = json.load(f)\n\n MinMax = problem['MinMax']\n c = array(problem['c'])\n A = array(problem['A'])\n Eqin = array(problem['Eqin'])\n b = array(problem['b'])\n naturalConstraints = problem['naturalConstraints']\n \n return MinMax, c, A, Eqin, b, naturalConstraints\n"
] |
[
[
"numpy.squeeze",
"numpy.array"
]
] |